Diffstat (limited to 'drivers')
drivers/acpi/acpi_video.c | 6
drivers/acpi/battery.c | 2
drivers/acpi/button.c | 26
drivers/acpi/dock.c | 3
drivers/acpi/dptf/dptf_pch_fivr.c | 1
drivers/acpi/dptf/dptf_power.c | 2
drivers/acpi/dptf/int340x_thermal.c | 6
drivers/acpi/event.c | 2
drivers/acpi/evged.c | 2
drivers/acpi/fan.c | 1
drivers/acpi/internal.h | 2
drivers/acpi/nfit/core.c | 12
drivers/acpi/pci_irq.c | 2
drivers/acpi/pci_link.c | 12
drivers/acpi/pci_mcfg.c | 2
drivers/acpi/power.c | 6
drivers/acpi/processor_perflib.c | 6
drivers/acpi/sbs.c | 2
drivers/acpi/sbshc.c | 2
drivers/acpi/sbshc.h | 6
drivers/acpi/scan.c | 2
drivers/acpi/video_detect.c | 16
drivers/acpi/wakeup.c | 4
drivers/ata/sata_nv.c | 2
drivers/base/core.c | 10
drivers/base/dd.c | 9
drivers/base/power/runtime.c | 57
drivers/block/loop.c | 3
drivers/block/nbd.c | 10
drivers/block/null_blk.h | 2
drivers/block/null_blk_zoned.c | 138
drivers/block/xsysace.c | 49
drivers/bluetooth/btintel.h | 2
drivers/char/tpm/eventlog/efi.c | 5
drivers/char/tpm/tpm_tis.c | 29
drivers/cpufreq/Kconfig | 2
drivers/cpufreq/cpufreq.c | 19
drivers/cpufreq/cpufreq_governor.h | 2
drivers/cpufreq/cpufreq_performance.c | 1
drivers/cpufreq/cpufreq_powersave.c | 1
drivers/cpufreq/e_powersaver.c | 1
drivers/cpufreq/intel_pstate.c | 27
drivers/cpufreq/longhaul.c | 1
drivers/cpufreq/speedstep-lib.c | 2
drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c | 2
drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c | 2
drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c | 2
drivers/dma/Kconfig | 18
drivers/dma/Makefile | 1
drivers/dma/mic_x100_dma.c | 770
drivers/dma/mic_x100_dma.h | 275
drivers/firmware/arm_scmi/base.c | 2
drivers/firmware/arm_scmi/clock.c | 2
drivers/firmware/arm_scmi/common.h | 2
drivers/firmware/arm_scmi/driver.c | 8
drivers/firmware/arm_scmi/notify.c | 22
drivers/firmware/arm_scmi/perf.c | 2
drivers/firmware/arm_scmi/reset.c | 4
drivers/firmware/arm_scmi/sensors.c | 2
drivers/firmware/arm_scmi/smc.c | 2
drivers/gpio/gpio-aspeed.c | 1
drivers/gpio/gpio-dwapb.c | 4
drivers/gpio/gpio-omap.c | 12
drivers/gpio/gpio-pcie-idio-24.c | 62
drivers/gpio/gpio-sifive.c | 2
drivers/gpio/gpiolib-cdev.h | 15
drivers/gpio/gpiolib.c | 18
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 42
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 1
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 10
drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 6
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 1
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 8
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 4
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 7
drivers/gpu/drm/amd/amdgpu/cik.c | 4
drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 27
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 4
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 12
drivers/gpu/drm/amd/amdgpu/nv.c | 12
drivers/gpu/drm/amd/amdgpu/psp_v12_0.c | 7
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 6
drivers/gpu/drm/amd/amdgpu/soc15.c | 14
drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 2
drivers/gpu/drm/amd/display/Kconfig | 1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 8
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 6
drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c | 5
drivers/gpu/drm/amd/display/dc/core/dc.c | 4
drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 2
drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c | 3
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c | 12
drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c | 12
drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c | 4
drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c | 4
drivers/gpu/drm/amd/display/dc/os_types.h | 33
drivers/gpu/drm/amd/display/include/dal_asic_id.h | 4
drivers/gpu/drm/amd/include/amd_shared.h | 1
drivers/gpu/drm/amd/pm/inc/hwmgr.h | 1
drivers/gpu/drm/amd/pm/inc/smumgr.h | 2
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ci_baco.c | 7
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c | 34
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c | 29
drivers/gpu/drm/amd/pm/powerplay/smumgr/smumgr.c | 8
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 11
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 36
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 11
drivers/gpu/drm/bridge/cadence/Kconfig | 2
drivers/gpu/drm/drm_dp_helper.c | 12
drivers/gpu/drm/drm_edid.c | 2
drivers/gpu/drm/drm_gem.c | 4
drivers/gpu/drm/drm_gem_shmem_helper.c | 7
drivers/gpu/drm/drm_prime.c | 1
drivers/gpu/drm/gma500/psb_irq.c | 34
drivers/gpu/drm/i915/display/intel_display.c | 4
drivers/gpu/drm/i915/display/intel_psr.c | 2
drivers/gpu/drm/i915/gem/i915_gem_domain.c | 28
drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 2
drivers/gpu/drm/i915/gem/i915_gem_phys.c | 55
drivers/gpu/drm/i915/gt/intel_engine.h | 55
drivers/gpu/drm/i915/gt/intel_engine_cs.c | 3
drivers/gpu/drm/i915/gt/intel_lrc.c | 31
drivers/gpu/drm/i915/gt/intel_timeline.c | 18
drivers/gpu/drm/i915/gt/intel_timeline_types.h | 2
drivers/gpu/drm/i915/gvt/handlers.c | 47
drivers/gpu/drm/i915/gvt/scheduler.c | 15
drivers/gpu/drm/i915/i915_gem.c | 32
drivers/gpu/drm/i915/i915_pci.c | 1
drivers/gpu/drm/i915/i915_vma.c | 6
drivers/gpu/drm/i915/intel_memory_region.c | 2
drivers/gpu/drm/i915/selftests/intel_memory_region.c | 77
drivers/gpu/drm/i915/selftests/mock_region.c | 2
drivers/gpu/drm/imx/dw_hdmi-imx.c | 17
drivers/gpu/drm/imx/imx-drm-core.c | 10
drivers/gpu/drm/imx/imx-ldb.c | 10
drivers/gpu/drm/imx/imx-tve.c | 40
drivers/gpu/drm/imx/parallel-display.c | 20
drivers/gpu/drm/mcde/mcde_drv.c | 8
drivers/gpu/drm/nouveau/dispnv50/core.h | 2
drivers/gpu/drm/nouveau/dispnv50/core507d.c | 41
drivers/gpu/drm/nouveau/dispnv50/core907d.c | 36
drivers/gpu/drm/nouveau/dispnv50/core917d.c | 2
drivers/gpu/drm/nouveau/include/nvhw/class/cl507d.h | 5
drivers/gpu/drm/nouveau/include/nvhw/class/cl907d.h | 4
drivers/gpu/drm/nouveau/nouveau_connector.c | 36
drivers/gpu/drm/nouveau/nouveau_dp.c | 31
drivers/gpu/drm/nouveau/nouveau_gem.c | 3
drivers/gpu/drm/nouveau/nouveau_svm.c | 14
drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 39
drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c | 25
drivers/gpu/drm/panfrost/panfrost_drv.c | 5
drivers/gpu/drm/panfrost/panfrost_gem.c | 4
drivers/gpu/drm/panfrost/panfrost_gem.h | 2
drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c | 14
drivers/gpu/drm/sun4i/sun4i_frontend.c | 36
drivers/gpu/drm/sun4i/sun4i_frontend.h | 6
drivers/gpu/drm/v3d/v3d_gem.c | 1
drivers/gpu/drm/vc4/vc4_bo.c | 9
drivers/gpu/drm/vc4/vc4_drv.c | 40
drivers/gpu/drm/vc4/vc4_drv.h | 21
drivers/gpu/drm/vc4/vc4_gem.c | 19
drivers/gpu/drm/vc4/vc4_hdmi.c | 6
drivers/gpu/drm/vc4/vc4_hvs.c | 4
drivers/gpu/drm/vc4/vc4_kms.c | 80
drivers/gpu/drm/vc4/vc4_v3d.c | 12
drivers/gpu/ipu-v3/ipu-common.c | 67
drivers/hv/hv_balloon.c | 2
drivers/hwtracing/coresight/coresight-core.c | 1
drivers/hwtracing/coresight/coresight-cti-sysfs.c | 7
drivers/hwtracing/coresight/coresight-etm-perf.c | 2
drivers/i2c/busses/Kconfig | 2
drivers/i2c/busses/i2c-designware-slave.c | 52
drivers/i2c/busses/i2c-mlxbf.c | 204
drivers/i2c/busses/i2c-mt65xx.c | 8
drivers/i2c/busses/i2c-sh_mobile.c | 86
drivers/idle/intel_idle.c | 2
drivers/infiniband/core/cma.c | 48
drivers/infiniband/core/uverbs_std_types_device.c | 3
drivers/infiniband/hw/mlx5/main.c | 6
drivers/infiniband/hw/qedr/qedr_iw_cm.c | 1
drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h | 2
drivers/infiniband/sw/rdmavt/vt.c | 7
drivers/infiniband/sw/rxe/rxe_av.c | 35
drivers/infiniband/sw/rxe/rxe_net.c | 2
drivers/infiniband/sw/rxe/rxe_verbs.c | 6
drivers/infiniband/sw/siw/siw_main.c | 7
drivers/infiniband/ulp/iser/iser_verbs.c | 2
drivers/infiniband/ulp/rtrs/rtrs-clt.c | 4
drivers/infiniband/ulp/srpt/ib_srpt.c | 13
drivers/infiniband/ulp/srpt/ib_srpt.h | 1
drivers/interconnect/core.c | 3
drivers/interconnect/qcom/icc-rpmh.c | 7
drivers/interconnect/qcom/sc7180.c | 6
drivers/interconnect/qcom/sdm845.c | 8
drivers/interconnect/qcom/sm8150.c | 7
drivers/interconnect/qcom/sm8250.c | 7
drivers/iommu/amd/amd_iommu_types.h | 6
drivers/iommu/intel/iommu.c | 8
drivers/iommu/intel/svm.c | 8
drivers/iommu/iommu.c | 2
drivers/irqchip/Kconfig | 3
drivers/irqchip/irq-bcm2836.c | 2
drivers/irqchip/irq-mst-intc.c | 4
drivers/irqchip/irq-renesas-intc-irqpin.c | 8
drivers/irqchip/irq-sifive-plic.c | 10
drivers/irqchip/irq-stm32-exti.c | 4
drivers/irqchip/irq-ti-sci-inta.c | 83
drivers/message/fusion/mptscsih.c | 13
drivers/misc/Kconfig | 1
drivers/misc/Makefile | 1
drivers/misc/mei/hw.h | 6
drivers/misc/mic/Kconfig | 141
drivers/misc/mic/Makefile | 12
drivers/misc/mic/bus/Makefile | 9
drivers/misc/mic/bus/cosm_bus.c | 130
drivers/misc/mic/bus/cosm_bus.h | 125
drivers/misc/mic/bus/mic_bus.c | 194
drivers/misc/mic/bus/scif_bus.c | 201
drivers/misc/mic/bus/scif_bus.h | 125
drivers/misc/mic/bus/vop_bus.c | 194
drivers/misc/mic/bus/vop_bus.h | 129
drivers/misc/mic/card/Makefile | 11
drivers/misc/mic/card/mic_debugfs.c | 85
drivers/misc/mic/card/mic_device.c | 417
drivers/misc/mic/card/mic_device.h | 137
drivers/misc/mic/card/mic_x100.c | 347
drivers/misc/mic/card/mic_x100.h | 37
drivers/misc/mic/common/mic_dev.h | 55
drivers/misc/mic/cosm/Makefile | 11
drivers/misc/mic/cosm/cosm_debugfs.c | 116
drivers/misc/mic/cosm/cosm_main.c | 382
drivers/misc/mic/cosm/cosm_main.h | 61
drivers/misc/mic/cosm/cosm_scif_server.c | 399
drivers/misc/mic/cosm/cosm_sysfs.c | 449
drivers/misc/mic/cosm_client/Makefile | 8
drivers/misc/mic/cosm_client/cosm_scif_client.c | 269
drivers/misc/mic/host/Makefile | 12
drivers/misc/mic/host/mic_boot.c | 588
drivers/misc/mic/host/mic_debugfs.c | 149
drivers/misc/mic/host/mic_device.h | 157
drivers/misc/mic/host/mic_intr.c | 635
drivers/misc/mic/host/mic_intr.h | 137
drivers/misc/mic/host/mic_main.c | 335
drivers/misc/mic/host/mic_smpt.c | 427
drivers/misc/mic/host/mic_smpt.h | 87
drivers/misc/mic/host/mic_x100.c | 585
drivers/misc/mic/host/mic_x100.h | 77
drivers/misc/mic/scif/Makefile | 21
drivers/misc/mic/scif/scif_api.c | 1485
drivers/misc/mic/scif/scif_debugfs.c | 116
drivers/misc/mic/scif/scif_dma.c | 1940
drivers/misc/mic/scif/scif_epd.c | 357
drivers/misc/mic/scif/scif_epd.h | 200
drivers/misc/mic/scif/scif_fd.c | 462
drivers/misc/mic/scif/scif_fence.c | 783
drivers/misc/mic/scif/scif_main.c | 351
drivers/misc/mic/scif/scif_main.h | 274
drivers/misc/mic/scif/scif_map.h | 127
drivers/misc/mic/scif/scif_mmap.c | 690
drivers/misc/mic/scif/scif_nm.c | 229
drivers/misc/mic/scif/scif_nodeqp.c | 1349
drivers/misc/mic/scif/scif_nodeqp.h | 221
drivers/misc/mic/scif/scif_peer_bus.c | 175
drivers/misc/mic/scif/scif_peer_bus.h | 23
drivers/misc/mic/scif/scif_ports.c | 116
drivers/misc/mic/scif/scif_rb.c | 240
drivers/misc/mic/scif/scif_rb.h | 100
drivers/misc/mic/scif/scif_rma.c | 1760
drivers/misc/mic/scif/scif_rma.h | 477
drivers/misc/mic/scif/scif_rma_list.c | 282
drivers/misc/mic/scif/scif_rma_list.h | 48
drivers/misc/mic/vop/Makefile | 10
drivers/misc/mic/vop/vop_debugfs.c | 184
drivers/misc/mic/vop/vop_main.c | 784
drivers/misc/mic/vop/vop_main.h | 158
drivers/misc/mic/vop/vop_vringh.c | 1166
drivers/mmc/host/renesas_sdhi_core.c | 14
drivers/mmc/host/sdhci-esdhc.h | 2
drivers/mmc/host/sdhci-of-esdhc.c | 30
drivers/mmc/host/sdhci.c | 6
drivers/mmc/host/tmio_mmc_core.c | 7
drivers/mtd/nand/raw/fsl_ifc_nand.c | 43
drivers/mtd/nand/raw/mxc_nand.c | 17
drivers/mtd/nand/raw/stm32_fmc2_nand.c | 15
drivers/mtd/spi-nor/core.c | 13
drivers/net/can/dev.c | 14
drivers/net/can/flexcan.c | 12
drivers/net/can/peak_canfd/peak_canfd.c | 11
drivers/net/can/rx-offload.c | 4
drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c | 22
drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c | 18
drivers/net/can/ti_hecc.c | 8
drivers/net/can/usb/peak_usb/pcan_usb_core.c | 51
drivers/net/can/usb/peak_usb/pcan_usb_fd.c | 48
drivers/net/can/xilinx_can.c | 6
drivers/net/dsa/mv88e6xxx/devlink.c | 4
drivers/net/dsa/qca8k.c | 4
drivers/net/ethernet/broadcom/bnxt/bnxt.c | 49
drivers/net/ethernet/broadcom/bnxt/bnxt.h | 1
drivers/net/ethernet/cadence/macb_main.c | 3
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 3
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 2
drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c | 56
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 1
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 6
drivers/net/ethernet/chelsio/cxgb4/sge.c | 111
drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h | 4
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c | 582
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h | 1
drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c | 31
drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c | 3
drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c | 7
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 28
drivers/net/ethernet/freescale/enetc/enetc_qos.c | 2
drivers/net/ethernet/freescale/fec.h | 6
drivers/net/ethernet/freescale/fec_main.c | 29
drivers/net/ethernet/freescale/gianfar.c | 14
drivers/net/ethernet/freescale/ucc_geth.c | 2
drivers/net/ethernet/google/gve/gve_adminq.h | 2
drivers/net/ethernet/google/gve/gve_main.c | 5
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 2
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 2
drivers/net/ethernet/ibm/ibmveth.c | 6
drivers/net/ethernet/ibm/ibmvnic.c | 44
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 26
drivers/net/ethernet/intel/i40e/i40e_xsk.c | 2
drivers/net/ethernet/intel/igc/igc_main.c | 14
drivers/net/ethernet/marvell/prestera/Kconfig | 1
drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c | 6
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c | 72
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c | 4
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c | 4
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c | 14
drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 1
drivers/net/ethernet/mellanox/mlx5/core/en_rep.h | 2
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 2
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 2
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 2
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 7
drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h | 5
drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c | 23
drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h | 2
drivers/net/ethernet/mellanox/mlxsw/core.c | 5
drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 9
drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 1
drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c | 30
drivers/net/ethernet/microchip/lan743x_main.c | 24
drivers/net/ethernet/microchip/lan743x_main.h | 3
drivers/net/ethernet/pensando/ionic/ionic_dev.c | 4
drivers/net/ethernet/pensando/ionic/ionic_dev.h | 2
drivers/net/ethernet/pensando/ionic/ionic_ethtool.c | 5
drivers/net/ethernet/pensando/ionic/ionic_fw.c | 6
drivers/net/ethernet/pensando/ionic/ionic_lif.c | 29
drivers/net/ethernet/pensando/ionic/ionic_main.c | 4
drivers/net/ethernet/pensando/ionic/ionic_stats.h | 2
drivers/net/ethernet/pensando/ionic/ionic_txrx.c | 47
drivers/net/ethernet/pensando/ionic/ionic_txrx.h | 1
drivers/net/ethernet/realtek/r8169_main.c | 36
drivers/net/ethernet/renesas/ravb_main.c | 10
drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c | 14
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 1
drivers/net/ethernet/ti/cpsw_ethtool.c | 1
drivers/net/ethernet/ti/cpsw_priv.c | 5
drivers/net/gtp.c | 16
drivers/net/ipa/gsi_trans.c | 21
drivers/net/phy/realtek.c | 2
drivers/net/phy/sfp.c | 3
drivers/net/usb/qmi_wwan.c | 1
drivers/net/vrf.c | 92
drivers/net/wan/cosa.c | 1
drivers/nvme/host/core.c | 12
drivers/nvme/host/fc.c | 270
drivers/nvme/host/nvme.h | 1
drivers/nvme/host/pci.c | 23
drivers/nvme/host/rdma.c | 26
drivers/nvme/host/tcp.c | 16
drivers/nvme/target/core.c | 4
drivers/nvme/target/trace.h | 21
drivers/of/address.c | 4
drivers/of/device.c | 6
drivers/of/of_reserved_mem.c | 13
drivers/opp/core.c | 9
drivers/opp/of.c | 2
drivers/pci/controller/dwc/pcie-designware-host.c | 8
drivers/pci/controller/pci-mvebu.c | 23
drivers/pci/pci.c | 9
drivers/pinctrl/aspeed/pinctrl-aspeed.c | 7
drivers/pinctrl/intel/pinctrl-intel.c | 40
drivers/pinctrl/pinctrl-amd.c | 6
drivers/pinctrl/pinctrl-ingenic.c | 72
drivers/pinctrl/pinctrl-mcp23s08_spi.c | 4
drivers/pinctrl/pinctrl-rockchip.c | 30
drivers/pinctrl/qcom/pinctrl-msm.c | 32
drivers/pinctrl/qcom/pinctrl-sm8250.c | 18
drivers/pnp/core.c | 4
drivers/powercap/intel_rapl_common.c | 2
drivers/powercap/powercap_sys.c | 4
drivers/regulator/core.c | 2
drivers/s390/crypto/ap_bus.c | 14
drivers/s390/crypto/pkey_api.c | 30
drivers/s390/crypto/zcrypt_card.c | 13
drivers/s390/crypto/zcrypt_queue.c | 6
drivers/s390/net/ism_drv.c | 2
drivers/scsi/device_handler/scsi_dh_alua.c | 9
drivers/scsi/hisi_sas/hisi_sas_main.c | 2
drivers/scsi/hpsa.c | 4
drivers/scsi/ibmvscsi/ibmvscsi.c | 36
drivers/scsi/mpt3sas/mpt3sas_base.c | 7
drivers/scsi/qla2xxx/qla_nvme.c | 6
drivers/scsi/qla2xxx/qla_tmpl.c | 4
drivers/scsi/scsi_scan.c | 7
drivers/scsi/ufs/ufshcd.c | 34
drivers/scsi/ufs/ufshcd.h | 2
drivers/soc/ti/ti_sci_pm_domains.c | 2
drivers/spi/spi-bcm2835.c | 15
drivers/spi/spi-fsl-dspi.c | 10
drivers/spi/spi-imx.c | 23
drivers/staging/comedi/drivers/cb_pcidas.c | 1
drivers/staging/fieldbus/anybuss/arcx-anybus.c | 2
drivers/staging/octeon/ethernet-mdio.c | 6
drivers/staging/octeon/ethernet-rx.c | 34
drivers/staging/octeon/ethernet.c | 9
drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c | 19
drivers/staging/wfx/Documentation/devicetree/bindings/net/wireless/silabs,wfx.yaml | 2
drivers/staging/wfx/bh.c | 2
drivers/staging/wfx/data_tx.c | 6
drivers/target/target_core_user.c | 2
drivers/tee/tee_core.c | 3
drivers/tty/serial/21285.c | 12
drivers/tty/serial/8250/8250_mtk.c | 2
drivers/tty/serial/Kconfig | 1
drivers/tty/serial/fsl_lpuart.c | 13
drivers/tty/serial/serial_txx9.c | 3
drivers/tty/tty_io.c | 6
drivers/tty/vt/keyboard.c | 39
drivers/tty/vt/vt.c | 24
drivers/tty/vt/vt_ioctl.c | 47
drivers/usb/cdns3/ep0.c | 65
drivers/usb/cdns3/gadget.c | 115
drivers/usb/cdns3/gadget.h | 5
drivers/usb/class/cdc-acm.c | 12
drivers/usb/class/cdc-acm.h | 3
drivers/usb/core/driver.c | 30
drivers/usb/core/generic.c | 4
drivers/usb/core/quirks.c | 3
drivers/usb/core/usb.h | 2
drivers/usb/dwc2/platform.c | 3
drivers/usb/dwc3/core.c | 2
drivers/usb/dwc3/core.h | 2
drivers/usb/dwc3/dwc3-pci.c | 4
drivers/usb/dwc3/ep0.c | 3
drivers/usb/gadget/composite.c | 2
drivers/usb/gadget/legacy/raw_gadget.c | 5
drivers/usb/gadget/udc/fsl_udc_core.c | 2
drivers/usb/gadget/udc/goku_udc.c | 2
drivers/usb/host/ehci-tegra.c | 4
drivers/usb/host/fsl-mph-dr-of.c | 9
drivers/usb/host/xhci-mem.c | 4
drivers/usb/host/xhci-pci.c | 17
drivers/usb/host/xhci.c | 5
drivers/usb/host/xhci.h | 1
drivers/usb/misc/apple-mfi-fastcharge.c | 21
drivers/usb/mtu3/mtu3_gadget.c | 1
drivers/usb/serial/cyberjack.c | 7
drivers/usb/serial/option.c | 10
drivers/usb/typec/mux.c | 2
drivers/usb/typec/stusb160x.c | 24
drivers/usb/typec/tcpm/tcpm.c | 6
drivers/vdpa/mlx5/core/mr.c | 5
drivers/vdpa/vdpa_sim/vdpa_sim.c | 33
drivers/vfio/fsl-mc/vfio_fsl_mc.c | 10
drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c | 2
drivers/vfio/pci/vfio_pci.c | 2
drivers/vfio/pci/vfio_pci_rdwr.c | 43
drivers/vfio/platform/vfio_platform_common.c | 3
drivers/vfio/vfio_iommu_type1.c | 17
drivers/vhost/vdpa.c | 173
drivers/video/fbdev/hyperv_fb.c | 10
drivers/xen/swiotlb-xen.c | 3
479 files changed, 4033 insertions(+), 24991 deletions(-)
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index bc96457c9e25..a322a7bd286b 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -578,7 +578,7 @@ acpi_video_bqc_value_to_level(struct acpi_video_device *device,
ACPI_VIDEO_FIRST_LEVEL - 1 - bqc_value;
level = device->brightness->levels[bqc_value +
- ACPI_VIDEO_FIRST_LEVEL];
+ ACPI_VIDEO_FIRST_LEVEL];
} else {
level = bqc_value;
}
@@ -990,8 +990,8 @@ set_level:
goto out_free_levels;
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "found %d brightness levels\n",
- br->count - ACPI_VIDEO_FIRST_LEVEL));
+ "found %d brightness levels\n",
+ br->count - ACPI_VIDEO_FIRST_LEVEL));
return 0;
out_free_levels:
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index cab4af532f36..08ee1c7b12e0 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -987,7 +987,7 @@ static int acpi_battery_update(struct acpi_battery *battery, bool resume)
*/
if ((battery->state & ACPI_BATTERY_STATE_CRITICAL) ||
(test_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags) &&
- (battery->capacity_now <= battery->alarm)))
+ (battery->capacity_now <= battery->alarm)))
acpi_pm_wakeup_event(&battery->device->dev);
return result;
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index da4b125ab4c3..0d93a5ef4d07 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -74,19 +74,6 @@ MODULE_DEVICE_TABLE(acpi, button_device_ids);
/* Please keep this list sorted alphabetically by vendor and model */
static const struct dmi_system_id dmi_lid_quirks[] = {
{
- /*
- * Acer Switch 10 SW5-012. _LID method messes with home and
- * power button GPIO IRQ settings causing an interrupt storm on
- * both GPIOs. This is unfixable without a DSDT override, so we
- * have to disable the lid-switch functionality altogether :|
- */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW5-012"),
- },
- .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_DISABLED,
- },
- {
/* GP-electronic T701, _LID method points to a floating GPIO */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
@@ -102,7 +89,18 @@ static const struct dmi_system_id dmi_lid_quirks[] = {
*/
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
- DMI_MATCH(DMI_PRODUCT_NAME, "E2215T MD60198"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "E2215T"),
+ },
+ .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
+ },
+ {
+ /*
+ * Medion Akoya E2228T, notification of the LID device only
+ * happens on close, not on open and _LID always returns closed.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "E2228T"),
},
.driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
},
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 45d4b7b69de8..24e076f44d23 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -231,7 +231,8 @@ static void hot_remove_dock_devices(struct dock_station *ds)
* between them).
*/
list_for_each_entry_reverse(dd, &ds->dependent_devices, list)
- dock_hotplug_event(dd, ACPI_NOTIFY_EJECT_REQUEST, false);
+ dock_hotplug_event(dd, ACPI_NOTIFY_EJECT_REQUEST,
+ DOCK_CALL_HANDLER);
list_for_each_entry_reverse(dd, &ds->dependent_devices, list)
acpi_bus_trim(dd->adev);
diff --git a/drivers/acpi/dptf/dptf_pch_fivr.c b/drivers/acpi/dptf/dptf_pch_fivr.c
index 4c1992fce150..5fca18296bf6 100644
--- a/drivers/acpi/dptf/dptf_pch_fivr.c
+++ b/drivers/acpi/dptf/dptf_pch_fivr.c
@@ -106,6 +106,7 @@ static int pch_fivr_remove(struct platform_device *pdev)
static const struct acpi_device_id pch_fivr_device_ids[] = {
{"INTC1045", 0},
+ {"INTC1049", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, pch_fivr_device_ids);
diff --git a/drivers/acpi/dptf/dptf_power.c b/drivers/acpi/dptf/dptf_power.c
index 06741305fc77..a24d5d7aa117 100644
--- a/drivers/acpi/dptf/dptf_power.c
+++ b/drivers/acpi/dptf/dptf_power.c
@@ -229,6 +229,8 @@ static const struct acpi_device_id int3407_device_ids[] = {
{"INT3532", 0},
{"INTC1047", 0},
{"INTC1050", 0},
+ {"INTC1060", 0},
+ {"INTC1061", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, int3407_device_ids);
diff --git a/drivers/acpi/dptf/int340x_thermal.c b/drivers/acpi/dptf/int340x_thermal.c
index 8d420c7e7178..d14025a85ce8 100644
--- a/drivers/acpi/dptf/int340x_thermal.c
+++ b/drivers/acpi/dptf/int340x_thermal.c
@@ -25,10 +25,16 @@ static const struct acpi_device_id int340x_thermal_device_ids[] = {
{"INT340A"},
{"INT340B"},
{"INTC1040"},
+ {"INTC1041"},
{"INTC1043"},
{"INTC1044"},
{"INTC1045"},
+ {"INTC1046"},
{"INTC1047"},
+ {"INTC1048"},
+ {"INTC1049"},
+ {"INTC1060"},
+ {"INTC1061"},
{""},
};
diff --git a/drivers/acpi/event.c b/drivers/acpi/event.c
index 170643927044..92e59f45329b 100644
--- a/drivers/acpi/event.c
+++ b/drivers/acpi/event.c
@@ -31,7 +31,7 @@ int acpi_notifier_call_chain(struct acpi_device *dev, u32 type, u32 data)
event.type = type;
event.data = data;
return (blocking_notifier_call_chain(&acpi_chain_head, 0, (void *)&event)
- == NOTIFY_BAD) ? -EINVAL : 0;
+ == NOTIFY_BAD) ? -EINVAL : 0;
}
EXPORT_SYMBOL(acpi_notifier_call_chain);
diff --git a/drivers/acpi/evged.c b/drivers/acpi/evged.c
index b1a7f8d6965e..fe6b6792c8bb 100644
--- a/drivers/acpi/evged.c
+++ b/drivers/acpi/evged.c
@@ -101,7 +101,7 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares,
switch (gsi) {
case 0 ... 255:
- sprintf(ev_name, "_%c%02hhX",
+ sprintf(ev_name, "_%c%02X",
trigger == ACPI_EDGE_SENSITIVE ? 'E' : 'L', gsi);
if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle)))
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 62873388b24f..48354f82fba6 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -27,6 +27,7 @@ static const struct acpi_device_id fan_device_ids[] = {
{"PNP0C0B", 0},
{"INT3404", 0},
{"INTC1044", 0},
+ {"INTC1048", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, fan_device_ids);
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 43411a7457cd..e3638bafb941 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -134,7 +134,7 @@ int acpi_add_power_resource(acpi_handle handle);
void acpi_power_add_remove_device(struct acpi_device *adev, bool add);
int acpi_power_wakeup_list_init(struct list_head *list, int *system_level);
int acpi_device_sleep_wake(struct acpi_device *dev,
- int enable, int sleep_state, int dev_state);
+ int enable, int sleep_state, int dev_state);
int acpi_power_get_inferred_state(struct acpi_device *device, int *state);
int acpi_power_on_resources(struct acpi_device *device, int state);
int acpi_power_transition(struct acpi_device *device, int state);
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 756227837b3b..442608220b5c 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -1564,7 +1564,7 @@ static ssize_t format1_show(struct device *dev,
le16_to_cpu(nfit_dcr->dcr->code));
break;
}
- if (rc != ENXIO)
+ if (rc != -ENXIO)
break;
}
mutex_unlock(&acpi_desc->init_mutex);
@@ -2175,10 +2175,10 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
* these commands.
*/
enum nfit_aux_cmds {
- NFIT_CMD_TRANSLATE_SPA = 5,
- NFIT_CMD_ARS_INJECT_SET = 7,
- NFIT_CMD_ARS_INJECT_CLEAR = 8,
- NFIT_CMD_ARS_INJECT_GET = 9,
+ NFIT_CMD_TRANSLATE_SPA = 5,
+ NFIT_CMD_ARS_INJECT_SET = 7,
+ NFIT_CMD_ARS_INJECT_CLEAR = 8,
+ NFIT_CMD_ARS_INJECT_GET = 9,
};
static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
@@ -2632,7 +2632,7 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
nfit_blk->bdw_offset = nfit_mem->bdw->offset;
mmio = &nfit_blk->mmio[BDW];
mmio->addr.base = devm_nvdimm_memremap(dev, nfit_mem->spa_bdw->address,
- nfit_mem->spa_bdw->length, nd_blk_memremap_flags(ndbr));
+ nfit_mem->spa_bdw->length, nd_blk_memremap_flags(ndbr));
if (!mmio->addr.base) {
dev_dbg(dev, "%s failed to map bdw\n",
nvdimm_name(nvdimm));
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index dea8a60e18a4..14ee631cb7cf 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -175,7 +175,7 @@ static int acpi_pci_irq_check_entry(acpi_handle handle, struct pci_dev *dev,
* configure the IRQ assigned to this slot|dev|pin. The 'source_index'
* indicates which resource descriptor in the resource template (of
* the link device) this interrupt is allocated from.
- *
+ *
* NOTE: Don't query the Link Device for IRQ information at this time
* because Link Device enumeration may not have occurred yet
* (e.g. exists somewhere 'below' this _PRT entry in the ACPI
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index 606da5d77ad3..fb4c5632a232 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -6,8 +6,8 @@
* Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
* Copyright (C) 2002 Dominik Brodowski <devel@brodo.de>
*
- * TBD:
- * 1. Support more than one IRQ resource entry per link device (index).
+ * TBD:
+ * 1. Support more than one IRQ resource entry per link device (index).
* 2. Implement start/stop mechanism and use ACPI Bus Driver facilities
* for IRQ management (e.g. start()->_SRS).
*/
@@ -249,8 +249,8 @@ static int acpi_pci_link_get_current(struct acpi_pci_link *link)
}
}
- /*
- * Query and parse _CRS to get the current IRQ assignment.
+ /*
+ * Query and parse _CRS to get the current IRQ assignment.
*/
status = acpi_walk_resources(link->device->handle, METHOD_NAME__CRS,
@@ -396,7 +396,7 @@ static int acpi_pci_link_set(struct acpi_pci_link *link, int irq)
/*
* "acpi_irq_balance" (default in APIC mode) enables ACPI to use PIC Interrupt
* Link Devices to move the PIRQs around to minimize sharing.
- *
+ *
* "acpi_irq_nobalance" (default in PIC mode) tells ACPI not to move any PIC IRQs
* that the BIOS has already set to active. This is necessary because
* ACPI has no automatic means of knowing what ISA IRQs are used. Note that
@@ -414,7 +414,7 @@ static int acpi_pci_link_set(struct acpi_pci_link *link, int irq)
*
* Note that PCI IRQ routers have a list of possible IRQs,
* which may not include the IRQs this table says are available.
- *
+ *
* Since this heuristic can't tell the difference between a link
* that no device will attach to, vs. a link which may be shared
* by multiple active devices -- it is not optimal.
diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c
index 7ddd57abadd1..95f23acd5b80 100644
--- a/drivers/acpi/pci_mcfg.c
+++ b/drivers/acpi/pci_mcfg.c
@@ -173,7 +173,7 @@ static int pci_mcfg_quirk_matches(struct mcfg_fixup *f, u16 segment,
{
if (!memcmp(f->oem_id, mcfg_oem_id, ACPI_OEM_ID_SIZE) &&
!memcmp(f->oem_table_id, mcfg_oem_table_id,
- ACPI_OEM_TABLE_ID_SIZE) &&
+ ACPI_OEM_TABLE_ID_SIZE) &&
f->oem_revision == mcfg_oem_revision &&
f->segment == segment &&
resource_contains(&f->bus_range, bus_range))
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 837b875d075e..8048da85b7e0 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -13,7 +13,7 @@
* 1. via "Device Specific (D-State) Control"
* 2. via "Power Resource Control".
* The code below deals with ACPI Power Resources control.
- *
+ *
* An ACPI "power resource object" represents a software controllable power
* plane, clock plane, or other resource depended on by a device.
*
@@ -645,7 +645,7 @@ int acpi_power_wakeup_list_init(struct list_head *list, int *system_level_p)
* -ENODEV if the execution of either _DSW or _PSW has failed
*/
int acpi_device_sleep_wake(struct acpi_device *dev,
- int enable, int sleep_state, int dev_state)
+ int enable, int sleep_state, int dev_state)
{
union acpi_object in_arg[3];
struct acpi_object_list arg_list = { 3, in_arg };
@@ -690,7 +690,7 @@ int acpi_device_sleep_wake(struct acpi_device *dev,
/*
* Prepare a wakeup device, two steps (Ref ACPI 2.0:P229):
- * 1. Power on the power resources required for the wakeup device
+ * 1. Power on the power resources required for the wakeup device
* 2. Execute _DSW (Device Sleep Wake) or (deprecated in ACPI 3.0) _PSW (Power
* State Wake) for the device, if present
*/
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 5909e8fa4013..b04a68950ff1 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -354,7 +354,7 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr)
(u32) px->control, (u32) px->status));
/*
- * Check that ACPI's u64 MHz will be valid as u32 KHz in cpufreq
+ * Check that ACPI's u64 MHz will be valid as u32 KHz in cpufreq
*/
if (!px->core_frequency ||
((u32)(px->core_frequency * 1000) !=
@@ -627,7 +627,7 @@ int acpi_processor_preregister_performance(
goto err_ret;
/*
- * Now that we have _PSD data from all CPUs, lets setup P-state
+ * Now that we have _PSD data from all CPUs, lets setup P-state
* domain info.
*/
for_each_possible_cpu(i) {
@@ -693,7 +693,7 @@ int acpi_processor_preregister_performance(
if (match_pdomain->domain != pdomain->domain)
continue;
- match_pr->performance->shared_type =
+ match_pr->performance->shared_type =
pr->performance->shared_type;
cpumask_copy(match_pr->performance->shared_cpu_map,
pr->performance->shared_cpu_map);
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index f158b8c30113..e6d9f4de2800 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -366,7 +366,7 @@ static int acpi_battery_get_state(struct acpi_battery *battery)
state_readers[i].mode,
ACPI_SBS_BATTERY,
state_readers[i].command,
- (u8 *)battery +
+ (u8 *)battery +
state_readers[i].offset);
if (result)
goto end;
diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
index 87b74e9015e5..53c2862c4c75 100644
--- a/drivers/acpi/sbshc.c
+++ b/drivers/acpi/sbshc.c
@@ -176,7 +176,7 @@ int acpi_smbus_write(struct acpi_smb_hc *hc, u8 protocol, u8 address,
EXPORT_SYMBOL_GPL(acpi_smbus_write);
int acpi_smbus_register_callback(struct acpi_smb_hc *hc,
- smbus_alarm_callback callback, void *context)
+ smbus_alarm_callback callback, void *context)
{
mutex_lock(&hc->lock);
hc->callback = callback;
diff --git a/drivers/acpi/sbshc.h b/drivers/acpi/sbshc.h
index c3522bb82792..695c390e2884 100644
--- a/drivers/acpi/sbshc.h
+++ b/drivers/acpi/sbshc.h
@@ -24,9 +24,9 @@ enum acpi_sbs_device_addr {
typedef void (*smbus_alarm_callback)(void *context);
extern int acpi_smbus_read(struct acpi_smb_hc *hc, u8 protocol, u8 address,
- u8 command, u8 * data);
+ u8 command, u8 *data);
extern int acpi_smbus_write(struct acpi_smb_hc *hc, u8 protocol, u8 slave_address,
- u8 command, u8 * data, u8 length);
+ u8 command, u8 *data, u8 length);
extern int acpi_smbus_register_callback(struct acpi_smb_hc *hc,
- smbus_alarm_callback callback, void *context);
+ smbus_alarm_callback callback, void *context);
extern int acpi_smbus_unregister_callback(struct acpi_smb_hc *hc);
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index a896e5e87c93..bc6a79e33220 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1453,7 +1453,7 @@ int acpi_dma_get_range(struct device *dev, u64 *dma_addr, u64 *offset,
}
/**
- * acpi_dma_configure - Set-up DMA configuration for the device.
+ * acpi_dma_configure_id - Set-up DMA configuration for the device.
* @dev: The pointer to the device
* @attr: device dma attributes
* @input_id: input device id const value pointer
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 3a032afd9d05..4f5463b2a217 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -178,14 +178,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201s"),
},
},
- {
- .callback = video_detect_force_video,
- .ident = "ThinkPad X201T",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201T"),
- },
- },
+ {
+ .callback = video_detect_force_video,
+ .ident = "ThinkPad X201T",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201T"),
+ },
+ },
/* The native backlight controls do not work on some older machines */
{
diff --git a/drivers/acpi/wakeup.c b/drivers/acpi/wakeup.c
index f89dd9a99e6e..b02bf770aead 100644
--- a/drivers/acpi/wakeup.c
+++ b/drivers/acpi/wakeup.c
@@ -44,7 +44,7 @@ void acpi_enable_wakeup_devices(u8 sleep_state)
if (!dev->wakeup.flags.valid
|| sleep_state > (u32) dev->wakeup.sleep_state
|| !(device_may_wakeup(&dev->dev)
- || dev->wakeup.prepare_count))
+ || dev->wakeup.prepare_count))
continue;
if (device_may_wakeup(&dev->dev))
@@ -69,7 +69,7 @@ void acpi_disable_wakeup_devices(u8 sleep_state)
if (!dev->wakeup.flags.valid
|| sleep_state > (u32) dev->wakeup.sleep_state
|| !(device_may_wakeup(&dev->dev)
- || dev->wakeup.prepare_count))
+ || dev->wakeup.prepare_count))
continue;
acpi_set_gpe_wake_mask(dev->wakeup.gpe_device, dev->wakeup.gpe_number,
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index eb9dc14e5147..20190f66ced9 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -2100,7 +2100,7 @@ static int nv_swncq_sdbfis(struct ata_port *ap)
pp->dhfis_bits &= ~done_mask;
pp->dmafis_bits &= ~done_mask;
pp->sdbfis_bits |= done_mask;
- ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
+ ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
if (!ap->qc_active) {
DPRINTK("over\n");
diff --git a/drivers/base/core.c b/drivers/base/core.c
index c852f16c111b..d661ada1518f 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -773,8 +773,7 @@ static void __device_link_del(struct kref *kref)
dev_dbg(link->consumer, "Dropping the link to %s\n",
dev_name(link->supplier));
- if (link->flags & DL_FLAG_PM_RUNTIME)
- pm_runtime_drop_link(link->consumer);
+ pm_runtime_drop_link(link);
list_del_rcu(&link->s_node);
list_del_rcu(&link->c_node);
@@ -788,8 +787,7 @@ static void __device_link_del(struct kref *kref)
dev_info(link->consumer, "Dropping the link to %s\n",
dev_name(link->supplier));
- if (link->flags & DL_FLAG_PM_RUNTIME)
- pm_runtime_drop_link(link->consumer);
+ pm_runtime_drop_link(link);
list_del(&link->s_node);
list_del(&link->c_node);
@@ -4264,6 +4262,7 @@ static inline bool fwnode_is_primary(struct fwnode_handle *fwnode)
*/
void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
{
+ struct device *parent = dev->parent;
struct fwnode_handle *fn = dev->fwnode;
if (fwnode) {
@@ -4278,7 +4277,8 @@ void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
} else {
if (fwnode_is_primary(fn)) {
dev->fwnode = fn->secondary;
- fn->secondary = NULL;
+ if (!(parent && fn == parent->fwnode))
+ fn->secondary = ERR_PTR(-ENODEV);
} else {
dev->fwnode = NULL;
}
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index b42229b74fd6..148e81969e04 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -1117,6 +1117,8 @@ static void __device_release_driver(struct device *dev, struct device *parent)
drv = dev->driver;
if (drv) {
+ pm_runtime_get_sync(dev);
+
while (device_links_busy(dev)) {
__device_driver_unlock(dev, parent);
@@ -1128,13 +1130,12 @@ static void __device_release_driver(struct device *dev, struct device *parent)
* have released the driver successfully while this one
* was waiting, so check for that.
*/
- if (dev->driver != drv)
+ if (dev->driver != drv) {
+ pm_runtime_put(dev);
return;
+ }
}
- pm_runtime_get_sync(dev);
- pm_runtime_clean_up_links(dev);
-
driver_sysfs_remove(dev);
if (dev->bus)
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 6f605f7820bb..bfda153b1a41 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1643,42 +1643,6 @@ void pm_runtime_remove(struct device *dev)
}
/**
- * pm_runtime_clean_up_links - Prepare links to consumers for driver removal.
- * @dev: Device whose driver is going to be removed.
- *
- * Check links from this device to any consumers and if any of them have active
- * runtime PM references to the device, drop the usage counter of the device
- * (as many times as needed).
- *
- * Links with the DL_FLAG_MANAGED flag unset are ignored.
- *
- * Since the device is guaranteed to be runtime-active at the point this is
- * called, nothing else needs to be done here.
- *
- * Moreover, this is called after device_links_busy() has returned 'false', so
- * the status of each link is guaranteed to be DL_STATE_SUPPLIER_UNBIND and
- * therefore rpm_active can't be manipulated concurrently.
- */
-void pm_runtime_clean_up_links(struct device *dev)
-{
- struct device_link *link;
- int idx;
-
- idx = device_links_read_lock();
-
- list_for_each_entry_rcu(link, &dev->links.consumers, s_node,
- device_links_read_lock_held()) {
- if (!(link->flags & DL_FLAG_MANAGED))
- continue;
-
- while (refcount_dec_not_one(&link->rpm_active))
- pm_runtime_put_noidle(dev);
- }
-
- device_links_read_unlock(idx);
-}
-
-/**
* pm_runtime_get_suppliers - Resume and reference-count supplier devices.
* @dev: Consumer device.
*/
@@ -1729,7 +1693,7 @@ void pm_runtime_new_link(struct device *dev)
spin_unlock_irq(&dev->power.lock);
}
-void pm_runtime_drop_link(struct device *dev)
+static void pm_runtime_drop_link_count(struct device *dev)
{
spin_lock_irq(&dev->power.lock);
WARN_ON(dev->power.links_count == 0);
@@ -1737,6 +1701,25 @@ void pm_runtime_drop_link(struct device *dev)
spin_unlock_irq(&dev->power.lock);
}
+/**
+ * pm_runtime_drop_link - Prepare for device link removal.
+ * @link: Device link going away.
+ *
+ * Drop the link count of the consumer end of @link and decrement the supplier
+ * device's runtime PM usage counter as many times as needed to drop all of the
+ * PM runtime reference to it from the consumer.
+ */
+void pm_runtime_drop_link(struct device_link *link)
+{
+ if (!(link->flags & DL_FLAG_PM_RUNTIME))
+ return;
+
+ pm_runtime_drop_link_count(link->consumer);
+
+ while (refcount_dec_not_one(&link->rpm_active))
+ pm_runtime_put(link->supplier);
+}
+
static bool pm_runtime_need_not_resume(struct device *dev)
{
return atomic_read(&dev->power.usage_count) <= 1 &&
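Taken together, the core.c, dd.c and runtime.c hunks above replace the old pm_runtime_clean_up_links() sweep with per-link cleanup at link-removal time. A condensed sketch of the new path, restating the hunks above in one place rather than reproducing the full kernel code:

	/*
	 * Sketch only: __device_link_del() now calls pm_runtime_drop_link()
	 * unconditionally; the DL_FLAG_PM_RUNTIME test has moved into the
	 * callee, which also drains the link's supplier references.
	 */
	void pm_runtime_drop_link(struct device_link *link)
	{
		if (!(link->flags & DL_FLAG_PM_RUNTIME))
			return;

		/* Drop the consumer's power.links_count under its lock. */
		pm_runtime_drop_link_count(link->consumer);

		/*
		 * Release every runtime PM reference this link still holds
		 * on the supplier, so unbinding a consumer cannot leak
		 * supplier usage counts.
		 */
		while (refcount_dec_not_one(&link->rpm_active))
			pm_runtime_put(link->supplier);
	}

The matching dd.c change takes pm_runtime_get_sync(dev) before waiting for busy links and balances it with pm_runtime_put(dev) on the early return, so the usage count stays even on every exit path.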
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index cb1191d6e945..a58084c2ed7c 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -255,7 +255,8 @@ static void loop_set_size(struct loop_device *lo, loff_t size)
bd_set_nr_sectors(bdev, size);
- set_capacity_revalidate_and_notify(lo->lo_disk, size, false);
+ if (!set_capacity_revalidate_and_notify(lo->lo_disk, size, false))
+ kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
}
static inline int
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 0bed21c0c81b..aaae9220f3a0 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -296,7 +296,7 @@ static void nbd_size_clear(struct nbd_device *nbd)
}
}
-static void nbd_size_update(struct nbd_device *nbd)
+static void nbd_size_update(struct nbd_device *nbd, bool start)
{
struct nbd_config *config = nbd->config;
struct block_device *bdev = bdget_disk(nbd->disk, 0);
@@ -313,7 +313,8 @@ static void nbd_size_update(struct nbd_device *nbd)
if (bdev) {
if (bdev->bd_disk) {
bd_set_nr_sectors(bdev, nr_sectors);
- set_blocksize(bdev, config->blksize);
+ if (start)
+ set_blocksize(bdev, config->blksize);
} else
set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
bdput(bdev);
@@ -328,7 +329,7 @@ static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
config->blksize = blocksize;
config->bytesize = blocksize * nr_blocks;
if (nbd->task_recv != NULL)
- nbd_size_update(nbd);
+ nbd_size_update(nbd, false);
}
static void nbd_complete_rq(struct request *req)
@@ -1308,7 +1309,7 @@ static int nbd_start_device(struct nbd_device *nbd)
args->index = i;
queue_work(nbd->recv_workq, &args->work);
}
- nbd_size_update(nbd);
+ nbd_size_update(nbd, true);
return error;
}
@@ -1517,6 +1518,7 @@ static void nbd_release(struct gendisk *disk, fmode_t mode)
if (test_bit(NBD_RT_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
bdev->bd_openers == 0)
nbd_disconnect_and_put(nbd);
+ bdput(bdev);
nbd_config_put(nbd);
nbd_put(nbd);
diff --git a/drivers/block/null_blk.h b/drivers/block/null_blk.h
index d2e7db43a52a..c24d9b5ad81a 100644
--- a/drivers/block/null_blk.h
+++ b/drivers/block/null_blk.h
@@ -47,6 +47,8 @@ struct nullb_device {
unsigned int nr_zones_closed;
struct blk_zone *zones;
sector_t zone_size_sects;
+ spinlock_t zone_lock;
+ unsigned long *zone_locks;
unsigned long size; /* device size in MB */
unsigned long completion_nsec; /* time in ns to complete a request */
diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c
index 7d94f2d47a6a..beb34b4f76b0 100644
--- a/drivers/block/null_blk_zoned.c
+++ b/drivers/block/null_blk_zoned.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/vmalloc.h>
+#include <linux/bitmap.h>
#include "null_blk.h"
#define CREATE_TRACE_POINTS
@@ -45,6 +46,22 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
if (!dev->zones)
return -ENOMEM;
+ /*
+ * With memory backing, the zone_lock spinlock needs to be temporarily
+ * released to avoid scheduling in atomic context. To guarantee zone
+ * information protection, use a bitmap to lock zones with
+ * wait_on_bit_lock_io(). Sleeping on the lock is OK as memory backing
+ * implies that the queue is marked with BLK_MQ_F_BLOCKING.
+ */
+ spin_lock_init(&dev->zone_lock);
+ if (dev->memory_backed) {
+ dev->zone_locks = bitmap_zalloc(dev->nr_zones, GFP_KERNEL);
+ if (!dev->zone_locks) {
+ kvfree(dev->zones);
+ return -ENOMEM;
+ }
+ }
+
if (dev->zone_nr_conv >= dev->nr_zones) {
dev->zone_nr_conv = dev->nr_zones - 1;
pr_info("changed the number of conventional zones to %u",
@@ -123,15 +140,31 @@ int null_register_zoned_dev(struct nullb *nullb)
void null_free_zoned_dev(struct nullb_device *dev)
{
+ bitmap_free(dev->zone_locks);
kvfree(dev->zones);
}
+static inline void null_lock_zone(struct nullb_device *dev, unsigned int zno)
+{
+ if (dev->memory_backed)
+ wait_on_bit_lock_io(dev->zone_locks, zno, TASK_UNINTERRUPTIBLE);
+ spin_lock_irq(&dev->zone_lock);
+}
+
+static inline void null_unlock_zone(struct nullb_device *dev, unsigned int zno)
+{
+ spin_unlock_irq(&dev->zone_lock);
+
+ if (dev->memory_backed)
+ clear_and_wake_up_bit(zno, dev->zone_locks);
+}
+
int null_report_zones(struct gendisk *disk, sector_t sector,
unsigned int nr_zones, report_zones_cb cb, void *data)
{
struct nullb *nullb = disk->private_data;
struct nullb_device *dev = nullb->dev;
- unsigned int first_zone, i;
+ unsigned int first_zone, i, zno;
struct blk_zone zone;
int error;
@@ -142,15 +175,18 @@ int null_report_zones(struct gendisk *disk, sector_t sector,
nr_zones = min(nr_zones, dev->nr_zones - first_zone);
trace_nullb_report_zones(nullb, nr_zones);
- for (i = 0; i < nr_zones; i++) {
+ zno = first_zone;
+ for (i = 0; i < nr_zones; i++, zno++) {
/*
* Stacked DM target drivers will remap the zone information by
* modifying the zone information passed to the report callback.
* So use a local copy to avoid corruption of the device zone
* array.
*/
- memcpy(&zone, &dev->zones[first_zone + i],
- sizeof(struct blk_zone));
+ null_lock_zone(dev, zno);
+ memcpy(&zone, &dev->zones[zno], sizeof(struct blk_zone));
+ null_unlock_zone(dev, zno);
+
error = cb(&zone, i, data);
if (error)
return error;
@@ -159,6 +195,10 @@ int null_report_zones(struct gendisk *disk, sector_t sector,
return nr_zones;
}
+/*
+ * This is called in the case of memory backing from null_process_cmd()
+ * with the target zone already locked.
+ */
size_t null_zone_valid_read_len(struct nullb *nullb,
sector_t sector, unsigned int len)
{
@@ -295,22 +335,26 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
return null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
+ null_lock_zone(dev, zno);
+
switch (zone->cond) {
case BLK_ZONE_COND_FULL:
/* Cannot write to a full zone */
- return BLK_STS_IOERR;
+ ret = BLK_STS_IOERR;
+ goto unlock;
case BLK_ZONE_COND_EMPTY:
case BLK_ZONE_COND_CLOSED:
ret = null_check_zone_resources(dev, zone);
if (ret != BLK_STS_OK)
- return ret;
+ goto unlock;
break;
case BLK_ZONE_COND_IMP_OPEN:
case BLK_ZONE_COND_EXP_OPEN:
break;
default:
/* Invalid zone condition */
- return BLK_STS_IOERR;
+ ret = BLK_STS_IOERR;
+ goto unlock;
}
/*
@@ -326,11 +370,14 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
else
cmd->rq->__sector = sector;
} else if (sector != zone->wp) {
- return BLK_STS_IOERR;
+ ret = BLK_STS_IOERR;
+ goto unlock;
}
- if (zone->wp + nr_sectors > zone->start + zone->capacity)
- return BLK_STS_IOERR;
+ if (zone->wp + nr_sectors > zone->start + zone->capacity) {
+ ret = BLK_STS_IOERR;
+ goto unlock;
+ }
if (zone->cond == BLK_ZONE_COND_CLOSED) {
dev->nr_zones_closed--;
@@ -341,9 +388,19 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
zone->cond = BLK_ZONE_COND_IMP_OPEN;
+ /*
+ * Memory backing allocation may sleep: release the zone_lock spinlock
+ * to avoid scheduling in atomic context. Zone operation atomicity is
+ * still guaranteed through the zone_locks bitmap.
+ */
+ if (dev->memory_backed)
+ spin_unlock_irq(&dev->zone_lock);
ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
+ if (dev->memory_backed)
+ spin_lock_irq(&dev->zone_lock);
+
if (ret != BLK_STS_OK)
- return ret;
+ goto unlock;
zone->wp += nr_sectors;
if (zone->wp == zone->start + zone->capacity) {
@@ -353,7 +410,12 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
dev->nr_zones_imp_open--;
zone->cond = BLK_ZONE_COND_FULL;
}
- return BLK_STS_OK;
+ ret = BLK_STS_OK;
+
+unlock:
+ null_unlock_zone(dev, zno);
+
+ return ret;
}
static blk_status_t null_open_zone(struct nullb_device *dev, struct blk_zone *zone)
@@ -464,16 +526,30 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
sector_t sector)
{
struct nullb_device *dev = cmd->nq->dev;
- unsigned int zone_no = null_zone_no(dev, sector);
- struct blk_zone *zone = &dev->zones[zone_no];
- blk_status_t ret = BLK_STS_OK;
+ unsigned int zone_no;
+ struct blk_zone *zone;
+ blk_status_t ret;
size_t i;
+ if (op == REQ_OP_ZONE_RESET_ALL) {
+ for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
+ null_lock_zone(dev, i);
+ zone = &dev->zones[i];
+ if (zone->cond != BLK_ZONE_COND_EMPTY) {
+ null_reset_zone(dev, zone);
+ trace_nullb_zone_op(cmd, i, zone->cond);
+ }
+ null_unlock_zone(dev, i);
+ }
+ return BLK_STS_OK;
+ }
+
+ zone_no = null_zone_no(dev, sector);
+ zone = &dev->zones[zone_no];
+
+ null_lock_zone(dev, zone_no);
+
switch (op) {
- case REQ_OP_ZONE_RESET_ALL:
- for (i = dev->zone_nr_conv; i < dev->nr_zones; i++)
- null_reset_zone(dev, &dev->zones[i]);
- break;
case REQ_OP_ZONE_RESET:
ret = null_reset_zone(dev, zone);
break;
@@ -487,30 +563,44 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
ret = null_finish_zone(dev, zone);
break;
default:
- return BLK_STS_NOTSUPP;
+ ret = BLK_STS_NOTSUPP;
+ break;
}
if (ret == BLK_STS_OK)
trace_nullb_zone_op(cmd, zone_no, zone->cond);
+ null_unlock_zone(dev, zone_no);
+
return ret;
}
blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_opf op,
sector_t sector, sector_t nr_sectors)
{
+ struct nullb_device *dev = cmd->nq->dev;
+ unsigned int zno = null_zone_no(dev, sector);
+ blk_status_t sts;
+
switch (op) {
case REQ_OP_WRITE:
- return null_zone_write(cmd, sector, nr_sectors, false);
+ sts = null_zone_write(cmd, sector, nr_sectors, false);
+ break;
case REQ_OP_ZONE_APPEND:
- return null_zone_write(cmd, sector, nr_sectors, true);
+ sts = null_zone_write(cmd, sector, nr_sectors, true);
+ break;
case REQ_OP_ZONE_RESET:
case REQ_OP_ZONE_RESET_ALL:
case REQ_OP_ZONE_OPEN:
case REQ_OP_ZONE_CLOSE:
case REQ_OP_ZONE_FINISH:
- return null_zone_mgmt(cmd, op, sector);
+ sts = null_zone_mgmt(cmd, op, sector);
+ break;
default:
- return null_process_cmd(cmd, op, sector, nr_sectors);
+ null_lock_zone(dev, zno);
+ sts = null_process_cmd(cmd, op, sector, nr_sectors);
+ null_unlock_zone(dev, zno);
}
+
+ return sts;
}
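The zone locking introduced above is split across several hunks, so the complete pattern is worth seeing in one self-contained sketch. The helper name null_do_sleeping_zone_op() is hypothetical; null_lock_zone(), null_unlock_zone() and the memory_backed flag are the ones this patch adds:

	/*
	 * Sketch: the per-zone bit in dev->zone_locks keeps the zone
	 * exclusive even while the zone_lock spinlock is dropped around a
	 * sleeping operation; sleeping is legal here because memory
	 * backing implies a BLK_MQ_F_BLOCKING queue.
	 */
	static blk_status_t null_do_sleeping_zone_op(struct nullb_device *dev,
						     unsigned int zno,
						     blk_status_t (*op)(void *arg),
						     void *arg)
	{
		blk_status_t ret;

		null_lock_zone(dev, zno);	/* bit-lock (may sleep), then spinlock */

		if (dev->memory_backed)
			spin_unlock_irq(&dev->zone_lock);	/* let op sleep */
		ret = op(arg);
		if (dev->memory_backed)
			spin_lock_irq(&dev->zone_lock);		/* back to atomic context */

		null_unlock_zone(dev, zno);	/* drop spinlock, wake bit waiters */
		return ret;
	}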
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 8d581c7536fb..eb8ef65778c3 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -443,22 +443,27 @@ static void ace_fix_driveid(u16 *id)
#define ACE_FSM_NUM_STATES 11
/* Set flag to exit FSM loop and reschedule tasklet */
-static inline void ace_fsm_yield(struct ace_device *ace)
+static inline void ace_fsm_yieldpoll(struct ace_device *ace)
{
- dev_dbg(ace->dev, "ace_fsm_yield()\n");
tasklet_schedule(&ace->fsm_tasklet);
ace->fsm_continue_flag = 0;
}
+static inline void ace_fsm_yield(struct ace_device *ace)
+{
+ dev_dbg(ace->dev, "%s()\n", __func__);
+ ace_fsm_yieldpoll(ace);
+}
+
/* Set flag to exit FSM loop and wait for IRQ to reschedule tasklet */
static inline void ace_fsm_yieldirq(struct ace_device *ace)
{
dev_dbg(ace->dev, "ace_fsm_yieldirq()\n");
- if (!ace->irq)
- /* No IRQ assigned, so need to poll */
- tasklet_schedule(&ace->fsm_tasklet);
- ace->fsm_continue_flag = 0;
+ if (ace->irq > 0)
+ ace->fsm_continue_flag = 0;
+ else
+ ace_fsm_yieldpoll(ace);
}
static bool ace_has_next_request(struct request_queue *q)
@@ -1053,12 +1058,12 @@ static int ace_setup(struct ace_device *ace)
ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ);
/* Now we can hook up the irq handler */
- if (ace->irq) {
+ if (ace->irq > 0) {
rc = request_irq(ace->irq, ace_interrupt, 0, "systemace", ace);
if (rc) {
/* Failure - fall back to polled mode */
dev_err(ace->dev, "request_irq failed\n");
- ace->irq = 0;
+ ace->irq = rc;
}
}
@@ -1110,7 +1115,7 @@ static void ace_teardown(struct ace_device *ace)
tasklet_kill(&ace->fsm_tasklet);
- if (ace->irq)
+ if (ace->irq > 0)
free_irq(ace->irq, ace);
iounmap(ace->baseaddr);
@@ -1123,11 +1128,6 @@ static int ace_alloc(struct device *dev, int id, resource_size_t physaddr,
int rc;
dev_dbg(dev, "ace_alloc(%p)\n", dev);
- if (!physaddr) {
- rc = -ENODEV;
- goto err_noreg;
- }
-
/* Allocate and initialize the ace device structure */
ace = kzalloc(sizeof(struct ace_device), GFP_KERNEL);
if (!ace) {
@@ -1153,7 +1153,6 @@ err_setup:
dev_set_drvdata(dev, NULL);
kfree(ace);
err_alloc:
-err_noreg:
dev_err(dev, "could not initialize device, err=%i\n", rc);
return rc;
}
@@ -1176,10 +1175,11 @@ static void ace_free(struct device *dev)
static int ace_probe(struct platform_device *dev)
{
- resource_size_t physaddr = 0;
int bus_width = ACE_BUS_WIDTH_16; /* FIXME: should not be hard coded */
+ resource_size_t physaddr;
+ struct resource *res;
u32 id = dev->id;
- int irq = 0;
+ int irq;
int i;
dev_dbg(&dev->dev, "ace_probe(%p)\n", dev);
@@ -1190,12 +1190,15 @@ static int ace_probe(struct platform_device *dev)
if (of_find_property(dev->dev.of_node, "8-bit", NULL))
bus_width = ACE_BUS_WIDTH_8;
- for (i = 0; i < dev->num_resources; i++) {
- if (dev->resource[i].flags & IORESOURCE_MEM)
- physaddr = dev->resource[i].start;
- if (dev->resource[i].flags & IORESOURCE_IRQ)
- irq = dev->resource[i].start;
- }
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
+ physaddr = res->start;
+ if (!physaddr)
+ return -ENODEV;
+
+ irq = platform_get_irq_optional(dev, 0);
/* Call the bus-independent setup code */
return ace_alloc(&dev->dev, id, physaddr, irq, bus_width);
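
Two conventions change together in this hunk: resources are fetched through the platform API instead of scanning dev->resource[], and platform_get_irq_optional() reports "no IRQ" as a negative errno rather than 0, which is why every ace->irq test becomes "> 0". A compact probe-side sketch of the pattern, with hypothetical names:

/* Sketch: canonical platform MMIO + optional IRQ lookup (illustrative). */
#include <linux/ioport.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	int irq;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;	/* the MMIO region is mandatory */

	irq = platform_get_irq_optional(pdev, 0);
	if (irq > 0) {
		/* interrupt-driven operation */
	} else {
		/* irq holds a negative errno: fall back to polled mode */
	}

	return 0;
}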
diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h
index 09346ae308eb..78cc64b42b30 100644
--- a/drivers/bluetooth/btintel.h
+++ b/drivers/bluetooth/btintel.h
@@ -47,7 +47,7 @@ enum {
struct intel_tlv {
u8 type;
u8 len;
- u8 val[0];
+ u8 val[];
} __packed;
struct intel_version_tlv {
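
The btintel change swaps a GCC zero-length array for a C99 flexible array member; the layout is identical, but [] lets the compiler and fortified string routines reason about the array's bounds. A self-contained sketch of allocating such a structure, using struct_size() to keep the size arithmetic overflow-safe (all names here are illustrative):

/* Sketch: allocation of a structure ending in a flexible array member. */
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

struct tlv_example {
	u8 type;
	u8 len;
	u8 val[];	/* flexible array member; historically "val[0]" */
} __packed;

static struct tlv_example *tlv_alloc(u8 type, const u8 *data, u8 len)
{
	/* struct_size() == sizeof(*t) + len * sizeof(t->val[0]), checked. */
	struct tlv_example *t = kmalloc(struct_size(t, val, len), GFP_KERNEL);

	if (!t)
		return NULL;
	t->type = type;
	t->len = len;
	memcpy(t->val, data, len);
	return t;
}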
diff --git a/drivers/char/tpm/eventlog/efi.c b/drivers/char/tpm/eventlog/efi.c
index 6bb023de17f1..35229e5143ca 100644
--- a/drivers/char/tpm/eventlog/efi.c
+++ b/drivers/char/tpm/eventlog/efi.c
@@ -41,6 +41,11 @@ int tpm_read_log_efi(struct tpm_chip *chip)
log_size = log_tbl->size;
memunmap(log_tbl);
+ if (!log_size) {
+ pr_warn("UEFI TPM log area empty\n");
+ return -EIO;
+ }
+
log_tbl = memremap(efi.tpm_log, sizeof(*log_tbl) + log_size,
MEMREMAP_WB);
if (!log_tbl) {
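
This fix matters because the function maps the log in two passes: the header is mapped just long enough to read the size, unmapped, and then the whole table is remapped at full length, so a zero size has to be rejected between the passes. A condensed sketch of the two-pass memremap() pattern, assuming a hypothetical header type:

/* Sketch: two-pass mapping of a variable-length firmware table. */
#include <linux/io.h>
#include <linux/types.h>

struct fw_table_hdr {
	u32 size;	/* bytes of payload following the header */
};

static void *map_fw_table(phys_addr_t pa, size_t *payload_size)
{
	struct fw_table_hdr *hdr;
	size_t size;

	hdr = memremap(pa, sizeof(*hdr), MEMREMAP_WB);
	if (!hdr)
		return NULL;
	size = hdr->size;
	memunmap(hdr);

	if (!size)	/* empty table: a zero-length payload map is useless */
		return NULL;

	*payload_size = size;
	return memremap(pa, sizeof(*hdr) + size, MEMREMAP_WB);
}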
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 0b214963539d..4ed6e660273a 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -27,6 +27,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/kernel.h>
+#include <linux/dmi.h>
#include "tpm.h"
#include "tpm_tis_core.h"
@@ -49,8 +50,8 @@ static inline struct tpm_tis_tcg_phy *to_tpm_tis_tcg_phy(struct tpm_tis_data *da
return container_of(data, struct tpm_tis_tcg_phy, priv);
}
-static bool interrupts = true;
-module_param(interrupts, bool, 0444);
+static int interrupts = -1;
+module_param(interrupts, int, 0444);
MODULE_PARM_DESC(interrupts, "Enable interrupts");
static bool itpm;
@@ -63,6 +64,28 @@ module_param(force, bool, 0444);
MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry");
#endif
+static int tpm_tis_disable_irq(const struct dmi_system_id *d)
+{
+ if (interrupts == -1) {
+ pr_notice("tpm_tis: %s detected: disabling interrupts.\n", d->ident);
+ interrupts = 0;
+ }
+
+ return 0;
+}
+
+static const struct dmi_system_id tpm_tis_dmi_table[] = {
+ {
+ .callback = tpm_tis_disable_irq,
+ .ident = "ThinkPad T490s",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T490s"),
+ },
+ },
+ {}
+};
+
#if defined(CONFIG_PNP) && defined(CONFIG_ACPI)
static int has_hid(struct acpi_device *dev, const char *hid)
{
@@ -192,6 +215,8 @@ static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info)
int irq = -1;
int rc;
+ dmi_check_system(tpm_tis_dmi_table);
+
rc = check_acpi_tpm2(dev);
if (rc)
return rc;
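
The interrupts parameter becomes a tri-state here: -1 (the new default) means "let the DMI quirk table decide", while an explicit 0 or 1 from the user always wins because the callback only rewrites the value while it is still -1. A minimal generic sketch of this quirk idiom (machine and parameter names are invented):

/* Sketch: DMI quirk that yields to an explicit module parameter. */
#include <linux/dmi.h>
#include <linux/module.h>

static int feature = -1;	/* -1 auto, 0 force off, 1 force on */
module_param(feature, int, 0444);

static int quirk_disable_feature(const struct dmi_system_id *d)
{
	if (feature == -1)	/* only override the untouched default */
		feature = 0;
	return 0;
}

static const struct dmi_system_id quirk_table[] = {
	{
		.callback = quirk_disable_feature,
		.ident = "Example machine",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "EXAMPLE"),
		},
	},
	{ }
};

/* Run once early in probe: dmi_check_system(quirk_table); */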
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 2c7171e0b001..85de313ddec2 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -71,6 +71,7 @@ config CPU_FREQ_DEFAULT_GOV_USERSPACE
config CPU_FREQ_DEFAULT_GOV_ONDEMAND
bool "ondemand"
+ depends on !(X86_INTEL_PSTATE && SMP)
select CPU_FREQ_GOV_ONDEMAND
select CPU_FREQ_GOV_PERFORMANCE
help
@@ -83,6 +84,7 @@ config CPU_FREQ_DEFAULT_GOV_ONDEMAND
config CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
bool "conservative"
+ depends on !(X86_INTEL_PSTATE && SMP)
select CPU_FREQ_GOV_CONSERVATIVE
select CPU_FREQ_GOV_PERFORMANCE
help
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index f4b60663efe6..1e7e3f2ff09f 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1908,6 +1908,18 @@ void cpufreq_resume(void)
}
/**
+ * cpufreq_driver_test_flags - Test cpufreq driver's flags against given ones.
+ * @flags: Flags to test against the current cpufreq driver's flags.
+ *
+ * Assumes that the driver is there, so callers must ensure that this is the
+ * case.
+ */
+bool cpufreq_driver_test_flags(u16 flags)
+{
+ return !!(cpufreq_driver->flags & flags);
+}
+
+/**
* cpufreq_get_current_driver - return current driver's name
*
* Return the name string of the currently loaded cpufreq driver
@@ -2187,7 +2199,8 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
* exactly same freq is called again and so we can save on few function
* calls.
*/
- if (target_freq == policy->cur)
+ if (target_freq == policy->cur &&
+ !(cpufreq_driver->flags & CPUFREQ_NEED_UPDATE_LIMITS))
return 0;
/* Save last value to restore later on errors */
@@ -2241,7 +2254,7 @@ static int cpufreq_init_governor(struct cpufreq_policy *policy)
return -EINVAL;
/* Platform doesn't want dynamic frequency switching ? */
- if (policy->governor->dynamic_switching &&
+ if (policy->governor->flags & CPUFREQ_GOV_DYNAMIC_SWITCHING &&
cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING) {
struct cpufreq_governor *gov = cpufreq_fallback_governor();
@@ -2267,6 +2280,8 @@ static int cpufreq_init_governor(struct cpufreq_policy *policy)
}
}
+ policy->strict_target = !!(policy->governor->flags & CPUFREQ_GOV_STRICT_TARGET);
+
return 0;
}
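
Two flag mechanisms land in this file: governors now carry a flags word instead of the dynamic_switching bool, and a driver can set CPUFREQ_NEED_UPDATE_LIMITS to opt out of the "target equals policy->cur, do nothing" shortcut. A hedged sketch of how a governor-side fast path might honor that driver flag through the new cpufreq_driver_test_flags() helper (the surrounding logic is illustrative, not taken from any one governor):

/* Sketch: skip redundant requests unless the driver forbids it. */
#include <linux/cpufreq.h>

static void gov_fast_update(struct cpufreq_policy *policy, unsigned int freq)
{
	if (freq == policy->cur &&
	    !cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS))
		return;	/* most drivers: nothing changed, nothing to do */

	cpufreq_driver_fast_switch(policy, freq);
}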
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index c56773c25757..bab8e6140377 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -156,7 +156,7 @@ void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy);
#define CPUFREQ_DBS_GOVERNOR_INITIALIZER(_name_) \
{ \
.name = _name_, \
- .dynamic_switching = true, \
+ .flags = CPUFREQ_GOV_DYNAMIC_SWITCHING, \
.owner = THIS_MODULE, \
.init = cpufreq_dbs_governor_init, \
.exit = cpufreq_dbs_governor_exit, \
diff --git a/drivers/cpufreq/cpufreq_performance.c b/drivers/cpufreq/cpufreq_performance.c
index 71c1d9aba772..addd93f2a420 100644
--- a/drivers/cpufreq/cpufreq_performance.c
+++ b/drivers/cpufreq/cpufreq_performance.c
@@ -20,6 +20,7 @@ static void cpufreq_gov_performance_limits(struct cpufreq_policy *policy)
static struct cpufreq_governor cpufreq_gov_performance = {
.name = "performance",
.owner = THIS_MODULE,
+ .flags = CPUFREQ_GOV_STRICT_TARGET,
.limits = cpufreq_gov_performance_limits,
};
diff --git a/drivers/cpufreq/cpufreq_powersave.c b/drivers/cpufreq/cpufreq_powersave.c
index 7749522355b5..8d830d860e91 100644
--- a/drivers/cpufreq/cpufreq_powersave.c
+++ b/drivers/cpufreq/cpufreq_powersave.c
@@ -21,6 +21,7 @@ static struct cpufreq_governor cpufreq_gov_powersave = {
.name = "powersave",
.limits = cpufreq_gov_powersave_limits,
.owner = THIS_MODULE,
+ .flags = CPUFREQ_GOV_STRICT_TARGET,
};
MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c
index 776a58bab0ff..ab93bce8ae77 100644
--- a/drivers/cpufreq/e_powersaver.c
+++ b/drivers/cpufreq/e_powersaver.c
@@ -223,7 +223,6 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
case EPS_BRAND_C3:
pr_cont("C3\n");
return -ENODEV;
- break;
}
/* Enable Enhanced PowerSaver */
rdmsrl(MSR_IA32_MISC_ENABLE, val);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 3c1455518738..36a3ccfe6d3d 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -2527,7 +2527,7 @@ static void intel_cpufreq_trace(struct cpudata *cpu, unsigned int trace_type, in
}
static void intel_cpufreq_adjust_hwp(struct cpudata *cpu, u32 target_pstate,
- bool fast_switch)
+ bool strict, bool fast_switch)
{
u64 prev = READ_ONCE(cpu->hwp_req_cached), value = prev;
@@ -2539,7 +2539,7 @@ static void intel_cpufreq_adjust_hwp(struct cpudata *cpu, u32 target_pstate,
* field in it, so opportunistically update the max too if needed.
*/
value &= ~HWP_MAX_PERF(~0L);
- value |= HWP_MAX_PERF(cpu->max_perf_ratio);
+ value |= HWP_MAX_PERF(strict ? target_pstate : cpu->max_perf_ratio);
if (value == prev)
return;
@@ -2562,20 +2562,20 @@ static void intel_cpufreq_adjust_perf_ctl(struct cpudata *cpu,
pstate_funcs.get_val(cpu, target_pstate));
}
-static int intel_cpufreq_update_pstate(struct cpudata *cpu, int target_pstate,
- bool fast_switch)
+static int intel_cpufreq_update_pstate(struct cpufreq_policy *policy,
+ int target_pstate, bool fast_switch)
{
+ struct cpudata *cpu = all_cpu_data[policy->cpu];
int old_pstate = cpu->pstate.current_pstate;
target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
- if (target_pstate != old_pstate) {
+ if (hwp_active) {
+ intel_cpufreq_adjust_hwp(cpu, target_pstate,
+ policy->strict_target, fast_switch);
+ cpu->pstate.current_pstate = target_pstate;
+ } else if (target_pstate != old_pstate) {
+ intel_cpufreq_adjust_perf_ctl(cpu, target_pstate, fast_switch);
cpu->pstate.current_pstate = target_pstate;
- if (hwp_active)
- intel_cpufreq_adjust_hwp(cpu, target_pstate,
- fast_switch);
- else
- intel_cpufreq_adjust_perf_ctl(cpu, target_pstate,
- fast_switch);
}
intel_cpufreq_trace(cpu, fast_switch ? INTEL_PSTATE_TRACE_FAST_SWITCH :
@@ -2611,7 +2611,7 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy,
break;
}
- target_pstate = intel_cpufreq_update_pstate(cpu, target_pstate, false);
+ target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, false);
freqs.new = target_pstate * cpu->pstate.scaling;
@@ -2630,7 +2630,7 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
- target_pstate = intel_cpufreq_update_pstate(cpu, target_pstate, true);
+ target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, true);
return target_pstate * cpu->pstate.scaling;
}
@@ -3032,6 +3032,7 @@ static int __init intel_pstate_init(void)
hwp_mode_bdw = id->driver_data;
intel_pstate.attr = hwp_cpufreq_attrs;
intel_cpufreq.attr = hwp_cpufreq_attrs;
+ intel_cpufreq.flags |= CPUFREQ_NEED_UPDATE_LIMITS;
if (!default_driver)
default_driver = &intel_pstate;
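
For intel_pstate in passive mode this means a strict-target governor (performance or powersave) pins both the HWP MIN and MAX fields to the requested pstate, while other governors keep MAX at the policy limit; caching the request value also lets redundant MSR writes be skipped. A simplified sketch of that read-modify-write pattern (the helper and its parameters are hypothetical; the real driver keeps the cache in struct cpudata):

/* Sketch: update a cached HWP request, writing the MSR only on change. */
#include <asm/msr.h>

static void hwp_set_target(u64 *cached, int cpu, u32 target, u32 policy_max,
			   bool strict)
{
	u64 value = *cached;

	value &= ~(HWP_MIN_PERF(~0L) | HWP_MAX_PERF(~0L));
	value |= HWP_MIN_PERF(target);
	/* strict target: clamp MAX down to the requested pstate too */
	value |= HWP_MAX_PERF(strict ? target : policy_max);

	if (value == *cached)
		return;	/* skip the relatively costly MSR write */

	*cached = value;
	wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
}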
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index 123fb006810d..182a4dbca095 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -593,7 +593,6 @@ static void longhaul_setup_voltagescaling(void)
break;
default:
return;
- break;
}
if (min_vid_speed >= highest_speed)
return;
diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c
index a13a2d1e444e..0b66df4ed513 100644
--- a/drivers/cpufreq/speedstep-lib.c
+++ b/drivers/cpufreq/speedstep-lib.c
@@ -240,7 +240,7 @@ unsigned int speedstep_get_frequency(enum speedstep_processor processor)
return pentium3_get_frequency(processor);
default:
return 0;
- };
+ }
return 0;
}
EXPORT_SYMBOL_GPL(speedstep_get_frequency);
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
index fa2f1b4fad7b..a94bf28f858a 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
@@ -7,7 +7,7 @@
*
 * This file adds support for MD5 and SHA1/SHA224/SHA256/SHA384/SHA512.
*
- * You could find the datasheet in Documentation/arm/sunxi/README
+ * You could find the datasheet in Documentation/arm/sunxi.rst
*/
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
index 78503006949c..cfde9ee4356b 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
@@ -7,7 +7,7 @@
*
 * This file handles the PRNG
*
- * You could find a link for the datasheet in Documentation/arm/sunxi/README
+ * You could find a link for the datasheet in Documentation/arm/sunxi.rst
*/
#include "sun8i-ce.h"
#include <linux/dma-mapping.h>
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
index 654328160d19..5b7af4498bd5 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
@@ -7,7 +7,7 @@
*
 * This file handles the TRNG
*
- * You could find a link for the datasheet in Documentation/arm/sunxi/README
+ * You could find a link for the datasheet in Documentation/arm/sunxi.rst
*/
#include "sun8i-ce.h"
#include <linux/dma-mapping.h>
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 518a1437862a..90284ffda58a 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -318,24 +318,6 @@ config INTEL_IOP_ADMA
help
Enable support for the Intel(R) IOP Series RAID engines.
-config INTEL_MIC_X100_DMA
- tristate "Intel MIC X100 DMA Driver"
- depends on 64BIT && X86 && INTEL_MIC_BUS
- select DMA_ENGINE
- help
- This enables DMA support for the Intel Many Integrated Core
- (MIC) family of PCIe form factor coprocessor X100 devices that
- run a 64 bit Linux OS. This driver will be used by both MIC
- host and card drivers.
-
- If you are building host kernel with a MIC device or a card
- kernel for a MIC device, then say M (recommended) or Y, else
- say N. If unsure say N.
-
- More information about the Intel MIC family as well as the Linux
- OS and tools for MIC to use with this driver are available from
- <http://software.intel.com/en-us/mic-developer>.
-
config K3_DMA
tristate "Hisilicon K3 DMA support"
depends on ARCH_HI3xxx || ARCH_HISI || COMPILE_TEST
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index e60f81331d4c..948a8da05f8b 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -44,7 +44,6 @@ obj-$(CONFIG_INTEL_IDMA64) += idma64.o
obj-$(CONFIG_INTEL_IOATDMA) += ioat/
obj-$(CONFIG_INTEL_IDXD) += idxd/
obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
-obj-$(CONFIG_INTEL_MIC_X100_DMA) += mic_x100_dma.o
obj-$(CONFIG_K3_DMA) += k3dma.o
obj-$(CONFIG_LPC18XX_DMAMUX) += lpc18xx-dmamux.o
obj-$(CONFIG_MILBEAUT_HDMAC) += milbeaut-hdmac.o
diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c
deleted file mode 100644
index fea8608a7810..000000000000
--- a/drivers/dma/mic_x100_dma.c
+++ /dev/null
@@ -1,770 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Intel MIC X100 DMA Driver.
- *
- * Adapted from IOAT dma driver.
- */
-#include <linux/module.h>
-#include <linux/io.h>
-#include <linux/seq_file.h>
-#include <linux/vmalloc.h>
-
-#include "mic_x100_dma.h"
-
-#define MIC_DMA_MAX_XFER_SIZE_CARD (1 * 1024 * 1024 -\
- MIC_DMA_ALIGN_BYTES)
-#define MIC_DMA_MAX_XFER_SIZE_HOST (1 * 1024 * 1024 >> 1)
-#define MIC_DMA_DESC_TYPE_SHIFT 60
-#define MIC_DMA_MEMCPY_LEN_SHIFT 46
-#define MIC_DMA_STAT_INTR_SHIFT 59
-
-/* high-water mark for pushing dma descriptors */
-static int mic_dma_pending_level = 4;
-
-/* Status descriptor is used to write a 64 bit value to a memory location */
-enum mic_dma_desc_format_type {
- MIC_DMA_MEMCPY = 1,
- MIC_DMA_STATUS,
-};
-
-static inline u32 mic_dma_hw_ring_inc(u32 val)
-{
- return (val + 1) % MIC_DMA_DESC_RX_SIZE;
-}
-
-static inline u32 mic_dma_hw_ring_dec(u32 val)
-{
- return val ? val - 1 : MIC_DMA_DESC_RX_SIZE - 1;
-}
-
-static inline void mic_dma_hw_ring_inc_head(struct mic_dma_chan *ch)
-{
- ch->head = mic_dma_hw_ring_inc(ch->head);
-}
-
-/* Prepare a memcpy desc */
-static inline void mic_dma_memcpy_desc(struct mic_dma_desc *desc,
- dma_addr_t src_phys, dma_addr_t dst_phys, u64 size)
-{
- u64 qw0, qw1;
-
- qw0 = src_phys;
- qw0 |= (size >> MIC_DMA_ALIGN_SHIFT) << MIC_DMA_MEMCPY_LEN_SHIFT;
- qw1 = MIC_DMA_MEMCPY;
- qw1 <<= MIC_DMA_DESC_TYPE_SHIFT;
- qw1 |= dst_phys;
- desc->qw0 = qw0;
- desc->qw1 = qw1;
-}
-
-/* Prepare a status desc. with @data to be written at @dst_phys */
-static inline void mic_dma_prep_status_desc(struct mic_dma_desc *desc, u64 data,
- dma_addr_t dst_phys, bool generate_intr)
-{
- u64 qw0, qw1;
-
- qw0 = data;
- qw1 = (u64) MIC_DMA_STATUS << MIC_DMA_DESC_TYPE_SHIFT | dst_phys;
- if (generate_intr)
- qw1 |= (1ULL << MIC_DMA_STAT_INTR_SHIFT);
- desc->qw0 = qw0;
- desc->qw1 = qw1;
-}
-
-static void mic_dma_cleanup(struct mic_dma_chan *ch)
-{
- struct dma_async_tx_descriptor *tx;
- u32 tail;
- u32 last_tail;
-
- spin_lock(&ch->cleanup_lock);
- tail = mic_dma_read_cmp_cnt(ch);
- /*
- * This is the barrier pair for smp_wmb() in fn.
- * mic_dma_tx_submit_unlock. It's required so that we read the
- * updated cookie value from tx->cookie.
- */
- smp_rmb();
- for (last_tail = ch->last_tail; tail != last_tail;) {
- tx = &ch->tx_array[last_tail];
- if (tx->cookie) {
- dma_cookie_complete(tx);
- dmaengine_desc_get_callback_invoke(tx, NULL);
- tx->callback = NULL;
- }
- last_tail = mic_dma_hw_ring_inc(last_tail);
- }
- /* finish all completion callbacks before incrementing tail */
- smp_mb();
- ch->last_tail = last_tail;
- spin_unlock(&ch->cleanup_lock);
-}
-
-static u32 mic_dma_ring_count(u32 head, u32 tail)
-{
- u32 count;
-
- if (head >= tail)
- count = (tail - 0) + (MIC_DMA_DESC_RX_SIZE - head);
- else
- count = tail - head;
- return count - 1;
-}
-
-/* Returns the num. of free descriptors on success, -ENOMEM on failure */
-static int mic_dma_avail_desc_ring_space(struct mic_dma_chan *ch, int required)
-{
- struct device *dev = mic_dma_ch_to_device(ch);
- u32 count;
-
- count = mic_dma_ring_count(ch->head, ch->last_tail);
- if (count < required) {
- mic_dma_cleanup(ch);
- count = mic_dma_ring_count(ch->head, ch->last_tail);
- }
-
- if (count < required) {
- dev_dbg(dev, "Not enough desc space");
- dev_dbg(dev, "%s %d required=%u, avail=%u\n",
- __func__, __LINE__, required, count);
- return -ENOMEM;
- } else {
- return count;
- }
-}
-
-/* Program memcpy descriptors into the descriptor ring and update s/w head ptr*/
-static int mic_dma_prog_memcpy_desc(struct mic_dma_chan *ch, dma_addr_t src,
- dma_addr_t dst, size_t len)
-{
- size_t current_transfer_len;
- size_t max_xfer_size = to_mic_dma_dev(ch)->max_xfer_size;
- /* 3 is added to make sure we have enough space for status desc */
- int num_desc = len / max_xfer_size + 3;
- int ret;
-
- if (len % max_xfer_size)
- num_desc++;
-
- ret = mic_dma_avail_desc_ring_space(ch, num_desc);
- if (ret < 0)
- return ret;
- do {
- current_transfer_len = min(len, max_xfer_size);
- mic_dma_memcpy_desc(&ch->desc_ring[ch->head],
- src, dst, current_transfer_len);
- mic_dma_hw_ring_inc_head(ch);
- len -= current_transfer_len;
- dst = dst + current_transfer_len;
- src = src + current_transfer_len;
- } while (len > 0);
- return 0;
-}
-
-/* It's a h/w quirk and h/w needs 2 status descriptors for every status desc */
-static void mic_dma_prog_intr(struct mic_dma_chan *ch)
-{
- mic_dma_prep_status_desc(&ch->desc_ring[ch->head], 0,
- ch->status_dest_micpa, false);
- mic_dma_hw_ring_inc_head(ch);
- mic_dma_prep_status_desc(&ch->desc_ring[ch->head], 0,
- ch->status_dest_micpa, true);
- mic_dma_hw_ring_inc_head(ch);
-}
-
-/* Wrapper function to program memcpy descriptors/status descriptors */
-static int mic_dma_do_dma(struct mic_dma_chan *ch, int flags, dma_addr_t src,
- dma_addr_t dst, size_t len)
-{
- if (len && -ENOMEM == mic_dma_prog_memcpy_desc(ch, src, dst, len)) {
- return -ENOMEM;
- } else {
- /* 3 is the maximum number of status descriptors */
- int ret = mic_dma_avail_desc_ring_space(ch, 3);
-
- if (ret < 0)
- return ret;
- }
-
- /* Above mic_dma_prog_memcpy_desc() makes sure we have enough space */
- if (flags & DMA_PREP_FENCE) {
- mic_dma_prep_status_desc(&ch->desc_ring[ch->head], 0,
- ch->status_dest_micpa, false);
- mic_dma_hw_ring_inc_head(ch);
- }
-
- if (flags & DMA_PREP_INTERRUPT)
- mic_dma_prog_intr(ch);
-
- return 0;
-}
-
-static inline void mic_dma_issue_pending(struct dma_chan *ch)
-{
- struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
-
- spin_lock(&mic_ch->issue_lock);
- /*
- * Write to head triggers h/w to act on the descriptors.
- * On MIC, writing the same head value twice causes
- * a h/w error. On second write, h/w assumes we filled
- * the entire ring & overwrote some of the descriptors.
- */
- if (mic_ch->issued == mic_ch->submitted)
- goto out;
- mic_ch->issued = mic_ch->submitted;
- /*
- * make descriptor updates visible before advancing head,
- * this is purposefully not smp_wmb() since we are also
- * publishing the descriptor updates to a dma device
- */
- wmb();
- mic_dma_write_reg(mic_ch, MIC_DMA_REG_DHPR, mic_ch->issued);
-out:
- spin_unlock(&mic_ch->issue_lock);
-}
-
-static inline void mic_dma_update_pending(struct mic_dma_chan *ch)
-{
- if (mic_dma_ring_count(ch->issued, ch->submitted)
- > mic_dma_pending_level)
- mic_dma_issue_pending(&ch->api_ch);
-}
-
-static dma_cookie_t mic_dma_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
-{
- struct mic_dma_chan *mic_ch = to_mic_dma_chan(tx->chan);
- dma_cookie_t cookie;
-
- dma_cookie_assign(tx);
- cookie = tx->cookie;
- /*
- * We need an smp write barrier here because another CPU might see
- * an update to submitted and update h/w head even before we
- * assigned a cookie to this tx.
- */
- smp_wmb();
- mic_ch->submitted = mic_ch->head;
- spin_unlock(&mic_ch->prep_lock);
- mic_dma_update_pending(mic_ch);
- return cookie;
-}
-
-static inline struct dma_async_tx_descriptor *
-allocate_tx(struct mic_dma_chan *ch)
-{
- u32 idx = mic_dma_hw_ring_dec(ch->head);
- struct dma_async_tx_descriptor *tx = &ch->tx_array[idx];
-
- dma_async_tx_descriptor_init(tx, &ch->api_ch);
- tx->tx_submit = mic_dma_tx_submit_unlock;
- return tx;
-}
-
-/* Program a status descriptor with dst as address and value to be written */
-static struct dma_async_tx_descriptor *
-mic_dma_prep_status_lock(struct dma_chan *ch, dma_addr_t dst, u64 src_val,
- unsigned long flags)
-{
- struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
- int result;
-
- spin_lock(&mic_ch->prep_lock);
- result = mic_dma_avail_desc_ring_space(mic_ch, 4);
- if (result < 0)
- goto error;
- mic_dma_prep_status_desc(&mic_ch->desc_ring[mic_ch->head], src_val, dst,
- false);
- mic_dma_hw_ring_inc_head(mic_ch);
- result = mic_dma_do_dma(mic_ch, flags, 0, 0, 0);
- if (result < 0)
- goto error;
-
- return allocate_tx(mic_ch);
-error:
- dev_err(mic_dma_ch_to_device(mic_ch),
- "Error enqueueing dma status descriptor, error=%d\n", result);
- spin_unlock(&mic_ch->prep_lock);
- return NULL;
-}
-
-/*
- * Prepare a memcpy descriptor to be added to the ring.
- * Note that the temporary descriptor adds an extra overhead of copying the
- * descriptor to ring. So, we copy directly to the descriptor ring
- */
-static struct dma_async_tx_descriptor *
-mic_dma_prep_memcpy_lock(struct dma_chan *ch, dma_addr_t dma_dest,
- dma_addr_t dma_src, size_t len, unsigned long flags)
-{
- struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
- struct device *dev = mic_dma_ch_to_device(mic_ch);
- int result;
-
- if (!len && !flags)
- return NULL;
-
- spin_lock(&mic_ch->prep_lock);
- result = mic_dma_do_dma(mic_ch, flags, dma_src, dma_dest, len);
- if (result >= 0)
- return allocate_tx(mic_ch);
- dev_err(dev, "Error enqueueing dma, error=%d\n", result);
- spin_unlock(&mic_ch->prep_lock);
- return NULL;
-}
-
-static struct dma_async_tx_descriptor *
-mic_dma_prep_interrupt_lock(struct dma_chan *ch, unsigned long flags)
-{
- struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
- int ret;
-
- spin_lock(&mic_ch->prep_lock);
- ret = mic_dma_do_dma(mic_ch, flags, 0, 0, 0);
- if (!ret)
- return allocate_tx(mic_ch);
- spin_unlock(&mic_ch->prep_lock);
- return NULL;
-}
-
-/* Return the status of the transaction */
-static enum dma_status
-mic_dma_tx_status(struct dma_chan *ch, dma_cookie_t cookie,
- struct dma_tx_state *txstate)
-{
- struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
-
- if (DMA_COMPLETE != dma_cookie_status(ch, cookie, txstate))
- mic_dma_cleanup(mic_ch);
-
- return dma_cookie_status(ch, cookie, txstate);
-}
-
-static irqreturn_t mic_dma_thread_fn(int irq, void *data)
-{
- mic_dma_cleanup((struct mic_dma_chan *)data);
- return IRQ_HANDLED;
-}
-
-static irqreturn_t mic_dma_intr_handler(int irq, void *data)
-{
- struct mic_dma_chan *ch = ((struct mic_dma_chan *)data);
-
- mic_dma_ack_interrupt(ch);
- return IRQ_WAKE_THREAD;
-}
-
-static int mic_dma_alloc_desc_ring(struct mic_dma_chan *ch)
-{
- u64 desc_ring_size = MIC_DMA_DESC_RX_SIZE * sizeof(*ch->desc_ring);
- struct device *dev = &to_mbus_device(ch)->dev;
-
- desc_ring_size = ALIGN(desc_ring_size, MIC_DMA_ALIGN_BYTES);
- ch->desc_ring = kzalloc(desc_ring_size, GFP_KERNEL);
-
- if (!ch->desc_ring)
- return -ENOMEM;
-
- ch->desc_ring_micpa = dma_map_single(dev, ch->desc_ring,
- desc_ring_size, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dev, ch->desc_ring_micpa))
- goto map_error;
-
- ch->tx_array = vzalloc(array_size(MIC_DMA_DESC_RX_SIZE,
- sizeof(*ch->tx_array)));
- if (!ch->tx_array)
- goto tx_error;
- return 0;
-tx_error:
- dma_unmap_single(dev, ch->desc_ring_micpa, desc_ring_size,
- DMA_BIDIRECTIONAL);
-map_error:
- kfree(ch->desc_ring);
- return -ENOMEM;
-}
-
-static void mic_dma_free_desc_ring(struct mic_dma_chan *ch)
-{
- u64 desc_ring_size = MIC_DMA_DESC_RX_SIZE * sizeof(*ch->desc_ring);
-
- vfree(ch->tx_array);
- desc_ring_size = ALIGN(desc_ring_size, MIC_DMA_ALIGN_BYTES);
- dma_unmap_single(&to_mbus_device(ch)->dev, ch->desc_ring_micpa,
- desc_ring_size, DMA_BIDIRECTIONAL);
- kfree(ch->desc_ring);
- ch->desc_ring = NULL;
-}
-
-static void mic_dma_free_status_dest(struct mic_dma_chan *ch)
-{
- dma_unmap_single(&to_mbus_device(ch)->dev, ch->status_dest_micpa,
- L1_CACHE_BYTES, DMA_BIDIRECTIONAL);
- kfree(ch->status_dest);
-}
-
-static int mic_dma_alloc_status_dest(struct mic_dma_chan *ch)
-{
- struct device *dev = &to_mbus_device(ch)->dev;
-
- ch->status_dest = kzalloc(L1_CACHE_BYTES, GFP_KERNEL);
- if (!ch->status_dest)
- return -ENOMEM;
- ch->status_dest_micpa = dma_map_single(dev, ch->status_dest,
- L1_CACHE_BYTES, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dev, ch->status_dest_micpa)) {
- kfree(ch->status_dest);
- ch->status_dest = NULL;
- return -ENOMEM;
- }
- return 0;
-}
-
-static int mic_dma_check_chan(struct mic_dma_chan *ch)
-{
- if (mic_dma_read_reg(ch, MIC_DMA_REG_DCHERR) ||
- mic_dma_read_reg(ch, MIC_DMA_REG_DSTAT) & MIC_DMA_CHAN_QUIESCE) {
- mic_dma_disable_chan(ch);
- mic_dma_chan_mask_intr(ch);
- dev_err(mic_dma_ch_to_device(ch),
- "%s %d error setting up mic dma chan %d\n",
- __func__, __LINE__, ch->ch_num);
- return -EBUSY;
- }
- return 0;
-}
-
-static int mic_dma_chan_setup(struct mic_dma_chan *ch)
-{
- if (MIC_DMA_CHAN_MIC == ch->owner)
- mic_dma_chan_set_owner(ch);
- mic_dma_disable_chan(ch);
- mic_dma_chan_mask_intr(ch);
- mic_dma_write_reg(ch, MIC_DMA_REG_DCHERRMSK, 0);
- mic_dma_chan_set_desc_ring(ch);
- ch->last_tail = mic_dma_read_reg(ch, MIC_DMA_REG_DTPR);
- ch->head = ch->last_tail;
- ch->issued = 0;
- mic_dma_chan_unmask_intr(ch);
- mic_dma_enable_chan(ch);
- return mic_dma_check_chan(ch);
-}
-
-static void mic_dma_chan_destroy(struct mic_dma_chan *ch)
-{
- mic_dma_disable_chan(ch);
- mic_dma_chan_mask_intr(ch);
-}
-
-static int mic_dma_setup_irq(struct mic_dma_chan *ch)
-{
- ch->cookie =
- to_mbus_hw_ops(ch)->request_threaded_irq(to_mbus_device(ch),
- mic_dma_intr_handler, mic_dma_thread_fn,
- "mic dma_channel", ch, ch->ch_num);
- return PTR_ERR_OR_ZERO(ch->cookie);
-}
-
-static inline void mic_dma_free_irq(struct mic_dma_chan *ch)
-{
- to_mbus_hw_ops(ch)->free_irq(to_mbus_device(ch), ch->cookie, ch);
-}
-
-static int mic_dma_chan_init(struct mic_dma_chan *ch)
-{
- int ret = mic_dma_alloc_desc_ring(ch);
-
- if (ret)
- goto ring_error;
- ret = mic_dma_alloc_status_dest(ch);
- if (ret)
- goto status_error;
- ret = mic_dma_chan_setup(ch);
- if (ret)
- goto chan_error;
- return ret;
-chan_error:
- mic_dma_free_status_dest(ch);
-status_error:
- mic_dma_free_desc_ring(ch);
-ring_error:
- return ret;
-}
-
-static int mic_dma_drain_chan(struct mic_dma_chan *ch)
-{
- struct dma_async_tx_descriptor *tx;
- int err = 0;
- dma_cookie_t cookie;
-
- tx = mic_dma_prep_memcpy_lock(&ch->api_ch, 0, 0, 0, DMA_PREP_FENCE);
- if (!tx) {
- err = -ENOMEM;
- goto error;
- }
-
- cookie = tx->tx_submit(tx);
- if (dma_submit_error(cookie))
- err = -ENOMEM;
- else
- err = dma_sync_wait(&ch->api_ch, cookie);
- if (err) {
- dev_err(mic_dma_ch_to_device(ch), "%s %d TO chan 0x%x\n",
- __func__, __LINE__, ch->ch_num);
- err = -EIO;
- }
-error:
- mic_dma_cleanup(ch);
- return err;
-}
-
-static inline void mic_dma_chan_uninit(struct mic_dma_chan *ch)
-{
- mic_dma_chan_destroy(ch);
- mic_dma_cleanup(ch);
- mic_dma_free_status_dest(ch);
- mic_dma_free_desc_ring(ch);
-}
-
-static int mic_dma_init(struct mic_dma_device *mic_dma_dev,
- enum mic_dma_chan_owner owner)
-{
- int i, first_chan = mic_dma_dev->start_ch;
- struct mic_dma_chan *ch;
- int ret;
-
- for (i = first_chan; i < first_chan + MIC_DMA_NUM_CHAN; i++) {
- ch = &mic_dma_dev->mic_ch[i];
- ch->ch_num = i;
- ch->owner = owner;
- spin_lock_init(&ch->cleanup_lock);
- spin_lock_init(&ch->prep_lock);
- spin_lock_init(&ch->issue_lock);
- ret = mic_dma_setup_irq(ch);
- if (ret)
- goto error;
- }
- return 0;
-error:
- for (i = i - 1; i >= first_chan; i--)
- mic_dma_free_irq(ch);
- return ret;
-}
-
-static void mic_dma_uninit(struct mic_dma_device *mic_dma_dev)
-{
- int i, first_chan = mic_dma_dev->start_ch;
- struct mic_dma_chan *ch;
-
- for (i = first_chan; i < first_chan + MIC_DMA_NUM_CHAN; i++) {
- ch = &mic_dma_dev->mic_ch[i];
- mic_dma_free_irq(ch);
- }
-}
-
-static int mic_dma_alloc_chan_resources(struct dma_chan *ch)
-{
- int ret = mic_dma_chan_init(to_mic_dma_chan(ch));
- if (ret)
- return ret;
- return MIC_DMA_DESC_RX_SIZE;
-}
-
-static void mic_dma_free_chan_resources(struct dma_chan *ch)
-{
- struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
- mic_dma_drain_chan(mic_ch);
- mic_dma_chan_uninit(mic_ch);
-}
-
-/* Set the fn. handlers and register the dma device with dma api */
-static int mic_dma_register_dma_device(struct mic_dma_device *mic_dma_dev,
- enum mic_dma_chan_owner owner)
-{
- int i, first_chan = mic_dma_dev->start_ch;
-
- dma_cap_zero(mic_dma_dev->dma_dev.cap_mask);
- /*
- * This dma engine is not capable of host memory to host memory
- * transfers
- */
- dma_cap_set(DMA_MEMCPY, mic_dma_dev->dma_dev.cap_mask);
-
- if (MIC_DMA_CHAN_HOST == owner)
- dma_cap_set(DMA_PRIVATE, mic_dma_dev->dma_dev.cap_mask);
- mic_dma_dev->dma_dev.device_alloc_chan_resources =
- mic_dma_alloc_chan_resources;
- mic_dma_dev->dma_dev.device_free_chan_resources =
- mic_dma_free_chan_resources;
- mic_dma_dev->dma_dev.device_tx_status = mic_dma_tx_status;
- mic_dma_dev->dma_dev.device_prep_dma_memcpy = mic_dma_prep_memcpy_lock;
- mic_dma_dev->dma_dev.device_prep_dma_imm_data =
- mic_dma_prep_status_lock;
- mic_dma_dev->dma_dev.device_prep_dma_interrupt =
- mic_dma_prep_interrupt_lock;
- mic_dma_dev->dma_dev.device_issue_pending = mic_dma_issue_pending;
- mic_dma_dev->dma_dev.copy_align = MIC_DMA_ALIGN_SHIFT;
- INIT_LIST_HEAD(&mic_dma_dev->dma_dev.channels);
- for (i = first_chan; i < first_chan + MIC_DMA_NUM_CHAN; i++) {
- mic_dma_dev->mic_ch[i].api_ch.device = &mic_dma_dev->dma_dev;
- dma_cookie_init(&mic_dma_dev->mic_ch[i].api_ch);
- list_add_tail(&mic_dma_dev->mic_ch[i].api_ch.device_node,
- &mic_dma_dev->dma_dev.channels);
- }
- return dmaenginem_async_device_register(&mic_dma_dev->dma_dev);
-}
-
-/*
- * Initializes dma channels and registers the dma device with the
- * dma engine api.
- */
-static struct mic_dma_device *mic_dma_dev_reg(struct mbus_device *mbdev,
- enum mic_dma_chan_owner owner)
-{
- struct mic_dma_device *mic_dma_dev;
- int ret;
- struct device *dev = &mbdev->dev;
-
- mic_dma_dev = devm_kzalloc(dev, sizeof(*mic_dma_dev), GFP_KERNEL);
- if (!mic_dma_dev) {
- ret = -ENOMEM;
- goto alloc_error;
- }
- mic_dma_dev->mbdev = mbdev;
- mic_dma_dev->dma_dev.dev = dev;
- mic_dma_dev->mmio = mbdev->mmio_va;
- if (MIC_DMA_CHAN_HOST == owner) {
- mic_dma_dev->start_ch = 0;
- mic_dma_dev->max_xfer_size = MIC_DMA_MAX_XFER_SIZE_HOST;
- } else {
- mic_dma_dev->start_ch = 4;
- mic_dma_dev->max_xfer_size = MIC_DMA_MAX_XFER_SIZE_CARD;
- }
- ret = mic_dma_init(mic_dma_dev, owner);
- if (ret)
- goto init_error;
- ret = mic_dma_register_dma_device(mic_dma_dev, owner);
- if (ret)
- goto reg_error;
- return mic_dma_dev;
-reg_error:
- mic_dma_uninit(mic_dma_dev);
-init_error:
- mic_dma_dev = NULL;
-alloc_error:
- dev_err(dev, "Error at %s %d ret=%d\n", __func__, __LINE__, ret);
- return mic_dma_dev;
-}
-
-static void mic_dma_dev_unreg(struct mic_dma_device *mic_dma_dev)
-{
- mic_dma_uninit(mic_dma_dev);
-}
-
-/* DEBUGFS CODE */
-static int mic_dma_reg_show(struct seq_file *s, void *pos)
-{
- struct mic_dma_device *mic_dma_dev = s->private;
- int i, chan_num, first_chan = mic_dma_dev->start_ch;
- struct mic_dma_chan *ch;
-
- seq_printf(s, "SBOX_DCR: %#x\n",
- mic_dma_mmio_read(&mic_dma_dev->mic_ch[first_chan],
- MIC_DMA_SBOX_BASE + MIC_DMA_SBOX_DCR));
- seq_puts(s, "DMA Channel Registers\n");
- seq_printf(s, "%-10s| %-10s %-10s %-10s %-10s %-10s",
- "Channel", "DCAR", "DTPR", "DHPR", "DRAR_HI", "DRAR_LO");
- seq_printf(s, " %-11s %-14s %-10s\n", "DCHERR", "DCHERRMSK", "DSTAT");
- for (i = first_chan; i < first_chan + MIC_DMA_NUM_CHAN; i++) {
- ch = &mic_dma_dev->mic_ch[i];
- chan_num = ch->ch_num;
- seq_printf(s, "%-10i| %-#10x %-#10x %-#10x %-#10x",
- chan_num,
- mic_dma_read_reg(ch, MIC_DMA_REG_DCAR),
- mic_dma_read_reg(ch, MIC_DMA_REG_DTPR),
- mic_dma_read_reg(ch, MIC_DMA_REG_DHPR),
- mic_dma_read_reg(ch, MIC_DMA_REG_DRAR_HI));
- seq_printf(s, " %-#10x %-#10x %-#14x %-#10x\n",
- mic_dma_read_reg(ch, MIC_DMA_REG_DRAR_LO),
- mic_dma_read_reg(ch, MIC_DMA_REG_DCHERR),
- mic_dma_read_reg(ch, MIC_DMA_REG_DCHERRMSK),
- mic_dma_read_reg(ch, MIC_DMA_REG_DSTAT));
- }
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(mic_dma_reg);
-
-/* Debugfs parent dir */
-static struct dentry *mic_dma_dbg;
-
-static int mic_dma_driver_probe(struct mbus_device *mbdev)
-{
- struct mic_dma_device *mic_dma_dev;
- enum mic_dma_chan_owner owner;
-
- if (MBUS_DEV_DMA_MIC == mbdev->id.device)
- owner = MIC_DMA_CHAN_MIC;
- else
- owner = MIC_DMA_CHAN_HOST;
-
- mic_dma_dev = mic_dma_dev_reg(mbdev, owner);
- dev_set_drvdata(&mbdev->dev, mic_dma_dev);
-
- if (mic_dma_dbg) {
- mic_dma_dev->dbg_dir = debugfs_create_dir(dev_name(&mbdev->dev),
- mic_dma_dbg);
- debugfs_create_file("mic_dma_reg", 0444, mic_dma_dev->dbg_dir,
- mic_dma_dev, &mic_dma_reg_fops);
- }
- return 0;
-}
-
-static void mic_dma_driver_remove(struct mbus_device *mbdev)
-{
- struct mic_dma_device *mic_dma_dev;
-
- mic_dma_dev = dev_get_drvdata(&mbdev->dev);
- debugfs_remove_recursive(mic_dma_dev->dbg_dir);
- mic_dma_dev_unreg(mic_dma_dev);
-}
-
-static struct mbus_device_id id_table[] = {
- {MBUS_DEV_DMA_MIC, MBUS_DEV_ANY_ID},
- {MBUS_DEV_DMA_HOST, MBUS_DEV_ANY_ID},
- {0},
-};
-
-static struct mbus_driver mic_dma_driver = {
- .driver.name = KBUILD_MODNAME,
- .driver.owner = THIS_MODULE,
- .id_table = id_table,
- .probe = mic_dma_driver_probe,
- .remove = mic_dma_driver_remove,
-};
-
-static int __init mic_x100_dma_init(void)
-{
- int rc = mbus_register_driver(&mic_dma_driver);
- if (rc)
- return rc;
- mic_dma_dbg = debugfs_create_dir(KBUILD_MODNAME, NULL);
- return 0;
-}
-
-static void __exit mic_x100_dma_exit(void)
-{
- debugfs_remove_recursive(mic_dma_dbg);
- mbus_unregister_driver(&mic_dma_driver);
-}
-
-module_init(mic_x100_dma_init);
-module_exit(mic_x100_dma_exit);
-
-MODULE_DEVICE_TABLE(mbus, id_table);
-MODULE_AUTHOR("Intel Corporation");
-MODULE_DESCRIPTION("Intel(R) MIC X100 DMA Driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/mic_x100_dma.h b/drivers/dma/mic_x100_dma.h
deleted file mode 100644
index 68ef43a91714..000000000000
--- a/drivers/dma/mic_x100_dma.h
+++ /dev/null
@@ -1,275 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Intel MIC X100 DMA Driver.
- *
- * Adapted from IOAT dma driver.
- */
-#ifndef _MIC_X100_DMA_H_
-#define _MIC_X100_DMA_H_
-
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/debugfs.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/mic_bus.h>
-
-#include "dmaengine.h"
-
-/*
- * MIC has a total of 8 dma channels.
- * Four channels are assigned for host SW use & the remaining for MIC SW.
- * MIC DMA transfer size & addresses need to be 64 byte aligned.
- */
-#define MIC_DMA_MAX_NUM_CHAN 8
-#define MIC_DMA_NUM_CHAN 4
-#define MIC_DMA_ALIGN_SHIFT DMAENGINE_ALIGN_64_BYTES
-#define MIC_DMA_ALIGN_BYTES (1 << MIC_DMA_ALIGN_SHIFT)
-#define MIC_DMA_DESC_RX_SIZE (128 * 1024 - 4)
-
-/*
- * Register descriptions
- * All the registers are 32 bit registers.
- * DCR is a global register and all others are per-channel.
- * DCR - bits 0, 2, 4, 6, 8, 10, 12, 14 - enable bits for channels 0 to 7
- * bits 1, 3, 5, 7, 9, 11, 13, 15 - owner bits for channels 0 to 7
- * DCAR - bit 24 & 25 interrupt masks for mic owned & host owned channels
- * DHPR - head of the descriptor ring updated by s/w
- * DTPR - tail of the descriptor ring updated by h/w
- * DRAR_LO - lower 32 bits of descriptor ring's mic address
- * DRAR_HI - 3:0 - remaining 4 bits of descriptor ring's mic address
- * 20:4 descriptor ring size
- * 25:21 mic smpt entry number
- * DSTAT - 16:0 h/w completion count; 31:28 dma engine status
- * DCHERR - this register is non-zero on error
- * DCHERRMSK - interrupt mask register
- */
-#define MIC_DMA_HW_CMP_CNT_MASK 0x1ffff
-#define MIC_DMA_CHAN_QUIESCE 0x20000000
-#define MIC_DMA_SBOX_BASE 0x00010000
-#define MIC_DMA_SBOX_DCR 0x0000A280
-#define MIC_DMA_SBOX_CH_BASE 0x0001A000
-#define MIC_DMA_SBOX_CHAN_OFF 0x40
-#define MIC_DMA_SBOX_DCAR_IM0 (0x1 << 24)
-#define MIC_DMA_SBOX_DCAR_IM1 (0x1 << 25)
-#define MIC_DMA_SBOX_DRARHI_SYS_MASK (0x1 << 26)
-#define MIC_DMA_REG_DCAR 0
-#define MIC_DMA_REG_DHPR 4
-#define MIC_DMA_REG_DTPR 8
-#define MIC_DMA_REG_DRAR_LO 20
-#define MIC_DMA_REG_DRAR_HI 24
-#define MIC_DMA_REG_DSTAT 32
-#define MIC_DMA_REG_DCHERR 44
-#define MIC_DMA_REG_DCHERRMSK 48
-
-/* HW dma desc */
-struct mic_dma_desc {
- u64 qw0;
- u64 qw1;
-};
-
-enum mic_dma_chan_owner {
- MIC_DMA_CHAN_MIC = 0,
- MIC_DMA_CHAN_HOST
-};
-
-/*
- * mic_dma_chan - channel specific information
- * @ch_num: channel number
- * @owner: owner of this channel
- * @last_tail: cached value of descriptor ring tail
- * @head: index of next descriptor in desc_ring
- * @issued: hardware notification point
- * @submitted: index that will be used to submit descriptors to h/w
- * @api_ch: dma engine api channel
- * @desc_ring: dma descriptor ring
- * @desc_ring_micpa: mic physical address of desc_ring
- * @status_dest: destination for status (fence) descriptor
- * @status_dest_micpa: mic address for status_dest,
- * DMA controller uses this address
- * @tx_array: array of async_tx
- * @cleanup_lock: lock held when processing completed tx
- * @prep_lock: lock held in prep_memcpy & released in tx_submit
- * @issue_lock: lock used to synchronize writes to head
- * @cookie: mic_irq cookie used with mic irq request
- */
-struct mic_dma_chan {
- int ch_num;
- enum mic_dma_chan_owner owner;
- u32 last_tail;
- u32 head;
- u32 issued;
- u32 submitted;
- struct dma_chan api_ch;
- struct mic_dma_desc *desc_ring;
- dma_addr_t desc_ring_micpa;
- u64 *status_dest;
- dma_addr_t status_dest_micpa;
- struct dma_async_tx_descriptor *tx_array;
- spinlock_t cleanup_lock;
- spinlock_t prep_lock;
- spinlock_t issue_lock;
- struct mic_irq *cookie;
-};
-
-/*
- * struct mic_dma_device - per mic device
- * @mic_ch: dma channels
- * @dma_dev: underlying dma device
- * @mbdev: mic bus dma device
- * @mmio: virtual address of the mmio space
- * @dbg_dir: debugfs directory
- * @start_ch: first channel number that can be used
- * @max_xfer_size: maximum transfer size per dma descriptor
- */
-struct mic_dma_device {
- struct mic_dma_chan mic_ch[MIC_DMA_MAX_NUM_CHAN];
- struct dma_device dma_dev;
- struct mbus_device *mbdev;
- void __iomem *mmio;
- struct dentry *dbg_dir;
- int start_ch;
- size_t max_xfer_size;
-};
-
-static inline struct mic_dma_chan *to_mic_dma_chan(struct dma_chan *ch)
-{
- return container_of(ch, struct mic_dma_chan, api_ch);
-}
-
-static inline struct mic_dma_device *to_mic_dma_dev(struct mic_dma_chan *ch)
-{
- return
- container_of((const typeof(((struct mic_dma_device *)0)->mic_ch)*)
- (ch - ch->ch_num), struct mic_dma_device, mic_ch);
-}
-
-static inline struct mbus_device *to_mbus_device(struct mic_dma_chan *ch)
-{
- return to_mic_dma_dev(ch)->mbdev;
-}
-
-static inline struct mbus_hw_ops *to_mbus_hw_ops(struct mic_dma_chan *ch)
-{
- return to_mbus_device(ch)->hw_ops;
-}
-
-static inline struct device *mic_dma_ch_to_device(struct mic_dma_chan *ch)
-{
- return to_mic_dma_dev(ch)->dma_dev.dev;
-}
-
-static inline void __iomem *mic_dma_chan_to_mmio(struct mic_dma_chan *ch)
-{
- return to_mic_dma_dev(ch)->mmio;
-}
-
-static inline u32 mic_dma_read_reg(struct mic_dma_chan *ch, u32 reg)
-{
- return ioread32(mic_dma_chan_to_mmio(ch) + MIC_DMA_SBOX_CH_BASE +
- ch->ch_num * MIC_DMA_SBOX_CHAN_OFF + reg);
-}
-
-static inline void mic_dma_write_reg(struct mic_dma_chan *ch, u32 reg, u32 val)
-{
- iowrite32(val, mic_dma_chan_to_mmio(ch) + MIC_DMA_SBOX_CH_BASE +
- ch->ch_num * MIC_DMA_SBOX_CHAN_OFF + reg);
-}
-
-static inline u32 mic_dma_mmio_read(struct mic_dma_chan *ch, u32 offset)
-{
- return ioread32(mic_dma_chan_to_mmio(ch) + offset);
-}
-
-static inline void mic_dma_mmio_write(struct mic_dma_chan *ch, u32 val,
- u32 offset)
-{
- iowrite32(val, mic_dma_chan_to_mmio(ch) + offset);
-}
-
-static inline u32 mic_dma_read_cmp_cnt(struct mic_dma_chan *ch)
-{
- return mic_dma_read_reg(ch, MIC_DMA_REG_DSTAT) &
- MIC_DMA_HW_CMP_CNT_MASK;
-}
-
-static inline void mic_dma_chan_set_owner(struct mic_dma_chan *ch)
-{
- u32 dcr = mic_dma_mmio_read(ch, MIC_DMA_SBOX_BASE + MIC_DMA_SBOX_DCR);
- u32 chan_num = ch->ch_num;
-
- dcr = (dcr & ~(0x1 << (chan_num * 2))) | (ch->owner << (chan_num * 2));
- mic_dma_mmio_write(ch, dcr, MIC_DMA_SBOX_BASE + MIC_DMA_SBOX_DCR);
-}
-
-static inline void mic_dma_enable_chan(struct mic_dma_chan *ch)
-{
- u32 dcr = mic_dma_mmio_read(ch, MIC_DMA_SBOX_BASE + MIC_DMA_SBOX_DCR);
-
- dcr |= 2 << (ch->ch_num << 1);
- mic_dma_mmio_write(ch, dcr, MIC_DMA_SBOX_BASE + MIC_DMA_SBOX_DCR);
-}
-
-static inline void mic_dma_disable_chan(struct mic_dma_chan *ch)
-{
- u32 dcr = mic_dma_mmio_read(ch, MIC_DMA_SBOX_BASE + MIC_DMA_SBOX_DCR);
-
- dcr &= ~(2 << (ch->ch_num << 1));
- mic_dma_mmio_write(ch, dcr, MIC_DMA_SBOX_BASE + MIC_DMA_SBOX_DCR);
-}
-
-static void mic_dma_chan_set_desc_ring(struct mic_dma_chan *ch)
-{
- u32 drar_hi;
- dma_addr_t desc_ring_micpa = ch->desc_ring_micpa;
-
- drar_hi = (MIC_DMA_DESC_RX_SIZE & 0x1ffff) << 4;
- if (MIC_DMA_CHAN_MIC == ch->owner) {
- drar_hi |= (desc_ring_micpa >> 32) & 0xf;
- } else {
- drar_hi |= MIC_DMA_SBOX_DRARHI_SYS_MASK;
- drar_hi |= ((desc_ring_micpa >> 34)
- & 0x1f) << 21;
- drar_hi |= (desc_ring_micpa >> 32) & 0x3;
- }
- mic_dma_write_reg(ch, MIC_DMA_REG_DRAR_LO, (u32) desc_ring_micpa);
- mic_dma_write_reg(ch, MIC_DMA_REG_DRAR_HI, drar_hi);
-}
-
-static inline void mic_dma_chan_mask_intr(struct mic_dma_chan *ch)
-{
- u32 dcar = mic_dma_read_reg(ch, MIC_DMA_REG_DCAR);
-
- if (MIC_DMA_CHAN_MIC == ch->owner)
- dcar |= MIC_DMA_SBOX_DCAR_IM0;
- else
- dcar |= MIC_DMA_SBOX_DCAR_IM1;
- mic_dma_write_reg(ch, MIC_DMA_REG_DCAR, dcar);
-}
-
-static inline void mic_dma_chan_unmask_intr(struct mic_dma_chan *ch)
-{
- u32 dcar = mic_dma_read_reg(ch, MIC_DMA_REG_DCAR);
-
- if (MIC_DMA_CHAN_MIC == ch->owner)
- dcar &= ~MIC_DMA_SBOX_DCAR_IM0;
- else
- dcar &= ~MIC_DMA_SBOX_DCAR_IM1;
- mic_dma_write_reg(ch, MIC_DMA_REG_DCAR, dcar);
-}
-
-static void mic_dma_ack_interrupt(struct mic_dma_chan *ch)
-{
- if (MIC_DMA_CHAN_MIC == ch->owner) {
- /* HW errata */
- mic_dma_chan_mask_intr(ch);
- mic_dma_chan_unmask_intr(ch);
- }
- to_mbus_hw_ops(ch)->ack_interrupt(to_mbus_device(ch), ch->ch_num);
-}
-#endif
diff --git a/drivers/firmware/arm_scmi/base.c b/drivers/firmware/arm_scmi/base.c
index 9853bd3c4d45..017e5d8bd869 100644
--- a/drivers/firmware/arm_scmi/base.c
+++ b/drivers/firmware/arm_scmi/base.c
@@ -197,6 +197,8 @@ static int scmi_base_implementation_list_get(const struct scmi_handle *handle,
protocols_imp[tot_num_ret + loop] = *(list + loop);
tot_num_ret += loop_num_ret;
+
+ scmi_reset_rx_to_maxsz(handle, t);
} while (loop_num_ret);
scmi_xfer_put(handle, t);
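
The same one-line fix recurs in base, clock, perf, and sensors: these multi-part queries reuse a single xfer across loop iterations, and the transport shrinks xfer->rx.len to each reply's actual size, so without restoring it the next chunk would be truncated to the previous reply's length. The loop shape, schematically (parse_returned()/parse_remaining() are hypothetical helpers standing in for the per-protocol reply parsing):

/* Sketch of a multi-part SCMI enumeration loop; parsing elided. */
static int example_enumerate(const struct scmi_handle *handle,
			     struct scmi_xfer *t)
{
	u32 num_returned, num_remaining, total = 0;
	int ret;

	do {
		ret = scmi_do_xfer(handle, t); /* transport trims t->rx.len */
		if (ret)
			break;

		num_returned = parse_returned(t);
		num_remaining = parse_remaining(t);
		total += num_returned;

		/* restore the full buffer size before the next chunk */
		scmi_reset_rx_to_maxsz(handle, t);
	} while (num_returned && num_remaining);

	return ret ? ret : total;
}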
diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
index c1cfe3ee3d55..4645677d86f1 100644
--- a/drivers/firmware/arm_scmi/clock.c
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -192,6 +192,8 @@ scmi_clock_describe_rates_get(const struct scmi_handle *handle, u32 clk_id,
}
tot_rate_cnt += num_returned;
+
+ scmi_reset_rx_to_maxsz(handle, t);
/*
* check for both returned and remaining to avoid infinite
* loop due to buggy firmware
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
index 37fb583f1bf5..65063fa948d4 100644
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -147,6 +147,8 @@ int scmi_do_xfer_with_response(const struct scmi_handle *h,
struct scmi_xfer *xfer);
int scmi_xfer_get_init(const struct scmi_handle *h, u8 msg_id, u8 prot_id,
size_t tx_size, size_t rx_size, struct scmi_xfer **p);
+void scmi_reset_rx_to_maxsz(const struct scmi_handle *handle,
+ struct scmi_xfer *xfer);
int scmi_handle_put(const struct scmi_handle *handle);
struct scmi_handle *scmi_handle_get(struct device *dev);
void scmi_set_handle(struct scmi_device *scmi_dev);
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index c5dea87edf8f..3dfd8b6a0ebf 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -402,6 +402,14 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
return ret;
}
+void scmi_reset_rx_to_maxsz(const struct scmi_handle *handle,
+ struct scmi_xfer *xfer)
+{
+ struct scmi_info *info = handle_to_scmi_info(handle);
+
+ xfer->rx.len = info->desc->max_msg_size;
+}
+
#define SCMI_MAX_RESPONSE_TIMEOUT (2 * MSEC_PER_SEC)
/**
diff --git a/drivers/firmware/arm_scmi/notify.c b/drivers/firmware/arm_scmi/notify.c
index 2754f9d01636..ce336899d636 100644
--- a/drivers/firmware/arm_scmi/notify.c
+++ b/drivers/firmware/arm_scmi/notify.c
@@ -1403,15 +1403,21 @@ static void scmi_protocols_late_init(struct work_struct *work)
"finalized PENDING handler - key:%X\n",
hndl->key);
ret = scmi_event_handler_enable_events(hndl);
+ if (ret) {
+ dev_dbg(ni->handle->dev,
+ "purging INVALID handler - key:%X\n",
+ hndl->key);
+ scmi_put_active_handler(ni, hndl);
+ }
} else {
ret = scmi_valid_pending_handler(ni, hndl);
- }
- if (ret) {
- dev_dbg(ni->handle->dev,
- "purging PENDING handler - key:%X\n",
- hndl->key);
- /* this hndl can be only a pending one */
- scmi_put_handler_unlocked(ni, hndl);
+ if (ret) {
+ dev_dbg(ni->handle->dev,
+ "purging PENDING handler - key:%X\n",
+ hndl->key);
+ /* this hndl can be only a pending one */
+ scmi_put_handler_unlocked(ni, hndl);
+ }
}
}
mutex_unlock(&ni->pending_mtx);
@@ -1468,7 +1474,7 @@ int scmi_notification_init(struct scmi_handle *handle)
ni->gid = gid;
ni->handle = handle;
- ni->notify_wq = alloc_workqueue("scmi_notify",
+ ni->notify_wq = alloc_workqueue(dev_name(handle->dev),
WQ_UNBOUND | WQ_FREEZABLE | WQ_SYSFS,
0);
if (!ni->notify_wq)
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
index ed475b40bd08..82fb3babff72 100644
--- a/drivers/firmware/arm_scmi/perf.c
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -304,6 +304,8 @@ scmi_perf_describe_levels_get(const struct scmi_handle *handle, u32 domain,
}
tot_opp_cnt += num_returned;
+
+ scmi_reset_rx_to_maxsz(handle, t);
/*
* check for both returned and remaining to avoid infinite
* loop due to buggy firmware
diff --git a/drivers/firmware/arm_scmi/reset.c b/drivers/firmware/arm_scmi/reset.c
index f063cfe17e02..a981a22cfe89 100644
--- a/drivers/firmware/arm_scmi/reset.c
+++ b/drivers/firmware/arm_scmi/reset.c
@@ -36,9 +36,7 @@ struct scmi_msg_reset_domain_reset {
#define EXPLICIT_RESET_ASSERT BIT(1)
#define ASYNCHRONOUS_RESET BIT(2)
__le32 reset_state;
-#define ARCH_RESET_TYPE BIT(31)
-#define COLD_RESET_STATE BIT(0)
-#define ARCH_COLD_RESET (ARCH_RESET_TYPE | COLD_RESET_STATE)
+#define ARCH_COLD_RESET 0
};
struct scmi_msg_reset_notify {
diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c
index 9703cf6356a0..b4232d611033 100644
--- a/drivers/firmware/arm_scmi/sensors.c
+++ b/drivers/firmware/arm_scmi/sensors.c
@@ -166,6 +166,8 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle,
}
desc_index += num_returned;
+
+ scmi_reset_rx_to_maxsz(handle, t);
/*
* check for both returned and remaining to avoid infinite
* loop due to buggy firmware
diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c
index 1a03c3ec0230..82a82a5dc86a 100644
--- a/drivers/firmware/arm_scmi/smc.c
+++ b/drivers/firmware/arm_scmi/smc.c
@@ -149,6 +149,6 @@ static const struct scmi_transport_ops scmi_smc_ops = {
const struct scmi_desc scmi_smc_desc = {
.ops = &scmi_smc_ops,
.max_rx_timeout_ms = 30,
- .max_msg = 1,
+ .max_msg = 20,
.max_msg_size = 128,
};
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index e44d5de2a120..b966f5e28ebf 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -1114,6 +1114,7 @@ static const struct aspeed_gpio_config ast2500_config =
static const struct aspeed_bank_props ast2600_bank_props[] = {
/* input output */
+ {4, 0xffffffff, 0x00ffffff}, /* Q/R/S/T */
{5, 0xffffffff, 0xffffff00}, /* U/V/W/X */
{6, 0x0000ffff, 0x0000ffff}, /* Y/Z */
{ },
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index a5b326754124..2a9046c0fb16 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -343,8 +343,8 @@ static int dwapb_irq_set_type(struct irq_data *d, u32 type)
#ifdef CONFIG_PM_SLEEP
static int dwapb_irq_set_wake(struct irq_data *d, unsigned int enable)
{
- struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d);
- struct dwapb_gpio *gpio = igc->private;
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct dwapb_gpio *gpio = to_dwapb_gpio(gc);
struct dwapb_context *ctx = gpio->ports[0].ctx;
irq_hw_number_t bit = irqd_to_hwirq(d);
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 6d59e3a43761..f7ceb2b11afc 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -1114,13 +1114,23 @@ static void omap_gpio_idle(struct gpio_bank *bank, bool may_lose_context)
{
struct device *dev = bank->chip.parent;
void __iomem *base = bank->base;
- u32 nowake;
+ u32 mask, nowake;
bank->saved_datain = readl_relaxed(base + bank->regs->datain);
if (!bank->enabled_non_wakeup_gpios)
goto update_gpio_context_count;
+ /* Check for pending EDGE_FALLING, ignore EDGE_BOTH */
+ mask = bank->enabled_non_wakeup_gpios & bank->context.fallingdetect;
+ mask &= ~bank->context.risingdetect;
+ bank->saved_datain |= mask;
+
+ /* Check for pending EDGE_RISING, ignore EDGE_BOTH */
+ mask = bank->enabled_non_wakeup_gpios & bank->context.risingdetect;
+ mask &= ~bank->context.fallingdetect;
+ bank->saved_datain &= ~mask;
+
if (!may_lose_context)
goto update_gpio_context_count;
diff --git a/drivers/gpio/gpio-pcie-idio-24.c b/drivers/gpio/gpio-pcie-idio-24.c
index a68941d19ac6..2a07fd96707e 100644
--- a/drivers/gpio/gpio-pcie-idio-24.c
+++ b/drivers/gpio/gpio-pcie-idio-24.c
@@ -28,6 +28,47 @@
#include <linux/spinlock.h>
#include <linux/types.h>
+/*
+ * PLX PEX8311 PCI LCS_INTCSR Interrupt Control/Status
+ *
+ * Bit: Description
+ * 0: Enable Interrupt Sources (Bit 0)
+ * 1: Enable Interrupt Sources (Bit 1)
+ * 2: Generate Internal PCI Bus Internal SERR# Interrupt
+ * 3: Mailbox Interrupt Enable
+ * 4: Power Management Interrupt Enable
+ * 5: Power Management Interrupt
+ * 6: Slave Read Local Data Parity Check Error Enable
+ * 7: Slave Read Local Data Parity Check Error Status
+ * 8: Internal PCI Wire Interrupt Enable
+ * 9: PCI Express Doorbell Interrupt Enable
+ * 10: PCI Abort Interrupt Enable
+ * 11: Local Interrupt Input Enable
+ * 12: Retry Abort Enable
+ * 13: PCI Express Doorbell Interrupt Active
+ * 14: PCI Abort Interrupt Active
+ * 15: Local Interrupt Input Active
+ * 16: Local Interrupt Output Enable
+ * 17: Local Doorbell Interrupt Enable
+ * 18: DMA Channel 0 Interrupt Enable
+ * 19: DMA Channel 1 Interrupt Enable
+ * 20: Local Doorbell Interrupt Active
+ * 21: DMA Channel 0 Interrupt Active
+ * 22: DMA Channel 1 Interrupt Active
+ * 23: Built-In Self-Test (BIST) Interrupt Active
+ * 24: Direct Master was the Bus Master during a Master or Target Abort
+ * 25: DMA Channel 0 was the Bus Master during a Master or Target Abort
+ * 26: DMA Channel 1 was the Bus Master during a Master or Target Abort
+ * 27: Target Abort after internal 256 consecutive Master Retries
+ * 28: PCI Bus wrote data to LCS_MBOX0
+ * 29: PCI Bus wrote data to LCS_MBOX1
+ * 30: PCI Bus wrote data to LCS_MBOX2
+ * 31: PCI Bus wrote data to LCS_MBOX3
+ */
+#define PLX_PEX8311_PCI_LCS_INTCSR 0x68
+#define INTCSR_INTERNAL_PCI_WIRE BIT(8)
+#define INTCSR_LOCAL_INPUT BIT(11)
+
/**
* struct idio_24_gpio_reg - GPIO device registers structure
* @out0_7: Read: FET Outputs 0-7
@@ -92,6 +133,7 @@ struct idio_24_gpio_reg {
struct idio_24_gpio {
struct gpio_chip chip;
raw_spinlock_t lock;
+ __u8 __iomem *plx;
struct idio_24_gpio_reg __iomem *reg;
unsigned long irq_mask;
};
@@ -334,13 +376,13 @@ static void idio_24_irq_mask(struct irq_data *data)
unsigned long flags;
const unsigned long bit_offset = irqd_to_hwirq(data) - 24;
unsigned char new_irq_mask;
- const unsigned long bank_offset = bit_offset/8 * 8;
+ const unsigned long bank_offset = bit_offset / 8;
unsigned char cos_enable_state;
raw_spin_lock_irqsave(&idio24gpio->lock, flags);
- idio24gpio->irq_mask &= BIT(bit_offset);
- new_irq_mask = idio24gpio->irq_mask >> bank_offset;
+ idio24gpio->irq_mask &= ~BIT(bit_offset);
+ new_irq_mask = idio24gpio->irq_mask >> bank_offset * 8;
if (!new_irq_mask) {
cos_enable_state = ioread8(&idio24gpio->reg->cos_enable);
@@ -363,12 +405,12 @@ static void idio_24_irq_unmask(struct irq_data *data)
unsigned long flags;
unsigned char prev_irq_mask;
const unsigned long bit_offset = irqd_to_hwirq(data) - 24;
- const unsigned long bank_offset = bit_offset/8 * 8;
+ const unsigned long bank_offset = bit_offset / 8;
unsigned char cos_enable_state;
raw_spin_lock_irqsave(&idio24gpio->lock, flags);
- prev_irq_mask = idio24gpio->irq_mask >> bank_offset;
+ prev_irq_mask = idio24gpio->irq_mask >> bank_offset * 8;
idio24gpio->irq_mask |= BIT(bit_offset);
if (!prev_irq_mask) {
@@ -455,6 +497,7 @@ static int idio_24_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct device *const dev = &pdev->dev;
struct idio_24_gpio *idio24gpio;
int err;
+ const size_t pci_plx_bar_index = 1;
const size_t pci_bar_index = 2;
const char *const name = pci_name(pdev);
struct gpio_irq_chip *girq;
@@ -469,12 +512,13 @@ static int idio_24_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return err;
}
- err = pcim_iomap_regions(pdev, BIT(pci_bar_index), name);
+ err = pcim_iomap_regions(pdev, BIT(pci_plx_bar_index) | BIT(pci_bar_index), name);
if (err) {
dev_err(dev, "Unable to map PCI I/O addresses (%d)\n", err);
return err;
}
+ idio24gpio->plx = pcim_iomap_table(pdev)[pci_plx_bar_index];
idio24gpio->reg = pcim_iomap_table(pdev)[pci_bar_index];
idio24gpio->chip.label = name;
@@ -504,6 +548,12 @@ static int idio_24_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Software board reset */
iowrite8(0, &idio24gpio->reg->soft_reset);
+ /*
+ * enable PLX PEX8311 internal PCI wire interrupt and local interrupt
+ * input
+ */
+ iowrite8((INTCSR_INTERNAL_PCI_WIRE | INTCSR_LOCAL_INPUT) >> 8,
+ idio24gpio->plx + PLX_PEX8311_PCI_LCS_INTCSR + 1);
err = devm_gpiochip_add_data(dev, &idio24gpio->chip, idio24gpio);
if (err) {
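
Two arithmetic bugs are fixed in the mask/unmask paths above: "irq_mask &= BIT(n)" cleared every bit except the one being masked (the intent is "irq_mask &= ~BIT(n)"), and the old "bit_offset/8 * 8" value was then used directly as a shift count, conflating a bank index with a bit position. Worked through for hwirq 41:

/*
 * hwirq 41  ->  bit_offset  = 41 - 24 = 17
 *               bank_offset = 17 / 8  = 2      (third COS bank)
 * mask:    irq_mask &= ~BIT(17)                clears only bit 17
 * extract: new_irq_mask = irq_mask >> (2 * 8)  isolates bank 2's byte
 */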
diff --git a/drivers/gpio/gpio-sifive.c b/drivers/gpio/gpio-sifive.c
index c54dd08f2cbf..d5eb9ca11901 100644
--- a/drivers/gpio/gpio-sifive.c
+++ b/drivers/gpio/gpio-sifive.c
@@ -183,7 +183,7 @@ static int sifive_gpio_probe(struct platform_device *pdev)
return PTR_ERR(chip->regs);
ngpio = of_irq_count(node);
- if (ngpio >= SIFIVE_GPIO_MAX) {
+ if (ngpio > SIFIVE_GPIO_MAX) {
dev_err(dev, "Too many GPIO interrupts (max=%d)\n",
SIFIVE_GPIO_MAX);
return -ENXIO;
diff --git a/drivers/gpio/gpiolib-cdev.h b/drivers/gpio/gpiolib-cdev.h
index cb41dd757338..b42644cbffb8 100644
--- a/drivers/gpio/gpiolib-cdev.h
+++ b/drivers/gpio/gpiolib-cdev.h
@@ -7,22 +7,7 @@
struct gpio_device;
-#ifdef CONFIG_GPIO_CDEV
-
int gpiolib_cdev_register(struct gpio_device *gdev, dev_t devt);
void gpiolib_cdev_unregister(struct gpio_device *gdev);
-#else
-
-static inline int gpiolib_cdev_register(struct gpio_device *gdev, dev_t devt)
-{
- return 0;
-}
-
-static inline void gpiolib_cdev_unregister(struct gpio_device *gdev)
-{
-}
-
-#endif /* CONFIG_GPIO_CDEV */
-
#endif /* GPIOLIB_CDEV_H */
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 3cdf9effc13a..089ddcaa9bc6 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -480,11 +480,23 @@ static void gpiodevice_release(struct device *dev)
kfree(gdev);
}
+#ifdef CONFIG_GPIO_CDEV
+#define gcdev_register(gdev, devt) gpiolib_cdev_register((gdev), (devt))
+#define gcdev_unregister(gdev) gpiolib_cdev_unregister((gdev))
+#else
+/*
+ * gpiolib_cdev_register() indirectly calls device_add(), which is still
+ * required even when cdev is not selected.
+ */
+#define gcdev_register(gdev, devt) device_add(&(gdev)->dev)
+#define gcdev_unregister(gdev) device_del(&(gdev)->dev)
+#endif
+
static int gpiochip_setup_dev(struct gpio_device *gdev)
{
int ret;
- ret = gpiolib_cdev_register(gdev, gpio_devt);
+ ret = gcdev_register(gdev, gpio_devt);
if (ret)
return ret;
@@ -500,7 +512,7 @@ static int gpiochip_setup_dev(struct gpio_device *gdev)
return 0;
err_remove_device:
- gpiolib_cdev_unregister(gdev);
+ gcdev_unregister(gdev);
return ret;
}
@@ -825,7 +837,7 @@ void gpiochip_remove(struct gpio_chip *gc)
* be removed, else it will be dangling until the last user is
* gone.
*/
- gpiolib_cdev_unregister(gdev);
+ gcdev_unregister(gdev);
put_device(&gdev->dev);
}
EXPORT_SYMBOL_GPL(gpiochip_remove);
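
The gcdev_register()/gcdev_unregister() macros above keep a single call site and let Kconfig pick the implementation: with CONFIG_GPIO_CDEV the character-device layer is used, and without it the fallback still performs the device_add()/device_del() that the cdev path would have done internally. The same pattern in isolation, with hypothetical names:

/* Kconfig-gated dispatch (hypothetical names, a sketch only): one call
 * site, two implementations, with the fallback preserving the side effect
 * the optional layer performs internally (here: device_add()). */
#ifdef CONFIG_FOO_FRONTEND
#define foo_register(fdev)	foo_frontend_register(fdev)	/* calls device_add() itself */
#else
#define foo_register(fdev)	device_add(&(fdev)->dev)	/* bare fallback */
#endif
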
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 37da3537ba2e..e3783f5a459d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -80,6 +80,7 @@ MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
+MODULE_FIRMWARE("amdgpu/green_sardine_gpu_info.bin");
#define AMDGPU_RESUME_MS 2000
@@ -239,9 +240,11 @@ bool amdgpu_device_supports_baco(struct drm_device *dev)
return amdgpu_asic_supports_baco(adev);
}
+/*
+ * VRAM access helper functions
+ */
+
/**
- * VRAM access helper functions.
- *
* amdgpu_device_vram_access - read/write a buffer in vram
*
* @adev: amdgpu_device pointer
@@ -705,7 +708,7 @@ void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
/**
* amdgpu_invalid_rreg - dummy reg read function
*
- * @adev: amdgpu device pointer
+ * @adev: amdgpu_device pointer
* @reg: offset of register
*
* Dummy register read function. Used for register blocks
@@ -722,7 +725,7 @@ static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
/**
* amdgpu_invalid_wreg - dummy reg write function
*
- * @adev: amdgpu device pointer
+ * @adev: amdgpu_device pointer
* @reg: offset of register
* @v: value to write to the register
*
@@ -739,7 +742,7 @@ static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32
/**
* amdgpu_invalid_rreg64 - dummy 64 bit reg read function
*
- * @adev: amdgpu device pointer
+ * @adev: amdgpu_device pointer
* @reg: offset of register
*
* Dummy register read function. Used for register blocks
@@ -756,7 +759,7 @@ static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
/**
* amdgpu_invalid_wreg64 - dummy reg write function
*
- * @adev: amdgpu device pointer
+ * @adev: amdgpu_device pointer
* @reg: offset of register
* @v: value to write to the register
*
@@ -773,7 +776,7 @@ static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint
/**
* amdgpu_block_invalid_rreg - dummy reg read function
*
- * @adev: amdgpu device pointer
+ * @adev: amdgpu_device pointer
* @block: offset of instance
* @reg: offset of register
*
@@ -793,7 +796,7 @@ static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
/**
* amdgpu_block_invalid_wreg - dummy reg write function
*
- * @adev: amdgpu device pointer
+ * @adev: amdgpu_device pointer
* @block: offset of instance
* @reg: offset of register
* @v: value to write to the register
@@ -813,7 +816,7 @@ static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
/**
* amdgpu_device_asic_init - Wrapper for atom asic_init
*
- * @dev: drm_device pointer
+ * @adev: amdgpu_device pointer
*
* Does any asic specific work and then calls atom asic init.
*/
@@ -827,7 +830,7 @@ static int amdgpu_device_asic_init(struct amdgpu_device *adev)
/**
* amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
*
- * @adev: amdgpu device pointer
+ * @adev: amdgpu_device pointer
*
* Allocates a scratch page of VRAM for use by various things in the
* driver.
@@ -844,7 +847,7 @@ static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
/**
* amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
*
- * @adev: amdgpu device pointer
+ * @adev: amdgpu_device pointer
*
* Frees the VRAM scratch page.
*/
@@ -1803,7 +1806,10 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
chip_name = "arcturus";
break;
case CHIP_RENOIR:
- chip_name = "renoir";
+ if (adev->apu_flags & AMD_APU_IS_RENOIR)
+ chip_name = "renoir";
+ else
+ chip_name = "green_sardine";
break;
case CHIP_NAVI10:
chip_name = "navi10";
@@ -3011,7 +3017,7 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
/**
* amdgpu_device_has_dc_support - check if dc is supported
*
- * @adev: amdgpu_device_pointer
+ * @adev: amdgpu_device pointer
*
* Returns true for supported, false for not supported
*/
@@ -4045,7 +4051,7 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
/**
* amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
*
- * @adev: amdgpu device pointer
+ * @adev: amdgpu_device pointer
* @from_hypervisor: request from hypervisor
*
* do VF FLR and reinitialize Asic
@@ -4100,7 +4106,7 @@ error:
/**
* amdgpu_device_has_job_running - check if there is any job in mirror list
*
- * @adev: amdgpu device pointer
+ * @adev: amdgpu_device pointer
*
* check if there is any job in mirror list
*/
@@ -4128,7 +4134,7 @@ bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
/**
* amdgpu_device_should_recover_gpu - check if we should try GPU recovery
*
- * @adev: amdgpu device pointer
+ * @adev: amdgpu_device pointer
*
* Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
* a hung GPU.
@@ -4477,7 +4483,7 @@ static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
/**
* amdgpu_device_gpu_recover - reset the asic and recover scheduler
*
- * @adev: amdgpu device pointer
+ * @adev: amdgpu_device pointer
* @job: which job trigger hang
*
* Attempt to reset the GPU if it has hung (all asics).
@@ -4497,7 +4503,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
bool need_emergency_restart = false;
bool audio_suspended = false;
- /**
+ /*
* Special case: RAS triggered and full reset isn't supported
*/
need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index c241317edee7..42d9748921f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -1066,6 +1066,7 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x7319, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
{0x1002, 0x731A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
{0x1002, 0x731B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
+ {0x1002, 0x731E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
{0x1002, 0x731F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
/* Navi14 */
{0x1002, 0x7340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index aa7f230c71bf..7e8265da9f25 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -596,6 +596,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct ww_acquire_ctx ticket;
struct list_head list, duplicates;
uint64_t va_flags;
+ uint64_t vm_size;
int r = 0;
if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
@@ -616,6 +617,15 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
args->va_address &= AMDGPU_GMC_HOLE_MASK;
+ vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
+ vm_size -= AMDGPU_VA_RESERVED_SIZE;
+ if (args->va_address + args->map_size > vm_size) {
+ dev_dbg(&dev->pdev->dev,
+ "va_address 0x%llx is in top reserved area 0x%llx\n",
+ args->va_address + args->map_size, vm_size);
+ return -EINVAL;
+ }
+
if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
dev_dbg(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
args->flags);
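
The new check rejects mappings that would overlap the reserved region at the top of the GPU VA space: the usable size is max_pfn pages minus the (now 2 MB) reservation, and any mapping whose end crosses that boundary fails with -EINVAL. A standalone sketch of the same bounds test, with assumed constants:

#include <stdbool.h>
#include <stdint.h>

/* Sketch of the top-of-VA bounds check (assumed values, not driver code). */
#define GPU_PAGE_SIZE	4096ULL
#define VA_RESERVED	(2ULL << 20)	/* 2 MB reserved at the top */

static bool va_range_ok(uint64_t max_pfn, uint64_t va, uint64_t map_size)
{
	uint64_t vm_size = max_pfn * GPU_PAGE_SIZE - VA_RESERVED;

	return va + map_size <= vm_size;	/* end must stay below the hole */
}
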
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index f203e4a6a3f2..731f3aa2e6ba 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -81,8 +81,8 @@ static const struct ttm_resource_manager_func amdgpu_gtt_mgr_func;
/**
* amdgpu_gtt_mgr_init - init GTT manager and DRM MM
*
- * @man: TTM memory type manager
- * @p_size: maximum size of GTT
+ * @adev: amdgpu_device pointer
+ * @gtt_size: maximum size of GTT
*
* Allocate and initialize the GTT manager.
*/
@@ -123,7 +123,7 @@ int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size)
/**
* amdgpu_gtt_mgr_fini - free and destroy GTT manager
*
- * @man: TTM memory type manager
+ * @adev: amdgpu_device pointer
*
* Destroy and free the GTT manager, returns -EBUSY if ranges are still
* allocated inside it.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 96a9699f87ba..a6dbe4b83533 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -2524,6 +2524,7 @@ int parse_ta_bin_descriptor(struct psp_context *psp,
psp->asd_feature_version = le32_to_cpu(desc->fw_version);
psp->asd_ucode_size = le32_to_cpu(desc->size_bytes);
psp->asd_start_addr = ucode_start_addr;
+ psp->asd_fw = psp->ta_fw;
break;
case TA_FW_TYPE_PSP_XGMI:
psp->ta_xgmi_ucode_version = le32_to_cpu(desc->fw_version);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index f3b7287e84c4..a563328e3dae 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -39,6 +39,7 @@
#define FIRMWARE_RAVEN2 "amdgpu/raven2_vcn.bin"
#define FIRMWARE_ARCTURUS "amdgpu/arcturus_vcn.bin"
#define FIRMWARE_RENOIR "amdgpu/renoir_vcn.bin"
+#define FIRMWARE_GREEN_SARDINE "amdgpu/green_sardine_vcn.bin"
#define FIRMWARE_NAVI10 "amdgpu/navi10_vcn.bin"
#define FIRMWARE_NAVI14 "amdgpu/navi14_vcn.bin"
#define FIRMWARE_NAVI12 "amdgpu/navi12_vcn.bin"
@@ -50,6 +51,7 @@ MODULE_FIRMWARE(FIRMWARE_PICASSO);
MODULE_FIRMWARE(FIRMWARE_RAVEN2);
MODULE_FIRMWARE(FIRMWARE_ARCTURUS);
MODULE_FIRMWARE(FIRMWARE_RENOIR);
+MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE);
MODULE_FIRMWARE(FIRMWARE_NAVI10);
MODULE_FIRMWARE(FIRMWARE_NAVI14);
MODULE_FIRMWARE(FIRMWARE_NAVI12);
@@ -89,7 +91,11 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
adev->vcn.indirect_sram = true;
break;
case CHIP_RENOIR:
- fw_name = FIRMWARE_RENOIR;
+ if (adev->apu_flags & AMD_APU_IS_RENOIR)
+ fw_name = FIRMWARE_RENOIR;
+ else
+ fw_name = FIRMWARE_GREEN_SARDINE;
+
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index c6abb16e8018..58c83a7ad0fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -112,8 +112,8 @@ struct amdgpu_bo_list_entry;
#define AMDGPU_MMHUB_0 1
#define AMDGPU_MMHUB_1 2
-/* hardcode that limit for now */
-#define AMDGPU_VA_RESERVED_SIZE (1ULL << 20)
+/* Reserve 2MB at top/bottom of address space for kernel use */
+#define AMDGPU_VA_RESERVED_SIZE (2ULL << 20)
/* max vmids dedicated for process */
#define AMDGPU_VM_MAX_RESERVED_VMID 1
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 01c1171afbe0..0c6b7c5ecfec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -168,8 +168,7 @@ static const struct ttm_resource_manager_func amdgpu_vram_mgr_func;
/**
* amdgpu_vram_mgr_init - init VRAM manager and DRM MM
*
- * @man: TTM memory type manager
- * @p_size: maximum size of VRAM
+ * @adev: amdgpu_device pointer
*
* Allocate and initialize the VRAM manager.
*/
@@ -199,7 +198,7 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
/**
* amdgpu_vram_mgr_fini - free and destroy VRAM manager
*
- * @man: TTM memory type manager
+ * @adev: amdgpu_device pointer
*
* Destroy and free the VRAM manager, returns -EBUSY if ranges are still
* allocated inside it.
@@ -229,7 +228,7 @@ void amdgpu_vram_mgr_fini(struct amdgpu_device *adev)
/**
* amdgpu_vram_mgr_vis_size - Calculate visible node size
*
- * @adev: amdgpu device structure
+ * @adev: amdgpu_device pointer
* @node: MM node structure
*
* Calculate how many bytes of the MM node are inside visible VRAM
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 03ff8bd1fee8..5442df094102 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1336,11 +1336,13 @@ cik_asic_reset_method(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_BONAIRE:
- case CHIP_HAWAII:
/* disable baco reset until it works */
/* smu7_asic_get_baco_capability(adev, &baco_reset); */
baco_reset = false;
break;
+ case CHIP_HAWAII:
+ baco_reset = cik_asic_supports_baco(adev);
+ break;
default:
baco_reset = false;
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 20f108818b2b..a3c3fe96515f 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -1071,22 +1071,19 @@ static int cik_sdma_soft_reset(void *handle)
{
u32 srbm_soft_reset = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- u32 tmp = RREG32(mmSRBM_STATUS2);
+ u32 tmp;
- if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) {
- /* sdma0 */
- tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
- tmp |= SDMA0_F32_CNTL__HALT_MASK;
- WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
- srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
- }
- if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) {
- /* sdma1 */
- tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
- tmp |= SDMA0_F32_CNTL__HALT_MASK;
- WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
- srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
- }
+ /* sdma0 */
+ tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
+ tmp |= SDMA0_F32_CNTL__HALT_MASK;
+ WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
+ srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
+
+ /* sdma1 */
+ tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
+ tmp |= SDMA0_F32_CNTL__HALT_MASK;
+ WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
+ srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
if (srbm_soft_reset) {
tmp = RREG32(mmSRBM_SOFT_RESET);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 56fdbe626d30..3579565e0eab 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -128,6 +128,9 @@
#define PA_SC_ENHANCE_3__FORCE_PBB_WORKLOAD_MODE_TO_ZERO__SHIFT 0x3
#define PA_SC_ENHANCE_3__FORCE_PBB_WORKLOAD_MODE_TO_ZERO_MASK 0x00000008L
+#define mmCGTT_SPI_CS_CLK_CTRL 0x507c
+#define mmCGTT_SPI_CS_CLK_CTRL_BASE_IDX 1
+
MODULE_FIRMWARE("amdgpu/navi10_ce.bin");
MODULE_FIRMWARE("amdgpu/navi10_pfp.bin");
MODULE_FIRMWARE("amdgpu/navi10_me.bin");
@@ -3094,6 +3097,7 @@ static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_1_2_nv12[] =
static const struct soc15_reg_golden golden_settings_gc_10_3[] =
{
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CS_CLK_CTRL, 0x78000000, 0x78000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_PS_CLK_CTRL, 0xff7f0fff, 0x78000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA0_CLK_CTRL, 0xff7f0fff, 0x30000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA1_CLK_CTRL, 0xff7f0fff, 0x7e000100),
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 6959aebae6d4..0d8e203b10ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -117,6 +117,13 @@ MODULE_FIRMWARE("amdgpu/renoir_mec.bin");
MODULE_FIRMWARE("amdgpu/renoir_mec2.bin");
MODULE_FIRMWARE("amdgpu/renoir_rlc.bin");
+MODULE_FIRMWARE("amdgpu/green_sardine_ce.bin");
+MODULE_FIRMWARE("amdgpu/green_sardine_pfp.bin");
+MODULE_FIRMWARE("amdgpu/green_sardine_me.bin");
+MODULE_FIRMWARE("amdgpu/green_sardine_mec.bin");
+MODULE_FIRMWARE("amdgpu/green_sardine_mec2.bin");
+MODULE_FIRMWARE("amdgpu/green_sardine_rlc.bin");
+
#define mmTCP_CHAN_STEER_0_ARCT 0x0b03
#define mmTCP_CHAN_STEER_0_ARCT_BASE_IDX 0
#define mmTCP_CHAN_STEER_1_ARCT 0x0b04
@@ -1630,7 +1637,10 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
chip_name = "arcturus";
break;
case CHIP_RENOIR:
- chip_name = "renoir";
+ if (adev->apu_flags & AMD_APU_IS_RENOIR)
+ chip_name = "renoir";
+ else
+ chip_name = "green_sardine";
break;
default:
BUG();
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 1ce741a0c6a7..8eeba8096493 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -455,6 +455,15 @@ void nv_set_virt_ops(struct amdgpu_device *adev)
adev->virt.ops = &xgpu_nv_virt_ops;
}
+static bool nv_is_headless_sku(struct pci_dev *pdev)
+{
+ if ((pdev->device == 0x731E &&
+ (pdev->revision == 0xC6 || pdev->revision == 0xC7)) ||
+ (pdev->device == 0x7340 && pdev->revision == 0xC9))
+ return true;
+ return false;
+}
+
int nv_set_ip_blocks(struct amdgpu_device *adev)
{
int r;
@@ -491,7 +500,8 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
!amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
- amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
+ if (!nv_is_headless_sku(adev->pdev))
+ amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
if (adev->enable_mes)
amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
index 75489313dbad..c4828bd3264b 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
@@ -39,6 +39,8 @@
MODULE_FIRMWARE("amdgpu/renoir_asd.bin");
MODULE_FIRMWARE("amdgpu/renoir_ta.bin");
+MODULE_FIRMWARE("amdgpu/green_sardine_asd.bin");
+MODULE_FIRMWARE("amdgpu/green_sardine_ta.bin");
/* address block */
#define smnMP1_FIRMWARE_FLAGS 0x3010024
@@ -54,7 +56,10 @@ static int psp_v12_0_init_microcode(struct psp_context *psp)
switch (adev->asic_type) {
case CHIP_RENOIR:
- chip_name = "renoir";
+ if (adev->apu_flags & AMD_APU_IS_RENOIR)
+ chip_name = "renoir";
+ else
+ chip_name = "green_sardine";
break;
default:
BUG();
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 86fb1eddf5a6..e82f49f62f6e 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -69,6 +69,7 @@ MODULE_FIRMWARE("amdgpu/picasso_sdma.bin");
MODULE_FIRMWARE("amdgpu/raven2_sdma.bin");
MODULE_FIRMWARE("amdgpu/arcturus_sdma.bin");
MODULE_FIRMWARE("amdgpu/renoir_sdma.bin");
+MODULE_FIRMWARE("amdgpu/green_sardine_sdma.bin");
#define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK 0x000000F8L
#define SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK 0xFC000000L
@@ -619,7 +620,10 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
chip_name = "arcturus";
break;
case CHIP_RENOIR:
- chip_name = "renoir";
+ if (adev->apu_flags & AMD_APU_IS_RENOIR)
+ chip_name = "renoir";
+ else
+ chip_name = "green_sardine";
break;
default:
BUG();
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index afcccc6c0fc6..f57c5f57efa8 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -1195,8 +1195,7 @@ static int soc15_common_early_init(void *handle)
adev->pg_flags = AMD_PG_SUPPORT_SDMA |
AMD_PG_SUPPORT_MMHUB |
- AMD_PG_SUPPORT_VCN |
- AMD_PG_SUPPORT_VCN_DPG;
+ AMD_PG_SUPPORT_VCN;
} else {
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
@@ -1243,7 +1242,15 @@ static int soc15_common_early_init(void *handle)
break;
case CHIP_RENOIR:
adev->asic_funcs = &soc15_asic_funcs;
- adev->apu_flags |= AMD_APU_IS_RENOIR;
+ if (adev->pdev->device == 0x1636)
+ adev->apu_flags |= AMD_APU_IS_RENOIR;
+ else
+ adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
+
+ if (adev->apu_flags & AMD_APU_IS_RENOIR)
+ adev->external_rev_id = adev->rev_id + 0x91;
+ else
+ adev->external_rev_id = adev->rev_id + 0xa1;
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_3D_CGCG |
@@ -1268,7 +1275,6 @@ static int soc15_common_early_init(void *handle)
AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_JPEG |
AMD_PG_SUPPORT_VCN_DPG;
- adev->external_rev_id = adev->rev_id + 0x91;
break;
default:
/* FIXME: not supported yet */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 5e2254b9e931..3de5e14c5ae3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -798,10 +798,10 @@ int kfd_create_crat_image_acpi(void **crat_image, size_t *size)
}
pcrat_image = kvmalloc(crat_table->length, GFP_KERNEL);
- memcpy(pcrat_image, crat_table, crat_table->length);
if (!pcrat_image)
return -ENOMEM;
+ memcpy(pcrat_image, crat_table, crat_table->length);
*crat_image = pcrat_image;
*size = crat_table->length;
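
The reordering above is a classic NULL-dereference fix: kvmalloc() can fail, so its result must be checked before it is used as a memcpy() destination. The safe allocate-check-copy ordering in generic form:

#include <linux/mm.h>		/* kvmalloc() */
#include <linux/string.h>	/* memcpy() */

/* Safe allocate-check-copy ordering (generic kernel-style sketch). */
static void *copy_blob(const void *src, size_t len)
{
	void *dst = kvmalloc(len, GFP_KERNEL);

	if (!dst)		/* check before any use of dst */
		return NULL;

	memcpy(dst, src, len);	/* only reached on success */
	return dst;
}
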
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index f24abf428534..60dfdd432aba 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -42,6 +42,7 @@ config DRM_AMD_DC_SI
config DEBUG_KERNEL_DC
bool "Enable kgdb break in DC"
depends on DRM_AMD_DC
+ depends on KGDB
help
Choose this option if you want to hit kgdb_break in assert.
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index e2b23486ba4c..e93e18c06c0e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -100,6 +100,8 @@ MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#endif
+#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
@@ -583,7 +585,7 @@ static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
- struct dm_comressor_info *compressor = &adev->dm.compressor;
+ struct dm_compressor_info *compressor = &adev->dm.compressor;
struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
struct drm_display_mode *mode;
unsigned long max_size = 0;
@@ -973,6 +975,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
case CHIP_RAVEN:
case CHIP_RENOIR:
init_data.flags.gpu_vm_support = true;
+ if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
+ init_data.flags.disable_dmcu = true;
break;
default:
break;
@@ -1267,6 +1271,8 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
case CHIP_RENOIR:
dmub_asic = DMUB_ASIC_DCN21;
fw_name_dmub = FIRMWARE_RENOIR_DMUB;
+ if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
+ fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
break;
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
case CHIP_SIENNA_CICHLID:
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 34f6369bf51f..a8a0e8cb1a11 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -86,7 +86,7 @@ struct irq_list_head {
* @bo_ptr: Pointer to the buffer object
* @gpu_addr: MMIO gpu addr
*/
-struct dm_comressor_info {
+struct dm_compressor_info {
void *cpu_addr;
struct amdgpu_bo *bo_ptr;
uint64_t gpu_addr;
@@ -148,7 +148,7 @@ struct amdgpu_dm_backlight_caps {
* @soc_bounding_box: SOC bounding box values provided by gpu_info FW
* @cached_state: Caches device atomic state for suspend/resume
* @cached_dc_state: Cached state of content streams
- * @compressor: Frame buffer compression buffer. See &struct dm_comressor_info
+ * @compressor: Frame buffer compression buffer. See &struct dm_compressor_info
* @force_timing_sync: set via debugfs. When set, indicates that all connected
* displays will be forced to synchronize.
*/
@@ -324,7 +324,7 @@ struct amdgpu_display_manager {
struct drm_atomic_state *cached_state;
struct dc_state *cached_dc_state;
- struct dm_comressor_info compressor;
+ struct dm_compressor_info compressor;
const struct firmware *fw_dmcu;
uint32_t dmcu_fw_version;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index efb909ef7a0f..857f156e4985 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -166,6 +166,11 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
rn_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
break;
}
+
+ if (ASICREV_IS_GREEN_SARDINE(asic_id.hw_internal_rev)) {
+ rn_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
+ break;
+ }
if (ASICREV_IS_RAVEN2(asic_id.hw_internal_rev)) {
rv2_clk_mgr_construct(ctx, clk_mgr, pp_smu);
break;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 1eb29c362122..45ad05f6e03b 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1571,8 +1571,8 @@ static void init_state(struct dc *dc, struct dc_state *context)
struct dc_state *dc_create_state(struct dc *dc)
{
- struct dc_state *context = kzalloc(sizeof(struct dc_state),
- GFP_KERNEL);
+ struct dc_state *context = kvzalloc(sizeof(struct dc_state),
+ GFP_KERNEL);
if (!context)
return NULL;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index e430148e47cf..59d48cf819ea 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -120,6 +120,8 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
dc_version = DCN_VERSION_1_01;
if (ASICREV_IS_RENOIR(asic_id.hw_internal_rev))
dc_version = DCN_VERSION_2_1;
+ if (ASICREV_IS_GREEN_SARDINE(asic_id.hw_internal_rev))
+ dc_version = DCN_VERSION_2_1;
break;
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 9cc65dc1970f..49ae5ff12da6 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -1149,7 +1149,8 @@ static uint32_t dcn3_get_pix_clk_dividers(
static const struct clock_source_funcs dcn3_clk_src_funcs = {
.cs_power_down = dce110_clock_source_power_down,
.program_pix_clk = dcn3_program_pix_clk,
- .get_pix_clk_dividers = dcn3_get_pix_clk_dividers
+ .get_pix_clk_dividers = dcn3_get_pix_clk_dividers,
+ .get_pixel_clk_frequency_100hz = get_pixel_clk_frequency_100hz
};
#endif
/*****************************************/
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
index 24fb39a11e5d..2455d210ccf6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
@@ -2105,12 +2105,12 @@ static bool dcn30_internal_validate_bw(
if (split[i]) {
if (odm) {
- if (split[i] == 4 && old_pipe->next_odm_pipe->next_odm_pipe)
+ if (split[i] == 4 && old_pipe->next_odm_pipe && old_pipe->next_odm_pipe->next_odm_pipe)
old_index = old_pipe->next_odm_pipe->next_odm_pipe->pipe_idx;
else if (old_pipe->next_odm_pipe)
old_index = old_pipe->next_odm_pipe->pipe_idx;
} else {
- if (split[i] == 4 && old_pipe->bottom_pipe->bottom_pipe &&
+ if (split[i] == 4 && old_pipe->bottom_pipe && old_pipe->bottom_pipe->bottom_pipe &&
old_pipe->bottom_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
old_index = old_pipe->bottom_pipe->bottom_pipe->pipe_idx;
else if (old_pipe->bottom_pipe &&
@@ -2150,10 +2150,12 @@ static bool dcn30_internal_validate_bw(
goto validate_fail;
newly_split[pipe_4to1->pipe_idx] = true;
- if (odm && old_pipe->next_odm_pipe->next_odm_pipe->next_odm_pipe)
+ if (odm && old_pipe->next_odm_pipe && old_pipe->next_odm_pipe->next_odm_pipe
+ && old_pipe->next_odm_pipe->next_odm_pipe->next_odm_pipe)
old_index = old_pipe->next_odm_pipe->next_odm_pipe->next_odm_pipe->pipe_idx;
- else if (!odm && old_pipe->bottom_pipe->bottom_pipe->bottom_pipe &&
- old_pipe->bottom_pipe->bottom_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
+ else if (!odm && old_pipe->bottom_pipe && old_pipe->bottom_pipe->bottom_pipe &&
+ old_pipe->bottom_pipe->bottom_pipe->bottom_pipe &&
+ old_pipe->bottom_pipe->bottom_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
old_index = old_pipe->bottom_pipe->bottom_pipe->bottom_pipe->pipe_idx;
else
old_index = -1;
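
Both hunks above fix the same class of bug: dereferencing ->next_odm_pipe->next_odm_pipe (or the equivalent bottom_pipe chain) without first checking the intermediate link. Because && short-circuits left to right, each pointer in the chain must be tested before the next one is followed:

/* Guarding every link before the final dereference (generic sketch). */
struct pipe { struct pipe *next; int idx; };

static int third_idx_or_default(struct pipe *p)
{
	/* && evaluates left to right and stops at the first NULL,
	 * so each dereference is only reached when its pointer is valid. */
	if (p && p->next && p->next->next)
		return p->next->next->idx;

	return -1;
}
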
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c
index 3be2c90b0c61..21583699f992 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c
@@ -117,6 +117,12 @@ static const struct ddc_registers ddc_data_regs_dcn[] = {
ddc_data_regs_dcn2(4),
ddc_data_regs_dcn2(5),
ddc_data_regs_dcn2(6),
+ {
+ DDC_GPIO_VGA_REG_LIST(DATA),
+ .ddc_setup = 0,
+ .phy_aux_cntl = 0,
+ .dc_gpio_aux_ctrl_5 = 0
+ }
};
static const struct ddc_registers ddc_clk_regs_dcn[] = {
@@ -126,6 +132,12 @@ static const struct ddc_registers ddc_clk_regs_dcn[] = {
ddc_clk_regs_dcn2(4),
ddc_clk_regs_dcn2(5),
ddc_clk_regs_dcn2(6),
+ {
+ DDC_GPIO_VGA_REG_LIST(CLK),
+ .ddc_setup = 0,
+ .phy_aux_cntl = 0,
+ .dc_gpio_aux_ctrl_5 = 0
+ }
};
static const struct ddc_sh_mask ddc_shift[] = {
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c
index f67c18375bfd..dac427b68fd7 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c
@@ -63,13 +63,13 @@ enum gpio_result dal_gpio_open_ex(
enum gpio_mode mode)
{
if (gpio->pin) {
- ASSERT_CRITICAL(false);
+ BREAK_TO_DEBUGGER();
return GPIO_RESULT_ALREADY_OPENED;
}
// No action if allocation failed during gpio construct
if (!gpio->hw_container.ddc) {
- ASSERT_CRITICAL(false);
+ BREAK_TO_DEBUGGER();
return GPIO_RESULT_NON_SPECIFIC_ERROR;
}
gpio->mode = mode;
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c b/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c
index 49689f71f4f1..0effbb2bd74a 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c
@@ -306,8 +306,8 @@ irq_source_info_dcn30[DAL_IRQ_SOURCES_NUMBER] = {
pflip_int_entry(1),
pflip_int_entry(2),
pflip_int_entry(3),
- [DC_IRQ_SOURCE_PFLIP5] = dummy_irq_entry(),
- [DC_IRQ_SOURCE_PFLIP6] = dummy_irq_entry(),
+ pflip_int_entry(4),
+ pflip_int_entry(5),
[DC_IRQ_SOURCE_PFLIP_UNDERLAY0] = dummy_irq_entry(),
gpio_pad_int_entry(0),
gpio_pad_int_entry(1),
diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h
index 330acaaed79a..95cb56929e79 100644
--- a/drivers/gpu/drm/amd/display/dc/os_types.h
+++ b/drivers/gpu/drm/amd/display/dc/os_types.h
@@ -94,36 +94,27 @@
* general debug capabilities
*
*/
-#if defined(CONFIG_HAVE_KGDB) || defined(CONFIG_KGDB)
-#define ASSERT_CRITICAL(expr) do { \
- if (WARN_ON(!(expr))) { \
- kgdb_breakpoint(); \
- } \
-} while (0)
+#ifdef CONFIG_DEBUG_KERNEL_DC
+#define dc_breakpoint() kgdb_breakpoint()
#else
-#define ASSERT_CRITICAL(expr) do { \
- if (WARN_ON(!(expr))) { \
- ; \
- } \
-} while (0)
+#define dc_breakpoint() do {} while (0)
#endif
-#if defined(CONFIG_DEBUG_KERNEL_DC)
-#define ASSERT(expr) ASSERT_CRITICAL(expr)
+#define ASSERT_CRITICAL(expr) do { \
+ if (WARN_ON(!(expr))) \
+ dc_breakpoint(); \
+ } while (0)
-#else
-#define ASSERT(expr) WARN_ON_ONCE(!(expr))
-#endif
+#define ASSERT(expr) do { \
+ if (WARN_ON_ONCE(!(expr))) \
+ dc_breakpoint(); \
+ } while (0)
-#if defined(CONFIG_DEBUG_KERNEL_DC) && (defined(CONFIG_HAVE_KGDB) || defined(CONFIG_KGDB))
#define BREAK_TO_DEBUGGER() \
do { \
DRM_DEBUG_DRIVER("%s():%d\n", __func__, __LINE__); \
- kgdb_breakpoint(); \
+ dc_breakpoint(); \
} while (0)
-#else
-#define BREAK_TO_DEBUGGER() DRM_DEBUG_DRIVER("%s():%d\n", __func__, __LINE__)
-#endif
#define DC_ERR(...) do { \
dm_error(__VA_ARGS__); \
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index b267987aed06..ffcb059297d3 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -205,6 +205,10 @@ enum {
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
#define ASICREV_IS_SIENNA_CICHLID_P(eChipRev) ((eChipRev >= NV_SIENNA_CICHLID_P_A0))
#endif
+#define GREEN_SARDINE_A0 0xA1
+#ifndef ASICREV_IS_GREEN_SARDINE
+#define ASICREV_IS_GREEN_SARDINE(eChipRev) ((eChipRev >= GREEN_SARDINE_A0) && (eChipRev < 0xFF))
+#endif
/*
* ASIC chip ID
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 10dc481ecbc4..06c1aabf10ce 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -45,6 +45,7 @@ enum amd_apu_flags {
AMD_APU_IS_RAVEN2 = 0x00000002UL,
AMD_APU_IS_PICASSO = 0x00000004UL,
AMD_APU_IS_RENOIR = 0x00000008UL,
+ AMD_APU_IS_GREEN_SARDINE = 0x00000010UL,
};
/**
diff --git a/drivers/gpu/drm/amd/pm/inc/hwmgr.h b/drivers/gpu/drm/amd/pm/inc/hwmgr.h
index 3898a95ec28b..518796a26eda 100644
--- a/drivers/gpu/drm/amd/pm/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/pm/inc/hwmgr.h
@@ -229,6 +229,7 @@ struct pp_smumgr_func {
bool (*is_hw_avfs_present)(struct pp_hwmgr *hwmgr);
int (*update_dpm_settings)(struct pp_hwmgr *hwmgr, void *profile_setting);
int (*smc_table_manager)(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw); /*rw: true for read, false for write */
+ int (*stop_smc)(struct pp_hwmgr *hwmgr);
};
struct pp_hwmgr_func {
diff --git a/drivers/gpu/drm/amd/pm/inc/smumgr.h b/drivers/gpu/drm/amd/pm/inc/smumgr.h
index ad100b533d04..5f46f1a4f38e 100644
--- a/drivers/gpu/drm/amd/pm/inc/smumgr.h
+++ b/drivers/gpu/drm/amd/pm/inc/smumgr.h
@@ -113,4 +113,6 @@ extern int smum_update_dpm_settings(struct pp_hwmgr *hwmgr, void *profile_settin
extern int smum_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw);
+extern int smum_stop_smc(struct pp_hwmgr *hwmgr);
+
#endif
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ci_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ci_baco.c
index 3be40114e63d..45f608838f6e 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ci_baco.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ci_baco.c
@@ -142,12 +142,12 @@ static const struct baco_cmd_entry exit_baco_tbl[] =
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 },
{ CMD_DELAY_MS, 0, 0, 0, 20, 0 },
- { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x20 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x200 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 },
- { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c00 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 },
- { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x10 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x100 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 },
{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 }
};
@@ -155,6 +155,7 @@ static const struct baco_cmd_entry exit_baco_tbl[] =
static const struct baco_cmd_entry clean_baco_tbl[] =
{
{ CMD_WRITE, mmBIOS_SCRATCH_6, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 },
{ CMD_WRITE, mmCP_PFP_UCODE_ADDR, 0, 0, 0, 0 }
};
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index 1e8919b0acdb..35629140fc7a 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -1541,6 +1541,10 @@ static int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((tmp_result == 0),
"Failed to reset to default!", result = tmp_result);
+ tmp_result = smum_stop_smc(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to stop smc!", result = tmp_result);
+
tmp_result = smu7_force_switch_to_arbf0(hwmgr);
PP_ASSERT_WITH_CODE((tmp_result == 0),
"Failed to force to switch arbf0!", result = tmp_result);
@@ -1585,18 +1589,24 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
data->current_profile_setting.sclk_down_hyst = 100;
data->current_profile_setting.sclk_activity = SMU7_SCLK_TARGETACTIVITY_DFLT;
data->current_profile_setting.bupdate_mclk = 1;
- if (adev->gmc.vram_width == 256) {
- data->current_profile_setting.mclk_up_hyst = 10;
- data->current_profile_setting.mclk_down_hyst = 60;
- data->current_profile_setting.mclk_activity = 25;
- } else if (adev->gmc.vram_width == 128) {
- data->current_profile_setting.mclk_up_hyst = 5;
- data->current_profile_setting.mclk_down_hyst = 16;
- data->current_profile_setting.mclk_activity = 20;
- } else if (adev->gmc.vram_width == 64) {
- data->current_profile_setting.mclk_up_hyst = 3;
- data->current_profile_setting.mclk_down_hyst = 16;
- data->current_profile_setting.mclk_activity = 20;
+ if (hwmgr->chip_id >= CHIP_POLARIS10) {
+ if (adev->gmc.vram_width == 256) {
+ data->current_profile_setting.mclk_up_hyst = 10;
+ data->current_profile_setting.mclk_down_hyst = 60;
+ data->current_profile_setting.mclk_activity = 25;
+ } else if (adev->gmc.vram_width == 128) {
+ data->current_profile_setting.mclk_up_hyst = 5;
+ data->current_profile_setting.mclk_down_hyst = 16;
+ data->current_profile_setting.mclk_activity = 20;
+ } else if (adev->gmc.vram_width == 64) {
+ data->current_profile_setting.mclk_up_hyst = 3;
+ data->current_profile_setting.mclk_down_hyst = 16;
+ data->current_profile_setting.mclk_activity = 20;
+ }
+ } else {
+ data->current_profile_setting.mclk_up_hyst = 0;
+ data->current_profile_setting.mclk_down_hyst = 100;
+ data->current_profile_setting.mclk_activity = SMU7_MCLK_TARGETACTIVITY_DFLT;
}
hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D];
hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
index e4d1f3d66ef4..329bf4d44bbc 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
@@ -2726,10 +2726,7 @@ static int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
static bool ci_is_dpm_running(struct pp_hwmgr *hwmgr)
{
- return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__SMC, FEATURE_STATUS,
- VOLTAGE_CONTROLLER_ON))
- ? true : false;
+ return ci_is_smc_ram_running(hwmgr);
}
static int ci_smu_init(struct pp_hwmgr *hwmgr)
@@ -2939,6 +2936,29 @@ static int ci_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
return 0;
}
+static void ci_reset_smc(struct pp_hwmgr *hwmgr)
+{
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ SMC_SYSCON_RESET_CNTL,
+ rst_reg, 1);
+}
+
+
+static void ci_stop_smc_clock(struct pp_hwmgr *hwmgr)
+{
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ SMC_SYSCON_CLOCK_CNTL_0,
+ ck_disable, 1);
+}
+
+static int ci_stop_smc(struct pp_hwmgr *hwmgr)
+{
+ ci_reset_smc(hwmgr);
+ ci_stop_smc_clock(hwmgr);
+
+ return 0;
+}
+
const struct pp_smumgr_func ci_smu_funcs = {
.name = "ci_smu",
.smu_init = ci_smu_init,
@@ -2964,4 +2984,5 @@ const struct pp_smumgr_func ci_smu_funcs = {
.is_dpm_running = ci_is_dpm_running,
.update_dpm_settings = ci_update_dpm_settings,
.update_smc_table = ci_update_smc_table,
+ .stop_smc = ci_stop_smc,
};
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smumgr.c
index b6fb48066841..b6921db3c130 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smumgr.c
@@ -245,3 +245,11 @@ int smum_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t tabl
return -EINVAL;
}
+
+int smum_stop_smc(struct pp_hwmgr *hwmgr)
+{
+ if (hwmgr->smumgr_funcs->stop_smc)
+ return hwmgr->smumgr_funcs->stop_smc(hwmgr);
+
+ return 0;
+}
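
smum_stop_smc() follows the smumgr convention for optional hooks: call the function pointer when the backend provides one, otherwise report success, so callers such as smu7_disable_dpm_tasks() need no per-ASIC knowledge. The pattern in isolation:

/* Optional-hook dispatch (generic sketch): an absent hook means success. */
struct ops { int (*stop)(void *ctx); };

static int do_stop(const struct ops *ops, void *ctx)
{
	if (ops->stop)
		return ops->stop(ctx);	/* backend-specific implementation */

	return 0;			/* nothing to do on this backend */
}
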
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index fc4f95fa87cf..b1e5ec01527b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -1029,17 +1029,6 @@ static int smu_smc_hw_setup(struct smu_context *smu)
return ret;
}
- /*
- * Set initialized values (get from vbios) to dpm tables context such as
- * gfxclk, memclk, dcefclk, and etc. And enable the DPM feature for each
- * type of clks.
- */
- ret = smu_set_default_dpm_table(smu);
- if (ret) {
- dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
- return ret;
- }
-
ret = smu_notify_display_change(smu);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 8d8081c6bd38..ef1a62e86a0e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -1361,14 +1361,9 @@ static int navi10_get_fan_speed_rpm(struct smu_context *smu,
if (!speed)
return -EINVAL;
- switch (smu_v11_0_get_fan_control_mode(smu)) {
- case AMD_FAN_CTRL_AUTO:
- return navi10_get_smu_metrics_data(smu,
- METRICS_CURR_FANSPEED,
- speed);
- default:
- return smu_v11_0_get_fan_speed_rpm(smu, speed);
- }
+ return navi10_get_smu_metrics_data(smu,
+ METRICS_CURR_FANSPEED,
+ speed);
}
static int navi10_get_fan_parameters(struct smu_context *smu)
@@ -2534,29 +2529,6 @@ static const struct i2c_algorithm navi10_i2c_algo = {
.functionality = navi10_i2c_func,
};
-static int navi10_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control)
-{
- struct amdgpu_device *adev = to_amdgpu_device(control);
- int res;
-
- control->owner = THIS_MODULE;
- control->class = I2C_CLASS_SPD;
- control->dev.parent = &adev->pdev->dev;
- control->algo = &navi10_i2c_algo;
- snprintf(control->name, sizeof(control->name), "AMDGPU SMU");
-
- res = i2c_add_adapter(control);
- if (res)
- DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
-
- return res;
-}
-
-static void navi10_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control)
-{
- i2c_del_adapter(control);
-}
-
static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
void **table)
{
@@ -2687,8 +2659,6 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.set_default_dpm_table = navi10_set_default_dpm_table,
.dpm_set_vcn_enable = navi10_dpm_set_vcn_enable,
.dpm_set_jpeg_enable = navi10_dpm_set_jpeg_enable,
- .i2c_init = navi10_i2c_control_init,
- .i2c_fini = navi10_i2c_control_fini,
.print_clk_levels = navi10_print_clk_levels,
.force_clk_levels = navi10_force_clk_levels,
.populate_umd_state_clk = navi10_populate_umd_state_clk,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 685a8a3b25d4..895d89bea7fa 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -1177,14 +1177,9 @@ static int sienna_cichlid_get_fan_speed_rpm(struct smu_context *smu,
if (!speed)
return -EINVAL;
- switch (smu_v11_0_get_fan_control_mode(smu)) {
- case AMD_FAN_CTRL_AUTO:
- return sienna_cichlid_get_smu_metrics_data(smu,
- METRICS_CURR_FANSPEED,
- speed);
- default:
- return smu_v11_0_get_fan_speed_rpm(smu, speed);
- }
+ return sienna_cichlid_get_smu_metrics_data(smu,
+ METRICS_CURR_FANSPEED,
+ speed);
}
static int sienna_cichlid_get_fan_parameters(struct smu_context *smu)
diff --git a/drivers/gpu/drm/bridge/cadence/Kconfig b/drivers/gpu/drm/bridge/cadence/Kconfig
index 511d67b16d14..ef8c230e0f62 100644
--- a/drivers/gpu/drm/bridge/cadence/Kconfig
+++ b/drivers/gpu/drm/bridge/cadence/Kconfig
@@ -13,7 +13,7 @@ config DRM_CDNS_MHDP8546
if DRM_CDNS_MHDP8546
config DRM_CDNS_MHDP8546_J721E
- depends on ARCH_K3_J721E_SOC || COMPILE_TEST
+ depends on ARCH_K3 || COMPILE_TEST
bool "J721E Cadence DPI/DP wrapper support"
default y
help
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 90807a6b415c..deeed73f4ed6 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -374,6 +374,10 @@ static bool is_edid_digital_input_dp(const struct edid *edid)
* drm_dp_downstream_is_type() - is the downstream facing port of certain type?
* @dpcd: DisplayPort configuration data
* @port_cap: port capabilities
+ * @type: port type to be checked. Can be:
+ * %DP_DS_PORT_TYPE_DP, %DP_DS_PORT_TYPE_VGA, %DP_DS_PORT_TYPE_DVI,
+ * %DP_DS_PORT_TYPE_HDMI, %DP_DS_PORT_TYPE_NON_EDID,
+ * %DP_DS_PORT_TYPE_DP_DUALMODE or %DP_DS_PORT_TYPE_WIRELESS.
*
* Caveat: Only works with DPCD 1.1+ port caps.
*
@@ -870,6 +874,7 @@ EXPORT_SYMBOL(drm_dp_downstream_444_to_420_conversion);
/**
* drm_dp_downstream_mode() - return a mode for downstream facing port
+ * @dev: DRM device
* @dpcd: DisplayPort configuration data
* @port_cap: port capabilities
*
@@ -1028,7 +1033,8 @@ EXPORT_SYMBOL(drm_dp_downstream_debug);
/**
* drm_dp_subconnector_type() - get DP branch device type
- *
+ * @dpcd: DisplayPort configuration data
+ * @port_cap: port capabilities
*/
enum drm_mode_subconnector
drm_dp_subconnector_type(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
@@ -1079,6 +1085,10 @@ EXPORT_SYMBOL(drm_dp_subconnector_type);
/**
* drm_mode_set_dp_subconnector_property - set subconnector for DP connector
+ * @connector: connector to set property on
+ * @status: connector status
+ * @dpcd: DisplayPort configuration data
+ * @port_cap: port capabilities
*
* Called by a driver on every detect event.
*/
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index a82f37d44258..631125b46e04 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -3741,7 +3741,7 @@ drm_add_cmdb_modes(struct drm_connector *connector, u8 svd)
/**
* drm_display_mode_from_cea_vic() - return a mode for CEA VIC
* @dev: DRM device
- * @vic: CEA VIC of the mode
+ * @video_code: CEA VIC of the mode
*
* Creates a new mode matching the specified CEA VIC.
*
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 19d73868490e..69c2c079d803 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -1085,6 +1085,8 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
*/
drm_gem_object_get(obj);
+ vma->vm_private_data = obj;
+
if (obj->funcs && obj->funcs->mmap) {
ret = obj->funcs->mmap(obj, vma);
if (ret) {
@@ -1107,8 +1109,6 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
}
- vma->vm_private_data = obj;
-
return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index d77c9f8ff26c..e00616d94f26 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -593,8 +593,13 @@ int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
/* Remove the fake offset */
vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
- if (obj->import_attach)
+ if (obj->import_attach) {
+ /* Drop the reference drm_gem_mmap_obj() acquired.*/
+ drm_gem_object_put(obj);
+ vma->vm_private_data = NULL;
+
return dma_buf_mmap(obj->dma_buf, vma, 0);
+ }
shmem = to_drm_gem_shmem_obj(obj);
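
For imported buffers the mmap is delegated to dma_buf_mmap(), which installs the exporter's own vm_ops and private data; the GEM reference taken earlier by drm_gem_mmap_obj() must therefore be dropped and vm_private_data cleared, or the object leaks when the exporter tears the vma down. A sketch of that handoff (mirroring the hunk above, with assumed surrounding includes):

/* Sketch of the reference handoff, not the helper itself. The caller took
 * a reference and set vm_private_data; when delegating to the exporter,
 * both must be undone because the exporter owns the vma from here on. */
static int mmap_delegate(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	drm_gem_object_put(obj);	/* undo drm_gem_mmap_obj()'s get */
	vma->vm_private_data = NULL;	/* exporter installs its own state */

	return dma_buf_mmap(obj->dma_buf, vma, 0);
}
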
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index d6808f678db5..9f955f2010c2 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -794,6 +794,7 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
/**
* drm_prime_pages_to_sg - converts a page array into an sg list
+ * @dev: DRM device
* @pages: pointer to the array of page pointers to convert
* @nr_pages: length of the page vector
*
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index 15eb3770d817..361e3a0c5ab6 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -347,6 +347,7 @@ int psb_irq_postinstall(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
unsigned long irqflags;
+ unsigned int i;
spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
@@ -359,20 +360,12 @@ int psb_irq_postinstall(struct drm_device *dev)
PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
- if (dev->vblank[0].enabled)
- psb_enable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
- else
- psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
-
- if (dev->vblank[1].enabled)
- psb_enable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
- else
- psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
-
- if (dev->vblank[2].enabled)
- psb_enable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
- else
- psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
+ for (i = 0; i < dev->num_crtcs; ++i) {
+ if (dev->vblank[i].enabled)
+ psb_enable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
+ else
+ psb_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
+ }
if (dev_priv->ops->hotplug_enable)
dev_priv->ops->hotplug_enable(dev, true);
@@ -385,6 +378,7 @@ void psb_irq_uninstall(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
unsigned long irqflags;
+ unsigned int i;
spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
@@ -393,14 +387,10 @@ void psb_irq_uninstall(struct drm_device *dev)
PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
- if (dev->vblank[0].enabled)
- psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
-
- if (dev->vblank[1].enabled)
- psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
-
- if (dev->vblank[2].enabled)
- psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
+ for (i = 0; i < dev->num_crtcs; ++i) {
+ if (dev->vblank[i].enabled)
+ psb_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
+ }
dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
_PSB_IRQ_MSVDX_FLAG |
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 829b2a40a482..31337d2a2cde 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -10636,6 +10636,10 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
val & PLANE_CTL_FLIP_HORIZONTAL)
plane_config->rotation |= DRM_MODE_REFLECT_X;
+ /* 90/270 degree rotation would require extra work */
+ if (drm_rotation_90_or_270(plane_config->rotation))
+ goto error;
+
base = intel_de_read(dev_priv, PLANE_SURF(pipe, plane_id)) & 0xfffff000;
plane_config->base = base;
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 8a9d0bdde1bf..40e9cb29233d 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -1754,7 +1754,7 @@ void intel_psr_atomic_check(struct drm_connector *connector,
return;
intel_connector = to_intel_connector(connector);
- dig_port = enc_to_dig_port(intel_attached_encoder(intel_connector));
+ dig_port = enc_to_dig_port(to_intel_encoder(new_state->best_encoder));
if (dev_priv->psr.dp != &dig_port->dp)
return;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index 7c90a63c273d..fcce6909f201 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -509,21 +509,6 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
/*
- * Already in the desired write domain? Nothing for us to do!
- *
- * We apply a little bit of cunning here to catch a broader set of
- * no-ops. If obj->write_domain is set, we must be in the same
- * obj->read_domains, and only that domain. Therefore, if that
- * obj->write_domain matches the request read_domains, we are
- * already in the same read/write domain and can skip the operation,
- * without having to further check the requested write_domain.
- */
- if (READ_ONCE(obj->write_domain) == read_domains) {
- err = 0;
- goto out;
- }
-
- /*
* Try to flush the object off the GPU without holding the lock.
* We will repeat the flush holding the lock in the normal manner
* to catch cases where we are gazumped.
@@ -560,6 +545,19 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
if (err)
goto out;
+ /*
+ * Already in the desired write domain? Nothing for us to do!
+ *
+ * We apply a little bit of cunning here to catch a broader set of
+ * no-ops. If obj->write_domain is set, we must be in the same
+ * obj->read_domains, and only that domain. Therefore, if that
+ * obj->write_domain matches the request read_domains, we are
+ * already in the same read/write domain and can skip the operation,
+ * without having to further check the requested write_domain.
+ */
+ if (READ_ONCE(obj->write_domain) == read_domains)
+ goto out_unpin;
+
err = i915_gem_object_lock_interruptible(obj, NULL);
if (err)
goto out_unpin;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index b5c15557cc87..d6711caa7f39 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -56,6 +56,8 @@ struct drm_i915_gem_object_ops {
void (*truncate)(struct drm_i915_gem_object *obj);
void (*writeback)(struct drm_i915_gem_object *obj);
+ int (*pread)(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_pread *arg);
int (*pwrite)(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_pwrite *arg);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index 28147aab47b9..3a4dfe2ef1da 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -134,6 +134,58 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
vaddr, dma);
}
+static int
+phys_pwrite(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_pwrite *args)
+{
+ void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
+ char __user *user_data = u64_to_user_ptr(args->data_ptr);
+ int err;
+
+ err = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_ALL,
+ MAX_SCHEDULE_TIMEOUT);
+ if (err)
+ return err;
+
+ /*
+ * We manually control the domain here and pretend that it
+ * remains coherent i.e. in the GTT domain, like shmem_pwrite.
+ */
+ i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
+
+ if (copy_from_user(vaddr, user_data, args->size))
+ return -EFAULT;
+
+ drm_clflush_virt_range(vaddr, args->size);
+ intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
+
+ i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
+ return 0;
+}
+
+static int
+phys_pread(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_pread *args)
+{
+ void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
+ char __user *user_data = u64_to_user_ptr(args->data_ptr);
+ int err;
+
+ err = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT);
+ if (err)
+ return err;
+
+ drm_clflush_virt_range(vaddr, args->size);
+ if (copy_to_user(user_data, vaddr, args->size))
+ return -EFAULT;
+
+ return 0;
+}
+
static void phys_release(struct drm_i915_gem_object *obj)
{
fput(obj->base.filp);
@@ -144,6 +196,9 @@ static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
.get_pages = i915_gem_object_get_pages_phys,
.put_pages = i915_gem_object_put_pages_phys,
+ .pread = phys_pread,
+ .pwrite = phys_pwrite,
+
.release = phys_release,
};
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 7c3a1012e702..760fefdfe392 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -245,22 +245,14 @@ static inline u32 *gen12_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u
}
static inline u32 *
-__gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags0, u32 flags1)
+__gen8_emit_write_rcs(u32 *cs, u32 value, u32 offset, u32 flags0, u32 flags1)
{
- /* We're using qword write, offset should be aligned to 8 bytes. */
- GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
-
- /* w/a for post sync ops following a GPGPU operation we
- * need a prior CS_STALL, which is emitted by the flush
- * following the batch.
- */
*cs++ = GFX_OP_PIPE_CONTROL(6) | flags0;
- *cs++ = flags1 | PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_GLOBAL_GTT_IVB;
- *cs++ = gtt_offset;
+ *cs++ = flags1 | PIPE_CONTROL_QW_WRITE;
+ *cs++ = offset;
*cs++ = 0;
*cs++ = value;
- /* We're thrashing one dword of HWS. */
- *cs++ = 0;
+ *cs++ = 0; /* We're thrashing one extra dword. */
return cs;
}
@@ -268,13 +260,38 @@ __gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags0, u32 f
static inline u32*
gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
{
- return __gen8_emit_ggtt_write_rcs(cs, value, gtt_offset, 0, flags);
+ /* We're using qword write, offset should be aligned to 8 bytes. */
+ GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
+
+ return __gen8_emit_write_rcs(cs,
+ value,
+ gtt_offset,
+ 0,
+ flags | PIPE_CONTROL_GLOBAL_GTT_IVB);
}
static inline u32*
gen12_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags0, u32 flags1)
{
- return __gen8_emit_ggtt_write_rcs(cs, value, gtt_offset, flags0, flags1);
+ /* We're using qword write, offset should be aligned to 8 bytes. */
+ GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
+
+ return __gen8_emit_write_rcs(cs,
+ value,
+ gtt_offset,
+ flags0,
+ flags1 | PIPE_CONTROL_GLOBAL_GTT_IVB);
+}
+
+static inline u32 *
+__gen8_emit_flush_dw(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
+{
+ *cs++ = (MI_FLUSH_DW + 1) | flags;
+ *cs++ = gtt_offset;
+ *cs++ = 0;
+ *cs++ = value;
+
+ return cs;
}
static inline u32 *
@@ -285,12 +302,10 @@ gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
/* Offset should be aligned to 8 bytes for both (QW/DW) write types */
GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
- *cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW | flags;
- *cs++ = gtt_offset | MI_FLUSH_DW_USE_GTT;
- *cs++ = 0;
- *cs++ = value;
-
- return cs;
+ return __gen8_emit_flush_dw(cs,
+ value,
+ gtt_offset | MI_FLUSH_DW_USE_GTT,
+ flags | MI_FLUSH_DW_OP_STOREDW);
}
static inline void __intel_engine_reset(struct intel_engine_cs *engine,
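For reference, the factored-out helper emits a six-dword PIPE_CONTROL whose layout the wrappers merely parameterize. A standalone sketch of the dword stream, using stand-in opcode and flag values rather than the real i915 definitions:

#include <stdint.h>
#include <stdio.h>

/* Stand-in values; the real opcodes/flags live in the i915 headers. */
#define PIPE_CONTROL_HDR(len)  (0x7a000000u | ((len) - 2))
#define QW_WRITE               (1u << 14)
#define GLOBAL_GTT             (1u << 24)

/* Mirrors the shape of __gen8_emit_write_rcs: six dwords per write. */
static uint32_t *emit_write(uint32_t *cs, uint32_t value, uint32_t offset,
                            uint32_t flags0, uint32_t flags1)
{
    *cs++ = PIPE_CONTROL_HDR(6) | flags0;
    *cs++ = flags1 | QW_WRITE;
    *cs++ = offset;        /* address, low dword */
    *cs++ = 0;             /* address, high dword */
    *cs++ = value;         /* low half of the qword write */
    *cs++ = 0;             /* high half: the one extra dword thrashed */
    return cs;
}

int main(void)
{
    uint32_t batch[6];
    uint32_t *end = emit_write(batch, 0x1234, 0x80, 0, GLOBAL_GTT);

    for (uint32_t *p = batch; p < end; p++)
        printf("0x%08x\n", (unsigned)*p);
    return 0;
}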
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 5bfb5f7ed02c..efdeb7b7b2a0 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -371,7 +371,8 @@ static void __setup_engine_capabilities(struct intel_engine_cs *engine)
* instances.
*/
if ((INTEL_GEN(i915) >= 11 &&
- engine->gt->info.vdbox_sfc_access & engine->mask) ||
+ (engine->gt->info.vdbox_sfc_access &
+ BIT(engine->instance))) ||
(INTEL_GEN(i915) >= 9 && engine->instance == 0))
engine->uabi_capabilities |=
I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
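The SFC fix above matters because vdbox_sfc_access is indexed by VCS instance, while engine->mask carries the engine's bit in the GT-global engine mask; the two only coincide for the lowest-numbered engines. A toy model of the mismatch (ids are made up for illustration):

#include <assert.h>
#include <stdint.h>

struct engine { unsigned global_id, instance; };
#define ENGINE_MASK(e)  (1u << (e)->global_id)   /* engine->mask analogue */

int main(void)
{
    /* Say VCS1 sits at global engine id 5 but is vdbox instance 1. */
    struct engine vcs1 = { .global_id = 5, .instance = 1 };
    uint32_t vdbox_sfc_access = 1u << 1;         /* SFC behind vdbox 1 */

    assert(!(vdbox_sfc_access & ENGINE_MASK(&vcs1))); /* old test misses */
    assert(vdbox_sfc_access & (1u << vcs1.instance)); /* fixed test hits */
    return 0;
}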
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index a32aabce7901..f82c6dd1de18 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -3547,6 +3547,19 @@ static const struct intel_context_ops execlists_context_ops = {
.destroy = execlists_context_destroy,
};
+static u32 hwsp_offset(const struct i915_request *rq)
+{
+ const struct intel_timeline_cacheline *cl;
+
+ /* Before the request is executed, the timeline/cacheline is fixed */
+
+ cl = rcu_dereference_protected(rq->hwsp_cacheline, 1);
+ if (cl)
+ return cl->ggtt_offset;
+
+ return rcu_dereference_protected(rq->timeline, 1)->hwsp_offset;
+}
+
static int gen8_emit_init_breadcrumb(struct i915_request *rq)
{
u32 *cs;
@@ -3569,7 +3582,7 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq)
*cs++ = MI_NOOP;
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
- *cs++ = i915_request_timeline(rq)->hwsp_offset;
+ *cs++ = hwsp_offset(rq);
*cs++ = 0;
*cs++ = rq->fence.seqno - 1;
@@ -4886,11 +4899,9 @@ gen8_emit_fini_breadcrumb_tail(struct i915_request *request, u32 *cs)
return gen8_emit_wa_tail(request, cs);
}
-static u32 *emit_xcs_breadcrumb(struct i915_request *request, u32 *cs)
+static u32 *emit_xcs_breadcrumb(struct i915_request *rq, u32 *cs)
{
- u32 addr = i915_request_active_timeline(request)->hwsp_offset;
-
- return gen8_emit_ggtt_write(cs, request->fence.seqno, addr, 0);
+ return gen8_emit_ggtt_write(cs, rq->fence.seqno, hwsp_offset(rq), 0);
}
static u32 *gen8_emit_fini_breadcrumb(struct i915_request *rq, u32 *cs)
@@ -4909,7 +4920,7 @@ static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
/* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */
cs = gen8_emit_ggtt_write_rcs(cs,
request->fence.seqno,
- i915_request_active_timeline(request)->hwsp_offset,
+ hwsp_offset(request),
PIPE_CONTROL_FLUSH_ENABLE |
PIPE_CONTROL_CS_STALL);
@@ -4921,7 +4932,7 @@ gen11_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
{
cs = gen8_emit_ggtt_write_rcs(cs,
request->fence.seqno,
- i915_request_active_timeline(request)->hwsp_offset,
+ hwsp_offset(request),
PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_TILE_CACHE_FLUSH |
PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
@@ -4983,7 +4994,9 @@ gen12_emit_fini_breadcrumb_tail(struct i915_request *request, u32 *cs)
static u32 *gen12_emit_fini_breadcrumb(struct i915_request *rq, u32 *cs)
{
- return gen12_emit_fini_breadcrumb_tail(rq, emit_xcs_breadcrumb(rq, cs));
+ /* XXX Stalling flush before seqno write; a post-sync op alone is not enough */
+ cs = emit_xcs_breadcrumb(rq, __gen8_emit_flush_dw(cs, 0, 0, 0));
+ return gen12_emit_fini_breadcrumb_tail(rq, cs);
}
static u32 *
@@ -4991,7 +5004,7 @@ gen12_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
{
cs = gen12_emit_ggtt_write_rcs(cs,
request->fence.seqno,
- i915_request_active_timeline(request)->hwsp_offset,
+ hwsp_offset(request),
PIPE_CONTROL0_HDC_PIPELINE_FLUSH,
PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_TILE_CACHE_FLUSH |
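hwsp_offset() can use rcu_dereference_protected(..., 1) because neither the cacheline nor the timeline can change once the request is constructed; caching ggtt_offset at acquire time then spares readers the vma walk. A plain-pointer model of the accessor's fallback logic:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Plain pointers stand in for the RCU-protected fields. */
struct cacheline { uint32_t ggtt_offset; };
struct timeline  { uint32_t hwsp_offset; };
struct request   { struct cacheline *cl; struct timeline *tl; };

static uint32_t hwsp_offset(const struct request *rq)
{
    if (rq->cl)                  /* fixed once the request is constructed */
        return rq->cl->ggtt_offset;
    return rq->tl->hwsp_offset;
}

int main(void)
{
    struct timeline tl = { .hwsp_offset = 0x1000 };
    struct cacheline cl = { .ggtt_offset = 0x2040 };
    struct request a = { .cl = &cl, .tl = &tl };
    struct request b = { .cl = NULL, .tl = &tl };

    assert(hwsp_offset(&a) == 0x2040);
    assert(hwsp_offset(&b) == 0x1000);
    return 0;
}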
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index a2f74cefe4c3..7ea94d201fe6 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -188,10 +188,14 @@ cacheline_alloc(struct intel_timeline_hwsp *hwsp, unsigned int cacheline)
return cl;
}
-static void cacheline_acquire(struct intel_timeline_cacheline *cl)
+static void cacheline_acquire(struct intel_timeline_cacheline *cl,
+ u32 ggtt_offset)
{
- if (cl)
- i915_active_acquire(&cl->active);
+ if (!cl)
+ return;
+
+ cl->ggtt_offset = ggtt_offset;
+ i915_active_acquire(&cl->active);
}
static void cacheline_release(struct intel_timeline_cacheline *cl)
@@ -340,7 +344,7 @@ int intel_timeline_pin(struct intel_timeline *tl, struct i915_gem_ww_ctx *ww)
GT_TRACE(tl->gt, "timeline:%llx using HWSP offset:%x\n",
tl->fence_context, tl->hwsp_offset);
- cacheline_acquire(tl->hwsp_cacheline);
+ cacheline_acquire(tl->hwsp_cacheline, tl->hwsp_offset);
if (atomic_fetch_inc(&tl->pin_count)) {
cacheline_release(tl->hwsp_cacheline);
__i915_vma_unpin(tl->hwsp_ggtt);
@@ -515,7 +519,7 @@ __intel_timeline_get_seqno(struct intel_timeline *tl,
GT_TRACE(tl->gt, "timeline:%llx using HWSP offset:%x\n",
tl->fence_context, tl->hwsp_offset);
- cacheline_acquire(cl);
+ cacheline_acquire(cl, tl->hwsp_offset);
tl->hwsp_cacheline = cl;
*seqno = timeline_advance(tl);
@@ -573,9 +577,7 @@ int intel_timeline_read_hwsp(struct i915_request *from,
if (err)
goto out;
- *hwsp = i915_ggtt_offset(cl->hwsp->vma) +
- ptr_unmask_bits(cl->vaddr, CACHELINE_BITS) * CACHELINE_BYTES;
-
+ *hwsp = cl->ggtt_offset;
out:
i915_active_release(&cl->active);
return err;
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline_types.h b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
index 02181c5020db..4474f487f589 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
@@ -94,6 +94,8 @@ struct intel_timeline_cacheline {
struct intel_timeline_hwsp *hwsp;
void *vaddr;
+ u32 ggtt_offset;
+
struct rcu_head rcu;
};
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 3be37e6fe33d..eb342a759943 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1489,7 +1489,8 @@ static int hws_pga_write(struct intel_vgpu *vgpu, unsigned int offset,
const struct intel_engine_cs *engine =
intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
- if (!intel_gvt_ggtt_validate_range(vgpu, value, I915_GTT_PAGE_SIZE)) {
+ if (value != 0 &&
+ !intel_gvt_ggtt_validate_range(vgpu, value, I915_GTT_PAGE_SIZE)) {
gvt_vgpu_err("write invalid HWSP address, reg:0x%x, value:0x%x\n",
offset, value);
return -EINVAL;
@@ -1650,6 +1651,34 @@ static int edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
return 0;
}
+/**
+ * FixMe:
+ * If the guest fills a non-privileged batch buffer on ApolloLake/Broxton the
+ * way Mesa i965 did in 717e7539124d ("i965: Use a WC map and memcpy for the
+ * batch instead of pwrite."), the missing flush of the batch buffer filled by
+ * the VM vCPU makes the host GPU hang while executing these MI_BATCH_BUFFER
+ * commands.
+ * Temporarily work around this by setting the SNOOP bit for PAT3, which is
+ * used by the PPGTT PML4 PTE: PAT(0) PCD(1) PWT(1).
+ * Performance is still expected to be low and will need further improvement.
+ */
+static int bxt_ppat_low_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u64 pat =
+ GEN8_PPAT(0, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(1, 0) |
+ GEN8_PPAT(2, 0) |
+ GEN8_PPAT(3, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(4, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(5, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(6, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(7, CHV_PPAT_SNOOP);
+
+ vgpu_vreg(vgpu, offset) = lower_32_bits(pat);
+
+ return 0;
+}
+
static int guc_status_read(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data,
unsigned int bytes)
@@ -2812,7 +2841,7 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
MMIO_DH(GEN6_PCODE_MAILBOX, D_BDW_PLUS, NULL, mailbox_write);
- MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS);
+ MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS & ~D_BXT);
MMIO_D(GEN8_PRIVATE_PAT_HI, D_BDW_PLUS);
MMIO_D(GAMTARBMODE, D_BDW_PLUS);
@@ -3139,7 +3168,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
NULL, NULL);
MMIO_DFH(GAMT_CHKN_BIT_REG, D_KBL | D_CFL, F_CMD_ACCESS, NULL, NULL);
- MMIO_D(GEN9_CTX_PREEMPT_REG, D_SKL_PLUS);
+ MMIO_D(GEN9_CTX_PREEMPT_REG, D_SKL_PLUS & ~D_BXT);
return 0;
}
@@ -3313,9 +3342,21 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt)
MMIO_D(GEN8_PUSHBUS_SHIFT, D_BXT);
MMIO_D(GEN6_GFXPAUSE, D_BXT);
MMIO_DFH(GEN8_L3SQCREG1, D_BXT, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(GEN8_L3CNTLREG, D_BXT, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x20D8), D_BXT, F_CMD_ACCESS, NULL, NULL);
+ MMIO_F(GEN8_RING_CS_GPR(RENDER_RING_BASE, 0), 0x40, F_CMD_ACCESS,
+ 0, 0, D_BXT, NULL, NULL);
+ MMIO_F(GEN8_RING_CS_GPR(GEN6_BSD_RING_BASE, 0), 0x40, F_CMD_ACCESS,
+ 0, 0, D_BXT, NULL, NULL);
+ MMIO_F(GEN8_RING_CS_GPR(BLT_RING_BASE, 0), 0x40, F_CMD_ACCESS,
+ 0, 0, D_BXT, NULL, NULL);
+ MMIO_F(GEN8_RING_CS_GPR(VEBOX_RING_BASE, 0), 0x40, F_CMD_ACCESS,
+ 0, 0, D_BXT, NULL, NULL);
MMIO_DFH(GEN9_CTX_PREEMPT_REG, D_BXT, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DH(GEN8_PRIVATE_PAT_LO, D_BXT, NULL, bxt_ppat_low_write);
+
return 0;
}
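Assuming the usual i915 definitions (GEN8_PPAT(i, x) places the 8-bit entry x at byte i, and CHV_PPAT_SNOOP is bit 6 of an entry), the value written by bxt_ppat_low_write() works out as below; this is a standalone arithmetic check, not driver code:

#include <assert.h>
#include <stdint.h>

/* Assumed to match the i915 definitions. */
#define GEN8_PPAT(i, x)  ((uint64_t)(x) << ((i) * 8))
#define CHV_PPAT_SNOOP   (1u << 6)

int main(void)
{
    uint64_t pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) | GEN8_PPAT(1, 0) |
                   GEN8_PPAT(2, 0) | GEN8_PPAT(3, CHV_PPAT_SNOOP) |
                   GEN8_PPAT(4, CHV_PPAT_SNOOP) | GEN8_PPAT(5, CHV_PPAT_SNOOP) |
                   GEN8_PPAT(6, CHV_PPAT_SNOOP) | GEN8_PPAT(7, CHV_PPAT_SNOOP);

    /* lower 32 bits: entries 0 and 3 snooped, 1 and 2 left at zero */
    assert((uint32_t)pat == 0x40000040);
    return 0;
}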
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 1570eb8aa978..aed2ef6466a2 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -1277,7 +1277,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(s->shadow[0]->vm));
for_each_engine(engine, vgpu->gvt->gt, id)
- intel_context_unpin(s->shadow[id]);
+ intel_context_put(s->shadow[id]);
kmem_cache_destroy(s->workloads);
}
@@ -1369,11 +1369,6 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
ce->ring = __intel_context_ring_size(ring_size);
}
- ret = intel_context_pin(ce);
- intel_context_put(ce);
- if (ret)
- goto out_shadow_ctx;
-
s->shadow[i] = ce;
}
@@ -1405,7 +1400,6 @@ out_shadow_ctx:
if (IS_ERR(s->shadow[i]))
break;
- intel_context_unpin(s->shadow[i]);
intel_context_put(s->shadow[i]);
}
i915_vm_put(&ppgtt->vm);
@@ -1479,6 +1473,7 @@ void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu_submission *s = &workload->vgpu->submission;
+ intel_context_unpin(s->shadow[workload->engine->id]);
release_shadow_batch_buffer(workload);
release_shadow_wa_ctx(&workload->wa_ctx);
@@ -1724,6 +1719,12 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu,
return ERR_PTR(ret);
}
+ ret = intel_context_pin(s->shadow[engine->id]);
+ if (ret) {
+ intel_vgpu_destroy_workload(workload);
+ return ERR_PTR(ret);
+ }
+
return workload;
}
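The net effect of the scheduler change is a tighter lifetime pairing: the context reference is held for the vGPU's lifetime, while the pin is held only per workload. A toy refcount model of that pairing:

#include <assert.h>

/* Toy model: the reference spans the vGPU's life, the pin a workload's. */
struct ctx { int refs, pins; };

static void setup(struct ctx *c)            { c->refs++; }
static void create_workload(struct ctx *c)  { c->pins++; }
static void destroy_workload(struct ctx *c) { c->pins--; assert(c->pins >= 0); }
static void clean(struct ctx *c)            { assert(c->pins == 0); c->refs--; }

int main(void)
{
    struct ctx c = { 0, 0 };

    setup(&c);
    create_workload(&c); destroy_workload(&c);
    create_workload(&c); destroy_workload(&c);
    clean(&c);
    assert(c.refs == 0 && c.pins == 0);
    return 0;
}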
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index bb0c12975f38..58276694c848 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -180,30 +180,6 @@ try_again:
}
static int
-i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
- struct drm_i915_gem_pwrite *args,
- struct drm_file *file)
-{
- void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
- char __user *user_data = u64_to_user_ptr(args->data_ptr);
-
- /*
- * We manually control the domain here and pretend that it
- * remains coherent i.e. in the GTT domain, like shmem_pwrite.
- */
- i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
-
- if (copy_from_user(vaddr, user_data, args->size))
- return -EFAULT;
-
- drm_clflush_virt_range(vaddr, args->size);
- intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
-
- i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
- return 0;
-}
-
-static int
i915_gem_create(struct drm_file *file,
struct intel_memory_region *mr,
u64 *size_p,
@@ -527,6 +503,12 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
trace_i915_gem_object_pread(obj, args->offset, args->size);
+ ret = -ENODEV;
+ if (obj->ops->pread)
+ ret = obj->ops->pread(obj, args);
+ if (ret != -ENODEV)
+ goto out;
+
ret = i915_gem_object_wait(obj,
I915_WAIT_INTERRUPTIBLE,
MAX_SCHEDULE_TIMEOUT);
@@ -866,8 +848,6 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
if (ret == -EFAULT || ret == -ENOSPC) {
if (i915_gem_object_has_struct_page(obj))
ret = i915_gem_shmem_pwrite(obj, args);
- else
- ret = i915_gem_phys_pwrite(obj, args, file);
}
i915_gem_object_unpin_pages(obj);
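The pread dispatch above treats -ENODEV as a sentinel meaning "no backend fast path, fall through to the generic route"; any other value, success or a real error, is final. A minimal standalone model of the convention:

#include <assert.h>
#include <errno.h>
#include <stddef.h>

/* -ENODEV means "no fast path, fall through"; anything else is final. */
struct ops { int (*pread)(void); };

static int generic_pread(void) { return 0; }    /* shmem-style slow path */

static int do_pread(const struct ops *ops)
{
    int ret = -ENODEV;

    if (ops->pread)
        ret = ops->pread();
    if (ret != -ENODEV)
        return ret;              /* the hook handled it (or truly failed) */
    return generic_pread();
}

int main(void)
{
    struct ops none = { .pread = NULL };
    assert(do_pread(&none) == 0);
    return 0;
}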
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 366ddfc8df6b..fb5e30de78c2 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -389,6 +389,7 @@ static const struct intel_device_info ilk_m_info = {
GEN5_FEATURES,
PLATFORM(INTEL_IRONLAKE),
.is_mobile = 1,
+ .has_rps = true,
.display.has_fbc = 1,
};
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index ffb5287e055a..caa9b041616b 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -314,8 +314,10 @@ static void __vma_release(struct dma_fence_work *work)
{
struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
- if (vw->pinned)
+ if (vw->pinned) {
__i915_gem_object_unpin_pages(vw->pinned);
+ i915_gem_object_put(vw->pinned);
+ }
i915_vm_free_pt_stash(vw->vm, &vw->stash);
i915_vm_put(vw->vm);
@@ -431,7 +433,7 @@ int i915_vma_bind(struct i915_vma *vma,
if (vma->obj) {
__i915_gem_object_pin_pages(vma->obj);
- work->pinned = vma->obj;
+ work->pinned = i915_gem_object_get(vma->obj);
}
} else {
vma->ops->bind_vma(vma->vm, NULL, vma, cache_level, bind_flags);
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index 6b5e9d88646d..180e1078ef7c 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -87,7 +87,7 @@ __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
min_order = ilog2(size) - ilog2(mem->mm.chunk_size);
}
- if (size > BIT(mem->mm.max_order) * mem->mm.chunk_size)
+ if (size > mem->mm.size)
return -E2BIG;
n_pages = size >> ilog2(mem->mm.chunk_size);
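Worked numbers for why the old bound was too strict: max_order only covers the largest power-of-two slice of a region, while mm.size is its true capacity. A standalone check with an assumed 4 KiB chunk size and a 4 GiB - 4 KiB region:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    const uint64_t chunk = 4096;                  /* assumed chunk size */
    const uint64_t size  = (4ull << 30) - chunk;  /* 4 GiB - 4 KiB region */

    /* max_order derives from the largest power of two <= size;
     * 63 - clz gives ilog2(size), minus ilog2(4096). */
    unsigned max_order = 63 - __builtin_clzll(size) - 12;  /* == 19 */
    uint64_t old_bound = (1ull << max_order) * chunk;      /* == 2 GiB */

    assert(old_bound == (2ull << 30));

    /* A 3 GiB request fits the region but failed the old check: */
    uint64_t req = 3ull << 30;
    assert(req > old_bound && req <= size);
    return 0;
}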
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 334b0648e253..0aeba8e3af28 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -261,6 +261,82 @@ err_close_objects:
return err;
}
+static int igt_mock_splintered_region(void *arg)
+{
+ struct intel_memory_region *mem = arg;
+ struct drm_i915_private *i915 = mem->i915;
+ struct drm_i915_gem_object *obj;
+ unsigned int expected_order;
+ LIST_HEAD(objects);
+ u64 size;
+ int err = 0;
+
+ /*
+ * Sanity check that we can still allocate everything even when
+ * BIT(mm.max_order) * chunk_size != mm.size, i.e. our starting address
+ * space size is not a power of two.
+ */
+
+ size = (SZ_4G - 1) & PAGE_MASK;
+ mem = mock_region_create(i915, 0, size, PAGE_SIZE, 0);
+ if (IS_ERR(mem))
+ return PTR_ERR(mem);
+
+ if (mem->mm.size != size) {
+ pr_err("%s size mismatch(%llu != %llu)\n",
+ __func__, mem->mm.size, size);
+ err = -EINVAL;
+ goto out_put;
+ }
+
+ expected_order = get_order(rounddown_pow_of_two(size));
+ if (mem->mm.max_order != expected_order) {
+ pr_err("%s order mismatch(%u != %u)\n",
+ __func__, mem->mm.max_order, expected_order);
+ err = -EINVAL;
+ goto out_put;
+ }
+
+ obj = igt_object_create(mem, &objects, size, 0);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out_close;
+ }
+
+ close_objects(mem, &objects);
+
+ /*
+ * While we should be able to allocate everything without any flag
+ * restrictions, I915_BO_ALLOC_CONTIGUOUS allocations are actually limited
+ * to the largest power-of-two slice of the region size, i.e. max_order,
+ * due to the inner workings of the buddy allocator. So make sure that
+ * does indeed hold true.
+ */
+
+ obj = igt_object_create(mem, &objects, size, I915_BO_ALLOC_CONTIGUOUS);
+ if (!IS_ERR(obj)) {
+ pr_err("%s too large contiguous allocation was not rejected\n",
+ __func__);
+ err = -EINVAL;
+ goto out_close;
+ }
+
+ obj = igt_object_create(mem, &objects, rounddown_pow_of_two(size),
+ I915_BO_ALLOC_CONTIGUOUS);
+ if (IS_ERR(obj)) {
+ pr_err("%s largest possible contiguous allocation failed\n",
+ __func__);
+ err = PTR_ERR(obj);
+ goto out_close;
+ }
+
+out_close:
+ close_objects(mem, &objects);
+out_put:
+ intel_memory_region_put(mem);
+ return err;
+}
+
static int igt_gpu_write_dw(struct intel_context *ce,
struct i915_vma *vma,
u32 dword,
@@ -771,6 +847,7 @@ int intel_memory_region_mock_selftests(void)
static const struct i915_subtest tests[] = {
SUBTEST(igt_mock_fill),
SUBTEST(igt_mock_contiguous),
+ SUBTEST(igt_mock_splintered_region),
};
struct intel_memory_region *mem;
struct drm_i915_private *i915;
diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c b/drivers/gpu/drm/i915/selftests/mock_region.c
index 09660f5a0a4c..979d96f27c43 100644
--- a/drivers/gpu/drm/i915/selftests/mock_region.c
+++ b/drivers/gpu/drm/i915/selftests/mock_region.c
@@ -24,7 +24,7 @@ mock_object_create(struct intel_memory_region *mem,
struct drm_i915_private *i915 = mem->i915;
struct drm_i915_gem_object *obj;
- if (size > BIT(mem->mm.max_order) * mem->mm.chunk_size)
+ if (size > mem->mm.size)
return ERR_PTR(-E2BIG);
obj = i915_gem_object_alloc();
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
index 71d84c7a5378..d07b39b8afd2 100644
--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
@@ -111,10 +111,6 @@ static int dw_hdmi_imx_parse_dt(struct imx_hdmi *hdmi)
return 0;
}
-static void dw_hdmi_imx_encoder_disable(struct drm_encoder *encoder)
-{
-}
-
static void dw_hdmi_imx_encoder_enable(struct drm_encoder *encoder)
{
struct imx_hdmi *hdmi = enc_to_imx_hdmi(encoder);
@@ -140,7 +136,6 @@ static int dw_hdmi_imx_atomic_check(struct drm_encoder *encoder,
static const struct drm_encoder_helper_funcs dw_hdmi_imx_encoder_helper_funcs = {
.enable = dw_hdmi_imx_encoder_enable,
- .disable = dw_hdmi_imx_encoder_disable,
.atomic_check = dw_hdmi_imx_atomic_check,
};
@@ -219,15 +214,9 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master,
hdmi->dev = &pdev->dev;
encoder = &hdmi->encoder;
- encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
- /*
- * If we failed to find the CRTC(s) which this encoder is
- * supposed to be connected to, it's because the CRTC has
- * not been registered yet. Defer probing, and hope that
- * the required CRTC is added later.
- */
- if (encoder->possible_crtcs == 0)
- return -EPROBE_DEFER;
+ ret = imx_drm_encoder_parse_of(drm, encoder, dev->of_node);
+ if (ret)
+ return ret;
ret = dw_hdmi_imx_parse_dt(hdmi);
if (ret < 0)
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 7d00c49fd5a5..9bf5ad6d18a2 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -20,6 +20,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_of.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
@@ -212,7 +213,9 @@ static int imx_drm_bind(struct device *dev)
drm->mode_config.allow_fb_modifiers = true;
drm->mode_config.normalize_zpos = true;
- drm_mode_config_init(drm);
+ ret = drmm_mode_config_init(drm);
+ if (ret)
+ return ret;
ret = drm_vblank_init(drm, MAX_CRTC);
if (ret)
@@ -251,7 +254,6 @@ err_poll_fini:
drm_kms_helper_poll_fini(drm);
component_unbind_all(drm->dev, drm);
err_kms:
- drm_mode_config_cleanup(drm);
drm_dev_put(drm);
return ret;
@@ -267,11 +269,9 @@ static void imx_drm_unbind(struct device *dev)
component_unbind_all(drm->dev, drm);
- drm_mode_config_cleanup(drm);
+ drm_dev_put(drm);
dev_set_drvdata(dev, NULL);
-
- drm_dev_put(drm);
}
static const struct component_master_ops imx_drm_ops = {
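drmm_mode_config_init() ties the mode-config teardown to the device's managed-release list, which is why both the error path and unbind above drop their explicit drm_mode_config_cleanup() calls. A toy model of the managed-action idea (not the real drmm API):

#include <assert.h>
#include <stdio.h>

/* Cleanups registered at init run automatically on final dev_put(). */
typedef void (*action_t)(void);
static action_t actions[8];
static int n_actions;

static void add_action(action_t a)     { actions[n_actions++] = a; }
static void mode_config_cleanup(void)  { puts("mode_config_cleanup"); }

static int managed_mode_config_init(void)
{
    /* ... allocate mode-config state ... */
    add_action(mode_config_cleanup);
    return 0;
}

static void dev_put(void)
{
    while (n_actions)                  /* LIFO, like the real release list */
        actions[--n_actions]();
}

int main(void)
{
    assert(managed_mode_config_init() == 0);
    dev_put();                         /* cleanup fires here, not in unbind */
    return 0;
}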
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index af757d1e21fe..41e2978cb1eb 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -62,7 +62,6 @@ struct imx_ldb_channel {
struct i2c_adapter *ddc;
int chno;
void *edid;
- int edid_len;
struct drm_display_mode mode;
int mode_valid;
u32 bus_format;
@@ -536,15 +535,14 @@ static int imx_ldb_panel_ddc(struct device *dev,
}
if (!channel->ddc) {
+ int edid_len;
+
/* if no DDC available, fallback to hardcoded EDID */
dev_dbg(dev, "no ddc available\n");
- edidp = of_get_property(child, "edid",
- &channel->edid_len);
+ edidp = of_get_property(child, "edid", &edid_len);
if (edidp) {
- channel->edid = kmemdup(edidp,
- channel->edid_len,
- GFP_KERNEL);
+ channel->edid = kmemdup(edidp, edid_len, GFP_KERNEL);
} else if (!channel->panel) {
/* fallback to display-timings node */
ret = of_get_drm_display_mode(child,
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index 813bb6156a68..2a8d2e32e7b4 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -13,7 +13,6 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
-#include <linux/spinlock.h>
#include <linux/videodev2.h>
#include <video/imx-ipu-v3.h>
@@ -104,8 +103,6 @@ struct imx_tve {
struct drm_connector connector;
struct drm_encoder encoder;
struct device *dev;
- spinlock_t lock; /* register lock */
- bool enabled;
int mode;
int di_hsync_pin;
int di_vsync_pin;
@@ -129,30 +126,10 @@ static inline struct imx_tve *enc_to_tve(struct drm_encoder *e)
return container_of(e, struct imx_tve, encoder);
}
-static void tve_lock(void *__tve)
-__acquires(&tve->lock)
-{
- struct imx_tve *tve = __tve;
-
- spin_lock(&tve->lock);
-}
-
-static void tve_unlock(void *__tve)
-__releases(&tve->lock)
-{
- struct imx_tve *tve = __tve;
-
- spin_unlock(&tve->lock);
-}
-
static void tve_enable(struct imx_tve *tve)
{
- if (!tve->enabled) {
- tve->enabled = true;
- clk_prepare_enable(tve->clk);
- regmap_update_bits(tve->regmap, TVE_COM_CONF_REG,
- TVE_EN, TVE_EN);
- }
+ clk_prepare_enable(tve->clk);
+ regmap_update_bits(tve->regmap, TVE_COM_CONF_REG, TVE_EN, TVE_EN);
/* clear interrupt status register */
regmap_write(tve->regmap, TVE_STAT_REG, 0xffffffff);
@@ -169,11 +146,8 @@ static void tve_enable(struct imx_tve *tve)
static void tve_disable(struct imx_tve *tve)
{
- if (tve->enabled) {
- tve->enabled = false;
- regmap_update_bits(tve->regmap, TVE_COM_CONF_REG, TVE_EN, 0);
- clk_disable_unprepare(tve->clk);
- }
+ regmap_update_bits(tve->regmap, TVE_COM_CONF_REG, TVE_EN, 0);
+ clk_disable_unprepare(tve->clk);
}
static int tve_setup_tvout(struct imx_tve *tve)
@@ -500,8 +474,7 @@ static struct regmap_config tve_regmap_config = {
.readable_reg = imx_tve_readable_reg,
- .lock = tve_lock,
- .unlock = tve_unlock,
+ .fast_io = true,
.max_register = 0xdc,
};
@@ -511,7 +484,7 @@ static const char * const imx_tve_modes[] = {
[TVE_MODE_VGA] = "vga",
};
-static const int of_get_tve_mode(struct device_node *np)
+static int of_get_tve_mode(struct device_node *np)
{
const char *bm;
int ret, i;
@@ -544,7 +517,6 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data)
memset(tve, 0, sizeof(*tve));
tve->dev = dev;
- spin_lock_init(&tve->lock);
ddc_node = of_parse_phandle(np, "ddc-i2c-bus", 0);
if (ddc_node) {
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 8232f512b9ed..2eb8df4697df 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -28,7 +28,6 @@ struct imx_parallel_display {
struct drm_bridge bridge;
struct device *dev;
void *edid;
- int edid_len;
u32 bus_format;
u32 bus_flags;
struct drm_display_mode mode;
@@ -41,11 +40,6 @@ static inline struct imx_parallel_display *con_to_imxpd(struct drm_connector *c)
return container_of(c, struct imx_parallel_display, connector);
}
-static inline struct imx_parallel_display *enc_to_imxpd(struct drm_encoder *e)
-{
- return container_of(e, struct imx_parallel_display, encoder);
-}
-
static inline struct imx_parallel_display *bridge_to_imxpd(struct drm_bridge *b)
{
return container_of(b, struct imx_parallel_display, bridge);
@@ -310,6 +304,7 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
struct device_node *np = dev->of_node;
const u8 *edidp;
struct imx_parallel_display *imxpd;
+ int edid_len;
int ret;
u32 bus_format = 0;
const char *fmt;
@@ -323,9 +318,9 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
if (ret && ret != -ENODEV)
return ret;
- edidp = of_get_property(np, "edid", &imxpd->edid_len);
+ edidp = of_get_property(np, "edid", &edid_len);
if (edidp)
- imxpd->edid = kmemdup(edidp, imxpd->edid_len, GFP_KERNEL);
+ imxpd->edid = devm_kmemdup(dev, edidp, edid_len, GFP_KERNEL);
ret = of_property_read_string(np, "interface-pix-fmt", &fmt);
if (!ret) {
@@ -349,17 +344,8 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
return 0;
}
-static void imx_pd_unbind(struct device *dev, struct device *master,
- void *data)
-{
- struct imx_parallel_display *imxpd = dev_get_drvdata(dev);
-
- kfree(imxpd->edid);
-}
-
static const struct component_ops imx_pd_ops = {
.bind = imx_pd_bind,
- .unbind = imx_pd_unbind,
};
static int imx_pd_probe(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c
index c592957ed07f..92f8bd907193 100644
--- a/drivers/gpu/drm/mcde/mcde_drv.c
+++ b/drivers/gpu/drm/mcde/mcde_drv.c
@@ -413,7 +413,13 @@ static int mcde_probe(struct platform_device *pdev)
match);
if (ret) {
dev_err(dev, "failed to add component master\n");
- goto clk_disable;
+ /*
+ * The EPOD regulator is already disabled at this point, so some
+ * special error-path code is needed.
+ */
+ clk_disable_unprepare(mcde->mcde_clk);
+ regulator_disable(mcde->vana);
+ return ret;
}
return 0;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core.h b/drivers/gpu/drm/nouveau/dispnv50/core.h
index 498622c0c670..f75088186fba 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/core.h
@@ -44,6 +44,7 @@ int core507d_new_(const struct nv50_core_func *, struct nouveau_drm *, s32,
struct nv50_core **);
int core507d_init(struct nv50_core *);
void core507d_ntfy_init(struct nouveau_bo *, u32);
+int core507d_read_caps(struct nv50_disp *disp);
int core507d_caps_init(struct nouveau_drm *, struct nv50_disp *);
int core507d_ntfy_wait_done(struct nouveau_bo *, u32, struct nvif_device *);
int core507d_update(struct nv50_core *, u32 *, bool);
@@ -55,6 +56,7 @@ extern const struct nv50_outp_func pior507d;
int core827d_new(struct nouveau_drm *, s32, struct nv50_core **);
int core907d_new(struct nouveau_drm *, s32, struct nv50_core **);
+int core907d_caps_init(struct nouveau_drm *drm, struct nv50_disp *disp);
extern const struct nv50_outp_func dac907d;
extern const struct nv50_outp_func sor907d;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core507d.c b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
index 248edf69e168..e6f16a7750f0 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
@@ -78,19 +78,56 @@ core507d_ntfy_init(struct nouveau_bo *bo, u32 offset)
}
int
-core507d_caps_init(struct nouveau_drm *drm, struct nv50_disp *disp)
+core507d_read_caps(struct nv50_disp *disp)
{
struct nvif_push *push = disp->core->chan.push;
int ret;
- if ((ret = PUSH_WAIT(push, 2)))
+ ret = PUSH_WAIT(push, 6);
+ if (ret)
return ret;
+ PUSH_MTHD(push, NV507D, SET_NOTIFIER_CONTROL,
+ NVDEF(NV507D, SET_NOTIFIER_CONTROL, MODE, WRITE) |
+ NVVAL(NV507D, SET_NOTIFIER_CONTROL, OFFSET, NV50_DISP_CORE_NTFY >> 2) |
+ NVDEF(NV507D, SET_NOTIFIER_CONTROL, NOTIFY, ENABLE));
+
PUSH_MTHD(push, NV507D, GET_CAPABILITIES, 0x00000000);
+
+ PUSH_MTHD(push, NV507D, SET_NOTIFIER_CONTROL,
+ NVDEF(NV507D, SET_NOTIFIER_CONTROL, NOTIFY, DISABLE));
+
return PUSH_KICK(push);
}
int
+core507d_caps_init(struct nouveau_drm *drm, struct nv50_disp *disp)
+{
+ struct nv50_core *core = disp->core;
+ struct nouveau_bo *bo = disp->sync;
+ s64 time;
+ int ret;
+
+ NVBO_WR32(bo, NV50_DISP_CORE_NTFY, NV_DISP_CORE_NOTIFIER_1, CAPABILITIES_1,
+ NVDEF(NV_DISP_CORE_NOTIFIER_1, CAPABILITIES_1, DONE, FALSE));
+
+ ret = core507d_read_caps(disp);
+ if (ret < 0)
+ return ret;
+
+ time = nvif_msec(core->chan.base.device, 2000ULL,
+ if (NVBO_TD32(bo, NV50_DISP_CORE_NTFY,
+ NV_DISP_CORE_NOTIFIER_1, CAPABILITIES_1, DONE, ==, TRUE))
+ break;
+ usleep_range(1, 2);
+ );
+ if (time < 0)
+ NV_ERROR(drm, "core caps notifier timeout\n");
+
+ return 0;
+}
+
+int
core507d_init(struct nv50_core *core)
{
struct nvif_push *push = core->chan.push;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core907d.c b/drivers/gpu/drm/nouveau/dispnv50/core907d.c
index b17c03529c78..8564d4dffaff 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core907d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core907d.c
@@ -22,11 +22,45 @@
#include "core.h"
#include "head.h"
+#include <nvif/push507c.h>
+#include <nvif/timer.h>
+
+#include <nvhw/class/cl907d.h>
+
+#include "nouveau_bo.h"
+
+int
+core907d_caps_init(struct nouveau_drm *drm, struct nv50_disp *disp)
+{
+ struct nv50_core *core = disp->core;
+ struct nouveau_bo *bo = disp->sync;
+ s64 time;
+ int ret;
+
+ NVBO_WR32(bo, NV50_DISP_CORE_NTFY, NV907D_CORE_NOTIFIER_3, CAPABILITIES_4,
+ NVDEF(NV907D_CORE_NOTIFIER_3, CAPABILITIES_4, DONE, FALSE));
+
+ ret = core507d_read_caps(disp);
+ if (ret < 0)
+ return ret;
+
+ time = nvif_msec(core->chan.base.device, 2000ULL,
+ if (NVBO_TD32(bo, NV50_DISP_CORE_NTFY,
+ NV907D_CORE_NOTIFIER_3, CAPABILITIES_4, DONE, ==, TRUE))
+ break;
+ usleep_range(1, 2);
+ );
+ if (time < 0)
+ NV_ERROR(drm, "core caps notifier timeout\n");
+
+ return 0;
+}
+
static const struct nv50_core_func
core907d = {
.init = core507d_init,
.ntfy_init = core507d_ntfy_init,
- .caps_init = core507d_caps_init,
+ .caps_init = core907d_caps_init,
.ntfy_wait_done = core507d_ntfy_wait_done,
.update = core507d_update,
.head = &head907d,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core917d.c b/drivers/gpu/drm/nouveau/dispnv50/core917d.c
index 66846f372080..1cd3a2a35dfb 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core917d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core917d.c
@@ -26,7 +26,7 @@ static const struct nv50_core_func
core917d = {
.init = core507d_init,
.ntfy_init = core507d_ntfy_init,
- .caps_init = core507d_caps_init,
+ .caps_init = core907d_caps_init,
.ntfy_wait_done = core507d_ntfy_wait_done,
.update = core507d_update,
.head = &head917d,
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl507d.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl507d.h
index 2e444bac701d..6a463f308b64 100644
--- a/drivers/gpu/drm/nouveau/include/nvhw/class/cl507d.h
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/cl507d.h
@@ -32,7 +32,10 @@
#define NV_DISP_CORE_NOTIFIER_1_COMPLETION_0_DONE_TRUE 0x00000001
#define NV_DISP_CORE_NOTIFIER_1_COMPLETION_0_R0 15:1
#define NV_DISP_CORE_NOTIFIER_1_COMPLETION_0_TIMESTAMP 29:16
-
+#define NV_DISP_CORE_NOTIFIER_1_CAPABILITIES_1 0x00000001
+#define NV_DISP_CORE_NOTIFIER_1_CAPABILITIES_1_DONE 0:0
+#define NV_DISP_CORE_NOTIFIER_1_CAPABILITIES_1_DONE_FALSE 0x00000000
+#define NV_DISP_CORE_NOTIFIER_1_CAPABILITIES_1_DONE_TRUE 0x00000001
// class methods
#define NV507D_UPDATE (0x00000080)
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl907d.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl907d.h
index 34bc3eafac7d..79aff6ff3138 100644
--- a/drivers/gpu/drm/nouveau/include/nvhw/class/cl907d.h
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/cl907d.h
@@ -24,6 +24,10 @@
#ifndef _cl907d_h_
#define _cl907d_h_
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_4 0x00000004
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE 0:0
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE_FALSE 0x00000000
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE_TRUE 0x00000001
#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20 0x00000014
#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18 0:0
#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18_FALSE 0x00000000
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 49dd0cbc332f..6f21f36719fc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -1023,29 +1023,6 @@ get_tmds_link_bandwidth(struct drm_connector *connector)
return 112000 * duallink_scale;
}
-enum drm_mode_status
-nouveau_conn_mode_clock_valid(const struct drm_display_mode *mode,
- const unsigned min_clock,
- const unsigned max_clock,
- unsigned int *clock_out)
-{
- unsigned int clock = mode->clock;
-
- if ((mode->flags & DRM_MODE_FLAG_3D_MASK) ==
- DRM_MODE_FLAG_3D_FRAME_PACKING)
- clock *= 2;
-
- if (clock < min_clock)
- return MODE_CLOCK_LOW;
- if (clock > max_clock)
- return MODE_CLOCK_HIGH;
-
- if (clock_out)
- *clock_out = clock;
-
- return MODE_OK;
-}
-
static enum drm_mode_status
nouveau_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
@@ -1053,7 +1030,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
- unsigned min_clock = 25000, max_clock = min_clock;
+ unsigned int min_clock = 25000, max_clock = min_clock, clock = mode->clock;
switch (nv_encoder->dcb->type) {
case DCB_OUTPUT_LVDS:
@@ -1082,8 +1059,15 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
return MODE_BAD;
}
- return nouveau_conn_mode_clock_valid(mode, min_clock, max_clock,
- NULL);
+ if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
+ clock *= 2;
+
+ if (clock < min_clock)
+ return MODE_CLOCK_LOW;
+ if (clock > max_clock)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
}
static struct drm_encoder *
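The inlined check doubles the clock for 3D frame packing before applying the min/max window, since frame packing stacks both eyes into a single frame. A worked example with illustrative limits:

#include <assert.h>

int main(void)
{
    unsigned min_clock = 25000, max_clock = 225000; /* illustrative window */
    unsigned mode_clock = 148500;                   /* 1080p60, kHz */
    unsigned clock = mode_clock * 2;                /* frame packed: 297 MHz */

    assert(clock >= min_clock);
    assert(clock > max_clock);                      /* -> MODE_CLOCK_HIGH */
    return 0;
}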
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 7b640e05bd4c..040ed88d362d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -231,23 +231,30 @@ nv50_dp_mode_valid(struct drm_connector *connector,
const struct drm_display_mode *mode,
unsigned *out_clock)
{
- const unsigned min_clock = 25000;
- unsigned max_clock, ds_clock, clock;
- enum drm_mode_status ret;
+ const unsigned int min_clock = 25000;
+ unsigned int max_rate, mode_rate, ds_max_dotclock, clock = mode->clock;
+ const u8 bpp = connector->display_info.bpc * 3;
if (mode->flags & DRM_MODE_FLAG_INTERLACE && !outp->caps.dp_interlace)
return MODE_NO_INTERLACE;
- max_clock = outp->dp.link_nr * outp->dp.link_bw;
- ds_clock = drm_dp_downstream_max_dotclock(outp->dp.dpcd,
- outp->dp.downstream_ports);
- if (ds_clock)
- max_clock = min(max_clock, ds_clock);
+ if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
+ clock *= 2;
+
+ max_rate = outp->dp.link_nr * outp->dp.link_bw;
+ mode_rate = DIV_ROUND_UP(clock * bpp, 8);
+ if (mode_rate > max_rate)
+ return MODE_CLOCK_HIGH;
+
+ ds_max_dotclock = drm_dp_downstream_max_dotclock(outp->dp.dpcd, outp->dp.downstream_ports);
+ if (ds_max_dotclock && clock > ds_max_dotclock)
+ return MODE_CLOCK_HIGH;
+
+ if (clock < min_clock)
+ return MODE_CLOCK_LOW;
- clock = mode->clock * (connector->display_info.bpc * 3) / 10;
- ret = nouveau_conn_mode_clock_valid(mode, min_clock, max_clock,
- &clock);
if (out_clock)
*out_clock = clock;
- return ret;
+
+ return MODE_OK;
}
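The new DP bound compares the mode's data rate against the link's capacity instead of dotclock against dotclock. A standalone check of the arithmetic, assuming link_bw counts kHz of per-lane symbol rate (one data byte per symbol) and clocks are in kHz:

#include <assert.h>

int main(void)
{
    unsigned link_nr = 4, link_bw = 270000;  /* 4 lanes of HBR, kHz */
    unsigned clock = 148500;                 /* 1080p60 dotclock, kHz */
    unsigned bpc = 8, bpp = bpc * 3;         /* RGB */

    unsigned max_rate  = link_nr * link_bw;        /* 1080000 kB/s */
    unsigned mode_rate = (clock * bpp + 7) / 8;    /* 445500 kB/s */

    assert(mode_rate <= max_rate);           /* 1080p60 at 8 bpc fits */
    return 0;
}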
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 89adadf4706b..549bc67feabb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -190,7 +190,8 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
* to the caller, instead of a normal nouveau_bo ttm reference. */
ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, size);
if (ret) {
- nouveau_bo_ref(NULL, &nvbo);
+ drm_gem_object_release(&nvbo->bo.base);
+ kfree(nvbo);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index 2df1c0460559..4f69e4c3dafd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -105,11 +105,11 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct drm_nouveau_svm_bind *args = data;
unsigned target, cmd, priority;
- unsigned long addr, end, size;
+ unsigned long addr, end;
struct mm_struct *mm;
args->va_start &= PAGE_MASK;
- args->va_end &= PAGE_MASK;
+ args->va_end = ALIGN(args->va_end, PAGE_SIZE);
/* Sanity check arguments */
if (args->reserved0 || args->reserved1)
@@ -118,8 +118,6 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
return -EINVAL;
if (args->va_start >= args->va_end)
return -EINVAL;
- if (!args->npages)
- return -EINVAL;
cmd = args->header >> NOUVEAU_SVM_BIND_COMMAND_SHIFT;
cmd &= NOUVEAU_SVM_BIND_COMMAND_MASK;
@@ -151,12 +149,6 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
if (args->stride)
return -EINVAL;
- size = ((unsigned long)args->npages) << PAGE_SHIFT;
- if ((args->va_start + size) <= args->va_start)
- return -EINVAL;
- if ((args->va_start + size) > args->va_end)
- return -EINVAL;
-
/*
* OK, we are asked to do something sane; for now we only support migrate
* commands, but we will add things like memory policy (what to do on
@@ -171,7 +163,7 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
return -EINVAL;
}
- for (addr = args->va_start, end = args->va_start + size; addr < end;) {
+ for (addr = args->va_start, end = args->va_end; addr < end;) {
struct vm_area_struct *vma;
unsigned long next;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index dcb70677d0ac..7851bec5f0e5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2924,17 +2924,34 @@ nvkm_device_del(struct nvkm_device **pdevice)
}
}
+/* returns true if the GPU is in the CPU's native byte order */
static inline bool
nvkm_device_endianness(struct nvkm_device *device)
{
- u32 boot1 = nvkm_rd32(device, 0x000004) & 0x01000001;
#ifdef __BIG_ENDIAN
- if (!boot1)
- return false;
+ const bool big_endian = true;
#else
- if (boot1)
- return false;
+ const bool big_endian = false;
#endif
+
+ /* Read NV_PMC_BOOT_1, and assume a non-functional endian switch if it
+ * doesn't contain the expected values.
+ */
+ u32 pmc_boot_1 = nvkm_rd32(device, 0x000004);
+ if (pmc_boot_1 && pmc_boot_1 != 0x01000001)
+ return !big_endian; /* Assume GPU is LE in this case. */
+
+ /* 0 means an LE GPU and 0x01000001 a BE GPU. The condition is true
+ * when the GPU and CPU endianness differ.
+ */
+ if (big_endian == !pmc_boot_1) {
+ nvkm_wr32(device, 0x000004, 0x01000001);
+ nvkm_rd32(device, 0x000000);
+ if (nvkm_rd32(device, 0x000004) != (big_endian ? 0x01000001 : 0x00000000))
+ return !big_endian; /* Assume GPU is LE on any unexpected read-back. */
+ }
+
+ /* CPU/GPU endianness should (hopefully) match. */
return true;
}
@@ -2987,14 +3004,10 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
if (detect) {
/* switch mmio to cpu's native endianness */
if (!nvkm_device_endianness(device)) {
- nvkm_wr32(device, 0x000004, 0x01000001);
- nvkm_rd32(device, 0x000000);
- if (!nvkm_device_endianness(device)) {
- nvdev_error(device,
- "GPU not supported on big-endian\n");
- ret = -ENOSYS;
- goto done;
- }
+ nvdev_error(device,
+ "Couldn't switch GPU to CPUs endianess\n");
+ ret = -ENOSYS;
+ goto done;
}
boot0 = nvkm_rd32(device, 0x000000);
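A standalone model of the consolidated handshake: 0 reads back for an LE GPU, 0x01000001 for a BE one, and writing 0x01000001 requests a flip; anything else is treated as a broken or absent switch. The register access is simulated; values are per the comments above:

#include <assert.h>
#include <stdint.h>

/* Simulated NV_PMC_BOOT_1; 0 = LE GPU, 0x01000001 = BE GPU. */
static uint32_t boot1;
static int switch_works = 1;

static void wr(uint32_t v)
{
    if (switch_works && v == 0x01000001)
        boot1 ^= 0x01000001;           /* the endian-switch flip */
}

/* Returns 1 once the GPU matches the CPU byte order, 0 if it cannot. */
static int to_native(int cpu_big_endian)
{
    if (boot1 && boot1 != 0x01000001)
        return !cpu_big_endian;        /* garbage: assume LE, no switch */

    if (cpu_big_endian == !boot1) {    /* orders differ: request a flip */
        wr(0x01000001);
        if (boot1 != (cpu_big_endian ? 0x01000001u : 0u))
            return !cpu_big_endian;    /* flip didn't stick */
    }
    return 1;
}

int main(void)
{
    boot1 = 0;                         /* LE GPU, BE CPU: flip succeeds */
    assert(to_native(1) == 1 && boot1 == 0x01000001);

    switch_works = 0;
    boot1 = 0;                         /* broken switch: detected */
    assert(to_native(1) == 0);
    return 0;
}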
diff --git a/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c b/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
index 3482e28e30fc..0c5f22e95c2d 100644
--- a/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
+++ b/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
@@ -26,7 +26,9 @@
struct mantix {
struct device *dev;
struct drm_panel panel;
+
struct gpio_desc *reset_gpio;
+ struct gpio_desc *tp_rstn_gpio;
struct regulator *avdd;
struct regulator *avee;
@@ -124,6 +126,10 @@ static int mantix_unprepare(struct drm_panel *panel)
{
struct mantix *ctx = panel_to_mantix(panel);
+ gpiod_set_value_cansleep(ctx->tp_rstn_gpio, 1);
+ usleep_range(5000, 6000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+
regulator_disable(ctx->avee);
regulator_disable(ctx->avdd);
/* T11 */
@@ -165,13 +171,10 @@ static int mantix_prepare(struct drm_panel *panel)
return ret;
}
- /* T3+T5 */
- usleep_range(10000, 12000);
-
- gpiod_set_value_cansleep(ctx->reset_gpio, 1);
- usleep_range(5150, 7000);
-
+ /* T3 + T4 + time for voltage to become stable: */
+ usleep_range(6000, 7000);
gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ gpiod_set_value_cansleep(ctx->tp_rstn_gpio, 0);
/* T6 */
msleep(50);
@@ -204,7 +207,7 @@ static int mantix_get_modes(struct drm_panel *panel,
if (!mode) {
dev_err(ctx->dev, "Failed to add mode %ux%u@%u\n",
default_mode.hdisplay, default_mode.vdisplay,
- drm_mode_vrefresh(mode));
+ drm_mode_vrefresh(&default_mode));
return -ENOMEM;
}
@@ -236,12 +239,18 @@ static int mantix_probe(struct mipi_dsi_device *dsi)
if (!ctx)
return -ENOMEM;
- ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(ctx->reset_gpio)) {
dev_err(dev, "cannot get reset gpio\n");
return PTR_ERR(ctx->reset_gpio);
}
+ ctx->tp_rstn_gpio = devm_gpiod_get(dev, "mantix,tp-rstn", GPIOD_OUT_HIGH);
+ if (IS_ERR(ctx->tp_rstn_gpio)) {
+ dev_err(dev, "cannot get tp-rstn gpio\n");
+ return PTR_ERR(ctx->tp_rstn_gpio);
+ }
+
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dev = dev;
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 37d4cb7a5491..0fc084110e5b 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -626,6 +626,7 @@ static int panfrost_probe(struct platform_device *pdev)
err_out1:
pm_runtime_disable(pfdev->dev);
panfrost_device_fini(pfdev);
+ pm_runtime_set_suspended(pfdev->dev);
err_out0:
drm_dev_put(ddev);
return err;
@@ -640,9 +641,9 @@ static int panfrost_remove(struct platform_device *pdev)
panfrost_gem_shrinker_cleanup(ddev);
pm_runtime_get_sync(pfdev->dev);
- panfrost_device_fini(pfdev);
- pm_runtime_put_sync_suspend(pfdev->dev);
pm_runtime_disable(pfdev->dev);
+ panfrost_device_fini(pfdev);
+ pm_runtime_set_suspended(pfdev->dev);
drm_dev_put(ddev);
return 0;
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index 1a6cea0e0bd7..62d4d710a571 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -105,14 +105,12 @@ void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping)
kref_put(&mapping->refcount, panfrost_gem_mapping_release);
}
-void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo)
+void panfrost_gem_teardown_mappings_locked(struct panfrost_gem_object *bo)
{
struct panfrost_gem_mapping *mapping;
- mutex_lock(&bo->mappings.lock);
list_for_each_entry(mapping, &bo->mappings.list, node)
panfrost_gem_teardown_mapping(mapping);
- mutex_unlock(&bo->mappings.lock);
}
int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h
index b3517ff9630c..8088d5fd8480 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.h
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
@@ -82,7 +82,7 @@ struct panfrost_gem_mapping *
panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
struct panfrost_file_priv *priv);
void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping);
-void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo);
+void panfrost_gem_teardown_mappings_locked(struct panfrost_gem_object *bo);
void panfrost_gem_shrinker_init(struct drm_device *dev);
void panfrost_gem_shrinker_cleanup(struct drm_device *dev);
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
index 288e46c40673..1b9f68d8e9aa 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
@@ -40,18 +40,26 @@ static bool panfrost_gem_purge(struct drm_gem_object *obj)
{
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
struct panfrost_gem_object *bo = to_panfrost_bo(obj);
+ bool ret = false;
if (atomic_read(&bo->gpu_usecount))
return false;
- if (!mutex_trylock(&shmem->pages_lock))
+ if (!mutex_trylock(&bo->mappings.lock))
return false;
- panfrost_gem_teardown_mappings(bo);
+ if (!mutex_trylock(&shmem->pages_lock))
+ goto unlock_mappings;
+
+ panfrost_gem_teardown_mappings_locked(bo);
drm_gem_shmem_purge_locked(obj);
+ ret = true;
mutex_unlock(&shmem->pages_lock);
- return true;
+
+unlock_mappings:
+ mutex_unlock(&bo->mappings.lock);
+ return ret;
}
static unsigned long
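The shrinker now establishes a consistent order, bo->mappings.lock before shmem->pages_lock, and takes both with trylock so it can back off instead of blocking against allocation paths that already hold one of them. A minimal pthread model of the back-off pattern (build with -pthread):

#include <assert.h>
#include <pthread.h>

static pthread_mutex_t mappings_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t pages_lock    = PTHREAD_MUTEX_INITIALIZER;

static int try_purge(void)
{
    int ret = 0;

    if (pthread_mutex_trylock(&mappings_lock))
        return 0;                      /* contended: skip this object */
    if (pthread_mutex_trylock(&pages_lock))
        goto unlock_mappings;

    /* ... teardown mappings, purge pages ... */
    ret = 1;
    pthread_mutex_unlock(&pages_lock);
unlock_mappings:
    pthread_mutex_unlock(&mappings_lock);
    return ret;
}

int main(void)
{
    assert(try_purge() == 1);

    pthread_mutex_lock(&pages_lock);   /* another path holds pages_lock */
    assert(try_purge() == 0);          /* shrinker backs off cleanly */
    pthread_mutex_unlock(&pages_lock);
    return 0;
}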
diff --git a/drivers/gpu/drm/sun4i/sun4i_frontend.c b/drivers/gpu/drm/sun4i/sun4i_frontend.c
index b51cc685c13a..edb60ae0a9b7 100644
--- a/drivers/gpu/drm/sun4i/sun4i_frontend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_frontend.c
@@ -407,6 +407,7 @@ int sun4i_frontend_update_formats(struct sun4i_frontend *frontend,
struct drm_framebuffer *fb = state->fb;
const struct drm_format_info *format = fb->format;
uint64_t modifier = fb->modifier;
+ unsigned int ch1_phase_idx;
u32 out_fmt_val;
u32 in_fmt_val, in_mod_val, in_ps_val;
unsigned int i;
@@ -442,18 +443,19 @@ int sun4i_frontend_update_formats(struct sun4i_frontend *frontend,
* I have no idea what this does exactly, but it seems to be
* related to the scaler FIR filter phase parameters.
*/
+ ch1_phase_idx = (format->num_planes > 1) ? 1 : 0;
regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_HORZPHASE_REG,
- frontend->data->ch_phase[0].horzphase);
+ frontend->data->ch_phase[0]);
regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_HORZPHASE_REG,
- frontend->data->ch_phase[1].horzphase);
+ frontend->data->ch_phase[ch1_phase_idx]);
regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_VERTPHASE0_REG,
- frontend->data->ch_phase[0].vertphase[0]);
+ frontend->data->ch_phase[0]);
regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_VERTPHASE0_REG,
- frontend->data->ch_phase[1].vertphase[0]);
+ frontend->data->ch_phase[ch1_phase_idx]);
regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_VERTPHASE1_REG,
- frontend->data->ch_phase[0].vertphase[1]);
+ frontend->data->ch_phase[0]);
regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_VERTPHASE1_REG,
- frontend->data->ch_phase[1].vertphase[1]);
+ frontend->data->ch_phase[ch1_phase_idx]);
/*
* Checking the input format is sufficient since we currently only
@@ -687,30 +689,12 @@ static const struct dev_pm_ops sun4i_frontend_pm_ops = {
};
static const struct sun4i_frontend_data sun4i_a10_frontend = {
- .ch_phase = {
- {
- .horzphase = 0,
- .vertphase = { 0, 0 },
- },
- {
- .horzphase = 0xfc000,
- .vertphase = { 0xfc000, 0xfc000 },
- },
- },
+ .ch_phase = { 0x000, 0xfc000 },
.has_coef_rdy = true,
};
static const struct sun4i_frontend_data sun8i_a33_frontend = {
- .ch_phase = {
- {
- .horzphase = 0x400,
- .vertphase = { 0x400, 0x400 },
- },
- {
- .horzphase = 0x400,
- .vertphase = { 0x400, 0x400 },
- },
- },
+ .ch_phase = { 0x400, 0xfc400 },
.has_coef_access_ctrl = true,
};
diff --git a/drivers/gpu/drm/sun4i/sun4i_frontend.h b/drivers/gpu/drm/sun4i/sun4i_frontend.h
index 0c382c1ddb0f..2e7b76e50c2b 100644
--- a/drivers/gpu/drm/sun4i/sun4i_frontend.h
+++ b/drivers/gpu/drm/sun4i/sun4i_frontend.h
@@ -115,11 +115,7 @@ struct reset_control;
struct sun4i_frontend_data {
bool has_coef_access_ctrl;
bool has_coef_rdy;
-
- struct {
- u32 horzphase;
- u32 vertphase[2];
- } ch_phase[2];
+ u32 ch_phase[2];
};
struct sun4i_frontend {
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 915f8bfdb58c..182c586525eb 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -568,7 +568,6 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
ret = v3d_job_init(v3d, file_priv, &bin->base,
v3d_job_free, args->in_sync_bcl);
if (ret) {
- kfree(bin);
v3d_job_put(&render->base);
kfree(bin);
return ret;
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 74ceebd62fbc..cc74a3f3a07a 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -449,7 +449,7 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
}
if (IS_ERR(cma_obj)) {
- struct drm_printer p = drm_info_printer(vc4->dev->dev);
+ struct drm_printer p = drm_info_printer(vc4->base.dev);
DRM_ERROR("Failed to allocate from CMA:\n");
vc4_bo_stats_print(&p, vc4);
return ERR_PTR(-ENOMEM);
@@ -590,7 +590,7 @@ static void vc4_bo_cache_time_work(struct work_struct *work)
{
struct vc4_dev *vc4 =
container_of(work, struct vc4_dev, bo_cache.time_work);
- struct drm_device *dev = vc4->dev;
+ struct drm_device *dev = &vc4->base;
mutex_lock(&vc4->bo_lock);
vc4_bo_cache_free_old(dev);
@@ -1005,6 +1005,7 @@ int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
return 0;
}
+static void vc4_bo_cache_destroy(struct drm_device *dev, void *unused);
int vc4_bo_cache_init(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
@@ -1033,10 +1034,10 @@ int vc4_bo_cache_init(struct drm_device *dev)
INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work);
timer_setup(&vc4->bo_cache.time_timer, vc4_bo_cache_time_timer, 0);
- return 0;
+ return drmm_add_action_or_reset(dev, vc4_bo_cache_destroy, NULL);
}
-void vc4_bo_cache_destroy(struct drm_device *dev)
+static void vc4_bo_cache_destroy(struct drm_device *dev, void *unused)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
int i;
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index f1a5fd5dab6f..839610f8092a 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -257,37 +257,37 @@ static int vc4_drm_bind(struct device *dev)
dev->coherent_dma_mask = DMA_BIT_MASK(32);
- vc4 = devm_kzalloc(dev, sizeof(*vc4), GFP_KERNEL);
- if (!vc4)
- return -ENOMEM;
-
/* If VC4 V3D is missing, don't advertise render nodes. */
node = of_find_matching_node_and_match(NULL, vc4_v3d_dt_match, NULL);
if (!node || !of_device_is_available(node))
vc4_drm_driver.driver_features &= ~DRIVER_RENDER;
of_node_put(node);
- drm = drm_dev_alloc(&vc4_drm_driver, dev);
- if (IS_ERR(drm))
- return PTR_ERR(drm);
+ vc4 = devm_drm_dev_alloc(dev, &vc4_drm_driver, struct vc4_dev, base);
+ if (IS_ERR(vc4))
+ return PTR_ERR(vc4);
+
+ drm = &vc4->base;
platform_set_drvdata(pdev, drm);
- vc4->dev = drm;
- drm->dev_private = vc4;
INIT_LIST_HEAD(&vc4->debugfs_list);
mutex_init(&vc4->bin_bo_lock);
ret = vc4_bo_cache_init(drm);
if (ret)
- goto dev_put;
+ return ret;
- drm_mode_config_init(drm);
+ ret = drmm_mode_config_init(drm);
+ if (ret)
+ return ret;
- vc4_gem_init(drm);
+ ret = vc4_gem_init(drm);
+ if (ret)
+ return ret;
ret = component_bind_all(dev, drm);
if (ret)
- goto gem_destroy;
+ return ret;
ret = vc4_plane_create_additional_planes(drm);
if (ret)
@@ -312,29 +312,17 @@ static int vc4_drm_bind(struct device *dev)
unbind_all:
component_unbind_all(dev, drm);
-gem_destroy:
- vc4_gem_destroy(drm);
- vc4_bo_cache_destroy(drm);
-dev_put:
- drm_dev_put(drm);
+
return ret;
}
static void vc4_drm_unbind(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
- struct vc4_dev *vc4 = to_vc4_dev(drm);
drm_dev_unregister(drm);
drm_atomic_helper_shutdown(drm);
-
- drm_mode_config_cleanup(drm);
-
- drm_atomic_private_obj_fini(&vc4->load_tracker);
- drm_atomic_private_obj_fini(&vc4->ctm_manager);
-
- drm_dev_put(drm);
}
static const struct component_master_ops vc4_drm_ops = {
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 90b911fd2a7f..19b75bebd35f 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -14,6 +14,7 @@
#include <drm/drm_device.h>
#include <drm/drm_encoder.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_mm.h>
#include <drm/drm_modeset_lock.h>
@@ -71,7 +72,7 @@ struct vc4_perfmon {
};
struct vc4_dev {
- struct drm_device *dev;
+ struct drm_device base;
struct vc4_hvs *hvs;
struct vc4_v3d *v3d;
@@ -234,7 +235,7 @@ struct vc4_dev {
static inline struct vc4_dev *
to_vc4_dev(struct drm_device *dev)
{
- return (struct vc4_dev *)dev->dev_private;
+ return container_of(dev, struct vc4_dev, base);
}
struct vc4_bo {
@@ -287,7 +288,7 @@ struct vc4_bo {
static inline struct vc4_bo *
to_vc4_bo(struct drm_gem_object *bo)
{
- return (struct vc4_bo *)bo;
+ return container_of(to_drm_gem_cma_obj(bo), struct vc4_bo, base);
}
struct vc4_fence {
@@ -300,7 +301,7 @@ struct vc4_fence {
static inline struct vc4_fence *
to_vc4_fence(struct dma_fence *fence)
{
- return (struct vc4_fence *)fence;
+ return container_of(fence, struct vc4_fence, base);
}
struct vc4_seqno_cb {
@@ -347,7 +348,7 @@ struct vc4_plane {
static inline struct vc4_plane *
to_vc4_plane(struct drm_plane *plane)
{
- return (struct vc4_plane *)plane;
+ return container_of(plane, struct vc4_plane, base);
}
enum vc4_scaling_mode {
@@ -423,7 +424,7 @@ struct vc4_plane_state {
static inline struct vc4_plane_state *
to_vc4_plane_state(struct drm_plane_state *state)
{
- return (struct vc4_plane_state *)state;
+ return container_of(state, struct vc4_plane_state, base);
}
enum vc4_encoder_type {
@@ -499,7 +500,7 @@ struct vc4_crtc {
static inline struct vc4_crtc *
to_vc4_crtc(struct drm_crtc *crtc)
{
- return (struct vc4_crtc *)crtc;
+ return container_of(crtc, struct vc4_crtc, base);
}
static inline const struct vc4_crtc_data *
@@ -537,7 +538,7 @@ struct vc4_crtc_state {
static inline struct vc4_crtc_state *
to_vc4_crtc_state(struct drm_crtc_state *crtc_state)
{
- return (struct vc4_crtc_state *)crtc_state;
+ return container_of(crtc_state, struct vc4_crtc_state, base);
}
#define V3D_READ(offset) readl(vc4->v3d->regs + offset)
@@ -809,7 +810,6 @@ struct drm_gem_object *vc4_prime_import_sg_table(struct drm_device *dev,
struct sg_table *sgt);
void *vc4_prime_vmap(struct drm_gem_object *obj);
int vc4_bo_cache_init(struct drm_device *dev);
-void vc4_bo_cache_destroy(struct drm_device *dev);
int vc4_bo_inc_usecnt(struct vc4_bo *bo);
void vc4_bo_dec_usecnt(struct vc4_bo *bo);
void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo);
@@ -874,8 +874,7 @@ extern struct platform_driver vc4_dsi_driver;
extern const struct dma_fence_ops vc4_fence_ops;
/* vc4_gem.c */
-void vc4_gem_init(struct drm_device *dev);
-void vc4_gem_destroy(struct drm_device *dev);
+int vc4_gem_init(struct drm_device *dev);
int vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
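
The to_vc4_*() conversions above all move from raw pointer casts to container_of(); a small self-contained illustration of why, with made-up base/derived names:

	#include <linux/kernel.h>	/* container_of() */

	struct base_obj { int refcount; };

	struct derived_obj {
		int extra;
		struct base_obj base;	/* no longer needs to be first */
	};

	static inline struct derived_obj *to_derived(struct base_obj *obj)
	{
		/*
		 * Computes the enclosing pointer from the member offset and
		 * fails to compile if 'base' is renamed or retyped; a raw
		 * (struct derived_obj *) cast silently breaks instead, and
		 * only works at all when the member happens to be first.
		 */
		return container_of(obj, struct derived_obj, base);
	}
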
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 9f01ddd5b932..b641252939d8 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -314,16 +314,16 @@ vc4_reset_work(struct work_struct *work)
struct vc4_dev *vc4 =
container_of(work, struct vc4_dev, hangcheck.reset_work);
- vc4_save_hang_state(vc4->dev);
+ vc4_save_hang_state(&vc4->base);
- vc4_reset(vc4->dev);
+ vc4_reset(&vc4->base);
}
static void
vc4_hangcheck_elapsed(struct timer_list *t)
{
struct vc4_dev *vc4 = from_timer(vc4, t, hangcheck.timer);
- struct drm_device *dev = vc4->dev;
+ struct drm_device *dev = &vc4->base;
uint32_t ct0ca, ct1ca;
unsigned long irqflags;
struct vc4_exec_info *bin_exec, *render_exec;
@@ -1000,7 +1000,7 @@ vc4_job_handle_completed(struct vc4_dev *vc4)
list_del(&exec->head);
spin_unlock_irqrestore(&vc4->job_lock, irqflags);
- vc4_complete_exec(vc4->dev, exec);
+ vc4_complete_exec(&vc4->base, exec);
spin_lock_irqsave(&vc4->job_lock, irqflags);
}
@@ -1258,13 +1258,13 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
return 0;
fail:
- vc4_complete_exec(vc4->dev, exec);
+ vc4_complete_exec(&vc4->base, exec);
return ret;
}
-void
-vc4_gem_init(struct drm_device *dev)
+static void vc4_gem_destroy(struct drm_device *dev, void *unused);
+int vc4_gem_init(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
@@ -1285,10 +1285,11 @@ vc4_gem_init(struct drm_device *dev)
INIT_LIST_HEAD(&vc4->purgeable.list);
mutex_init(&vc4->purgeable.lock);
+
+ return drmm_add_action_or_reset(dev, vc4_gem_destroy, NULL);
}
-void
-vc4_gem_destroy(struct drm_device *dev)
+static void vc4_gem_destroy(struct drm_device *dev, void *unused)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index e8f99e290655..95779d50cca0 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -922,6 +922,7 @@ static int vc4_hdmi_audio_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct vc4_hdmi *vc4_hdmi = dai_to_hdmi(dai);
+ struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base;
struct device *dev = &vc4_hdmi->pdev->dev;
u32 audio_packet_config, channel_mask;
u32 channel_map;
@@ -981,6 +982,8 @@ static int vc4_hdmi_audio_hw_params(struct snd_pcm_substream *substream,
HDMI_WRITE(HDMI_AUDIO_PACKET_CONFIG, audio_packet_config);
vc4_hdmi_set_n_cts(vc4_hdmi);
+ vc4_hdmi_set_audio_infoframe(encoder);
+
return 0;
}
@@ -988,11 +991,9 @@ static int vc4_hdmi_audio_trigger(struct snd_pcm_substream *substream, int cmd,
struct snd_soc_dai *dai)
{
struct vc4_hdmi *vc4_hdmi = dai_to_hdmi(dai);
- struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base;
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
- vc4_hdmi_set_audio_infoframe(encoder);
vc4_hdmi->audio.streaming = true;
if (vc4_hdmi->variant->phy_rng_enable)
@@ -1076,6 +1077,7 @@ static const struct snd_soc_dapm_route vc4_hdmi_audio_routes[] = {
};
static const struct snd_soc_component_driver vc4_hdmi_audio_component_drv = {
+ .name = "vc4-hdmi-codec-dai-component",
.controls = vc4_hdmi_audio_controls,
.num_controls = ARRAY_SIZE(vc4_hdmi_audio_controls),
.dapm_widgets = vc4_hdmi_audio_widgets,
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index 4d0a833366ce..b72b2bd05a81 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -560,7 +560,7 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = dev_get_drvdata(master);
- struct vc4_dev *vc4 = drm->dev_private;
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
struct vc4_hvs *hvs = NULL;
int ret;
u32 dispctrl;
@@ -679,7 +679,7 @@ static void vc4_hvs_unbind(struct device *dev, struct device *master,
void *data)
{
struct drm_device *drm = dev_get_drvdata(master);
- struct vc4_dev *vc4 = drm->dev_private;
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
struct vc4_hvs *hvs = vc4->hvs;
if (drm_mm_node_allocated(&vc4->hvs->mitchell_netravali_filter))
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index 149825ff5df8..2b951cae04ad 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -51,7 +51,7 @@ static struct vc4_ctm_state *vc4_get_ctm_state(struct drm_atomic_state *state,
struct drm_private_obj *manager)
{
struct drm_device *dev = state->dev;
- struct vc4_dev *vc4 = dev->dev_private;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_private_state *priv_state;
int ret;
@@ -93,6 +93,29 @@ static const struct drm_private_state_funcs vc4_ctm_state_funcs = {
.atomic_destroy_state = vc4_ctm_destroy_state,
};
+static void vc4_ctm_obj_fini(struct drm_device *dev, void *unused)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ drm_atomic_private_obj_fini(&vc4->ctm_manager);
+}
+
+static int vc4_ctm_obj_init(struct vc4_dev *vc4)
+{
+ struct vc4_ctm_state *ctm_state;
+
+ drm_modeset_lock_init(&vc4->ctm_state_lock);
+
+ ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL);
+ if (!ctm_state)
+ return -ENOMEM;
+
+ drm_atomic_private_obj_init(&vc4->base, &vc4->ctm_manager, &ctm_state->base,
+ &vc4_ctm_state_funcs);
+
+ return drmm_add_action(&vc4->base, vc4_ctm_obj_fini, NULL);
+}
+
/* Converts a DRM S31.32 value to the HW S0.9 format. */
static u16 vc4_ctm_s31_32_to_s0_9(u64 in)
{
@@ -609,6 +632,34 @@ static const struct drm_private_state_funcs vc4_load_tracker_state_funcs = {
.atomic_destroy_state = vc4_load_tracker_destroy_state,
};
+static void vc4_load_tracker_obj_fini(struct drm_device *dev, void *unused)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ if (!vc4->load_tracker_available)
+ return;
+
+ drm_atomic_private_obj_fini(&vc4->load_tracker);
+}
+
+static int vc4_load_tracker_obj_init(struct vc4_dev *vc4)
+{
+ struct vc4_load_tracker_state *load_state;
+
+ if (!vc4->load_tracker_available)
+ return 0;
+
+ load_state = kzalloc(sizeof(*load_state), GFP_KERNEL);
+ if (!load_state)
+ return -ENOMEM;
+
+ drm_atomic_private_obj_init(&vc4->base, &vc4->load_tracker,
+ &load_state->base,
+ &vc4_load_tracker_state_funcs);
+
+ return drmm_add_action(&vc4->base, vc4_load_tracker_obj_fini, NULL);
+}
+
#define NUM_OUTPUTS 6
#define NUM_CHANNELS 3
@@ -711,8 +762,6 @@ static const struct drm_mode_config_funcs vc4_mode_funcs = {
int vc4_kms_load(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct vc4_ctm_state *ctm_state;
- struct vc4_load_tracker_state *load_state;
bool is_vc5 = of_device_is_compatible(dev->dev->of_node,
"brcm,bcm2711-vc5");
int ret;
@@ -751,26 +800,13 @@ int vc4_kms_load(struct drm_device *dev)
dev->mode_config.async_page_flip = true;
dev->mode_config.allow_fb_modifiers = true;
- drm_modeset_lock_init(&vc4->ctm_state_lock);
-
- ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL);
- if (!ctm_state)
- return -ENOMEM;
-
- drm_atomic_private_obj_init(dev, &vc4->ctm_manager, &ctm_state->base,
- &vc4_ctm_state_funcs);
-
- if (vc4->load_tracker_available) {
- load_state = kzalloc(sizeof(*load_state), GFP_KERNEL);
- if (!load_state) {
- drm_atomic_private_obj_fini(&vc4->ctm_manager);
- return -ENOMEM;
- }
+ ret = vc4_ctm_obj_init(vc4);
+ if (ret)
+ return ret;
- drm_atomic_private_obj_init(dev, &vc4->load_tracker,
- &load_state->base,
- &vc4_load_tracker_state_funcs);
- }
+ ret = vc4_load_tracker_obj_init(vc4);
+ if (ret)
+ return ret;
drm_mode_config_reset(dev);
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index f7ab979721b3..65d0dac69b0b 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -168,7 +168,7 @@ static void vc4_v3d_init_hw(struct drm_device *dev)
int vc4_v3d_get_bin_slot(struct vc4_dev *vc4)
{
- struct drm_device *dev = vc4->dev;
+ struct drm_device *dev = &vc4->base;
unsigned long irqflags;
int slot;
uint64_t seqno = 0;
@@ -246,7 +246,7 @@ static int bin_bo_alloc(struct vc4_dev *vc4)
INIT_LIST_HEAD(&list);
while (true) {
- struct vc4_bo *bo = vc4_bo_create(vc4->dev, size, true,
+ struct vc4_bo *bo = vc4_bo_create(&vc4->base, size, true,
VC4_BO_TYPE_BIN);
if (IS_ERR(bo)) {
@@ -361,7 +361,7 @@ static int vc4_v3d_runtime_suspend(struct device *dev)
struct vc4_v3d *v3d = dev_get_drvdata(dev);
struct vc4_dev *vc4 = v3d->vc4;
- vc4_irq_uninstall(vc4->dev);
+ vc4_irq_uninstall(&vc4->base);
clk_disable_unprepare(v3d->clk);
@@ -378,11 +378,11 @@ static int vc4_v3d_runtime_resume(struct device *dev)
if (ret != 0)
return ret;
- vc4_v3d_init_hw(vc4->dev);
+ vc4_v3d_init_hw(&vc4->base);
/* We disabled the IRQ as part of vc4_irq_uninstall in suspend. */
- enable_irq(vc4->dev->irq);
- vc4_irq_postinstall(vc4->dev);
+ enable_irq(vc4->base.irq);
+ vc4_irq_postinstall(&vc4->base);
return 0;
}
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index b3dae9ec1a38..d166ee262ce4 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -133,73 +133,6 @@ enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat)
}
EXPORT_SYMBOL_GPL(ipu_pixelformat_to_colorspace);
-bool ipu_pixelformat_is_planar(u32 pixelformat)
-{
- switch (pixelformat) {
- case V4L2_PIX_FMT_YUV420:
- case V4L2_PIX_FMT_YVU420:
- case V4L2_PIX_FMT_YUV422P:
- case V4L2_PIX_FMT_NV12:
- case V4L2_PIX_FMT_NV21:
- case V4L2_PIX_FMT_NV16:
- case V4L2_PIX_FMT_NV61:
- return true;
- }
-
- return false;
-}
-EXPORT_SYMBOL_GPL(ipu_pixelformat_is_planar);
-
-enum ipu_color_space ipu_mbus_code_to_colorspace(u32 mbus_code)
-{
- switch (mbus_code & 0xf000) {
- case 0x1000:
- return IPUV3_COLORSPACE_RGB;
- case 0x2000:
- return IPUV3_COLORSPACE_YUV;
- default:
- return IPUV3_COLORSPACE_UNKNOWN;
- }
-}
-EXPORT_SYMBOL_GPL(ipu_mbus_code_to_colorspace);
-
-int ipu_stride_to_bytes(u32 pixel_stride, u32 pixelformat)
-{
- switch (pixelformat) {
- case V4L2_PIX_FMT_YUV420:
- case V4L2_PIX_FMT_YVU420:
- case V4L2_PIX_FMT_YUV422P:
- case V4L2_PIX_FMT_NV12:
- case V4L2_PIX_FMT_NV21:
- case V4L2_PIX_FMT_NV16:
- case V4L2_PIX_FMT_NV61:
- /*
- * for the planar YUV formats, the stride passed to
- * cpmem must be the stride in bytes of the Y plane.
- * And all the planar YUV formats have an 8-bit
- * Y component.
- */
- return (8 * pixel_stride) >> 3;
- case V4L2_PIX_FMT_RGB565:
- case V4L2_PIX_FMT_YUYV:
- case V4L2_PIX_FMT_UYVY:
- return (16 * pixel_stride) >> 3;
- case V4L2_PIX_FMT_BGR24:
- case V4L2_PIX_FMT_RGB24:
- return (24 * pixel_stride) >> 3;
- case V4L2_PIX_FMT_BGR32:
- case V4L2_PIX_FMT_RGB32:
- case V4L2_PIX_FMT_XBGR32:
- case V4L2_PIX_FMT_XRGB32:
- return (32 * pixel_stride) >> 3;
- default:
- break;
- }
-
- return -EINVAL;
-}
-EXPORT_SYMBOL_GPL(ipu_stride_to_bytes);
-
int ipu_degrees_to_rot_mode(enum ipu_rotate_mode *mode, int degrees,
bool hflip, bool vflip)
{
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index b64d2efbefe7..eb56e09ae15f 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -1275,7 +1275,7 @@ static void balloon_up(struct work_struct *dummy)
/* Refuse to balloon below the floor. */
if (avail_pages < num_pages || avail_pages - num_pages < floor) {
- pr_warn("Balloon request will be partially fulfilled. %s\n",
+ pr_info("Balloon request will be partially fulfilled. %s\n",
avail_pages < num_pages ? "Not enough memory." :
"Balloon floor reached.");
diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c
index 6994c1309b2b..cc9e8025c533 100644
--- a/drivers/hwtracing/coresight/coresight-core.c
+++ b/drivers/hwtracing/coresight/coresight-core.c
@@ -1689,6 +1689,7 @@ static void __exit coresight_exit(void)
module_init(coresight_init);
module_exit(coresight_exit);
+MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Pratik Patel <pratikp@codeaurora.org>");
MODULE_AUTHOR("Mathieu Poirier <mathieu.poirier@linaro.org>");
MODULE_DESCRIPTION("Arm CoreSight tracer driver");
diff --git a/drivers/hwtracing/coresight/coresight-cti-sysfs.c b/drivers/hwtracing/coresight/coresight-cti-sysfs.c
index 392757f3a019..7ff7e7780bbf 100644
--- a/drivers/hwtracing/coresight/coresight-cti-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-cti-sysfs.c
@@ -1065,6 +1065,13 @@ static int cti_create_con_sysfs_attr(struct device *dev,
}
eattr->var = con;
con->con_attrs[attr_idx] = &eattr->attr.attr;
+ /*
+ * Initialize the dynamically allocated attribute
+ * to avoid LOCKDEP splat. See include/linux/sysfs.h
+ * for more details.
+ */
+ sysfs_attr_init(con->con_attrs[attr_idx]);
+
return 0;
}
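
For reference, a hedged sketch of the rule the new comment cites, using hypothetical names: any attribute allocated at runtime (as opposed to one defined statically via DEVICE_ATTR()) must go through sysfs_attr_init() before registration so lockdep receives a valid key.

	struct dev_ext_attribute *eattr;

	eattr = devm_kzalloc(dev, sizeof(*eattr), GFP_KERNEL);
	if (!eattr)
		return -ENOMEM;

	sysfs_attr_init(&eattr->attr.attr);	/* must precede registration */
	eattr->attr.attr.name = "demo_attr";	/* hypothetical attribute */
	eattr->attr.attr.mode = 0444;
	eattr->attr.show = demo_show;		/* hypothetical show() */
	eattr->var = ctx;			/* hypothetical private data */

	return device_create_file(dev, &eattr->attr);
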
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index c2c9b127d074..bdc34ca449f7 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -210,7 +210,7 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
u32 id;
int cpu = event->cpu;
cpumask_t *mask;
- struct coresight_device *sink;
+ struct coresight_device *sink = NULL;
struct etm_event_data *event_data = NULL;
event_data = alloc_event_data(cpu);
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index a4f473ef4e5c..a97a9d058198 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -733,7 +733,7 @@ config I2C_LPC2K
config I2C_MLXBF
tristate "Mellanox BlueField I2C controller"
- depends on ARM64
+ depends on MELLANOX_PLATFORM && ARM64
help
Enabling this option will add I2C SMBus support for Mellanox BlueField
system.
diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c
index 44974b53a626..0d15f4c1e9f7 100644
--- a/drivers/i2c/busses/i2c-designware-slave.c
+++ b/drivers/i2c/busses/i2c-designware-slave.c
@@ -159,7 +159,6 @@ static int i2c_dw_irq_handler_slave(struct dw_i2c_dev *dev)
u32 raw_stat, stat, enabled, tmp;
u8 val = 0, slave_activity;
- regmap_read(dev->map, DW_IC_INTR_STAT, &stat);
regmap_read(dev->map, DW_IC_ENABLE, &enabled);
regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &raw_stat);
regmap_read(dev->map, DW_IC_STATUS, &tmp);
@@ -168,32 +167,30 @@ static int i2c_dw_irq_handler_slave(struct dw_i2c_dev *dev)
if (!enabled || !(raw_stat & ~DW_IC_INTR_ACTIVITY) || !dev->slave)
return 0;
+ stat = i2c_dw_read_clear_intrbits_slave(dev);
dev_dbg(dev->dev,
"%#x STATUS SLAVE_ACTIVITY=%#x : RAW_INTR_STAT=%#x : INTR_STAT=%#x\n",
enabled, slave_activity, raw_stat, stat);
- if ((stat & DW_IC_INTR_RX_FULL) && (stat & DW_IC_INTR_STOP_DET))
- i2c_slave_event(dev->slave, I2C_SLAVE_WRITE_REQUESTED, &val);
+ if (stat & DW_IC_INTR_RX_FULL) {
+ if (dev->status != STATUS_WRITE_IN_PROGRESS) {
+ dev->status = STATUS_WRITE_IN_PROGRESS;
+ i2c_slave_event(dev->slave, I2C_SLAVE_WRITE_REQUESTED,
+ &val);
+ }
+
+ regmap_read(dev->map, DW_IC_DATA_CMD, &tmp);
+ val = tmp;
+ if (!i2c_slave_event(dev->slave, I2C_SLAVE_WRITE_RECEIVED,
+ &val))
+ dev_vdbg(dev->dev, "Byte %X acked!", val);
+ }
if (stat & DW_IC_INTR_RD_REQ) {
if (slave_activity) {
- if (stat & DW_IC_INTR_RX_FULL) {
- regmap_read(dev->map, DW_IC_DATA_CMD, &tmp);
- val = tmp;
-
- if (!i2c_slave_event(dev->slave,
- I2C_SLAVE_WRITE_RECEIVED,
- &val)) {
- dev_vdbg(dev->dev, "Byte %X acked!",
- val);
- }
- regmap_read(dev->map, DW_IC_CLR_RD_REQ, &tmp);
- stat = i2c_dw_read_clear_intrbits_slave(dev);
- } else {
- regmap_read(dev->map, DW_IC_CLR_RD_REQ, &tmp);
- regmap_read(dev->map, DW_IC_CLR_RX_UNDER, &tmp);
- stat = i2c_dw_read_clear_intrbits_slave(dev);
- }
+ regmap_read(dev->map, DW_IC_CLR_RD_REQ, &tmp);
+
+ dev->status = STATUS_READ_IN_PROGRESS;
if (!i2c_slave_event(dev->slave,
I2C_SLAVE_READ_REQUESTED,
&val))
@@ -205,21 +202,11 @@ static int i2c_dw_irq_handler_slave(struct dw_i2c_dev *dev)
if (!i2c_slave_event(dev->slave, I2C_SLAVE_READ_PROCESSED,
&val))
regmap_read(dev->map, DW_IC_CLR_RX_DONE, &tmp);
-
- i2c_slave_event(dev->slave, I2C_SLAVE_STOP, &val);
- stat = i2c_dw_read_clear_intrbits_slave(dev);
- return 1;
}
- if (stat & DW_IC_INTR_RX_FULL) {
- regmap_read(dev->map, DW_IC_DATA_CMD, &tmp);
- val = tmp;
- if (!i2c_slave_event(dev->slave, I2C_SLAVE_WRITE_RECEIVED,
- &val))
- dev_vdbg(dev->dev, "Byte %X acked!", val);
- } else {
+ if (stat & DW_IC_INTR_STOP_DET) {
+ dev->status = STATUS_IDLE;
i2c_slave_event(dev->slave, I2C_SLAVE_STOP, &val);
- stat = i2c_dw_read_clear_intrbits_slave(dev);
}
return 1;
@@ -230,7 +217,6 @@ static irqreturn_t i2c_dw_isr_slave(int this_irq, void *dev_id)
struct dw_i2c_dev *dev = dev_id;
int ret;
- i2c_dw_read_clear_intrbits_slave(dev);
ret = i2c_dw_irq_handler_slave(dev);
if (ret > 0)
complete(&dev->cmd_complete);
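
The rework above aligns the controller with the event sequence a slave backend expects: WRITE_REQUESTED, then one WRITE_RECEIVED per byte, then STOP. A minimal, illustrative backend callback (the shape i2c_slave_event() calls into, registered via i2c_slave_register(); demo_* names are hypothetical):

	static u8 demo_reg;	/* single hypothetical register */

	static int demo_slave_cb(struct i2c_client *client,
				 enum i2c_slave_event event, u8 *val)
	{
		switch (event) {
		case I2C_SLAVE_WRITE_RECEIVED:
			demo_reg = *val;	/* byte arrived from master */
			break;
		case I2C_SLAVE_READ_REQUESTED:
		case I2C_SLAVE_READ_PROCESSED:
			*val = demo_reg;	/* next byte for the master */
			break;
		case I2C_SLAVE_WRITE_REQUESTED:
		case I2C_SLAVE_STOP:
		default:
			break;
		}
		return 0;
	}
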
diff --git a/drivers/i2c/busses/i2c-mlxbf.c b/drivers/i2c/busses/i2c-mlxbf.c
index ee59e0da082d..33574d40ea9c 100644
--- a/drivers/i2c/busses/i2c-mlxbf.c
+++ b/drivers/i2c/busses/i2c-mlxbf.c
@@ -62,10 +62,8 @@
* Master. Default value is set to 400MHz.
*/
#define MLXBF_I2C_TYU_PLL_OUT_FREQ (400 * 1000 * 1000)
-/* Reference clock for Bluefield 1 - 156 MHz. */
-#define MLXBF_I2C_TYU_PLL_IN_FREQ (156 * 1000 * 1000)
-/* Reference clock for BlueField 2 - 200 MHz. */
-#define MLXBF_I2C_YU_PLL_IN_FREQ (200 * 1000 * 1000)
+/* Reference clock for BlueField - 156 MHz. */
+#define MLXBF_I2C_PLL_IN_FREQ (156 * 1000 * 1000)
/* Constant used to determine the PLL frequency. */
#define MLNXBF_I2C_COREPLL_CONST 16384
@@ -489,44 +487,6 @@ static struct mutex mlxbf_i2c_bus_lock;
#define MLXBF_I2C_FREQUENCY_1GHZ 1000000000
-static void mlxbf_i2c_write(void __iomem *io, int reg, u32 val)
-{
- writel(val, io + reg);
-}
-
-static u32 mlxbf_i2c_read(void __iomem *io, int reg)
-{
- return readl(io + reg);
-}
-
-/*
- * This function is used to read data from Master GW Data Descriptor.
- * Data bytes in the Master GW Data Descriptor are shifted left so the
- * data starts at the MSB of the descriptor registers as set by the
- * underlying hardware. TYU_READ_DATA enables byte swapping while
- * reading data bytes, and MUST be called by the SMBus read routines
- * to copy data from the 32 * 32-bit HW Data registers a.k.a Master GW
- * Data Descriptor.
- */
-static u32 mlxbf_i2c_read_data(void __iomem *io, int reg)
-{
- return (u32)be32_to_cpu(mlxbf_i2c_read(io, reg));
-}
-
-/*
- * This function is used to write data to the Master GW Data Descriptor.
- * Data copied to the Master GW Data Descriptor MUST be shifted left so
- * the data starts at the MSB of the descriptor registers as required by
- * the underlying hardware. TYU_WRITE_DATA enables byte swapping when
- * writing data bytes, and MUST be called by the SMBus write routines to
- * copy data to the 32 * 32-bit HW Data registers a.k.a Master GW Data
- * Descriptor.
- */
-static void mlxbf_i2c_write_data(void __iomem *io, int reg, u32 val)
-{
- mlxbf_i2c_write(io, reg, (u32)cpu_to_be32(val));
-}
-
/*
* Function to poll a set of bits at a specific address; it checks whether
* the bits are equal to zero when eq_zero is set to 'true', and not equal
@@ -541,7 +501,7 @@ static u32 mlxbf_smbus_poll(void __iomem *io, u32 addr, u32 mask,
timeout = (timeout / MLXBF_I2C_POLL_FREQ_IN_USEC) + 1;
do {
- bits = mlxbf_i2c_read(io, addr) & mask;
+ bits = readl(io + addr) & mask;
if (eq_zero ? bits == 0 : bits != 0)
return eq_zero ? 1 : bits;
udelay(MLXBF_I2C_POLL_FREQ_IN_USEC);
@@ -609,16 +569,16 @@ static int mlxbf_i2c_smbus_check_status(struct mlxbf_i2c_priv *priv)
MLXBF_I2C_SMBUS_TIMEOUT);
/* Read cause status bits. */
- cause_status_bits = mlxbf_i2c_read(priv->mst_cause->io,
- MLXBF_I2C_CAUSE_ARBITER);
+ cause_status_bits = readl(priv->mst_cause->io +
+ MLXBF_I2C_CAUSE_ARBITER);
cause_status_bits &= MLXBF_I2C_CAUSE_MASTER_ARBITER_BITS_MASK;
/*
* Parse both Cause and Master GW bits, then return transaction status.
*/
- master_status_bits = mlxbf_i2c_read(priv->smbus->io,
- MLXBF_I2C_SMBUS_MASTER_STATUS);
+ master_status_bits = readl(priv->smbus->io +
+ MLXBF_I2C_SMBUS_MASTER_STATUS);
master_status_bits &= MLXBF_I2C_SMBUS_MASTER_STATUS_MASK;
if (mlxbf_i2c_smbus_transaction_success(master_status_bits,
@@ -649,10 +609,17 @@ static void mlxbf_i2c_smbus_write_data(struct mlxbf_i2c_priv *priv,
aligned_length = round_up(length, 4);
- /* Copy data bytes from 4-byte aligned source buffer. */
+ /*
+ * Copy data bytes from 4-byte aligned source buffer.
+ * Data copied to the Master GW Data Descriptor MUST be shifted
+ * left so the data starts at the MSB of the descriptor registers
+ * as required by the underlying hardware. Enable byte swapping
+ * when writing data bytes to the 32 * 32-bit HW Data registers
+ * a.k.a Master GW Data Descriptor.
+ */
for (offset = 0; offset < aligned_length; offset += sizeof(u32)) {
data32 = *((u32 *)(data + offset));
- mlxbf_i2c_write_data(priv->smbus->io, addr + offset, data32);
+ iowrite32be(data32, priv->smbus->io + addr + offset);
}
}
@@ -664,15 +631,23 @@ static void mlxbf_i2c_smbus_read_data(struct mlxbf_i2c_priv *priv,
mask = sizeof(u32) - 1;
+ /*
+ * Data bytes in the Master GW Data Descriptor are shifted left
+ * so the data starts at the MSB of the descriptor registers as
+ * set by the underlying hardware. Enable byte swapping while
+ * reading data bytes from the 32 * 32-bit HW Data registers
+ * a.k.a Master GW Data Descriptor.
+ */
+
for (offset = 0; offset < (length & ~mask); offset += sizeof(u32)) {
- data32 = mlxbf_i2c_read_data(priv->smbus->io, addr + offset);
+ data32 = ioread32be(priv->smbus->io + addr + offset);
*((u32 *)(data + offset)) = data32;
}
if (!(length & mask))
return;
- data32 = mlxbf_i2c_read_data(priv->smbus->io, addr + offset);
+ data32 = ioread32be(priv->smbus->io + addr + offset);
for (byte = 0; byte < (length & mask); byte++) {
data[offset + byte] = data32 & GENMASK(7, 0);
@@ -698,16 +673,16 @@ static int mlxbf_i2c_smbus_enable(struct mlxbf_i2c_priv *priv, u8 slave,
command |= rol32(pec_en, MLXBF_I2C_MASTER_SEND_PEC_SHIFT);
/* Clear status bits. */
- mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_MASTER_STATUS, 0x0);
+ writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_STATUS);
/* Set the cause data. */
- mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_CAUSE_OR_CLEAR, ~0x0);
+ writel(~0x0, priv->smbus->io + MLXBF_I2C_CAUSE_OR_CLEAR);
/* Zero PEC byte. */
- mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_MASTER_PEC, 0x0);
+ writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_PEC);
/* Zero byte count. */
- mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_RS_BYTES, 0x0);
+ writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_RS_BYTES);
/* GW activation. */
- mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_MASTER_GW, command);
+ writel(command, priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_GW);
/*
* Poll master status and check status bits. An ACK is sent when
@@ -823,8 +798,8 @@ mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv,
* needs to be 'manually' reset. This should be removed in the
* next tag integration.
*/
- mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_MASTER_FSM,
- MLXBF_I2C_SMBUS_MASTER_FSM_PS_STATE_MASK);
+ writel(MLXBF_I2C_SMBUS_MASTER_FSM_PS_STATE_MASK,
+ priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_FSM);
}
return ret;
@@ -1113,8 +1088,8 @@ static void mlxbf_i2c_set_timings(struct mlxbf_i2c_priv *priv,
timer |= mlxbf_i2c_set_timer(priv, timings->scl_low,
false, MLXBF_I2C_MASK_16,
MLXBF_I2C_SHIFT_16);
- mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_TIMER_SCL_LOW_SCL_HIGH,
- timer);
+ writel(timer, priv->smbus->io +
+ MLXBF_I2C_SMBUS_TIMER_SCL_LOW_SCL_HIGH);
timer = mlxbf_i2c_set_timer(priv, timings->sda_rise, false,
MLXBF_I2C_MASK_8, MLXBF_I2C_SHIFT_0);
@@ -1124,37 +1099,34 @@ static void mlxbf_i2c_set_timings(struct mlxbf_i2c_priv *priv,
MLXBF_I2C_MASK_8, MLXBF_I2C_SHIFT_16);
timer |= mlxbf_i2c_set_timer(priv, timings->scl_fall, false,
MLXBF_I2C_MASK_8, MLXBF_I2C_SHIFT_24);
- mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_TIMER_FALL_RISE_SPIKE,
- timer);
+ writel(timer, priv->smbus->io +
+ MLXBF_I2C_SMBUS_TIMER_FALL_RISE_SPIKE);
timer = mlxbf_i2c_set_timer(priv, timings->hold_start, true,
MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_0);
timer |= mlxbf_i2c_set_timer(priv, timings->hold_data, true,
MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_16);
- mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_TIMER_THOLD, timer);
+ writel(timer, priv->smbus->io + MLXBF_I2C_SMBUS_TIMER_THOLD);
timer = mlxbf_i2c_set_timer(priv, timings->setup_start, true,
MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_0);
timer |= mlxbf_i2c_set_timer(priv, timings->setup_stop, true,
MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_16);
- mlxbf_i2c_write(priv->smbus->io,
- MLXBF_I2C_SMBUS_TIMER_TSETUP_START_STOP, timer);
+ writel(timer, priv->smbus->io +
+ MLXBF_I2C_SMBUS_TIMER_TSETUP_START_STOP);
timer = mlxbf_i2c_set_timer(priv, timings->setup_data, true,
MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_0);
- mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_TIMER_TSETUP_DATA,
- timer);
+ writel(timer, priv->smbus->io + MLXBF_I2C_SMBUS_TIMER_TSETUP_DATA);
timer = mlxbf_i2c_set_timer(priv, timings->buf, false,
MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_0);
timer |= mlxbf_i2c_set_timer(priv, timings->thigh_max, false,
MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_16);
- mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_THIGH_MAX_TBUF,
- timer);
+ writel(timer, priv->smbus->io + MLXBF_I2C_SMBUS_THIGH_MAX_TBUF);
timer = timings->timeout;
- mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_SCL_LOW_TIMEOUT,
- timer);
+ writel(timer, priv->smbus->io + MLXBF_I2C_SMBUS_SCL_LOW_TIMEOUT);
}
enum mlxbf_i2c_timings_config {
@@ -1426,19 +1398,15 @@ static int mlxbf_i2c_init_master(struct platform_device *pdev,
* platform firmware; disabling the bus might compromise the system
* functionality.
*/
- config_reg = mlxbf_i2c_read(gpio_res->io,
- MLXBF_I2C_GPIO_0_FUNC_EN_0);
+ config_reg = readl(gpio_res->io + MLXBF_I2C_GPIO_0_FUNC_EN_0);
config_reg = MLXBF_I2C_GPIO_SMBUS_GW_ASSERT_PINS(priv->bus,
config_reg);
- mlxbf_i2c_write(gpio_res->io, MLXBF_I2C_GPIO_0_FUNC_EN_0,
- config_reg);
+ writel(config_reg, gpio_res->io + MLXBF_I2C_GPIO_0_FUNC_EN_0);
- config_reg = mlxbf_i2c_read(gpio_res->io,
- MLXBF_I2C_GPIO_0_FORCE_OE_EN);
+ config_reg = readl(gpio_res->io + MLXBF_I2C_GPIO_0_FORCE_OE_EN);
config_reg = MLXBF_I2C_GPIO_SMBUS_GW_RESET_PINS(priv->bus,
config_reg);
- mlxbf_i2c_write(gpio_res->io, MLXBF_I2C_GPIO_0_FORCE_OE_EN,
- config_reg);
+ writel(config_reg, gpio_res->io + MLXBF_I2C_GPIO_0_FORCE_OE_EN);
mutex_unlock(gpio_res->lock);
@@ -1452,10 +1420,9 @@ static u64 mlxbf_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_res)
u32 corepll_val;
u16 core_f;
- pad_frequency = MLXBF_I2C_TYU_PLL_IN_FREQ;
+ pad_frequency = MLXBF_I2C_PLL_IN_FREQ;
- corepll_val = mlxbf_i2c_read(corepll_res->io,
- MLXBF_I2C_CORE_PLL_REG1);
+ corepll_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG1);
/* Get Core PLL configuration bits. */
core_f = rol32(corepll_val, MLXBF_I2C_COREPLL_CORE_F_TYU_SHIFT) &
@@ -1488,12 +1455,10 @@ static u64 mlxbf_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res)
u8 core_od, core_r;
u32 core_f;
- pad_frequency = MLXBF_I2C_YU_PLL_IN_FREQ;
+ pad_frequency = MLXBF_I2C_PLL_IN_FREQ;
- corepll_reg1_val = mlxbf_i2c_read(corepll_res->io,
- MLXBF_I2C_CORE_PLL_REG1);
- corepll_reg2_val = mlxbf_i2c_read(corepll_res->io,
- MLXBF_I2C_CORE_PLL_REG2);
+ corepll_reg1_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG1);
+ corepll_reg2_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG2);
/* Get Core PLL configuration bits */
core_f = rol32(corepll_reg1_val, MLXBF_I2C_COREPLL_CORE_F_YU_SHIFT) &
@@ -1585,7 +1550,7 @@ static int mlxbf_slave_enable(struct mlxbf_i2c_priv *priv, u8 addr)
* (7-bit address, 1 status bit (1 if enabled, 0 if not)).
*/
for (reg = 0; reg < reg_cnt; reg++) {
- slave_reg = mlxbf_i2c_read(priv->smbus->io,
+ slave_reg = readl(priv->smbus->io +
MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG + reg * 0x4);
/*
* Each register holds 4 slave addresses. So, we have to keep
@@ -1643,8 +1608,8 @@ static int mlxbf_slave_enable(struct mlxbf_i2c_priv *priv, u8 addr)
/* Enable the slave address and update the register. */
slave_reg |= (1 << MLXBF_I2C_SMBUS_SLAVE_ADDR_EN_BIT) << (byte * 8);
- mlxbf_i2c_write(priv->smbus->io,
- MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG + reg * 0x4, slave_reg);
+ writel(slave_reg, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG +
+ reg * 0x4);
return 0;
}
@@ -1668,7 +1633,7 @@ static int mlxbf_slave_disable(struct mlxbf_i2c_priv *priv)
* (7-bit address, 1 status bit (1 if enabled, 0 if not)).
*/
for (reg = 0; reg < reg_cnt; reg++) {
- slave_reg = mlxbf_i2c_read(priv->smbus->io,
+ slave_reg = readl(priv->smbus->io +
MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG + reg * 0x4);
/* Check whether the address slots are empty. */
@@ -1708,8 +1673,8 @@ static int mlxbf_slave_disable(struct mlxbf_i2c_priv *priv)
/* Cleanup the slave address slot. */
slave_reg &= ~(GENMASK(7, 0) << (slave_byte * 8));
- mlxbf_i2c_write(priv->smbus->io,
- MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG + reg * 0x4, slave_reg);
+ writel(slave_reg, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG +
+ reg * 0x4);
return 0;
}
@@ -1801,7 +1766,7 @@ static int mlxbf_i2c_init_slave(struct platform_device *pdev,
int ret;
/* Reset FSM. */
- mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_SLAVE_FSM, 0);
+ writel(0, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_FSM);
/*
* Enable slave cause interrupt bits. Drive
@@ -1810,15 +1775,13 @@ static int mlxbf_i2c_init_slave(struct platform_device *pdev,
* masters issue a Read and Write, respectively. But, clear all
* interrupts first.
*/
- mlxbf_i2c_write(priv->slv_cause->io,
- MLXBF_I2C_CAUSE_OR_CLEAR, ~0);
+ writel(~0, priv->slv_cause->io + MLXBF_I2C_CAUSE_OR_CLEAR);
int_reg = MLXBF_I2C_CAUSE_READ_WAIT_FW_RESPONSE;
int_reg |= MLXBF_I2C_CAUSE_WRITE_SUCCESS;
- mlxbf_i2c_write(priv->slv_cause->io,
- MLXBF_I2C_CAUSE_OR_EVTEN0, int_reg);
+ writel(int_reg, priv->slv_cause->io + MLXBF_I2C_CAUSE_OR_EVTEN0);
/* Finally, set the 'ready' bit to start handling transactions. */
- mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_SLAVE_READY, 0x1);
+ writel(0x1, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_READY);
/* Initialize the cause coalesce resource. */
ret = mlxbf_i2c_init_coalesce(pdev, priv);
@@ -1844,23 +1807,21 @@ static bool mlxbf_i2c_has_coalesce(struct mlxbf_i2c_priv *priv, bool *read,
MLXBF_I2C_CAUSE_YU_SLAVE_BIT :
priv->bus + MLXBF_I2C_CAUSE_TYU_SLAVE_BIT;
- coalesce0_reg = mlxbf_i2c_read(priv->coalesce->io,
- MLXBF_I2C_CAUSE_COALESCE_0);
+ coalesce0_reg = readl(priv->coalesce->io + MLXBF_I2C_CAUSE_COALESCE_0);
is_set = coalesce0_reg & (1 << slave_shift);
if (!is_set)
return false;
/* Check the source of the interrupt, i.e. whether a Read or Write. */
- cause_reg = mlxbf_i2c_read(priv->slv_cause->io,
- MLXBF_I2C_CAUSE_ARBITER);
+ cause_reg = readl(priv->slv_cause->io + MLXBF_I2C_CAUSE_ARBITER);
if (cause_reg & MLXBF_I2C_CAUSE_READ_WAIT_FW_RESPONSE)
*read = true;
else if (cause_reg & MLXBF_I2C_CAUSE_WRITE_SUCCESS)
*write = true;
/* Clear cause bits. */
- mlxbf_i2c_write(priv->slv_cause->io, MLXBF_I2C_CAUSE_OR_CLEAR, ~0x0);
+ writel(~0x0, priv->slv_cause->io + MLXBF_I2C_CAUSE_OR_CLEAR);
return true;
}
@@ -1900,8 +1861,8 @@ static int mlxbf_smbus_irq_send(struct mlxbf_i2c_priv *priv, u8 recv_bytes)
* address, if supplied.
*/
if (recv_bytes > 0) {
- data32 = mlxbf_i2c_read_data(priv->smbus->io,
- MLXBF_I2C_SLAVE_DATA_DESC_ADDR);
+ data32 = ioread32be(priv->smbus->io +
+ MLXBF_I2C_SLAVE_DATA_DESC_ADDR);
/* Parse the received bytes. */
switch (recv_bytes) {
@@ -1966,7 +1927,7 @@ static int mlxbf_smbus_irq_send(struct mlxbf_i2c_priv *priv, u8 recv_bytes)
control32 |= rol32(write_size, MLXBF_I2C_SLAVE_WRITE_BYTES_SHIFT);
control32 |= rol32(pec_en, MLXBF_I2C_SLAVE_SEND_PEC_SHIFT);
- mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_SLAVE_GW, control32);
+ writel(control32, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_GW);
/*
* Wait until the transfer is completed; the driver will wait
@@ -1975,10 +1936,9 @@ static int mlxbf_smbus_irq_send(struct mlxbf_i2c_priv *priv, u8 recv_bytes)
mlxbf_smbus_slave_wait_for_idle(priv, MLXBF_I2C_SMBUS_TIMEOUT);
/* Release the Slave GW. */
- mlxbf_i2c_write(priv->smbus->io,
- MLXBF_I2C_SMBUS_SLAVE_RS_MASTER_BYTES, 0x0);
- mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_SLAVE_PEC, 0x0);
- mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_SLAVE_READY, 0x1);
+ writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_RS_MASTER_BYTES);
+ writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_PEC);
+ writel(0x1, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_READY);
return 0;
}
@@ -2023,10 +1983,9 @@ static int mlxbf_smbus_irq_recv(struct mlxbf_i2c_priv *priv, u8 recv_bytes)
i2c_slave_event(slave, I2C_SLAVE_STOP, &value);
/* Release the Slave GW. */
- mlxbf_i2c_write(priv->smbus->io,
- MLXBF_I2C_SMBUS_SLAVE_RS_MASTER_BYTES, 0x0);
- mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_SLAVE_PEC, 0x0);
- mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_SLAVE_READY, 0x1);
+ writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_RS_MASTER_BYTES);
+ writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_PEC);
+ writel(0x1, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_READY);
return ret;
}
@@ -2061,8 +2020,8 @@ static irqreturn_t mlxbf_smbus_irq(int irq, void *ptr)
* slave; if the higher 8 bits are sent then the slave expects N bytes
* from the master.
*/
- rw_bytes_reg = mlxbf_i2c_read(priv->smbus->io,
- MLXBF_I2C_SMBUS_SLAVE_RS_MASTER_BYTES);
+ rw_bytes_reg = readl(priv->smbus->io +
+ MLXBF_I2C_SMBUS_SLAVE_RS_MASTER_BYTES);
recv_bytes = (rw_bytes_reg >> 8) & GENMASK(7, 0);
/*
@@ -2264,6 +2223,7 @@ static const struct of_device_id mlxbf_i2c_dt_ids[] = {
MODULE_DEVICE_TABLE(of, mlxbf_i2c_dt_ids);
+#ifdef CONFIG_ACPI
static const struct acpi_device_id mlxbf_i2c_acpi_ids[] = {
{ "MLNXBF03", (kernel_ulong_t)&mlxbf_i2c_chip[MLXBF_I2C_CHIP_TYPE_1] },
{ "MLNXBF23", (kernel_ulong_t)&mlxbf_i2c_chip[MLXBF_I2C_CHIP_TYPE_2] },
@@ -2305,6 +2265,12 @@ static int mlxbf_i2c_acpi_probe(struct device *dev, struct mlxbf_i2c_priv *priv)
return ret;
}
+#else
+static int mlxbf_i2c_acpi_probe(struct device *dev, struct mlxbf_i2c_priv *priv)
+{
+ return -ENOENT;
+}
+#endif /* CONFIG_ACPI */
static int mlxbf_i2c_of_probe(struct device *dev, struct mlxbf_i2c_priv *priv)
{
@@ -2473,7 +2439,9 @@ static struct platform_driver mlxbf_i2c_driver = {
.driver = {
.name = "i2c-mlxbf",
.of_match_table = mlxbf_i2c_dt_ids,
+#ifdef CONFIG_ACPI
.acpi_match_table = ACPI_PTR(mlxbf_i2c_acpi_ids),
+#endif /* CONFIG_ACPI */
},
};
@@ -2502,5 +2470,5 @@ static void __exit mlxbf_i2c_exit(void)
module_exit(mlxbf_i2c_exit);
MODULE_DESCRIPTION("Mellanox BlueField I2C bus driver");
-MODULE_AUTHOR("Khalil Blaiech <kblaiech@mellanox.com>");
+MODULE_AUTHOR("Khalil Blaiech <kblaiech@nvidia.com>");
MODULE_LICENSE("GPL v2");
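
The accessor cleanup throughout this file relies on ioread32be()/iowrite32be() already performing the byte swap that the removed mlxbf_i2c_read_data()/mlxbf_i2c_write_data() wrappers open-coded around readl()/writel(). A sketch with hypothetical base/REG_OFF names:

	void __iomem *base;	/* hypothetical mapped region */
	u32 v;

	/* equivalent to the removed be32_to_cpu(readl(...)) pattern */
	v = ioread32be(base + REG_OFF);

	/* equivalent to the removed writel(cpu_to_be32(...)) pattern */
	iowrite32be(v | BIT(0), base + REG_OFF);
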
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index 0cbdfbe605b5..33de99b7bc20 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -475,6 +475,10 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
{
u16 control_reg;
+ writel(I2C_DMA_HARD_RST, i2c->pdmabase + OFFSET_RST);
+ udelay(50);
+ writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
+
mtk_i2c_writew(i2c, I2C_SOFT_RST, OFFSET_SOFTRESET);
/* Set ioconfig */
@@ -529,10 +533,6 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
mtk_i2c_writew(i2c, control_reg, OFFSET_CONTROL);
mtk_i2c_writew(i2c, I2C_DELAY_LEN, OFFSET_DELAY_LEN);
-
- writel(I2C_DMA_HARD_RST, i2c->pdmabase + OFFSET_RST);
- udelay(50);
- writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
}
static const struct i2c_spec_values *mtk_i2c_get_spec(unsigned int speed)
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index cab725559999..bdd60770779a 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -129,6 +129,7 @@ struct sh_mobile_i2c_data {
int sr;
bool send_stop;
bool stop_after_dma;
+ bool atomic_xfer;
struct resource *res;
struct dma_chan *dma_tx;
@@ -330,13 +331,15 @@ static unsigned char i2c_op(struct sh_mobile_i2c_data *pd, enum sh_mobile_i2c_op
ret = iic_rd(pd, ICDR);
break;
case OP_RX_STOP: /* enable DTE interrupt, issue stop */
- iic_wr(pd, ICIC,
- ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE);
+ if (!pd->atomic_xfer)
+ iic_wr(pd, ICIC,
+ ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE);
iic_wr(pd, ICCR, ICCR_ICE | ICCR_RACK);
break;
case OP_RX_STOP_DATA: /* enable DTE interrupt, read data, issue stop */
- iic_wr(pd, ICIC,
- ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE);
+ if (!pd->atomic_xfer)
+ iic_wr(pd, ICIC,
+ ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE);
ret = iic_rd(pd, ICDR);
iic_wr(pd, ICCR, ICCR_ICE | ICCR_RACK);
break;
@@ -429,7 +432,8 @@ static irqreturn_t sh_mobile_i2c_isr(int irq, void *dev_id)
if (wakeup) {
pd->sr |= SW_DONE;
- wake_up(&pd->wait);
+ if (!pd->atomic_xfer)
+ wake_up(&pd->wait);
}
/* defeat write posting to avoid spurious WAIT interrupts */
@@ -581,6 +585,9 @@ static void start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg,
pd->pos = -1;
pd->sr = 0;
+ if (pd->atomic_xfer)
+ return;
+
pd->dma_buf = i2c_get_dma_safe_msg_buf(pd->msg, 8);
if (pd->dma_buf)
sh_mobile_i2c_xfer_dma(pd);
@@ -637,15 +644,13 @@ static int poll_busy(struct sh_mobile_i2c_data *pd)
return i ? 0 : -ETIMEDOUT;
}
-static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter,
- struct i2c_msg *msgs,
- int num)
+static int sh_mobile_xfer(struct sh_mobile_i2c_data *pd,
+ struct i2c_msg *msgs, int num)
{
- struct sh_mobile_i2c_data *pd = i2c_get_adapdata(adapter);
struct i2c_msg *msg;
int err = 0;
int i;
- long timeout;
+ long time_left;
/* Wake up device and enable clock */
pm_runtime_get_sync(pd->dev);
@@ -662,15 +667,35 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter,
if (do_start)
i2c_op(pd, OP_START);
- /* The interrupt handler takes care of the rest... */
- timeout = wait_event_timeout(pd->wait,
- pd->sr & (ICSR_TACK | SW_DONE),
- adapter->timeout);
-
- /* 'stop_after_dma' tells if DMA transfer was complete */
- i2c_put_dma_safe_msg_buf(pd->dma_buf, pd->msg, pd->stop_after_dma);
+ if (pd->atomic_xfer) {
+ unsigned long j = jiffies + pd->adap.timeout;
+
+ time_left = time_before_eq(jiffies, j);
+ while (time_left &&
+ !(pd->sr & (ICSR_TACK | SW_DONE))) {
+ unsigned char sr = iic_rd(pd, ICSR);
+
+ if (sr & (ICSR_AL | ICSR_TACK |
+ ICSR_WAIT | ICSR_DTE)) {
+ sh_mobile_i2c_isr(0, pd);
+ udelay(150);
+ } else {
+ cpu_relax();
+ }
+ time_left = time_before_eq(jiffies, j);
+ }
+ } else {
+ /* The interrupt handler takes care of the rest... */
+ time_left = wait_event_timeout(pd->wait,
+ pd->sr & (ICSR_TACK | SW_DONE),
+ pd->adap.timeout);
+
+ /* 'stop_after_dma' tells if DMA xfer was complete */
+ i2c_put_dma_safe_msg_buf(pd->dma_buf, pd->msg,
+ pd->stop_after_dma);
+ }
- if (!timeout) {
+ if (!time_left) {
dev_err(pd->dev, "Transfer request timed out\n");
if (pd->dma_direction != DMA_NONE)
sh_mobile_i2c_cleanup_dma(pd);
@@ -696,14 +721,35 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter,
return err ?: num;
}
+static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs,
+ int num)
+{
+ struct sh_mobile_i2c_data *pd = i2c_get_adapdata(adapter);
+
+ pd->atomic_xfer = false;
+ return sh_mobile_xfer(pd, msgs, num);
+}
+
+static int sh_mobile_i2c_xfer_atomic(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs,
+ int num)
+{
+ struct sh_mobile_i2c_data *pd = i2c_get_adapdata(adapter);
+
+ pd->atomic_xfer = true;
+ return sh_mobile_xfer(pd, msgs, num);
+}
+
static u32 sh_mobile_i2c_func(struct i2c_adapter *adapter)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_PROTOCOL_MANGLING;
}
static const struct i2c_algorithm sh_mobile_i2c_algorithm = {
- .functionality = sh_mobile_i2c_func,
- .master_xfer = sh_mobile_i2c_xfer,
+ .functionality = sh_mobile_i2c_func,
+ .master_xfer = sh_mobile_i2c_xfer,
+ .master_xfer_atomic = sh_mobile_i2c_xfer_atomic,
};
static const struct i2c_adapter_quirks sh_mobile_i2c_quirks = {
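
The split above follows the core's contract: master_xfer_atomic() is chosen when a transfer must run with interrupts off (for example, a PMIC poweroff write late in shutdown), so it may neither sleep nor depend on the ISR firing, hence the jiffies-bounded polling loop. The resulting ops table shape, with hypothetical callbacks:

	static const struct i2c_algorithm demo_algorithm = {
		.functionality		= demo_func,		/* hypothetical */
		.master_xfer		= demo_xfer,		/* may sleep */
		.master_xfer_atomic	= demo_xfer_atomic,	/* polls, never sleeps */
	};
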
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 56f5b8077cba..01bace49a962 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -1239,7 +1239,7 @@ static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv)
struct acpi_processor_cx *cx;
struct cpuidle_state *state;
- if (intel_idle_max_cstate_reached(cstate))
+ if (intel_idle_max_cstate_reached(cstate - 1))
break;
cx = &acpi_state_table.states[cstate];
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 7c2ab1f2fbea..a77750b8954d 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -405,10 +405,10 @@ static int cma_comp_exch(struct rdma_id_private *id_priv,
/*
* The FSM uses a funny double locking where state is protected by both
* the handler_mutex and the spinlock. State is not allowed to change
- * away from a handler_mutex protected value without also holding
+ * to/from a handler_mutex protected value without also holding
* handler_mutex.
*/
- if (comp == RDMA_CM_CONNECT)
+ if (comp == RDMA_CM_CONNECT || exch == RDMA_CM_CONNECT)
lockdep_assert_held(&id_priv->handler_mutex);
spin_lock_irqsave(&id_priv->lock, flags);
@@ -4038,17 +4038,23 @@ out:
return ret;
}
-int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
+/**
+ * rdma_connect_locked - Initiate an active connection request.
+ * @id: Connection identifier to connect.
+ * @conn_param: Connection information used for connected QPs.
+ *
+ * Same as rdma_connect() but can only be called from the
+ * RDMA_CM_EVENT_ROUTE_RESOLVED handler callback.
+ */
+int rdma_connect_locked(struct rdma_cm_id *id,
+ struct rdma_conn_param *conn_param)
{
struct rdma_id_private *id_priv =
container_of(id, struct rdma_id_private, id);
int ret;
- mutex_lock(&id_priv->handler_mutex);
- if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT)) {
- ret = -EINVAL;
- goto err_unlock;
- }
+ if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT))
+ return -EINVAL;
if (!id->qp) {
id_priv->qp_num = conn_param->qp_num;
@@ -4066,11 +4072,33 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
ret = -ENOSYS;
if (ret)
goto err_state;
- mutex_unlock(&id_priv->handler_mutex);
return 0;
err_state:
cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED);
-err_unlock:
+ return ret;
+}
+EXPORT_SYMBOL(rdma_connect_locked);
+
+/**
+ * rdma_connect - Initiate an active connection request.
+ * @id: Connection identifier to connect.
+ * @conn_param: Connection information used for connected QPs.
+ *
+ * Users must have resolved a route for the rdma_cm_id by calling
+ * rdma_resolve_route() before calling this routine.
+ *
+ * This call will either connect to a remote QP or obtain remote QP information
+ * for unconnected rdma_cm_id's. The actual operation is based on the
+ * rdma_cm_id's port space.
+ */
+int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
+{
+ struct rdma_id_private *id_priv =
+ container_of(id, struct rdma_id_private, id);
+ int ret;
+
+ mutex_lock(&id_priv->handler_mutex);
+ ret = rdma_connect_locked(id, conn_param);
mutex_unlock(&id_priv->handler_mutex);
return ret;
}
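
A hedged sketch of the calling convention the new kerneldoc describes (an illustrative ULP handler, not code from this patch): inside the RDMA_CM_EVENT_ROUTE_RESOLVED callback the CM core already holds handler_mutex, so only the _locked variant is safe there.

	static int demo_cma_handler(struct rdma_cm_id *id,
				    struct rdma_cm_event *event)
	{
		struct rdma_conn_param param = { .retry_count = 7 };

		switch (event->event) {
		case RDMA_CM_EVENT_ROUTE_RESOLVED:
			/* handler_mutex held: rdma_connect() would deadlock */
			return rdma_connect_locked(id, &param);
		default:
			return 0;
		}
	}
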
diff --git a/drivers/infiniband/core/uverbs_std_types_device.c b/drivers/infiniband/core/uverbs_std_types_device.c
index f367d523a46b..302f898c5833 100644
--- a/drivers/infiniband/core/uverbs_std_types_device.c
+++ b/drivers/infiniband/core/uverbs_std_types_device.c
@@ -401,9 +401,6 @@ static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_GID_ENTRY)(
if (!rdma_is_port_valid(ib_dev, port_num))
return -EINVAL;
- if (!rdma_ib_or_roce(ib_dev, port_num))
- return -EOPNOTSUPP;
-
gid_attr = rdma_get_gid_attr(ib_dev, port_num, gid_index);
if (IS_ERR(gid_attr))
return PTR_ERR(gid_attr);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 89e04ca62ae0..246e3cbe0b2c 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -3305,7 +3305,8 @@ static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
int err;
dev->port[port_num].roce.nb.notifier_call = mlx5_netdev_event;
- err = register_netdevice_notifier(&dev->port[port_num].roce.nb);
+ err = register_netdevice_notifier_net(mlx5_core_net(dev->mdev),
+ &dev->port[port_num].roce.nb);
if (err) {
dev->port[port_num].roce.nb.notifier_call = NULL;
return err;
@@ -3317,7 +3318,8 @@ static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
{
if (dev->port[port_num].roce.nb.notifier_call) {
- unregister_netdevice_notifier(&dev->port[port_num].roce.nb);
+ unregister_netdevice_notifier_net(mlx5_core_net(dev->mdev),
+ &dev->port[port_num].roce.nb);
dev->port[port_num].roce.nb.notifier_call = NULL;
}
}
diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
index c7169d2c69e5..c4bc58736e48 100644
--- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
@@ -727,6 +727,7 @@ int qedr_iw_destroy_listen(struct iw_cm_id *cm_id)
listener->qed_handle);
cm_id->rem_ref(cm_id);
+ kfree(listener);
return rc;
}
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
index f0e5ffba2d51..97ed8f952f6e 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
@@ -176,7 +176,7 @@ struct pvrdma_port_attr {
u8 subnet_timeout;
u8 init_type_reply;
u8 active_width;
- u16 active_speed;
+ u8 active_speed;
u8 phys_state;
u8 reserved[2];
};
diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
index 52218684ad4a..670a9623b46e 100644
--- a/drivers/infiniband/sw/rdmavt/vt.c
+++ b/drivers/infiniband/sw/rdmavt/vt.c
@@ -524,6 +524,7 @@ static noinline int check_support(struct rvt_dev_info *rdi, int verb)
int rvt_register_device(struct rvt_dev_info *rdi)
{
int ret = 0, i;
+ u64 dma_mask;
if (!rdi)
return -EINVAL;
@@ -580,8 +581,10 @@ int rvt_register_device(struct rvt_dev_info *rdi)
/* DMA Operations */
rdi->ibdev.dev.dma_parms = rdi->ibdev.dev.parent->dma_parms;
- dma_set_coherent_mask(&rdi->ibdev.dev,
- rdi->ibdev.dev.parent->coherent_dma_mask);
+ dma_mask = IS_ENABLED(CONFIG_64BIT) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32);
+ ret = dma_coerce_mask_and_coherent(&rdi->ibdev.dev, dma_mask);
+ if (ret)
+ goto bail_wss;
/* Protection Domain */
spin_lock_init(&rdi->n_pds_lock);
diff --git a/drivers/infiniband/sw/rxe/rxe_av.c b/drivers/infiniband/sw/rxe/rxe_av.c
index 38021e2c8688..df0d173d6acb 100644
--- a/drivers/infiniband/sw/rxe/rxe_av.c
+++ b/drivers/infiniband/sw/rxe/rxe_av.c
@@ -16,15 +16,24 @@ void rxe_init_av(struct rdma_ah_attr *attr, struct rxe_av *av)
int rxe_av_chk_attr(struct rxe_dev *rxe, struct rdma_ah_attr *attr)
{
+ const struct ib_global_route *grh = rdma_ah_read_grh(attr);
struct rxe_port *port;
+ int type;
port = &rxe->port;
if (rdma_ah_get_ah_flags(attr) & IB_AH_GRH) {
- u8 sgid_index = rdma_ah_read_grh(attr)->sgid_index;
+ if (grh->sgid_index > port->attr.gid_tbl_len) {
+ pr_warn("invalid sgid index = %d\n",
+ grh->sgid_index);
+ return -EINVAL;
+ }
- if (sgid_index > port->attr.gid_tbl_len) {
- pr_warn("invalid sgid index = %d\n", sgid_index);
+ type = rdma_gid_attr_network_type(grh->sgid_attr);
+ if (type < RDMA_NETWORK_IPV4 ||
+ type > RDMA_NETWORK_IPV6) {
+ pr_warn("invalid network type for rdma_rxe = %d\n",
+ type);
return -EINVAL;
}
}
@@ -65,11 +74,29 @@ void rxe_av_to_attr(struct rxe_av *av, struct rdma_ah_attr *attr)
void rxe_av_fill_ip_info(struct rxe_av *av, struct rdma_ah_attr *attr)
{
const struct ib_gid_attr *sgid_attr = attr->grh.sgid_attr;
+ int ibtype;
+ int type;
rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid_attr->gid);
rdma_gid2ip((struct sockaddr *)&av->dgid_addr,
&rdma_ah_read_grh(attr)->dgid);
- av->network_type = rdma_gid_attr_network_type(sgid_attr);
+
+ ibtype = rdma_gid_attr_network_type(sgid_attr);
+
+ switch (ibtype) {
+ case RDMA_NETWORK_IPV4:
+ type = RXE_NETWORK_TYPE_IPV4;
+ break;
+ case RDMA_NETWORK_IPV6:
+ type = RXE_NETWORK_TYPE_IPV6;
+ break;
+ default:
+ /* not reached - checked in rxe_av_chk_attr */
+ type = 0;
+ break;
+ }
+
+ av->network_type = type;
}
struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt)
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index 575e1a4ec821..34bef7d8e6b4 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -442,7 +442,7 @@ struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
if (IS_ERR(attr))
return NULL;
- if (av->network_type == RXE_NETWORK_TYPE_IPV6)
+ if (av->network_type == RXE_NETWORK_TYPE_IPV4)
hdr_len = ETH_HLEN + sizeof(struct udphdr) +
sizeof(struct iphdr);
else
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 1fc022362fbe..f9c832e82552 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -1118,6 +1118,7 @@ int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
int err;
struct ib_device *dev = &rxe->ib_dev;
struct crypto_shash *tfm;
+ u64 dma_mask;
strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));
@@ -1130,7 +1131,10 @@ int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
rxe->ndev->dev_addr);
dev->dev.dma_parms = &rxe->dma_parms;
dma_set_max_seg_size(&dev->dev, UINT_MAX);
- dma_set_coherent_mask(&dev->dev, dma_get_required_mask(&dev->dev));
+ dma_mask = IS_ENABLED(CONFIG_64BIT) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32);
+ err = dma_coerce_mask_and_coherent(&dev->dev, dma_mask);
+ if (err)
+ return err;
dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
| BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
index ca8bc7296867..181e06c1c43d 100644
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -306,6 +306,7 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
struct siw_device *sdev = NULL;
struct ib_device *base_dev;
struct device *parent = netdev->dev.parent;
+ u64 dma_mask;
int rv;
if (!parent) {
@@ -384,8 +385,10 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
base_dev->dev.parent = parent;
base_dev->dev.dma_parms = &sdev->dma_parms;
dma_set_max_seg_size(&base_dev->dev, UINT_MAX);
- dma_set_coherent_mask(&base_dev->dev,
- dma_get_required_mask(&base_dev->dev));
+ dma_mask = IS_ENABLED(CONFIG_64BIT) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32);
+ if (dma_coerce_mask_and_coherent(&base_dev->dev, dma_mask))
+ goto error;
+
base_dev->num_comp_vectors = num_possible_cpus();
xa_init_flags(&sdev->qp_xa, XA_FLAGS_ALLOC1);
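
All three software-RDMA hunks (rdmavt, rxe, siw) converge on the same idiom; a condensed sketch, with ibdev standing in for the respective device:

	u64 mask = IS_ENABLED(CONFIG_64BIT) ? DMA_BIT_MASK(64)
					    : DMA_BIT_MASK(32);
	int ret;

	/*
	 * Sets both the streaming and coherent masks, overriding any
	 * mask the bus code left behind; fail the probe if the
	 * platform cannot honor it.
	 */
	ret = dma_coerce_mask_and_coherent(&ibdev->dev, mask);
	if (ret)
		return ret;
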
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 2f3ebc0a75d9..2bd18b006893 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -620,7 +620,7 @@ static void iser_route_handler(struct rdma_cm_id *cma_id)
conn_param.private_data = (void *)&req_hdr;
conn_param.private_data_len = sizeof(struct iser_cm_hdr);
- ret = rdma_connect(cma_id, &conn_param);
+ ret = rdma_connect_locked(cma_id, &conn_param);
if (ret) {
iser_err("failure connecting: %d\n", ret);
goto failure;
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
index 776e89231c52..f298adc02acb 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -1674,9 +1674,9 @@ static int rtrs_rdma_route_resolved(struct rtrs_clt_con *con)
uuid_copy(&msg.sess_uuid, &sess->s.uuid);
uuid_copy(&msg.paths_uuid, &clt->paths_uuid);
- err = rdma_connect(con->c.cm_id, &param);
+ err = rdma_connect_locked(con->c.cm_id, &param);
if (err)
- rtrs_err(clt, "rdma_connect(): %d\n", err);
+ rtrs_err(clt, "rdma_connect_locked(): %d\n", err);
return err;
}
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 0065eb17ae36..53a8becac827 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -622,10 +622,11 @@ static int srpt_refresh_port(struct srpt_port *sport)
/**
* srpt_unregister_mad_agent - unregister MAD callback functions
* @sdev: SRPT HCA pointer.
+ * @port_cnt: number of ports with a registered MAD agent
*
* Note: It is safe to call this function more than once for the same device.
*/
-static void srpt_unregister_mad_agent(struct srpt_device *sdev)
+static void srpt_unregister_mad_agent(struct srpt_device *sdev, int port_cnt)
{
struct ib_port_modify port_modify = {
.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP,
@@ -633,7 +634,7 @@ static void srpt_unregister_mad_agent(struct srpt_device *sdev)
struct srpt_port *sport;
int i;
- for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
+ for (i = 1; i <= port_cnt; i++) {
sport = &sdev->port[i - 1];
WARN_ON(sport->port != i);
if (sport->mad_agent) {
@@ -3185,7 +3186,8 @@ static int srpt_add_one(struct ib_device *device)
if (ret) {
pr_err("MAD registration failed for %s-%d.\n",
dev_name(&sdev->device->dev), i);
- goto err_event;
+ i--;
+ goto err_port;
}
}
@@ -3197,7 +3199,8 @@ static int srpt_add_one(struct ib_device *device)
pr_debug("added %s.\n", dev_name(&device->dev));
return 0;
-err_event:
+err_port:
+ srpt_unregister_mad_agent(sdev, i);
ib_unregister_event_handler(&sdev->event_handler);
err_cm:
if (sdev->cm_id)
@@ -3221,7 +3224,7 @@ static void srpt_remove_one(struct ib_device *device, void *client_data)
struct srpt_device *sdev = client_data;
int i;
- srpt_unregister_mad_agent(sdev);
+ srpt_unregister_mad_agent(sdev, sdev->device->phys_port_cnt);
ib_unregister_event_handler(&sdev->event_handler);
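
The srpt fix above is an instance of the partial-unwind pattern: on failure at port i, only ports 1..i-1 were registered, so the cleanup helper must receive the partial count rather than phys_port_cnt. A generic illustration with hypothetical helpers:

	for (i = 1; i <= port_cnt; i++) {
		ret = register_port(dev, i);	/* hypothetical */
		if (ret) {
			i--;	/* port i itself never registered */
			goto err_unwind;
		}
	}
	return 0;

	err_unwind:
		unregister_ports(dev, i);	/* tears down ports 1..i */
		return ret;
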
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index 41435a699b53..bdeb010efee6 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -256,6 +256,7 @@ enum rdma_ch_state {
* @rdma_cm: See below.
* @rdma_cm.cm_id: RDMA CM ID associated with the channel.
* @cq: IB completion queue for this channel.
+ * @cq_size: Number of CQEs in @cq.
* @zw_cqe: Zero-length write CQE.
* @rcu: RCU head.
* @kref: kref for this channel.
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index eea47b4c84aa..974a66725d09 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -971,6 +971,9 @@ void icc_node_add(struct icc_node *node, struct icc_provider *provider)
}
node->avg_bw = node->init_avg;
node->peak_bw = node->init_peak;
+ if (provider->aggregate)
+ provider->aggregate(node, 0, node->init_avg, node->init_peak,
+ &node->avg_bw, &node->peak_bw);
provider->set(node, node);
node->avg_bw = 0;
node->peak_bw = 0;
diff --git a/drivers/interconnect/qcom/icc-rpmh.c b/drivers/interconnect/qcom/icc-rpmh.c
index cf10a4b9611b..bf01d09dba6c 100644
--- a/drivers/interconnect/qcom/icc-rpmh.c
+++ b/drivers/interconnect/qcom/icc-rpmh.c
@@ -79,6 +79,7 @@ EXPORT_SYMBOL_GPL(qcom_icc_aggregate);
int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
{
struct qcom_icc_provider *qp;
+ struct qcom_icc_node *qn;
struct icc_node *node;
if (!src)
@@ -87,6 +88,12 @@ int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
node = src;
qp = to_qcom_provider(node->provider);
+ qn = node->data;
+
+ qn->sum_avg[QCOM_ICC_BUCKET_AMC] = max_t(u64, qn->sum_avg[QCOM_ICC_BUCKET_AMC],
+ node->avg_bw);
+ qn->max_peak[QCOM_ICC_BUCKET_AMC] = max_t(u64, qn->max_peak[QCOM_ICC_BUCKET_AMC],
+ node->peak_bw);
qcom_icc_bcm_voter_commit(qp->voter);
diff --git a/drivers/interconnect/qcom/sc7180.c b/drivers/interconnect/qcom/sc7180.c
index bf11b82ed55c..8d9044ed18ab 100644
--- a/drivers/interconnect/qcom/sc7180.c
+++ b/drivers/interconnect/qcom/sc7180.c
@@ -553,6 +553,9 @@ static int qnoc_probe(struct platform_device *pdev)
return ret;
}
+ for (i = 0; i < qp->num_bcms; i++)
+ qcom_icc_bcm_init(qp->bcms[i], &pdev->dev);
+
for (i = 0; i < num_nodes; i++) {
size_t j;
@@ -576,9 +579,6 @@ static int qnoc_probe(struct platform_device *pdev)
}
data->num_nodes = num_nodes;
- for (i = 0; i < qp->num_bcms; i++)
- qcom_icc_bcm_init(qp->bcms[i], &pdev->dev);
-
platform_set_drvdata(pdev, qp);
return 0;
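The reordering in qnoc_probe() (repeated for sdm845, sm8150 and sm8250 below) follows from the core.c change above: icc_node_add() now calls back into the provider, so every BCM a node references must be initialized before the node is added. Condensed probe order, with a hypothetical loop body standing in for the driver's node setup:

	/* BCMs first: qcom_icc_bcm_init() resolves each BCM's command-DB
	 * data, which the provider callbacks dereference. */
	for (i = 0; i < qp->num_bcms; i++)
		qcom_icc_bcm_init(qp->bcms[i], &pdev->dev);

	/* Only then create and add nodes; icc_node_add() may invoke
	 * ->aggregate and ->set on the initial bandwidth values. */
	for (i = 0; i < num_nodes; i++) {
		node = icc_node_create(qnodes[i]->id);
		if (IS_ERR(node))
			return PTR_ERR(node);
		node->name = qnodes[i]->name;
		node->data = qnodes[i];
		icc_node_add(node, provider);
	}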
diff --git a/drivers/interconnect/qcom/sdm845.c b/drivers/interconnect/qcom/sdm845.c
index d79e3163e2c3..5304aea3b058 100644
--- a/drivers/interconnect/qcom/sdm845.c
+++ b/drivers/interconnect/qcom/sdm845.c
@@ -151,7 +151,7 @@ DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
DEFINE_QBCM(bcm_mm0, "MM0", false, &qns_mem_noc_hf);
DEFINE_QBCM(bcm_sh1, "SH1", false, &qns_apps_io);
-DEFINE_QBCM(bcm_mm1, "MM1", false, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_hf1_uncomp, &qxm_camnoc_sf_uncomp, &qxm_camnoc_hf0, &qxm_camnoc_hf1, &qxm_mdp0, &qxm_mdp1);
+DEFINE_QBCM(bcm_mm1, "MM1", true, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_hf1_uncomp, &qxm_camnoc_sf_uncomp, &qxm_camnoc_hf0, &qxm_camnoc_hf1, &qxm_mdp0, &qxm_mdp1);
DEFINE_QBCM(bcm_sh2, "SH2", false, &qns_memnoc_snoc);
DEFINE_QBCM(bcm_mm2, "MM2", false, &qns2_mem_noc);
DEFINE_QBCM(bcm_sh3, "SH3", false, &acm_tcu);
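The MM1 change flips the BCM's keepalive flag. In this family of drivers a keepalive BCM is clamped to a minimum vote even when the aggregated bandwidth drops to zero, so the camera/display paths behind MM1 are never switched off completely. A sketch of the assumed bcm-voter behavior (field names may differ from the actual tree):

	/* During commit, keepalive BCMs never vote fully off: */
	if (bcm->keepalive && bcm->vote_x[QCOM_ICC_BUCKET_AMC] == 0 &&
	    bcm->vote_y[QCOM_ICC_BUCKET_AMC] == 0) {
		bcm->vote_x[QCOM_ICC_BUCKET_AMC] = 1;
		bcm->vote_y[QCOM_ICC_BUCKET_AMC] = 1;
	}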
@@ -489,6 +489,9 @@ static int qnoc_probe(struct platform_device *pdev)
return ret;
}
+ for (i = 0; i < qp->num_bcms; i++)
+ qcom_icc_bcm_init(qp->bcms[i], &pdev->dev);
+
for (i = 0; i < num_nodes; i++) {
size_t j;
@@ -512,9 +515,6 @@ static int qnoc_probe(struct platform_device *pdev)
}
data->num_nodes = num_nodes;
- for (i = 0; i < qp->num_bcms; i++)
- qcom_icc_bcm_init(qp->bcms[i], &pdev->dev);
-
platform_set_drvdata(pdev, qp);
return 0;
diff --git a/drivers/interconnect/qcom/sm8150.c b/drivers/interconnect/qcom/sm8150.c
index 9218efed04a0..c76b2c7f9b10 100644
--- a/drivers/interconnect/qcom/sm8150.c
+++ b/drivers/interconnect/qcom/sm8150.c
@@ -551,6 +551,9 @@ static int qnoc_probe(struct platform_device *pdev)
return ret;
}
+ for (i = 0; i < qp->num_bcms; i++)
+ qcom_icc_bcm_init(qp->bcms[i], &pdev->dev);
+
for (i = 0; i < num_nodes; i++) {
size_t j;
@@ -574,9 +577,6 @@ static int qnoc_probe(struct platform_device *pdev)
}
data->num_nodes = num_nodes;
- for (i = 0; i < qp->num_bcms; i++)
- qcom_icc_bcm_init(qp->bcms[i], &pdev->dev);
-
platform_set_drvdata(pdev, qp);
return 0;
@@ -627,6 +627,7 @@ static struct platform_driver qnoc_driver = {
.driver = {
.name = "qnoc-sm8150",
.of_match_table = qnoc_of_match,
+ .sync_state = icc_sync_state,
},
};
module_platform_driver(qnoc_driver);
diff --git a/drivers/interconnect/qcom/sm8250.c b/drivers/interconnect/qcom/sm8250.c
index 9b58946f7898..cc558fec74e3 100644
--- a/drivers/interconnect/qcom/sm8250.c
+++ b/drivers/interconnect/qcom/sm8250.c
@@ -567,6 +567,9 @@ static int qnoc_probe(struct platform_device *pdev)
return ret;
}
+ for (i = 0; i < qp->num_bcms; i++)
+ qcom_icc_bcm_init(qp->bcms[i], &pdev->dev);
+
for (i = 0; i < num_nodes; i++) {
size_t j;
@@ -590,9 +593,6 @@ static int qnoc_probe(struct platform_device *pdev)
}
data->num_nodes = num_nodes;
- for (i = 0; i < qp->num_bcms; i++)
- qcom_icc_bcm_init(qp->bcms[i], &pdev->dev);
-
platform_set_drvdata(pdev, qp);
return 0;
@@ -643,6 +643,7 @@ static struct platform_driver qnoc_driver = {
.driver = {
.name = "qnoc-sm8250",
.of_match_table = qnoc_of_match,
+ .sync_state = icc_sync_state,
},
};
module_platform_driver(qnoc_driver);
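Wiring icc_sync_state() into .sync_state ties these providers into the interconnect core's boot-time handling: paths are held at maximum bandwidth until all DT consumers of the provider have probed, after which the driver core invokes the callback once and the votes drop to what was actually requested. Condensed registration, assuming the driver's existing probe/remove:

static struct platform_driver qnoc_driver = {
	.probe = qnoc_probe,
	.remove = qnoc_remove,
	.driver = {
		.name = "qnoc-sm8250",
		.of_match_table = qnoc_of_match,
		/* invoked by the driver core once all consumers probed */
		.sync_state = icc_sync_state,
	},
};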
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index f696ac7c5f89..89647700bab2 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -409,7 +409,11 @@ extern bool amd_iommu_np_cache;
/* Only true if all IOMMUs support device IOTLBs */
extern bool amd_iommu_iotlb_sup;
-#define MAX_IRQS_PER_TABLE 256
+/*
+ * AMD IOMMU hardware only supports 512 IRTEs, even though the
+ * architectural limit is 2048 entries.
+ */
+#define MAX_IRQS_PER_TABLE 512
#define IRQ_TABLE_ALIGNMENT 128
struct irq_remap_table {
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 8651f6d4dfa0..c6622011d493 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -2525,6 +2525,9 @@ struct dmar_domain *find_domain(struct device *dev)
{
struct device_domain_info *info;
+ if (unlikely(!dev || !dev->iommu))
+ return NULL;
+
if (unlikely(attach_deferred(dev)))
return NULL;
@@ -3815,9 +3818,8 @@ bounce_map_single(struct device *dev, phys_addr_t paddr, size_t size,
* page aligned, we don't need to use a bounce page.
*/
if (!IS_ALIGNED(paddr | size, VTD_PAGE_SIZE)) {
- tlb_addr = swiotlb_tbl_map_single(dev,
- phys_to_dma_unencrypted(dev, io_tlb_start),
- paddr, size, aligned_size, dir, attrs);
+ tlb_addr = swiotlb_tbl_map_single(dev, paddr, size,
+ aligned_size, dir, attrs);
if (tlb_addr == DMA_MAPPING_ERROR) {
goto swiotlb_error;
} else {
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index f1861fa3d0e4..3242ebd0bca3 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -279,6 +279,7 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
struct intel_svm_dev *sdev = NULL;
struct dmar_domain *dmar_domain;
+ struct device_domain_info *info;
struct intel_svm *svm = NULL;
int ret = 0;
@@ -310,6 +311,10 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
if (data->hpasid <= 0 || data->hpasid >= PASID_MAX)
return -EINVAL;
+ info = get_domain_info(dev);
+ if (!info)
+ return -EINVAL;
+
dmar_domain = to_dmar_domain(domain);
mutex_lock(&pasid_mutex);
@@ -357,6 +362,7 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
goto out;
}
sdev->dev = dev;
+ sdev->sid = PCI_DEVID(info->bus, info->devfn);
/* Only count users if device has aux domains */
if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
@@ -1029,7 +1035,7 @@ no_pasid:
resp.qw0 = QI_PGRP_PASID(req->pasid) |
QI_PGRP_DID(req->rid) |
QI_PGRP_PASID_P(req->pasid_present) |
- QI_PGRP_PDP(req->pasid_present) |
+ QI_PGRP_PDP(req->priv_data_present) |
QI_PGRP_RESP_CODE(result) |
QI_PGRP_RESP_TYPE;
resp.qw1 = QI_PGRP_IDX(req->prg_index) |
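Caching sdev->sid at bind time gives later lookups the PCI requester ID without re-deriving it. PCI_DEVID() simply packs bus and devfn into the 16-bit BDF; its standard definition from <linux/pci.h> is:

#define PCI_DEVID(bus, devfn)	((((u16)(bus)) << 8) | (devfn))

/* e.g. bus 0x3a, devfn PCI_DEVFN(0, 1) -> sid 0x3a01 */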
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 8c470f451a32..b53446bb8c6b 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -2071,7 +2071,7 @@ EXPORT_SYMBOL_GPL(iommu_uapi_cache_invalidate);
static int iommu_check_bind_data(struct iommu_gpasid_bind_data *data)
{
- u32 mask;
+ u64 mask;
int i;
if (data->version != IOMMU_GPASID_BIND_VERSION_1)
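The u32-to-u64 widening matters because the UAPI flags field being validated is 64 bits wide: complementing a u32 mask yields a value whose bits 32..63 are zero after promotion, so a check of the form "flags & ~mask" can never catch an undefined high flag bit. A minimal illustration with hypothetical flag values:

	u64 flags = 1ULL << 40;	/* hypothetical undefined high flag bit */
	u32 mask32 = 0x7;	/* valid-bit mask as u32 (buggy) */
	u64 mask64 = 0x7;	/* same mask as u64 (fixed) */

	/* ~mask32 promotes to 0x00000000fffffff8, clearing bits 32..63,
	 * so the bad bit 40 sails through: */
	bool leaked = !(flags & ~mask32);	/* true - bug */

	/* ~mask64 keeps bits 32..63 set, so the bad bit is rejected: */
	bool caught = flags & ~mask64;		/* nonzero - fixed */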
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index c6098eee0c7c..2aa79c32ee22 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -180,7 +180,6 @@ config IRQ_MIPS_CPU
select GENERIC_IRQ_CHIP
select GENERIC_IRQ_IPI if SYS_SUPPORTS_MULTITHREADING
select IRQ_DOMAIN
- select IRQ_DOMAIN_HIERARCHY if GENERIC_IRQ_IPI
select GENERIC_IRQ_EFFECTIVE_AFF_MASK
config CLPS711X_IRQCHIP
@@ -315,7 +314,6 @@ config KEYSTONE_IRQ
config MIPS_GIC
bool
select GENERIC_IRQ_IPI
- select IRQ_DOMAIN_HIERARCHY
select MIPS_CM
config INGENIC_IRQ
@@ -591,6 +589,7 @@ config LOONGSON_PCH_MSI
config MST_IRQ
bool "MStar Interrupt Controller"
+ depends on ARCH_MEDIATEK || ARCH_MSTARV7 || COMPILE_TEST
default ARCH_MEDIATEK
select IRQ_DOMAIN
select IRQ_DOMAIN_HIERARCHY
diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c
index 97838eb705f9..cbc7c740e4dc 100644
--- a/drivers/irqchip/irq-bcm2836.c
+++ b/drivers/irqchip/irq-bcm2836.c
@@ -244,7 +244,7 @@ static int bcm2836_cpu_dying(unsigned int cpu)
#define BITS_PER_MBOX 32
-static void bcm2836_arm_irqchip_smp_init(void)
+static void __init bcm2836_arm_irqchip_smp_init(void)
{
struct irq_fwspec ipi_fwspec = {
.fwnode = intc.domain->fwnode,
diff --git a/drivers/irqchip/irq-mst-intc.c b/drivers/irqchip/irq-mst-intc.c
index 4be077591898..143657b0cf28 100644
--- a/drivers/irqchip/irq-mst-intc.c
+++ b/drivers/irqchip/irq-mst-intc.c
@@ -154,8 +154,8 @@ static const struct irq_domain_ops mst_intc_domain_ops = {
.free = irq_domain_free_irqs_common,
};
-int __init
-mst_intc_of_init(struct device_node *dn, struct device_node *parent)
+static int __init mst_intc_of_init(struct device_node *dn,
+ struct device_node *parent)
{
struct irq_domain *domain, *domain_parent;
struct mst_intc_chip_data *cd;
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
index 3819185bfd02..cb7f60b3b4a9 100644
--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
@@ -71,8 +71,7 @@ struct intc_irqpin_priv {
};
struct intc_irqpin_config {
- unsigned int irlm_bit;
- unsigned needs_irlm:1;
+ int irlm_bit; /* -1 if non-existent */
};
static unsigned long intc_irqpin_read32(void __iomem *iomem)
@@ -349,11 +348,10 @@ static const struct irq_domain_ops intc_irqpin_irq_domain_ops = {
static const struct intc_irqpin_config intc_irqpin_irlm_r8a777x = {
.irlm_bit = 23, /* ICR0.IRLM0 */
- .needs_irlm = 1,
};
static const struct intc_irqpin_config intc_irqpin_rmobile = {
- .needs_irlm = 0,
+ .irlm_bit = -1,
};
static const struct of_device_id intc_irqpin_dt_ids[] = {
@@ -470,7 +468,7 @@ static int intc_irqpin_probe(struct platform_device *pdev)
}
/* configure "individual IRQ mode" where needed */
- if (config && config->needs_irlm) {
+ if (config && config->irlm_bit >= 0) {
if (io[INTC_IRQPIN_REG_IRLM])
intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_IRLM,
config->irlm_bit, 1, 1);
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index eaa3e9fe54e9..6f432d2a5ceb 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -99,7 +99,7 @@ static inline void plic_irq_toggle(const struct cpumask *mask,
struct irq_data *d, int enable)
{
int cpu;
- struct plic_priv *priv = irq_get_chip_data(d->irq);
+ struct plic_priv *priv = irq_data_get_irq_chip_data(d);
writel(enable, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
for_each_cpu(cpu, mask) {
@@ -115,7 +115,7 @@ static void plic_irq_unmask(struct irq_data *d)
{
struct cpumask amask;
unsigned int cpu;
- struct plic_priv *priv = irq_get_chip_data(d->irq);
+ struct plic_priv *priv = irq_data_get_irq_chip_data(d);
cpumask_and(&amask, &priv->lmask, cpu_online_mask);
cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
@@ -127,7 +127,7 @@ static void plic_irq_unmask(struct irq_data *d)
static void plic_irq_mask(struct irq_data *d)
{
- struct plic_priv *priv = irq_get_chip_data(d->irq);
+ struct plic_priv *priv = irq_data_get_irq_chip_data(d);
plic_irq_toggle(&priv->lmask, d, 0);
}
@@ -138,7 +138,7 @@ static int plic_set_affinity(struct irq_data *d,
{
unsigned int cpu;
struct cpumask amask;
- struct plic_priv *priv = irq_get_chip_data(d->irq);
+ struct plic_priv *priv = irq_data_get_irq_chip_data(d);
cpumask_and(&amask, &priv->lmask, mask_val);
@@ -151,7 +151,7 @@ static int plic_set_affinity(struct irq_data *d,
return -EINVAL;
plic_irq_toggle(&priv->lmask, d, 0);
- plic_irq_toggle(cpumask_of(cpu), d, 1);
+ plic_irq_toggle(cpumask_of(cpu), d, !irqd_irq_masked(d));
irq_data_update_effective_affinity(d, cpumask_of(cpu));
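In a hierarchical irqdomain, irq_get_chip_data(d->irq) resolves the Linux IRQ number and hands back the chip data of the outermost irq_data in the stack, which need not be the PLIC's. irq_data_get_irq_chip_data(d) reads the data attached to the irq_data actually passed to the callback; its definition in <linux/irq.h> is just:

static inline void *irq_data_get_irq_chip_data(struct irq_data *d)
{
	return d->chip_data;
}

The final hunk is a separate fix: the enable on the new target CPU is now gated on !irqd_irq_masked(d), so migrating a masked interrupt no longer unmasks it.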
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index 0c2c61db26b4..8662d7b7b262 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -195,6 +195,10 @@ static const struct stm32_desc_irq stm32mp1_desc_irq[] = {
{ .exti = 25, .irq_parent = 107, .chip = &stm32_exti_h_chip_direct },
{ .exti = 30, .irq_parent = 52, .chip = &stm32_exti_h_chip_direct },
{ .exti = 47, .irq_parent = 93, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 48, .irq_parent = 138, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 50, .irq_parent = 139, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 52, .irq_parent = 140, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 53, .irq_parent = 141, .chip = &stm32_exti_h_chip_direct },
{ .exti = 54, .irq_parent = 135, .chip = &stm32_exti_h_chip_direct },
{ .exti = 61, .irq_parent = 100, .chip = &stm32_exti_h_chip_direct },
{ .exti = 65, .irq_parent = 144, .chip = &stm32_exti_h_chip },
diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c
index e0cceb81c648..b2ab8db439d9 100644
--- a/drivers/irqchip/irq-ti-sci-inta.c
+++ b/drivers/irqchip/irq-ti-sci-inta.c
@@ -85,6 +85,17 @@ struct ti_sci_inta_vint_desc {
* @base: Base address of the memory mapped IO registers
* @pdev: Pointer to platform device.
* @ti_sci_id: TI-SCI device identifier
+ * @unmapped_cnt: Number of @unmapped_dev_ids entries
+ * @unmapped_dev_ids: Pointer to an array of TI-SCI device identifiers of
+ * unmapped event sources.
+ * Unmapped Events are not part of the Global Event Map;
+ * they are converted to Global Events within the INTA so
+ * that the same INTA can receive them and raise an
+ * interrupt.
+ * When an interrupt request arrives for a device that
+ * generates Unmapped Events, the INTA's TI-SCI device
+ * identifier must be used in place of the source device
+ * identifier so that sysfw knows where to program the
+ * Global Event number.
*/
struct ti_sci_inta_irq_domain {
const struct ti_sci_handle *sci;
@@ -96,11 +107,37 @@ struct ti_sci_inta_irq_domain {
void __iomem *base;
struct platform_device *pdev;
u32 ti_sci_id;
+
+ int unmapped_cnt;
+ u16 *unmapped_dev_ids;
};
#define to_vint_desc(e, i) container_of(e, struct ti_sci_inta_vint_desc, \
events[i])
+static u16 ti_sci_inta_get_dev_id(struct ti_sci_inta_irq_domain *inta, u32 hwirq)
+{
+ u16 dev_id = HWIRQ_TO_DEVID(hwirq);
+ int i;
+
+ if (inta->unmapped_cnt == 0)
+ return dev_id;
+
+ /*
+	 * For devices sending Unmapped Events we must use the INTA's TI-SCI
+	 * device identifier so the event can be converted to a Global Event
+	 * and mapped to an interrupt.
+ */
+ for (i = 0; i < inta->unmapped_cnt; i++) {
+ if (dev_id == inta->unmapped_dev_ids[i]) {
+ dev_id = inta->ti_sci_id;
+ break;
+ }
+ }
+
+ return dev_id;
+}
+
/**
* ti_sci_inta_irq_handler() - Chained IRQ handler for the vint irqs
* @desc: Pointer to irq_desc corresponding to the irq
@@ -251,7 +288,7 @@ static struct ti_sci_inta_event_desc *ti_sci_inta_alloc_event(struct ti_sci_inta
u16 dev_id, dev_index;
int err;
- dev_id = HWIRQ_TO_DEVID(hwirq);
+ dev_id = ti_sci_inta_get_dev_id(inta, hwirq);
dev_index = HWIRQ_TO_IRQID(hwirq);
event_desc = &vint_desc->events[free_bit];
@@ -352,14 +389,15 @@ static void ti_sci_inta_free_irq(struct ti_sci_inta_event_desc *event_desc,
{
struct ti_sci_inta_vint_desc *vint_desc;
struct ti_sci_inta_irq_domain *inta;
+ u16 dev_id;
vint_desc = to_vint_desc(event_desc, event_desc->vint_bit);
inta = vint_desc->domain->host_data;
+ dev_id = ti_sci_inta_get_dev_id(inta, hwirq);
/* free event irq */
mutex_lock(&inta->vint_mutex);
inta->sci->ops.rm_irq_ops.free_event_map(inta->sci,
- HWIRQ_TO_DEVID(hwirq),
- HWIRQ_TO_IRQID(hwirq),
+ dev_id, HWIRQ_TO_IRQID(hwirq),
inta->ti_sci_id,
vint_desc->vint_id,
event_desc->global_event,
@@ -574,6 +612,41 @@ static struct msi_domain_info ti_sci_inta_msi_domain_info = {
.chip = &ti_sci_inta_msi_irq_chip,
};
+static int ti_sci_inta_get_unmapped_sources(struct ti_sci_inta_irq_domain *inta)
+{
+ struct device *dev = &inta->pdev->dev;
+ struct device_node *node = dev_of_node(dev);
+ struct of_phandle_iterator it;
+ int count, err, ret, i;
+
+ count = of_count_phandle_with_args(node, "ti,unmapped-event-sources", NULL);
+ if (count <= 0)
+ return 0;
+
+ inta->unmapped_dev_ids = devm_kcalloc(dev, count,
+ sizeof(*inta->unmapped_dev_ids),
+ GFP_KERNEL);
+ if (!inta->unmapped_dev_ids)
+ return -ENOMEM;
+
+ i = 0;
+ of_for_each_phandle(&it, err, node, "ti,unmapped-event-sources", NULL, 0) {
+ u32 dev_id;
+
+ ret = of_property_read_u32(it.node, "ti,sci-dev-id", &dev_id);
+ if (ret) {
+ dev_err(dev, "ti,sci-dev-id read failure for %pOFf\n", it.node);
+ of_node_put(it.node);
+ return ret;
+ }
+ inta->unmapped_dev_ids[i++] = dev_id;
+ }
+
+ inta->unmapped_cnt = count;
+
+ return 0;
+}
+
static int ti_sci_inta_irq_domain_probe(struct platform_device *pdev)
{
struct irq_domain *parent_domain, *domain, *msi_domain;
@@ -629,6 +702,10 @@ static int ti_sci_inta_irq_domain_probe(struct platform_device *pdev)
if (IS_ERR(inta->base))
return PTR_ERR(inta->base);
+ ret = ti_sci_inta_get_unmapped_sources(inta);
+ if (ret)
+ return ret;
+
domain = irq_domain_add_linear(dev_of_node(dev),
ti_sci_get_num_resources(inta->vint),
&ti_sci_inta_irq_domain_ops, inta);
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index a5ef9faf71c7..e7f0d4ae0f96 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -1176,8 +1176,10 @@ mptscsih_remove(struct pci_dev *pdev)
MPT_SCSI_HOST *hd;
int sz1;
- if((hd = shost_priv(host)) == NULL)
- return;
+ if (host == NULL)
+ hd = NULL;
+ else
+ hd = shost_priv(host);
mptscsih_shutdown(pdev);
@@ -1193,14 +1195,15 @@ mptscsih_remove(struct pci_dev *pdev)
"Free'd ScsiLookup (%d) memory\n",
ioc->name, sz1));
- kfree(hd->info_kbuf);
+ if (hd)
+ kfree(hd->info_kbuf);
/* NULL the Scsi_Host pointer
*/
ioc->sh = NULL;
- scsi_host_put(host);
-
+ if (host)
+ scsi_host_put(host);
mpt_detach(pdev);
}
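The net effect is that mptscsih_remove() now tolerates an ioc whose Scsi_Host was never allocated, instead of returning early and skipping mpt_detach(). Condensed control flow after the change:

	hd = host ? shost_priv(host) : NULL;

	mptscsih_shutdown(pdev);

	/* ... free the ScsiLookup table as before ... */

	if (hd)
		kfree(hd->info_kbuf);	/* hd is NULL when host is NULL */

	ioc->sh = NULL;
	if (host)
		scsi_host_put(host);
	mpt_detach(pdev);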
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index d5ce8082b0a0..fafa8b0d8099 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -474,7 +474,6 @@ source "drivers/misc/lis3lv02d/Kconfig"
source "drivers/misc/altera-stapl/Kconfig"
source "drivers/misc/mei/Kconfig"
source "drivers/misc/vmw_vmci/Kconfig"
-source "drivers/misc/mic/Kconfig"
source "drivers/misc/genwqe/Kconfig"
source "drivers/misc/echo/Kconfig"
source "drivers/misc/cxl/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 2521359e8ef7..d23231e73330 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -46,7 +46,6 @@ obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci/
obj-$(CONFIG_LATTICE_ECP3_CONFIG) += lattice-ecp3-config.o
obj-$(CONFIG_SRAM) += sram.o
obj-$(CONFIG_SRAM_EXEC) += sram-exec.o
-obj-y += mic/
obj-$(CONFIG_GENWQE) += genwqe/
obj-$(CONFIG_ECHO) += echo/
obj-$(CONFIG_CXL_BASE) += cxl/
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h
index 8bac86c4d86b..df2fb9520dd8 100644
--- a/drivers/misc/mei/hw.h
+++ b/drivers/misc/mei/hw.h
@@ -224,7 +224,7 @@ struct mei_ext_hdr {
u8 type;
u8 length;
u8 ext_payload[2];
- u8 hdr[0];
+ u8 hdr[];
};
/**
@@ -238,7 +238,7 @@ struct mei_ext_meta_hdr {
u8 count;
u8 size;
u8 reserved[2];
- struct mei_ext_hdr hdrs[0];
+ struct mei_ext_hdr hdrs[];
};
/*
@@ -308,7 +308,7 @@ struct mei_msg_hdr {
u32 dma_ring:1;
u32 internal:1;
u32 msg_complete:1;
- u32 extension[0];
+ u32 extension[];
} __packed;
/* The length is up to 9 bits */
diff --git a/drivers/misc/mic/Kconfig b/drivers/misc/mic/Kconfig
deleted file mode 100644
index 8a7c2c5711d5..000000000000
--- a/drivers/misc/mic/Kconfig
+++ /dev/null
@@ -1,141 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-menu "Intel MIC & related support"
-
-config INTEL_MIC_BUS
- tristate "Intel MIC Bus Driver"
- depends on 64BIT && PCI && X86
- select DMA_OPS
- help
- This option is selected by any driver which registers a
- device or driver on the MIC Bus, such as CONFIG_INTEL_MIC_HOST,
- CONFIG_INTEL_MIC_CARD, CONFIG_INTEL_MIC_X100_DMA etc.
-
- If you are building a host/card kernel with an Intel MIC device
- then say M (recommended) or Y, else say N. If unsure say N.
-
- More information about the Intel MIC family as well as the Linux
- OS and tools for MIC to use with this driver are available from
- <http://software.intel.com/en-us/mic-developer>.
-
-config SCIF_BUS
- tristate "SCIF Bus Driver"
- depends on 64BIT && PCI && X86
- select DMA_OPS
- help
- This option is selected by any driver which registers a
- device or driver on the SCIF Bus, such as CONFIG_INTEL_MIC_HOST
- and CONFIG_INTEL_MIC_CARD.
-
- If you are building a host/card kernel with an Intel MIC device
- then say M (recommended) or Y, else say N. If unsure say N.
-
- More information about the Intel MIC family as well as the Linux
- OS and tools for MIC to use with this driver are available from
- <http://software.intel.com/en-us/mic-developer>.
-
-config VOP_BUS
- tristate "VOP Bus Driver"
- depends on HAS_DMA
- select DMA_OPS
- help
- This option is selected by any driver which registers a
- device or driver on the VOP Bus, such as CONFIG_INTEL_MIC_HOST
- and CONFIG_INTEL_MIC_CARD.
-
- If you are building a host/card kernel with an Intel MIC device
- then say M (recommended) or Y, else say N. If unsure say N.
-
- More information about the Intel MIC family as well as the Linux
- OS and tools for MIC to use with this driver are available from
- <http://software.intel.com/en-us/mic-developer>.
-
-config INTEL_MIC_HOST
- tristate "Intel MIC Host Driver"
- depends on 64BIT && PCI && X86
- depends on INTEL_MIC_BUS && SCIF_BUS && MIC_COSM && VOP_BUS
- select DMA_OPS
- help
- This enables Host Driver support for the Intel Many Integrated
- Core (MIC) family of PCIe form factor coprocessor devices that
- run a 64 bit Linux OS. The driver manages card OS state and
- enables communication between host and card. Intel MIC X100
- devices are currently supported.
-
- If you are building a host kernel with an Intel MIC device then
- say M (recommended) or Y, else say N. If unsure say N.
-
- More information about the Intel MIC family as well as the Linux
- OS and tools for MIC to use with this driver are available from
- <http://software.intel.com/en-us/mic-developer>.
-
-config INTEL_MIC_CARD
- tristate "Intel MIC Card Driver"
- depends on 64BIT && X86
- depends on INTEL_MIC_BUS && SCIF_BUS && MIC_COSM && VOP_BUS
- select VIRTIO
- help
- This enables card driver support for the Intel Many Integrated
- Core (MIC) device family. The card driver communicates shutdown/
- crash events to the host and allows registration/configuration of
- virtio devices. Intel MIC X100 devices are currently supported.
-
- If you are building a card kernel for an Intel MIC device then
- say M (recommended) or Y, else say N. If unsure say N.
-
- For more information see
- <http://software.intel.com/en-us/mic-developer>.
-
-config SCIF
- tristate "SCIF Driver"
- depends on 64BIT && PCI && X86 && SCIF_BUS && IOMMU_SUPPORT
- select IOMMU_IOVA
- help
- This enables SCIF Driver support for the Intel Many Integrated
- Core (MIC) family of PCIe form factor coprocessor devices that
- run a 64 bit Linux OS. The Symmetric Communication Interface
- (SCIF (pronounced as skiff)) is a low level communications API
- across PCIe currently implemented for MIC.
-
- If you are building a host kernel with an Intel MIC device then
- say M (recommended) or Y, else say N. If unsure say N.
-
- More information about the Intel MIC family as well as the Linux
- OS and tools for MIC to use with this driver are available from
- <http://software.intel.com/en-us/mic-developer>.
-
-config MIC_COSM
- tristate "Intel MIC Coprocessor State Management (COSM) Drivers"
- depends on 64BIT && PCI && X86 && SCIF
- help
- This enables COSM driver support for the Intel Many
- Integrated Core (MIC) family of PCIe form factor coprocessor
- devices. COSM drivers implement functions such as boot,
- shutdown, reset and reboot of MIC devices.
-
- If you are building a host kernel with an Intel MIC device then
- say M (recommended) or Y, else say N. If unsure say N.
-
- More information about the Intel MIC family as well as the Linux
- OS and tools for MIC to use with this driver are available from
- <http://software.intel.com/en-us/mic-developer>.
-
-config VOP
- tristate "VOP Driver"
- depends on VOP_BUS
- select VHOST_RING
- select VIRTIO
- help
- This enables VOP (Virtio over PCIe) Driver support for the Intel
- Many Integrated Core (MIC) family of PCIe form factor coprocessor
- devices. The VOP driver allows virtio drivers, e.g. net, console
- and block drivers, on the card connect to user space virtio
- devices on the host.
-
- If you are building a host kernel with an Intel MIC device then
- say M (recommended) or Y, else say N. If unsure say N.
-
- More information about the Intel MIC family as well as the Linux
- OS and tools for MIC to use with this driver are available from
- <http://software.intel.com/en-us/mic-developer>.
-
-endmenu
diff --git a/drivers/misc/mic/Makefile b/drivers/misc/mic/Makefile
deleted file mode 100644
index 1a43622b183f..000000000000
--- a/drivers/misc/mic/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile - Intel MIC Linux driver.
-# Copyright(c) 2013, Intel Corporation.
-#
-obj-$(CONFIG_INTEL_MIC_HOST) += host/
-obj-$(CONFIG_INTEL_MIC_CARD) += card/
-obj-y += bus/
-obj-$(CONFIG_SCIF) += scif/
-obj-$(CONFIG_MIC_COSM) += cosm/
-obj-$(CONFIG_MIC_COSM) += cosm_client/
-obj-$(CONFIG_VOP) += vop/
diff --git a/drivers/misc/mic/bus/Makefile b/drivers/misc/mic/bus/Makefile
deleted file mode 100644
index 0a6aa21b2f67..000000000000
--- a/drivers/misc/mic/bus/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile - Intel MIC Linux driver.
-# Copyright(c) 2014, Intel Corporation.
-#
-obj-$(CONFIG_INTEL_MIC_BUS) += mic_bus.o
-obj-$(CONFIG_SCIF_BUS) += scif_bus.o
-obj-$(CONFIG_MIC_COSM) += cosm_bus.o
-obj-$(CONFIG_VOP_BUS) += vop_bus.o
diff --git a/drivers/misc/mic/bus/cosm_bus.c b/drivers/misc/mic/bus/cosm_bus.c
deleted file mode 100644
index 5f2141c71738..000000000000
--- a/drivers/misc/mic/bus/cosm_bus.c
+++ /dev/null
@@ -1,130 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2015 Intel Corporation.
- *
- * Intel MIC COSM Bus Driver
- */
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/idr.h>
-#include "cosm_bus.h"
-
-/* Unique numbering for cosm devices. */
-static DEFINE_IDA(cosm_index_ida);
-
-static int cosm_dev_probe(struct device *d)
-{
- struct cosm_device *dev = dev_to_cosm(d);
- struct cosm_driver *drv = drv_to_cosm(dev->dev.driver);
-
- return drv->probe(dev);
-}
-
-static int cosm_dev_remove(struct device *d)
-{
- struct cosm_device *dev = dev_to_cosm(d);
- struct cosm_driver *drv = drv_to_cosm(dev->dev.driver);
-
- drv->remove(dev);
- return 0;
-}
-
-static struct bus_type cosm_bus = {
- .name = "cosm_bus",
- .probe = cosm_dev_probe,
- .remove = cosm_dev_remove,
-};
-
-int cosm_register_driver(struct cosm_driver *driver)
-{
- driver->driver.bus = &cosm_bus;
- return driver_register(&driver->driver);
-}
-EXPORT_SYMBOL_GPL(cosm_register_driver);
-
-void cosm_unregister_driver(struct cosm_driver *driver)
-{
- driver_unregister(&driver->driver);
-}
-EXPORT_SYMBOL_GPL(cosm_unregister_driver);
-
-static inline void cosm_release_dev(struct device *d)
-{
- struct cosm_device *cdev = dev_to_cosm(d);
-
- kfree(cdev);
-}
-
-struct cosm_device *
-cosm_register_device(struct device *pdev, struct cosm_hw_ops *hw_ops)
-{
- struct cosm_device *cdev;
- int ret;
-
- cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
- if (!cdev)
- return ERR_PTR(-ENOMEM);
-
- cdev->dev.parent = pdev;
- cdev->dev.release = cosm_release_dev;
- cdev->hw_ops = hw_ops;
- dev_set_drvdata(&cdev->dev, cdev);
- cdev->dev.bus = &cosm_bus;
-
- /* Assign a unique device index and hence name */
- ret = ida_simple_get(&cosm_index_ida, 0, 0, GFP_KERNEL);
- if (ret < 0)
- goto free_cdev;
-
- cdev->index = ret;
- cdev->dev.id = ret;
- dev_set_name(&cdev->dev, "cosm-dev%u", cdev->index);
-
- ret = device_register(&cdev->dev);
- if (ret)
- goto ida_remove;
- return cdev;
-ida_remove:
- ida_simple_remove(&cosm_index_ida, cdev->index);
-free_cdev:
- put_device(&cdev->dev);
- return ERR_PTR(ret);
-}
-EXPORT_SYMBOL_GPL(cosm_register_device);
-
-void cosm_unregister_device(struct cosm_device *dev)
-{
- int index = dev->index; /* save for after device release */
-
- device_unregister(&dev->dev);
- ida_simple_remove(&cosm_index_ida, index);
-}
-EXPORT_SYMBOL_GPL(cosm_unregister_device);
-
-struct cosm_device *cosm_find_cdev_by_id(int id)
-{
- struct device *dev = subsys_find_device_by_id(&cosm_bus, id, NULL);
-
- return dev ? container_of(dev, struct cosm_device, dev) : NULL;
-}
-EXPORT_SYMBOL_GPL(cosm_find_cdev_by_id);
-
-static int __init cosm_init(void)
-{
- return bus_register(&cosm_bus);
-}
-
-static void __exit cosm_exit(void)
-{
- bus_unregister(&cosm_bus);
- ida_destroy(&cosm_index_ida);
-}
-
-core_initcall(cosm_init);
-module_exit(cosm_exit);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_DESCRIPTION("Intel(R) MIC card OS state management bus driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/mic/bus/cosm_bus.h b/drivers/misc/mic/bus/cosm_bus.h
deleted file mode 100644
index d50d7aea168d..000000000000
--- a/drivers/misc/mic/bus/cosm_bus.h
+++ /dev/null
@@ -1,125 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2015 Intel Corporation.
- *
- * Intel MIC COSM Bus Driver
- */
-#ifndef _COSM_BUS_H_
-#define _COSM_BUS_H_
-
-#include <linux/scif.h>
-#include <linux/mic_common.h>
-#include "../common/mic_dev.h"
-
-/**
- * cosm_device - representation of a cosm device
- *
- * @attr_group: Pointer to list of sysfs attribute groups.
- * @sdev: Device for sysfs entries.
- * @state: MIC state.
- * @prev_state: MIC state previous to MIC_RESETTING
- * @shutdown_status: MIC status reported by card for shutdown/crashes.
- * @shutdown_status_int: Internal shutdown status maintained by the driver
- * @cosm_mutex: Mutex for synchronizing access to data structures.
- * @reset_trigger_work: Work for triggering reset requests.
- * @scif_work: Work for handling per device SCIF connections
- * @cmdline: Kernel command line.
- * @firmware: Firmware file name.
- * @ramdisk: Ramdisk file name.
- * @bootmode: Boot mode i.e. "linux" or "elf" for flash updates.
- * @log_buf_addr: Log buffer address for MIC.
- * @log_buf_len: Log buffer length address for MIC.
- * @state_sysfs: Sysfs dirent for notifying ring 3 about MIC state changes.
- * @hw_ops: the hardware bus ops for this device.
- * @dev: underlying device.
- * @index: unique position on the cosm bus
- * @dbg_dir: debug fs directory
- * @newepd: new endpoint from scif accept to be assigned to this cdev
- * @epd: SCIF endpoint for this cdev
- * @heartbeat_watchdog_enable: if heartbeat watchdog is enabled for this cdev
- * @sysfs_heartbeat_enable: sysfs setting for disabling heartbeat notification
- */
-struct cosm_device {
- const struct attribute_group **attr_group;
- struct device *sdev;
- u8 state;
- u8 prev_state;
- u8 shutdown_status;
- u8 shutdown_status_int;
- struct mutex cosm_mutex;
- struct work_struct reset_trigger_work;
- struct work_struct scif_work;
- char *cmdline;
- char *firmware;
- char *ramdisk;
- char *bootmode;
- void *log_buf_addr;
- int *log_buf_len;
- struct kernfs_node *state_sysfs;
- struct cosm_hw_ops *hw_ops;
- struct device dev;
- int index;
- struct dentry *dbg_dir;
- scif_epd_t newepd;
- scif_epd_t epd;
- bool heartbeat_watchdog_enable;
- bool sysfs_heartbeat_enable;
-};
-
-/**
- * cosm_driver - operations for a cosm driver
- *
- * @driver: underlying device driver (populate name and owner).
- * @probe: the function to call when a device is found. Returns 0 or -errno.
- * @remove: the function to call when a device is removed.
- */
-struct cosm_driver {
- struct device_driver driver;
- int (*probe)(struct cosm_device *dev);
- void (*remove)(struct cosm_device *dev);
-};
-
-/**
- * cosm_hw_ops - cosm bus ops
- *
- * @reset: trigger MIC reset
- * @force_reset: force MIC reset
- * @post_reset: inform MIC reset is complete
- * @ready: is MIC ready for OS download
- * @start: boot MIC
- * @stop: prepare MIC for reset
- * @family: return MIC HW family string
- * @stepping: return MIC HW stepping string
- * @aper: return MIC PCIe aperture
- */
-struct cosm_hw_ops {
- void (*reset)(struct cosm_device *cdev);
- void (*force_reset)(struct cosm_device *cdev);
- void (*post_reset)(struct cosm_device *cdev, enum mic_states state);
- bool (*ready)(struct cosm_device *cdev);
- int (*start)(struct cosm_device *cdev, int id);
- void (*stop)(struct cosm_device *cdev, bool force);
- ssize_t (*family)(struct cosm_device *cdev, char *buf);
- ssize_t (*stepping)(struct cosm_device *cdev, char *buf);
- struct mic_mw *(*aper)(struct cosm_device *cdev);
-};
-
-struct cosm_device *
-cosm_register_device(struct device *pdev, struct cosm_hw_ops *hw_ops);
-void cosm_unregister_device(struct cosm_device *dev);
-int cosm_register_driver(struct cosm_driver *drv);
-void cosm_unregister_driver(struct cosm_driver *drv);
-struct cosm_device *cosm_find_cdev_by_id(int id);
-
-static inline struct cosm_device *dev_to_cosm(struct device *dev)
-{
- return container_of(dev, struct cosm_device, dev);
-}
-
-static inline struct cosm_driver *drv_to_cosm(struct device_driver *drv)
-{
- return container_of(drv, struct cosm_driver, driver);
-}
-#endif /* _COSM_BUS_H */
diff --git a/drivers/misc/mic/bus/mic_bus.c b/drivers/misc/mic/bus/mic_bus.c
deleted file mode 100644
index a08cb29692a8..000000000000
--- a/drivers/misc/mic/bus/mic_bus.c
+++ /dev/null
@@ -1,194 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Intel MIC Bus driver.
- *
- * This implementation is very similar to the virtio bus driver
- * implementation @ drivers/virtio/virtio.c
- */
-#include <linux/dma-map-ops.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/idr.h>
-#include <linux/mic_bus.h>
-
-static ssize_t device_show(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct mbus_device *dev = dev_to_mbus(d);
- return sprintf(buf, "0x%04x\n", dev->id.device);
-}
-static DEVICE_ATTR_RO(device);
-
-static ssize_t vendor_show(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct mbus_device *dev = dev_to_mbus(d);
- return sprintf(buf, "0x%04x\n", dev->id.vendor);
-}
-static DEVICE_ATTR_RO(vendor);
-
-static ssize_t modalias_show(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct mbus_device *dev = dev_to_mbus(d);
- return sprintf(buf, "mbus:d%08Xv%08X\n",
- dev->id.device, dev->id.vendor);
-}
-static DEVICE_ATTR_RO(modalias);
-
-static struct attribute *mbus_dev_attrs[] = {
- &dev_attr_device.attr,
- &dev_attr_vendor.attr,
- &dev_attr_modalias.attr,
- NULL,
-};
-ATTRIBUTE_GROUPS(mbus_dev);
-
-static inline int mbus_id_match(const struct mbus_device *dev,
- const struct mbus_device_id *id)
-{
- if (id->device != dev->id.device && id->device != MBUS_DEV_ANY_ID)
- return 0;
-
- return id->vendor == MBUS_DEV_ANY_ID || id->vendor == dev->id.vendor;
-}
-
-/*
- * This looks through all the IDs a driver claims to support. If any of them
- * match, we return 1 and the kernel will call mbus_dev_probe().
- */
-static int mbus_dev_match(struct device *dv, struct device_driver *dr)
-{
- unsigned int i;
- struct mbus_device *dev = dev_to_mbus(dv);
- const struct mbus_device_id *ids;
-
- ids = drv_to_mbus(dr)->id_table;
- for (i = 0; ids[i].device; i++)
- if (mbus_id_match(dev, &ids[i]))
- return 1;
- return 0;
-}
-
-static int mbus_uevent(struct device *dv, struct kobj_uevent_env *env)
-{
- struct mbus_device *dev = dev_to_mbus(dv);
-
- return add_uevent_var(env, "MODALIAS=mbus:d%08Xv%08X",
- dev->id.device, dev->id.vendor);
-}
-
-static int mbus_dev_probe(struct device *d)
-{
- int err;
- struct mbus_device *dev = dev_to_mbus(d);
- struct mbus_driver *drv = drv_to_mbus(dev->dev.driver);
-
- err = drv->probe(dev);
- if (!err)
- if (drv->scan)
- drv->scan(dev);
- return err;
-}
-
-static int mbus_dev_remove(struct device *d)
-{
- struct mbus_device *dev = dev_to_mbus(d);
- struct mbus_driver *drv = drv_to_mbus(dev->dev.driver);
-
- drv->remove(dev);
- return 0;
-}
-
-static struct bus_type mic_bus = {
- .name = "mic_bus",
- .match = mbus_dev_match,
- .dev_groups = mbus_dev_groups,
- .uevent = mbus_uevent,
- .probe = mbus_dev_probe,
- .remove = mbus_dev_remove,
-};
-
-int mbus_register_driver(struct mbus_driver *driver)
-{
- driver->driver.bus = &mic_bus;
- return driver_register(&driver->driver);
-}
-EXPORT_SYMBOL_GPL(mbus_register_driver);
-
-void mbus_unregister_driver(struct mbus_driver *driver)
-{
- driver_unregister(&driver->driver);
-}
-EXPORT_SYMBOL_GPL(mbus_unregister_driver);
-
-static void mbus_release_dev(struct device *d)
-{
- struct mbus_device *mbdev = dev_to_mbus(d);
- kfree(mbdev);
-}
-
-struct mbus_device *
-mbus_register_device(struct device *pdev, int id, const struct dma_map_ops *dma_ops,
- struct mbus_hw_ops *hw_ops, int index,
- void __iomem *mmio_va)
-{
- int ret;
- struct mbus_device *mbdev;
-
- mbdev = kzalloc(sizeof(*mbdev), GFP_KERNEL);
- if (!mbdev)
- return ERR_PTR(-ENOMEM);
-
- mbdev->mmio_va = mmio_va;
- mbdev->dev.parent = pdev;
- mbdev->id.device = id;
- mbdev->id.vendor = MBUS_DEV_ANY_ID;
- mbdev->dev.dma_ops = dma_ops;
- mbdev->dev.dma_mask = &mbdev->dev.coherent_dma_mask;
- dma_set_mask(&mbdev->dev, DMA_BIT_MASK(64));
- mbdev->dev.release = mbus_release_dev;
- mbdev->hw_ops = hw_ops;
- mbdev->dev.bus = &mic_bus;
- mbdev->index = index;
- dev_set_name(&mbdev->dev, "mbus-dev%u", mbdev->index);
- /*
- * device_register() causes the bus infrastructure to look for a
- * matching driver.
- */
- ret = device_register(&mbdev->dev);
- if (ret)
- goto free_mbdev;
- return mbdev;
-free_mbdev:
- put_device(&mbdev->dev);
- return ERR_PTR(ret);
-}
-EXPORT_SYMBOL_GPL(mbus_register_device);
-
-void mbus_unregister_device(struct mbus_device *mbdev)
-{
- device_unregister(&mbdev->dev);
-}
-EXPORT_SYMBOL_GPL(mbus_unregister_device);
-
-static int __init mbus_init(void)
-{
- return bus_register(&mic_bus);
-}
-
-static void __exit mbus_exit(void)
-{
- bus_unregister(&mic_bus);
-}
-
-core_initcall(mbus_init);
-module_exit(mbus_exit);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_DESCRIPTION("Intel(R) MIC Bus driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/mic/bus/scif_bus.c b/drivers/misc/mic/bus/scif_bus.c
deleted file mode 100644
index ad7c3604f151..000000000000
--- a/drivers/misc/mic/bus/scif_bus.c
+++ /dev/null
@@ -1,201 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Intel Symmetric Communications Interface Bus driver.
- */
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/idr.h>
-#include <linux/dma-map-ops.h>
-
-#include "scif_bus.h"
-
-static ssize_t device_show(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct scif_hw_dev *dev = dev_to_scif(d);
-
- return sprintf(buf, "0x%04x\n", dev->id.device);
-}
-static DEVICE_ATTR_RO(device);
-
-static ssize_t vendor_show(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct scif_hw_dev *dev = dev_to_scif(d);
-
- return sprintf(buf, "0x%04x\n", dev->id.vendor);
-}
-static DEVICE_ATTR_RO(vendor);
-
-static ssize_t modalias_show(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct scif_hw_dev *dev = dev_to_scif(d);
-
- return sprintf(buf, "scif:d%08Xv%08X\n",
- dev->id.device, dev->id.vendor);
-}
-static DEVICE_ATTR_RO(modalias);
-
-static struct attribute *scif_dev_attrs[] = {
- &dev_attr_device.attr,
- &dev_attr_vendor.attr,
- &dev_attr_modalias.attr,
- NULL,
-};
-ATTRIBUTE_GROUPS(scif_dev);
-
-static inline int scif_id_match(const struct scif_hw_dev *dev,
- const struct scif_hw_dev_id *id)
-{
- if (id->device != dev->id.device && id->device != SCIF_DEV_ANY_ID)
- return 0;
-
- return id->vendor == SCIF_DEV_ANY_ID || id->vendor == dev->id.vendor;
-}
-
-/*
- * This looks through all the IDs a driver claims to support. If any of them
- * match, we return 1 and the kernel will call scif_dev_probe().
- */
-static int scif_dev_match(struct device *dv, struct device_driver *dr)
-{
- unsigned int i;
- struct scif_hw_dev *dev = dev_to_scif(dv);
- const struct scif_hw_dev_id *ids;
-
- ids = drv_to_scif(dr)->id_table;
- for (i = 0; ids[i].device; i++)
- if (scif_id_match(dev, &ids[i]))
- return 1;
- return 0;
-}
-
-static int scif_uevent(struct device *dv, struct kobj_uevent_env *env)
-{
- struct scif_hw_dev *dev = dev_to_scif(dv);
-
- return add_uevent_var(env, "MODALIAS=scif:d%08Xv%08X",
- dev->id.device, dev->id.vendor);
-}
-
-static int scif_dev_probe(struct device *d)
-{
- struct scif_hw_dev *dev = dev_to_scif(d);
- struct scif_driver *drv = drv_to_scif(dev->dev.driver);
-
- return drv->probe(dev);
-}
-
-static int scif_dev_remove(struct device *d)
-{
- struct scif_hw_dev *dev = dev_to_scif(d);
- struct scif_driver *drv = drv_to_scif(dev->dev.driver);
-
- drv->remove(dev);
- return 0;
-}
-
-static struct bus_type scif_bus = {
- .name = "scif_bus",
- .match = scif_dev_match,
- .dev_groups = scif_dev_groups,
- .uevent = scif_uevent,
- .probe = scif_dev_probe,
- .remove = scif_dev_remove,
-};
-
-int scif_register_driver(struct scif_driver *driver)
-{
- driver->driver.bus = &scif_bus;
- return driver_register(&driver->driver);
-}
-EXPORT_SYMBOL_GPL(scif_register_driver);
-
-void scif_unregister_driver(struct scif_driver *driver)
-{
- driver_unregister(&driver->driver);
-}
-EXPORT_SYMBOL_GPL(scif_unregister_driver);
-
-static void scif_release_dev(struct device *d)
-{
- struct scif_hw_dev *sdev = dev_to_scif(d);
-
- kfree(sdev);
-}
-
-struct scif_hw_dev *
-scif_register_device(struct device *pdev, int id, const struct dma_map_ops *dma_ops,
- struct scif_hw_ops *hw_ops, u8 dnode, u8 snode,
- struct mic_mw *mmio, struct mic_mw *aper, void *dp,
- void __iomem *rdp, struct dma_chan **chan, int num_chan,
- bool card_rel_da)
-{
- int ret;
- struct scif_hw_dev *sdev;
-
- sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
- if (!sdev)
- return ERR_PTR(-ENOMEM);
-
- sdev->dev.parent = pdev;
- sdev->id.device = id;
- sdev->id.vendor = SCIF_DEV_ANY_ID;
- sdev->dev.dma_ops = dma_ops;
- sdev->dev.release = scif_release_dev;
- sdev->hw_ops = hw_ops;
- sdev->dnode = dnode;
- sdev->snode = snode;
- dev_set_drvdata(&sdev->dev, sdev);
- sdev->dev.bus = &scif_bus;
- sdev->mmio = mmio;
- sdev->aper = aper;
- sdev->dp = dp;
- sdev->rdp = rdp;
- sdev->dev.dma_mask = &sdev->dev.coherent_dma_mask;
- dma_set_mask(&sdev->dev, DMA_BIT_MASK(64));
- sdev->dma_ch = chan;
- sdev->num_dma_ch = num_chan;
- sdev->card_rel_da = card_rel_da;
- dev_set_name(&sdev->dev, "scif-dev%u", sdev->dnode);
- /*
- * device_register() causes the bus infrastructure to look for a
- * matching driver.
- */
- ret = device_register(&sdev->dev);
- if (ret)
- goto free_sdev;
- return sdev;
-free_sdev:
- put_device(&sdev->dev);
- return ERR_PTR(ret);
-}
-EXPORT_SYMBOL_GPL(scif_register_device);
-
-void scif_unregister_device(struct scif_hw_dev *sdev)
-{
- device_unregister(&sdev->dev);
-}
-EXPORT_SYMBOL_GPL(scif_unregister_device);
-
-static int __init scif_init(void)
-{
- return bus_register(&scif_bus);
-}
-
-static void __exit scif_exit(void)
-{
- bus_unregister(&scif_bus);
-}
-
-core_initcall(scif_init);
-module_exit(scif_exit);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_DESCRIPTION("Intel(R) SCIF Bus driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/mic/bus/scif_bus.h b/drivers/misc/mic/bus/scif_bus.h
deleted file mode 100644
index 4981eb56f879..000000000000
--- a/drivers/misc/mic/bus/scif_bus.h
+++ /dev/null
@@ -1,125 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Intel Symmetric Communications Interface Bus driver.
- */
-#ifndef _SCIF_BUS_H_
-#define _SCIF_BUS_H_
-/*
- * Everything a scif driver needs to work with any particular scif
- * hardware abstraction layer.
- */
-#include <linux/dma-map-ops.h>
-
-#include <linux/mic_common.h>
-#include "../common/mic_dev.h"
-
-struct scif_hw_dev_id {
- u32 device;
- u32 vendor;
-};
-
-#define MIC_SCIF_DEV 1
-#define SCIF_DEV_ANY_ID 0xffffffff
-
-/**
- * scif_hw_dev - representation of a hardware device abstracted for scif
- * @hw_ops: the hardware ops supported by this device
- * @id: the device type identification (used to match it with a driver)
- * @mmio: MMIO memory window
- * @aper: Aperture memory window
- * @dev: underlying device
- * @dnode - The destination node which this device will communicate with.
- * @snode - The source node for this device.
- * @dp - Self device page
- * @rdp - Remote device page
- * @dma_ch - Array of DMA channels
- * @num_dma_ch - Number of DMA channels available
- * @card_rel_da - Set to true if DMA addresses programmed in the DMA engine
- * are relative to the card point of view
- */
-struct scif_hw_dev {
- struct scif_hw_ops *hw_ops;
- struct scif_hw_dev_id id;
- struct mic_mw *mmio;
- struct mic_mw *aper;
- struct device dev;
- u8 dnode;
- u8 snode;
- void *dp;
- void __iomem *rdp;
- struct dma_chan **dma_ch;
- int num_dma_ch;
- bool card_rel_da;
-};
-
-/**
- * scif_driver - operations for a scif I/O driver
- * @driver: underlying device driver (populate name and owner).
- * @id_table: the ids serviced by this driver.
- * @probe: the function to call when a device is found. Returns 0 or -errno.
- * @remove: the function to call when a device is removed.
- */
-struct scif_driver {
- struct device_driver driver;
- const struct scif_hw_dev_id *id_table;
- int (*probe)(struct scif_hw_dev *dev);
- void (*remove)(struct scif_hw_dev *dev);
-};
-
-/**
- * scif_hw_ops - Hardware operations for accessing a SCIF device on the SCIF bus.
- *
- * @next_db: Obtain the next available doorbell.
- * @request_irq: Request an interrupt on a particular doorbell.
- * @free_irq: Free an interrupt requested previously.
- * @ack_interrupt: acknowledge an interrupt in the ISR.
- * @send_intr: Send an interrupt to the remote node on a specified doorbell.
- * @send_p2p_intr: Send an interrupt to the peer node on a specified doorbell
- * which is specifically targeted for a peer to peer node.
- * @remap: Map a buffer with the specified physical address and length.
- * @unmap: Unmap a buffer previously mapped.
- */
-struct scif_hw_ops {
- int (*next_db)(struct scif_hw_dev *sdev);
- struct mic_irq * (*request_irq)(struct scif_hw_dev *sdev,
- irqreturn_t (*func)(int irq,
- void *data),
- const char *name, void *data,
- int db);
- void (*free_irq)(struct scif_hw_dev *sdev,
- struct mic_irq *cookie, void *data);
- void (*ack_interrupt)(struct scif_hw_dev *sdev, int num);
- void (*send_intr)(struct scif_hw_dev *sdev, int db);
- void (*send_p2p_intr)(struct scif_hw_dev *sdev, int db,
- struct mic_mw *mw);
- void __iomem * (*remap)(struct scif_hw_dev *sdev,
- phys_addr_t pa, size_t len);
- void (*unmap)(struct scif_hw_dev *sdev, void __iomem *va);
-};
-
-int scif_register_driver(struct scif_driver *driver);
-void scif_unregister_driver(struct scif_driver *driver);
-struct scif_hw_dev *
-scif_register_device(struct device *pdev, int id,
- const struct dma_map_ops *dma_ops,
- struct scif_hw_ops *hw_ops, u8 dnode, u8 snode,
- struct mic_mw *mmio, struct mic_mw *aper,
- void *dp, void __iomem *rdp,
- struct dma_chan **chan, int num_chan,
- bool card_rel_da);
-void scif_unregister_device(struct scif_hw_dev *sdev);
-
-static inline struct scif_hw_dev *dev_to_scif(struct device *dev)
-{
- return container_of(dev, struct scif_hw_dev, dev);
-}
-
-static inline struct scif_driver *drv_to_scif(struct device_driver *drv)
-{
- return container_of(drv, struct scif_driver, driver);
-}
-#endif /* _SCIF_BUS_H */
diff --git a/drivers/misc/mic/bus/vop_bus.c b/drivers/misc/mic/bus/vop_bus.c
deleted file mode 100644
index 6935ddca1bd5..000000000000
--- a/drivers/misc/mic/bus/vop_bus.c
+++ /dev/null
@@ -1,194 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Intel Virtio Over PCIe (VOP) Bus driver.
- */
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/idr.h>
-#include <linux/dma-map-ops.h>
-
-#include "vop_bus.h"
-
-static ssize_t device_show(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct vop_device *dev = dev_to_vop(d);
-
- return sprintf(buf, "0x%04x\n", dev->id.device);
-}
-static DEVICE_ATTR_RO(device);
-
-static ssize_t vendor_show(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct vop_device *dev = dev_to_vop(d);
-
- return sprintf(buf, "0x%04x\n", dev->id.vendor);
-}
-static DEVICE_ATTR_RO(vendor);
-
-static ssize_t modalias_show(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct vop_device *dev = dev_to_vop(d);
-
- return sprintf(buf, "vop:d%08Xv%08X\n",
- dev->id.device, dev->id.vendor);
-}
-static DEVICE_ATTR_RO(modalias);
-
-static struct attribute *vop_dev_attrs[] = {
- &dev_attr_device.attr,
- &dev_attr_vendor.attr,
- &dev_attr_modalias.attr,
- NULL,
-};
-ATTRIBUTE_GROUPS(vop_dev);
-
-static inline int vop_id_match(const struct vop_device *dev,
- const struct vop_device_id *id)
-{
- if (id->device != dev->id.device && id->device != VOP_DEV_ANY_ID)
- return 0;
-
- return id->vendor == VOP_DEV_ANY_ID || id->vendor == dev->id.vendor;
-}
-
-/*
- * This looks through all the IDs a driver claims to support. If any of them
- * match, we return 1 and the kernel will call vop_dev_probe().
- */
-static int vop_dev_match(struct device *dv, struct device_driver *dr)
-{
- unsigned int i;
- struct vop_device *dev = dev_to_vop(dv);
- const struct vop_device_id *ids;
-
- ids = drv_to_vop(dr)->id_table;
- for (i = 0; ids[i].device; i++)
- if (vop_id_match(dev, &ids[i]))
- return 1;
- return 0;
-}
-
-static int vop_uevent(struct device *dv, struct kobj_uevent_env *env)
-{
- struct vop_device *dev = dev_to_vop(dv);
-
- return add_uevent_var(env, "MODALIAS=vop:d%08Xv%08X",
- dev->id.device, dev->id.vendor);
-}
-
-static int vop_dev_probe(struct device *d)
-{
- struct vop_device *dev = dev_to_vop(d);
- struct vop_driver *drv = drv_to_vop(dev->dev.driver);
-
- return drv->probe(dev);
-}
-
-static int vop_dev_remove(struct device *d)
-{
- struct vop_device *dev = dev_to_vop(d);
- struct vop_driver *drv = drv_to_vop(dev->dev.driver);
-
- drv->remove(dev);
- return 0;
-}
-
-static struct bus_type vop_bus = {
- .name = "vop_bus",
- .match = vop_dev_match,
- .dev_groups = vop_dev_groups,
- .uevent = vop_uevent,
- .probe = vop_dev_probe,
- .remove = vop_dev_remove,
-};
-
-int vop_register_driver(struct vop_driver *driver)
-{
- driver->driver.bus = &vop_bus;
- return driver_register(&driver->driver);
-}
-EXPORT_SYMBOL_GPL(vop_register_driver);
-
-void vop_unregister_driver(struct vop_driver *driver)
-{
- driver_unregister(&driver->driver);
-}
-EXPORT_SYMBOL_GPL(vop_unregister_driver);
-
-static void vop_release_dev(struct device *d)
-{
- struct vop_device *dev = dev_to_vop(d);
-
- kfree(dev);
-}
-
-struct vop_device *
-vop_register_device(struct device *pdev, int id,
- const struct dma_map_ops *dma_ops,
- struct vop_hw_ops *hw_ops, u8 dnode, struct mic_mw *aper,
- struct dma_chan *chan)
-{
- int ret;
- struct vop_device *vdev;
-
- vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
- if (!vdev)
- return ERR_PTR(-ENOMEM);
-
- vdev->dev.parent = pdev;
- vdev->id.device = id;
- vdev->id.vendor = VOP_DEV_ANY_ID;
- vdev->dev.dma_ops = dma_ops;
- vdev->dev.dma_mask = &vdev->dev.coherent_dma_mask;
- dma_set_mask(&vdev->dev, DMA_BIT_MASK(64));
- vdev->dev.release = vop_release_dev;
- vdev->hw_ops = hw_ops;
- vdev->dev.bus = &vop_bus;
- vdev->dnode = dnode;
- vdev->aper = aper;
- vdev->dma_ch = chan;
- vdev->index = dnode - 1;
- dev_set_name(&vdev->dev, "vop-dev%u", vdev->index);
- /*
- * device_register() causes the bus infrastructure to look for a
- * matching driver.
- */
- ret = device_register(&vdev->dev);
- if (ret)
- goto free_vdev;
- return vdev;
-free_vdev:
- put_device(&vdev->dev);
- return ERR_PTR(ret);
-}
-EXPORT_SYMBOL_GPL(vop_register_device);
-
-void vop_unregister_device(struct vop_device *dev)
-{
- device_unregister(&dev->dev);
-}
-EXPORT_SYMBOL_GPL(vop_unregister_device);
-
-static int __init vop_init(void)
-{
- return bus_register(&vop_bus);
-}
-
-static void __exit vop_exit(void)
-{
- bus_unregister(&vop_bus);
-}
-
-core_initcall(vop_init);
-module_exit(vop_exit);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_DESCRIPTION("Intel(R) VOP Bus driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/mic/bus/vop_bus.h b/drivers/misc/mic/bus/vop_bus.h
deleted file mode 100644
index 4fa02808c1e2..000000000000
--- a/drivers/misc/mic/bus/vop_bus.h
+++ /dev/null
@@ -1,129 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Intel Virtio over PCIe Bus driver.
- */
-#ifndef _VOP_BUS_H_
-#define _VOP_BUS_H_
-/*
- * Everything a vop driver needs to work with any particular vop
- * implementation.
- */
-#include <linux/dmaengine.h>
-#include <linux/interrupt.h>
-
-#include "../common/mic_dev.h"
-
-struct vop_device_id {
- u32 device;
- u32 vendor;
-};
-
-#define VOP_DEV_TRNSP 1
-#define VOP_DEV_ANY_ID 0xffffffff
-/*
- * Size of the internal buffer used during DMA's as an intermediate buffer
- * for copy to/from user. Must be an integral number of pages.
- */
-#define VOP_INT_DMA_BUF_SIZE PAGE_ALIGN(64 * 1024ULL)
-
-/**
- * vop_device - representation of a device using vop
- * @hw_ops: the hardware ops supported by this device.
- * @id: the device type identification (used to match it with a driver).
- * @dev: underlying device.
- * @dnode - The destination node which this device will communicate with.
- * @aper: Aperture memory window
- * @dma_ch - DMA channel
- * @index: unique position on the vop bus
- */
-struct vop_device {
- struct vop_hw_ops *hw_ops;
- struct vop_device_id id;
- struct device dev;
- u8 dnode;
- struct mic_mw *aper;
- struct dma_chan *dma_ch;
- int index;
-};
-
-/**
- * vop_driver - operations for a vop I/O driver
- * @driver: underlying device driver (populate name and owner).
- * @id_table: the ids serviced by this driver.
- * @probe: the function to call when a device is found. Returns 0 or -errno.
- * @remove: the function to call when a device is removed.
- */
-struct vop_driver {
- struct device_driver driver;
- const struct vop_device_id *id_table;
- int (*probe)(struct vop_device *dev);
- void (*remove)(struct vop_device *dev);
-};
-
-/**
- * vop_hw_ops - Hardware operations for accessing a VOP device on the VOP bus.
- *
- * @next_db: Obtain the next available doorbell.
- * @request_irq: Request an interrupt on a particular doorbell.
- * @free_irq: Free an interrupt requested previously.
- * @ack_interrupt: acknowledge an interrupt in the ISR.
- * @get_remote_dp: Get access to the virtio device page used by the remote
- * node to add/remove/configure virtio devices.
- * @get_dp: Get access to the virtio device page used by the self
- * node to add/remove/configure virtio devices.
- * @send_intr: Send an interrupt to the peer node on a specified doorbell.
- * @remap: Map a buffer with the specified DMA address and length.
- * @unmap: Unmap a buffer previously mapped.
- * @dma_filter: The DMA filter function to use for obtaining access to
- * a DMA channel on the peer node.
- */
-struct vop_hw_ops {
- int (*next_db)(struct vop_device *vpdev);
- struct mic_irq *(*request_irq)(struct vop_device *vpdev,
- irqreturn_t (*func)(int irq, void *data),
- const char *name, void *data,
- int intr_src);
- void (*free_irq)(struct vop_device *vpdev,
- struct mic_irq *cookie, void *data);
- void (*ack_interrupt)(struct vop_device *vpdev, int num);
- void __iomem * (*get_remote_dp)(struct vop_device *vpdev);
- void * (*get_dp)(struct vop_device *vpdev);
- void (*send_intr)(struct vop_device *vpdev, int db);
- void __iomem * (*remap)(struct vop_device *vpdev,
- dma_addr_t pa, size_t len);
- void (*unmap)(struct vop_device *vpdev, void __iomem *va);
-};
-
-struct vop_device *
-vop_register_device(struct device *pdev, int id,
- const struct dma_map_ops *dma_ops,
- struct vop_hw_ops *hw_ops, u8 dnode, struct mic_mw *aper,
- struct dma_chan *chan);
-void vop_unregister_device(struct vop_device *dev);
-int vop_register_driver(struct vop_driver *drv);
-void vop_unregister_driver(struct vop_driver *drv);
-
-/*
- * module_vop_driver() - Helper macro for drivers that don't do
- * anything special in module init/exit. This eliminates a lot of
- * boilerplate. Each module may only use this macro once, and
- * calling it replaces module_init() and module_exit()
- */
-#define module_vop_driver(__vop_driver) \
- module_driver(__vop_driver, vop_register_driver, \
- vop_unregister_driver)
-
-static inline struct vop_device *dev_to_vop(struct device *dev)
-{
- return container_of(dev, struct vop_device, dev);
-}
-
-static inline struct vop_driver *drv_to_vop(struct device_driver *drv)
-{
- return container_of(drv, struct vop_driver, driver);
-}
-#endif /* _VOP_BUS_H */
diff --git a/drivers/misc/mic/card/Makefile b/drivers/misc/mic/card/Makefile
deleted file mode 100644
index 921a7e7e0fbd..000000000000
--- a/drivers/misc/mic/card/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile - Intel MIC Linux driver.
-# Copyright(c) 2013, Intel Corporation.
-#
-ccflags-y += -DINTEL_MIC_CARD
-
-obj-$(CONFIG_INTEL_MIC_CARD) += mic_card.o
-mic_card-y += mic_x100.o
-mic_card-y += mic_device.o
-mic_card-y += mic_debugfs.o
diff --git a/drivers/misc/mic/card/mic_debugfs.c b/drivers/misc/mic/card/mic_debugfs.c
deleted file mode 100644
index 4c326e8f4d99..000000000000
--- a/drivers/misc/mic/card/mic_debugfs.c
+++ /dev/null
@@ -1,85 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * Disclaimer: The codes contained in these modules may be specific to
- * the Intel Software Development Platform codenamed: Knights Ferry, and
- * the Intel product codenamed: Knights Corner, and are not backward
- * compatible with other Intel products. Additionally, Intel will NOT
- * support the codes or instruction set in future products.
- *
- * Intel MIC Card driver.
- */
-#include <linux/debugfs.h>
-#include <linux/delay.h>
-#include <linux/seq_file.h>
-#include <linux/interrupt.h>
-#include <linux/device.h>
-
-#include "../common/mic_dev.h"
-#include "mic_device.h"
-
-/* Debugfs parent dir */
-static struct dentry *mic_dbg;
-
-/*
- * mic_intr_show - Send interrupts to host.
- */
-static int mic_intr_show(struct seq_file *s, void *unused)
-{
- struct mic_driver *mdrv = s->private;
- struct mic_device *mdev = &mdrv->mdev;
-
- mic_send_intr(mdev, 0);
- msleep(1000);
- mic_send_intr(mdev, 1);
- msleep(1000);
- mic_send_intr(mdev, 2);
- msleep(1000);
- mic_send_intr(mdev, 3);
- msleep(1000);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(mic_intr);
-
-/*
- * mic_create_card_debug_dir - Initialize MIC debugfs entries.
- */
-void __init mic_create_card_debug_dir(struct mic_driver *mdrv)
-{
- if (!mic_dbg)
- return;
-
- mdrv->dbg_dir = debugfs_create_dir(mdrv->name, mic_dbg);
-
- debugfs_create_file("intr_test", 0444, mdrv->dbg_dir, mdrv,
- &mic_intr_fops);
-}
-
-/*
- * mic_delete_card_debug_dir - Uninitialize MIC debugfs entries.
- */
-void mic_delete_card_debug_dir(struct mic_driver *mdrv)
-{
- debugfs_remove_recursive(mdrv->dbg_dir);
-}
-
-/*
- * mic_init_card_debugfs - Initialize global debugfs entry.
- */
-void __init mic_init_card_debugfs(void)
-{
- mic_dbg = debugfs_create_dir(KBUILD_MODNAME, NULL);
-}
-
-/*
- * mic_exit_card_debugfs - Uninitialize global debugfs entry
- */
-void mic_exit_card_debugfs(void)
-{
- debugfs_remove(mic_dbg);
-}
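DEFINE_SHOW_ATTRIBUTE(mic_intr) above is the stock helper from <linux/seq_file.h>; for readers unfamiliar with it, it expands to approximately the following open handler and file_operations, which is where the mic_intr_fops passed to debugfs_create_file() comes from:

static int mic_intr_open(struct inode *inode, struct file *file)
{
	/* Route reads of the debugfs file to mic_intr_show(). */
	return single_open(file, mic_intr_show, inode->i_private);
}

static const struct file_operations mic_intr_fops = {
	.owner	 = THIS_MODULE,
	.open	 = mic_intr_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};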
diff --git a/drivers/misc/mic/card/mic_device.c b/drivers/misc/mic/card/mic_device.c
deleted file mode 100644
index a15606259bdc..000000000000
--- a/drivers/misc/mic/card/mic_device.c
+++ /dev/null
@@ -1,417 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * Disclaimer: The codes contained in these modules may be specific to
- * the Intel Software Development Platform codenamed: Knights Ferry, and
- * the Intel product codenamed: Knights Corner, and are not backward
- * compatible with other Intel products. Additionally, Intel will NOT
- * support the codes or instruction set in future products.
- *
- * Intel MIC Card driver.
- */
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/reboot.h>
-#include <linux/dmaengine.h>
-#include <linux/kmod.h>
-
-#include <linux/mic_common.h>
-#include "../common/mic_dev.h"
-#include "mic_device.h"
-
-static struct mic_driver *g_drv;
-
-static int __init mic_dp_init(void)
-{
- struct mic_driver *mdrv = g_drv;
- struct mic_device *mdev = &mdrv->mdev;
- struct mic_bootparam __iomem *bootparam;
- u64 lo, hi, dp_dma_addr;
- u32 magic;
-
- lo = mic_read_spad(&mdrv->mdev, MIC_DPLO_SPAD);
- hi = mic_read_spad(&mdrv->mdev, MIC_DPHI_SPAD);
-
- dp_dma_addr = lo | (hi << 32);
- mdrv->dp = mic_card_map(mdev, dp_dma_addr, MIC_DP_SIZE);
- if (!mdrv->dp) {
- dev_err(mdrv->dev, "Cannot remap Aperture BAR\n");
- return -ENOMEM;
- }
- bootparam = mdrv->dp;
- magic = ioread32(&bootparam->magic);
-	if (magic != MIC_MAGIC) {
- dev_err(mdrv->dev, "bootparam magic mismatch 0x%x\n", magic);
- return -EIO;
- }
- return 0;
-}
-
-/* Uninitialize the device page */
-static void mic_dp_uninit(void)
-{
- mic_card_unmap(&g_drv->mdev, g_drv->dp);
-}
-
-/**
- * mic_request_card_irq - request an irq.
- *
- * @handler: interrupt handler passed to request_threaded_irq.
- * @thread_fn: thread fn. passed to request_threaded_irq.
- * @name: The ASCII name of the caller requesting the irq.
- * @data: private data that is passed back when the function
- * handler is called.
- * @index: The doorbell index of the requester.
- *
- * returns: The cookie that is transparent to the caller. Passed
- * back when calling mic_free_irq. An appropriate error code
- * is returned on failure. Caller needs to use IS_ERR(return_val)
- * to check for failure and PTR_ERR(return_val) to obtain the
- * error code.
- *
- */
-struct mic_irq *
-mic_request_card_irq(irq_handler_t handler,
- irq_handler_t thread_fn, const char *name,
- void *data, int index)
-{
- int rc = 0;
- unsigned long cookie;
- struct mic_driver *mdrv = g_drv;
-
- rc = request_threaded_irq(mic_db_to_irq(mdrv, index), handler,
- thread_fn, 0, name, data);
- if (rc) {
- dev_err(mdrv->dev, "request_threaded_irq failed rc = %d\n", rc);
- goto err;
- }
- mdrv->irq_info.irq_usage_count[index]++;
- cookie = index;
- return (struct mic_irq *)cookie;
-err:
- return ERR_PTR(rc);
-}
-
-/**
- * mic_free_card_irq - free irq.
- *
- * @cookie: cookie obtained during a successful call to
- * mic_request_card_irq
- * @data: private data specified by the calling function during the
- * mic_request_card_irq call
- *
- * returns: none.
- */
-void mic_free_card_irq(struct mic_irq *cookie, void *data)
-{
- int index;
- struct mic_driver *mdrv = g_drv;
-
- index = (unsigned long)cookie & 0xFFFFU;
- free_irq(mic_db_to_irq(mdrv, index), data);
- mdrv->irq_info.irq_usage_count[index]--;
-}
-
-/**
- * mic_next_card_db - Get the doorbell with minimum usage count.
- *
- * Returns the irq index.
- */
-int mic_next_card_db(void)
-{
- int i;
- int index = 0;
- struct mic_driver *mdrv = g_drv;
-
- for (i = 0; i < mdrv->intr_info.num_intr; i++) {
- if (mdrv->irq_info.irq_usage_count[i] <
- mdrv->irq_info.irq_usage_count[index])
- index = i;
- }
-
- return index;
-}
-
-/**
- * mic_init_irq - Initialize irq information.
- *
- * Returns 0 on success. Appropriate error code on failure.
- */
-static int mic_init_irq(void)
-{
- struct mic_driver *mdrv = g_drv;
-
- mdrv->irq_info.irq_usage_count = kzalloc((sizeof(u32) *
- mdrv->intr_info.num_intr),
- GFP_KERNEL);
- if (!mdrv->irq_info.irq_usage_count)
- return -ENOMEM;
- return 0;
-}
-
-/**
- * mic_uninit_irq - Uninitialize irq information.
- *
- * Returns: none.
- */
-static void mic_uninit_irq(void)
-{
- struct mic_driver *mdrv = g_drv;
-
- kfree(mdrv->irq_info.irq_usage_count);
-}
-
-static inline struct mic_driver *scdev_to_mdrv(struct scif_hw_dev *scdev)
-{
- return dev_get_drvdata(scdev->dev.parent);
-}
-
-static struct mic_irq *
-___mic_request_irq(struct scif_hw_dev *scdev,
- irqreturn_t (*func)(int irq, void *data),
- const char *name, void *data,
- int db)
-{
- return mic_request_card_irq(func, NULL, name, data, db);
-}
-
-static void
-___mic_free_irq(struct scif_hw_dev *scdev,
- struct mic_irq *cookie, void *data)
-{
- return mic_free_card_irq(cookie, data);
-}
-
-static void ___mic_ack_interrupt(struct scif_hw_dev *scdev, int num)
-{
- struct mic_driver *mdrv = scdev_to_mdrv(scdev);
-
- mic_ack_interrupt(&mdrv->mdev);
-}
-
-static int ___mic_next_db(struct scif_hw_dev *scdev)
-{
- return mic_next_card_db();
-}
-
-static void ___mic_send_intr(struct scif_hw_dev *scdev, int db)
-{
- struct mic_driver *mdrv = scdev_to_mdrv(scdev);
-
- mic_send_intr(&mdrv->mdev, db);
-}
-
-static void ___mic_send_p2p_intr(struct scif_hw_dev *scdev, int db,
- struct mic_mw *mw)
-{
- mic_send_p2p_intr(db, mw);
-}
-
-static void __iomem *
-___mic_ioremap(struct scif_hw_dev *scdev,
- phys_addr_t pa, size_t len)
-{
- struct mic_driver *mdrv = scdev_to_mdrv(scdev);
-
- return mic_card_map(&mdrv->mdev, pa, len);
-}
-
-static void ___mic_iounmap(struct scif_hw_dev *scdev, void __iomem *va)
-{
- struct mic_driver *mdrv = scdev_to_mdrv(scdev);
-
- mic_card_unmap(&mdrv->mdev, va);
-}
-
-static struct scif_hw_ops scif_hw_ops = {
- .request_irq = ___mic_request_irq,
- .free_irq = ___mic_free_irq,
- .ack_interrupt = ___mic_ack_interrupt,
- .next_db = ___mic_next_db,
- .send_intr = ___mic_send_intr,
- .send_p2p_intr = ___mic_send_p2p_intr,
- .remap = ___mic_ioremap,
- .unmap = ___mic_iounmap,
-};
-
-static inline struct mic_driver *vpdev_to_mdrv(struct vop_device *vpdev)
-{
- return dev_get_drvdata(vpdev->dev.parent);
-}
-
-static struct mic_irq *
-__mic_request_irq(struct vop_device *vpdev,
- irqreturn_t (*func)(int irq, void *data),
- const char *name, void *data, int intr_src)
-{
- return mic_request_card_irq(func, NULL, name, data, intr_src);
-}
-
-static void __mic_free_irq(struct vop_device *vpdev,
- struct mic_irq *cookie, void *data)
-{
- return mic_free_card_irq(cookie, data);
-}
-
-static void __mic_ack_interrupt(struct vop_device *vpdev, int num)
-{
- struct mic_driver *mdrv = vpdev_to_mdrv(vpdev);
-
- mic_ack_interrupt(&mdrv->mdev);
-}
-
-static int __mic_next_db(struct vop_device *vpdev)
-{
- return mic_next_card_db();
-}
-
-static void __iomem *__mic_get_remote_dp(struct vop_device *vpdev)
-{
- struct mic_driver *mdrv = vpdev_to_mdrv(vpdev);
-
- return mdrv->dp;
-}
-
-static void __mic_send_intr(struct vop_device *vpdev, int db)
-{
- struct mic_driver *mdrv = vpdev_to_mdrv(vpdev);
-
- mic_send_intr(&mdrv->mdev, db);
-}
-
-static void __iomem *__mic_ioremap(struct vop_device *vpdev,
- dma_addr_t pa, size_t len)
-{
- struct mic_driver *mdrv = vpdev_to_mdrv(vpdev);
-
- return mic_card_map(&mdrv->mdev, pa, len);
-}
-
-static void __mic_iounmap(struct vop_device *vpdev, void __iomem *va)
-{
- struct mic_driver *mdrv = vpdev_to_mdrv(vpdev);
-
- mic_card_unmap(&mdrv->mdev, va);
-}
-
-static struct vop_hw_ops vop_hw_ops = {
- .request_irq = __mic_request_irq,
- .free_irq = __mic_free_irq,
- .ack_interrupt = __mic_ack_interrupt,
- .next_db = __mic_next_db,
- .get_remote_dp = __mic_get_remote_dp,
- .send_intr = __mic_send_intr,
- .remap = __mic_ioremap,
- .unmap = __mic_iounmap,
-};
-
-static int mic_request_dma_chans(struct mic_driver *mdrv)
-{
- dma_cap_mask_t mask;
- struct dma_chan *chan;
-
- dma_cap_zero(mask);
- dma_cap_set(DMA_MEMCPY, mask);
-
- do {
- chan = dma_request_channel(mask, NULL, NULL);
- if (chan) {
- mdrv->dma_ch[mdrv->num_dma_ch++] = chan;
- if (mdrv->num_dma_ch >= MIC_MAX_DMA_CHAN)
- break;
- }
- } while (chan);
- dev_info(mdrv->dev, "DMA channels # %d\n", mdrv->num_dma_ch);
- return mdrv->num_dma_ch;
-}
-
-static void mic_free_dma_chans(struct mic_driver *mdrv)
-{
- int i = 0;
-
- for (i = 0; i < mdrv->num_dma_ch; i++) {
- dma_release_channel(mdrv->dma_ch[i]);
- mdrv->dma_ch[i] = NULL;
- }
- mdrv->num_dma_ch = 0;
-}
-
-/*
- * mic_driver_init - MIC driver initialization tasks.
- *
- * Returns 0 on success. Appropriate error code on failure.
- */
-int __init mic_driver_init(struct mic_driver *mdrv)
-{
- int rc;
- struct mic_bootparam __iomem *bootparam;
- u8 node_id;
-
- g_drv = mdrv;
- /* Unloading the card module is not supported. */
- if (!try_module_get(mdrv->dev->driver->owner)) {
- rc = -ENODEV;
- goto done;
- }
- rc = mic_dp_init();
- if (rc)
- goto put;
- rc = mic_init_irq();
- if (rc)
- goto dp_uninit;
- if (!mic_request_dma_chans(mdrv)) {
- rc = -ENODEV;
- goto irq_uninit;
- }
- mdrv->vpdev = vop_register_device(mdrv->dev, VOP_DEV_TRNSP,
- NULL, &vop_hw_ops, 0,
- NULL, mdrv->dma_ch[0]);
- if (IS_ERR(mdrv->vpdev)) {
- rc = PTR_ERR(mdrv->vpdev);
- goto dma_free;
- }
- bootparam = mdrv->dp;
- node_id = ioread8(&bootparam->node_id);
- mdrv->scdev = scif_register_device(mdrv->dev, MIC_SCIF_DEV,
- NULL, &scif_hw_ops,
- 0, node_id, &mdrv->mdev.mmio, NULL,
- NULL, mdrv->dp, mdrv->dma_ch,
- mdrv->num_dma_ch, true);
- if (IS_ERR(mdrv->scdev)) {
- rc = PTR_ERR(mdrv->scdev);
- goto vop_remove;
- }
- mic_create_card_debug_dir(mdrv);
-done:
- return rc;
-vop_remove:
- vop_unregister_device(mdrv->vpdev);
-dma_free:
- mic_free_dma_chans(mdrv);
-irq_uninit:
- mic_uninit_irq();
-dp_uninit:
- mic_dp_uninit();
-put:
- module_put(mdrv->dev->driver->owner);
- return rc;
-}
-
-/*
- * mic_driver_uninit - MIC driver uninitialization tasks.
- *
- * Returns None
- */
-void mic_driver_uninit(struct mic_driver *mdrv)
-{
- mic_delete_card_debug_dir(mdrv);
- scif_unregister_device(mdrv->scdev);
- vop_unregister_device(mdrv->vpdev);
- mic_free_dma_chans(mdrv);
- mic_uninit_irq();
- mic_dp_uninit();
- module_put(mdrv->dev->driver->owner);
-}
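A minimal sketch of the cookie protocol documented for mic_request_card_irq()/mic_free_card_irq() above; the handler, name and private data are hypothetical:

static irqreturn_t my_db_handler(int irq, void *data)
{
	/* data is the private pointer passed to mic_request_card_irq(). */
	return IRQ_HANDLED;
}

static int my_use_doorbell(void *priv)
{
	int db = mic_next_card_db();	/* least-used doorbell */
	struct mic_irq *cookie;

	cookie = mic_request_card_irq(my_db_handler, NULL, "my-db",
				      priv, db);
	if (IS_ERR(cookie))
		return PTR_ERR(cookie);

	/* ... use the doorbell ... */

	mic_free_card_irq(cookie, priv);
	return 0;
}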
diff --git a/drivers/misc/mic/card/mic_device.h b/drivers/misc/mic/card/mic_device.h
deleted file mode 100644
index d6cc69a235a3..000000000000
--- a/drivers/misc/mic/card/mic_device.h
+++ /dev/null
@@ -1,137 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * Disclaimer: The codes contained in these modules may be specific to
- * the Intel Software Development Platform codenamed: Knights Ferry, and
- * the Intel product codenamed: Knights Corner, and are not backward
- * compatible with other Intel products. Additionally, Intel will NOT
- * support the codes or instruction set in future products.
- *
- * Intel MIC Card driver.
- */
-#ifndef _MIC_CARD_DEVICE_H_
-#define _MIC_CARD_DEVICE_H_
-
-#include <linux/workqueue.h>
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/mic_bus.h>
-#include "../bus/scif_bus.h"
-#include "../bus/vop_bus.h"
-
-/**
- * struct mic_intr_info - Contains h/w specific interrupt sources info
- *
- * @num_intr: The number of irqs available
- */
-struct mic_intr_info {
- u32 num_intr;
-};
-
-/**
- * struct mic_irq_info - OS specific irq information
- *
- * @irq_usage_count: usage count array tracking the number of sources
- * assigned for each irq.
- */
-struct mic_irq_info {
- int *irq_usage_count;
-};
-
-/**
- * struct mic_device - MIC device information.
- *
- * @mmio: MMIO bar information.
- */
-struct mic_device {
- struct mic_mw mmio;
-};
-
-/**
- * struct mic_driver - MIC card driver information.
- *
- * @name: Name for MIC driver.
- * @dbg_dir: debugfs directory of this MIC device.
- * @dev: The device backing this MIC.
- * @dp: The pointer to the virtio device page.
- * @mdev: MIC device information for the host.
- * @hotplug_work: Hot plug work for adding/removing virtio devices.
- * @irq_info: The OS specific irq information
- * @intr_info: H/W specific interrupt information.
- * @dma_mbdev: dma device on the MIC virtual bus.
- * @dma_ch: Array of DMA channels
- * @num_dma_ch: Number of DMA channels available
- * @scdev: SCIF device on the SCIF virtual bus.
- * @vpdev: Virtio over PCIe device on the VOP virtual bus.
- */
-struct mic_driver {
- char name[20];
- struct dentry *dbg_dir;
- struct device *dev;
- void __iomem *dp;
- struct mic_device mdev;
- struct work_struct hotplug_work;
- struct mic_irq_info irq_info;
- struct mic_intr_info intr_info;
- struct mbus_device *dma_mbdev;
- struct dma_chan *dma_ch[MIC_MAX_DMA_CHAN];
- int num_dma_ch;
- struct scif_hw_dev *scdev;
- struct vop_device *vpdev;
-};
-
-/**
- * struct mic_irq - opaque pointer used as cookie
- */
-struct mic_irq;
-
-/**
- * mic_mmio_read - read from an MMIO register.
- * @mw: MMIO register base virtual address.
- * @offset: register offset.
- *
- * RETURNS: register value.
- */
-static inline u32 mic_mmio_read(struct mic_mw *mw, u32 offset)
-{
- return ioread32(mw->va + offset);
-}
-
-/**
- * mic_mmio_write - write to an MMIO register.
- * @mw: MMIO register base virtual address.
- * @val: the data value to put into the register
- * @offset: register offset.
- *
- * RETURNS: none.
- */
-static inline void
-mic_mmio_write(struct mic_mw *mw, u32 val, u32 offset)
-{
- iowrite32(val, mw->va + offset);
-}
-
-int mic_driver_init(struct mic_driver *mdrv);
-void mic_driver_uninit(struct mic_driver *mdrv);
-int mic_next_card_db(void);
-struct mic_irq *
-mic_request_card_irq(irq_handler_t handler, irq_handler_t thread_fn,
- const char *name, void *data, int db);
-void mic_free_card_irq(struct mic_irq *cookie, void *data);
-u32 mic_read_spad(struct mic_device *mdev, unsigned int idx);
-void mic_send_intr(struct mic_device *mdev, int doorbell);
-void mic_send_p2p_intr(int doorbell, struct mic_mw *mw);
-int mic_db_to_irq(struct mic_driver *mdrv, int db);
-u32 mic_ack_interrupt(struct mic_device *mdev);
-void mic_hw_intr_init(struct mic_driver *mdrv);
-void __iomem *
-mic_card_map(struct mic_device *mdev, dma_addr_t addr, size_t size);
-void mic_card_unmap(struct mic_device *mdev, void __iomem *addr);
-void __init mic_create_card_debug_dir(struct mic_driver *mdrv);
-void mic_delete_card_debug_dir(struct mic_driver *mdrv);
-void __init mic_init_card_debugfs(void);
-void mic_exit_card_debugfs(void);
-#endif
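As an illustration of the accessors above, a hypothetical read-modify-write of a card register through struct mic_mw; both the offset and the bit are placeholders, not real X100 registers:

static void my_set_ctrl_bit(struct mic_device *mdev)
{
	u32 offset = 0x1000;	/* hypothetical register offset */
	u32 val = mic_mmio_read(&mdev->mmio, offset);

	val |= BIT(0);		/* hypothetical control bit */
	mic_mmio_write(&mdev->mmio, val, offset);
}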
diff --git a/drivers/misc/mic/card/mic_x100.c b/drivers/misc/mic/card/mic_x100.c
deleted file mode 100644
index c8bff2916d3d..000000000000
--- a/drivers/misc/mic/card/mic_x100.c
+++ /dev/null
@@ -1,347 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * Disclaimer: The codes contained in these modules may be specific to
- * the Intel Software Development Platform codenamed: Knights Ferry, and
- * the Intel product codenamed: Knights Corner, and are not backward
- * compatible with other Intel products. Additionally, Intel will NOT
- * support the codes or instruction set in future products.
- *
- * Intel MIC Card driver.
- */
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/platform_device.h>
-
-#include "../common/mic_dev.h"
-#include "mic_device.h"
-#include "mic_x100.h"
-
-static const char mic_driver_name[] = "mic";
-
-static struct mic_driver g_drv;
-
-/**
- * mic_read_spad - read from the scratchpad register
- * @mdev: pointer to mic_device instance
- * @idx: index to scratchpad register, 0 based
- *
- * This function allows reading of the 32bit scratchpad register.
- *
- * RETURNS: The value read from the scratchpad register.
- */
-u32 mic_read_spad(struct mic_device *mdev, unsigned int idx)
-{
- return mic_mmio_read(&mdev->mmio,
- MIC_X100_SBOX_BASE_ADDRESS +
- MIC_X100_SBOX_SPAD0 + idx * 4);
-}
-
-/**
- * mic_send_intr - Send interrupt to Host.
- * @mdev: pointer to mic_device instance
- * @doorbell: Doorbell number.
- */
-void mic_send_intr(struct mic_device *mdev, int doorbell)
-{
- struct mic_mw *mw = &mdev->mmio;
-
- if (doorbell > MIC_X100_MAX_DOORBELL_IDX)
- return;
- /* Ensure that the interrupt is ordered w.r.t previous stores. */
- wmb();
- mic_mmio_write(mw, MIC_X100_SBOX_SDBIC0_DBREQ_BIT,
- MIC_X100_SBOX_BASE_ADDRESS +
- (MIC_X100_SBOX_SDBIC0 + (4 * doorbell)));
-}
-
-/*
- * mic_x100_send_sbox_intr - Send an MIC_X100_SBOX interrupt to MIC.
- */
-static void mic_x100_send_sbox_intr(struct mic_mw *mw, int doorbell)
-{
- u64 apic_icr_offset = MIC_X100_SBOX_APICICR0 + doorbell * 8;
- u32 apicicr_low = mic_mmio_read(mw, MIC_X100_SBOX_BASE_ADDRESS +
- apic_icr_offset);
-
- /* for MIC we need to make sure we "hit" the send_icr bit (13) */
- apicicr_low = (apicicr_low | (1 << 13));
- /*
- * Ensure that the interrupt is ordered w.r.t. previous stores
- * to main memory. Fence instructions are not implemented in X100
- * since execution is in order but a compiler barrier is still
- * required.
- */
- wmb();
- mic_mmio_write(mw, apicicr_low,
- MIC_X100_SBOX_BASE_ADDRESS + apic_icr_offset);
-}
-
-static void mic_x100_send_rdmasr_intr(struct mic_mw *mw, int doorbell)
-{
- int rdmasr_offset = MIC_X100_SBOX_RDMASR0 + (doorbell << 2);
- /*
- * Ensure that the interrupt is ordered w.r.t. previous stores
- * to main memory. Fence instructions are not implemented in X100
- * since execution is in order but a compiler barrier is still
- * required.
- */
- wmb();
- mic_mmio_write(mw, 0, MIC_X100_SBOX_BASE_ADDRESS + rdmasr_offset);
-}
-
-/**
- * mic_ack_interrupt - Device specific interrupt handling.
- * @mdev: pointer to mic_device instance
- *
- * Returns: bitmask of doorbell events triggered; always 0 on X100,
- * where no explicit acknowledgment is needed.
- */
-u32 mic_ack_interrupt(struct mic_device *mdev)
-{
- return 0;
-}
-
-static inline int mic_get_sbox_irq(int db)
-{
- return MIC_X100_IRQ_BASE + db;
-}
-
-static inline int mic_get_rdmasr_irq(int index)
-{
- return MIC_X100_RDMASR_IRQ_BASE + index;
-}
-
-void mic_send_p2p_intr(int db, struct mic_mw *mw)
-{
- int rdmasr_index;
-
- if (db < MIC_X100_NUM_SBOX_IRQ) {
- mic_x100_send_sbox_intr(mw, db);
- } else {
- rdmasr_index = db - MIC_X100_NUM_SBOX_IRQ;
- mic_x100_send_rdmasr_intr(mw, rdmasr_index);
- }
-}
-
-/**
- * mic_hw_intr_init - Initialize h/w specific interrupt
- * information.
- * @mdrv: pointer to mic_driver
- */
-void mic_hw_intr_init(struct mic_driver *mdrv)
-{
- mdrv->intr_info.num_intr = MIC_X100_NUM_SBOX_IRQ +
- MIC_X100_NUM_RDMASR_IRQ;
-}
-
-/**
- * mic_db_to_irq - Retrieve irq number corresponding to a doorbell.
- * @mdrv: pointer to mic_driver
- * @db: The doorbell obtained for which the irq is needed. Doorbell
- * may correspond to an sbox doorbell or an rdmasr index.
- *
- * Returns the irq corresponding to the doorbell.
- */
-int mic_db_to_irq(struct mic_driver *mdrv, int db)
-{
- int rdmasr_index;
-
- /*
-	 * The total number of doorbell interrupts on the card is 16. Indices
-	 * 0-7 fall in the SBOX category and 8-15 fall in the RDMASR category.
- */
- if (db < MIC_X100_NUM_SBOX_IRQ) {
- return mic_get_sbox_irq(db);
- } else {
- rdmasr_index = db - MIC_X100_NUM_SBOX_IRQ;
- return mic_get_rdmasr_irq(rdmasr_index);
- }
-}
-
-/*
- * mic_card_map - Allocate virtual address for a remote memory region.
- * @mdev: pointer to mic_device instance.
- * @addr: Remote DMA address.
- * @size: Size of the region.
- *
- * Returns: Virtual address backing the remote memory region.
- */
-void __iomem *
-mic_card_map(struct mic_device *mdev, dma_addr_t addr, size_t size)
-{
- return ioremap(addr, size);
-}
-
-/*
- * mic_card_unmap - Unmap the virtual address for a remote memory region.
- * @mdev: pointer to mic_device instance.
- * @addr: Virtual address for remote memory region.
- *
- * Returns: None.
- */
-void mic_card_unmap(struct mic_device *mdev, void __iomem *addr)
-{
- iounmap(addr);
-}
-
-static inline struct mic_driver *mbdev_to_mdrv(struct mbus_device *mbdev)
-{
- return dev_get_drvdata(mbdev->dev.parent);
-}
-
-static struct mic_irq *
-_mic_request_threaded_irq(struct mbus_device *mbdev,
- irq_handler_t handler, irq_handler_t thread_fn,
- const char *name, void *data, int intr_src)
-{
- int rc = 0;
- unsigned int irq = intr_src;
- unsigned long cookie = irq;
-
- rc = request_threaded_irq(irq, handler, thread_fn, 0, name, data);
- if (rc) {
- dev_err(mbdev_to_mdrv(mbdev)->dev,
- "request_threaded_irq failed rc = %d\n", rc);
- return ERR_PTR(rc);
- }
- return (struct mic_irq *)cookie;
-}
-
-static void _mic_free_irq(struct mbus_device *mbdev,
- struct mic_irq *cookie, void *data)
-{
- unsigned long irq = (unsigned long)cookie;
- free_irq(irq, data);
-}
-
-static void _mic_ack_interrupt(struct mbus_device *mbdev, int num)
-{
- mic_ack_interrupt(&mbdev_to_mdrv(mbdev)->mdev);
-}
-
-static struct mbus_hw_ops mbus_hw_ops = {
- .request_threaded_irq = _mic_request_threaded_irq,
- .free_irq = _mic_free_irq,
- .ack_interrupt = _mic_ack_interrupt,
-};
-
-static int __init mic_probe(struct platform_device *pdev)
-{
- struct mic_driver *mdrv = &g_drv;
- struct mic_device *mdev = &mdrv->mdev;
- int rc = 0;
-
- mdrv->dev = &pdev->dev;
-	snprintf(mdrv->name, sizeof(mdrv->name), "%s", mic_driver_name);
-
- /* FIXME: use dma_set_mask_and_coherent() and check result */
- dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
-
- mdev->mmio.pa = MIC_X100_MMIO_BASE;
- mdev->mmio.len = MIC_X100_MMIO_LEN;
- mdev->mmio.va = devm_ioremap(&pdev->dev, MIC_X100_MMIO_BASE,
- MIC_X100_MMIO_LEN);
- if (!mdev->mmio.va) {
- dev_err(&pdev->dev, "Cannot remap MMIO BAR\n");
- rc = -EIO;
- goto done;
- }
- mic_hw_intr_init(mdrv);
- platform_set_drvdata(pdev, mdrv);
- mdrv->dma_mbdev = mbus_register_device(mdrv->dev, MBUS_DEV_DMA_MIC,
- NULL, &mbus_hw_ops, 0,
- mdrv->mdev.mmio.va);
- if (IS_ERR(mdrv->dma_mbdev)) {
- rc = PTR_ERR(mdrv->dma_mbdev);
-		dev_err(&pdev->dev, "mbus_register_device failed rc %d\n", rc);
- goto done;
- }
- rc = mic_driver_init(mdrv);
- if (rc) {
- dev_err(&pdev->dev, "mic_driver_init failed rc %d\n", rc);
- goto remove_dma;
- }
-done:
- return rc;
-remove_dma:
- mbus_unregister_device(mdrv->dma_mbdev);
- return rc;
-}
-
-static int mic_remove(struct platform_device *pdev)
-{
- struct mic_driver *mdrv = &g_drv;
-
- mic_driver_uninit(mdrv);
- mbus_unregister_device(mdrv->dma_mbdev);
- return 0;
-}
-
-static void mic_platform_shutdown(struct platform_device *pdev)
-{
- mic_remove(pdev);
-}
-
-static struct platform_driver __refdata mic_platform_driver = {
- .probe = mic_probe,
- .remove = mic_remove,
- .shutdown = mic_platform_shutdown,
- .driver = {
- .name = mic_driver_name,
- },
-};
-
-static struct platform_device *mic_platform_dev;
-
-static int __init mic_init(void)
-{
- int ret;
- struct cpuinfo_x86 *c = &cpu_data(0);
-
- if (!(c->x86 == 11 && c->x86_model == 1)) {
- ret = -ENODEV;
- pr_err("%s not running on X100 ret %d\n", __func__, ret);
- goto done;
- }
-
- request_module("mic_x100_dma");
- mic_init_card_debugfs();
-
- mic_platform_dev = platform_device_register_simple(mic_driver_name,
- 0, NULL, 0);
- ret = PTR_ERR_OR_ZERO(mic_platform_dev);
- if (ret) {
-		pr_err("platform_device_register_simple ret %d\n", ret);
- goto cleanup_debugfs;
- }
- ret = platform_driver_register(&mic_platform_driver);
- if (ret) {
- pr_err("platform_driver_register ret %d\n", ret);
- goto device_unregister;
- }
- return ret;
-
-device_unregister:
- platform_device_unregister(mic_platform_dev);
-cleanup_debugfs:
- mic_exit_card_debugfs();
-done:
- return ret;
-}
-
-static void __exit mic_exit(void)
-{
- platform_driver_unregister(&mic_platform_driver);
- platform_device_unregister(mic_platform_dev);
- mic_exit_card_debugfs();
-}
-
-module_init(mic_init);
-module_exit(mic_exit);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_DESCRIPTION("Intel(R) MIC X100 Card driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/mic/card/mic_x100.h b/drivers/misc/mic/card/mic_x100.h
deleted file mode 100644
index 46644dde0c07..000000000000
--- a/drivers/misc/mic/card/mic_x100.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * Disclaimer: The codes contained in these modules may be specific to
- * the Intel Software Development Platform codenamed: Knights Ferry, and
- * the Intel product codenamed: Knights Corner, and are not backward
- * compatible with other Intel products. Additionally, Intel will NOT
- * support the codes or instruction set in future products.
- *
- * Intel MIC Card driver.
- */
-#ifndef _MIC_X100_CARD_H_
-#define _MIC_X100_CARD_H_
-
-#define MIC_X100_MMIO_BASE 0x08007C0000ULL
-#define MIC_X100_MMIO_LEN 0x00020000ULL
-#define MIC_X100_SBOX_BASE_ADDRESS 0x00010000ULL
-
-#define MIC_X100_SBOX_SPAD0 0x0000AB20
-#define MIC_X100_SBOX_SDBIC0 0x0000CC90
-#define MIC_X100_SBOX_SDBIC0_DBREQ_BIT 0x80000000
-#define MIC_X100_SBOX_RDMASR0 0x0000B180
-#define MIC_X100_SBOX_APICICR0 0x0000A9D0
-
-#define MIC_X100_MAX_DOORBELL_IDX 8
-
-#define MIC_X100_NUM_SBOX_IRQ 8
-#define MIC_X100_NUM_RDMASR_IRQ 8
-#define MIC_X100_SBOX_IRQ_BASE 0
-#define MIC_X100_RDMASR_IRQ_BASE 17
-
-#define MIC_X100_IRQ_BASE 26
-
-#endif
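Plugging these constants into mic_db_to_irq() from mic_x100.c gives, as a worked example:

/* Doorbells 0-7 are SBOX doorbells, 8-15 map onto RDMASR registers. */
int sbox_irq   = MIC_X100_IRQ_BASE + 3;			/* db 3  -> irq 29 */
int rdmasr_irq = MIC_X100_RDMASR_IRQ_BASE +
		 (10 - MIC_X100_NUM_SBOX_IRQ);		/* db 10 -> irq 19 */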
diff --git a/drivers/misc/mic/common/mic_dev.h b/drivers/misc/mic/common/mic_dev.h
deleted file mode 100644
index f94f08df0260..000000000000
--- a/drivers/misc/mic/common/mic_dev.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * Intel MIC driver.
- */
-#ifndef __MIC_DEV_H__
-#define __MIC_DEV_H__
-
-/* The maximum number of MIC devices supported in a single host system. */
-#define MIC_MAX_NUM_DEVS 128
-
-/**
- * enum mic_hw_family - The hardware family to which a device belongs.
- */
-enum mic_hw_family {
- MIC_FAMILY_X100 = 0,
- MIC_FAMILY_X200,
- MIC_FAMILY_UNKNOWN,
- MIC_FAMILY_LAST
-};
-
-/**
- * struct mic_mw - MIC memory window
- *
- * @pa: Base physical address.
- * @va: Base ioremap'd virtual address.
- * @len: Size of the memory window.
- */
-struct mic_mw {
- phys_addr_t pa;
- void __iomem *va;
- resource_size_t len;
-};
-
-/*
- * Scratch pad register offsets used by the host to communicate
- * device page DMA address to the card.
- */
-#define MIC_DPLO_SPAD 14
-#define MIC_DPHI_SPAD 15
-
-/*
- * These values are supposed to be in the config_change field of the
- * device page when the host sends a config change interrupt to the card.
- */
-#define MIC_VIRTIO_PARAM_DEV_REMOVE 0x1
-#define MIC_VIRTIO_PARAM_CONFIG_CHANGED 0x2
-
-/* Maximum number of DMA channels */
-#define MIC_MAX_DMA_CHAN 4
-
-#endif
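The MIC_DPLO_SPAD/MIC_DPHI_SPAD pair carries the 64-bit device page DMA address as two 32-bit halves; a condensed sketch of the card-side reassembly, mirroring mic_dp_init() in the card driver above (mic_read_spad() is declared in card/mic_device.h):

static u64 my_read_dp_addr(struct mic_device *mdev)
{
	u64 lo = mic_read_spad(mdev, MIC_DPLO_SPAD);	/* low 32 bits  */
	u64 hi = mic_read_spad(mdev, MIC_DPHI_SPAD);	/* high 32 bits */

	return lo | (hi << 32);
}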
diff --git a/drivers/misc/mic/cosm/Makefile b/drivers/misc/mic/cosm/Makefile
deleted file mode 100644
index 97d74cb12030..000000000000
--- a/drivers/misc/mic/cosm/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile - Intel MIC Coprocessor State Management (COSM) Driver
-# Copyright(c) 2015, Intel Corporation.
-#
-obj-$(CONFIG_MIC_COSM) += mic_cosm.o
-
-mic_cosm-objs := cosm_main.o
-mic_cosm-objs += cosm_debugfs.o
-mic_cosm-objs += cosm_sysfs.o
-mic_cosm-objs += cosm_scif_server.o
diff --git a/drivers/misc/mic/cosm/cosm_debugfs.c b/drivers/misc/mic/cosm/cosm_debugfs.c
deleted file mode 100644
index cb55653cf1f9..000000000000
--- a/drivers/misc/mic/cosm/cosm_debugfs.c
+++ /dev/null
@@ -1,116 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2015 Intel Corporation.
- *
- * Intel MIC Coprocessor State Management (COSM) Driver
- */
-
-#include <linux/debugfs.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-#include "cosm_main.h"
-
-/* Debugfs parent dir */
-static struct dentry *cosm_dbg;
-
-/*
- * log_buf_show - Display MIC kernel log buffer
- *
- * log_buf addr/len is read from System.map by user space
- * and populated in sysfs entries.
- */
-static int log_buf_show(struct seq_file *s, void *unused)
-{
- void __iomem *log_buf_va;
- int __iomem *log_buf_len_va;
- struct cosm_device *cdev = s->private;
- void *kva;
- int size;
- u64 aper_offset;
-
- if (!cdev || !cdev->log_buf_addr || !cdev->log_buf_len)
- goto done;
-
- mutex_lock(&cdev->cosm_mutex);
- switch (cdev->state) {
- case MIC_BOOTING:
- case MIC_ONLINE:
- case MIC_SHUTTING_DOWN:
- break;
- default:
- goto unlock;
- }
-
- /*
- * Card kernel will never be relocated and any kernel text/data mapping
- * can be translated to phys address by subtracting __START_KERNEL_map.
- */
- aper_offset = (u64)cdev->log_buf_len - __START_KERNEL_map;
- log_buf_len_va = cdev->hw_ops->aper(cdev)->va + aper_offset;
- aper_offset = (u64)cdev->log_buf_addr - __START_KERNEL_map;
- log_buf_va = cdev->hw_ops->aper(cdev)->va + aper_offset;
-
- size = ioread32(log_buf_len_va);
- kva = kmalloc(size, GFP_KERNEL);
- if (!kva)
- goto unlock;
-
- memcpy_fromio(kva, log_buf_va, size);
- seq_write(s, kva, size);
- kfree(kva);
-unlock:
- mutex_unlock(&cdev->cosm_mutex);
-done:
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(log_buf);
-
-/*
- * force_reset_show - Force MIC reset
- *
- * Invokes the force_reset COSM bus op instead of the standard reset
- * op in case a force reset of the MIC device is required
- */
-static int force_reset_show(struct seq_file *s, void *pos)
-{
- struct cosm_device *cdev = s->private;
-
- cosm_stop(cdev, true);
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(force_reset);
-
-void cosm_create_debug_dir(struct cosm_device *cdev)
-{
- char name[16];
-
- if (!cosm_dbg)
- return;
-
- scnprintf(name, sizeof(name), "mic%d", cdev->index);
- cdev->dbg_dir = debugfs_create_dir(name, cosm_dbg);
-
- debugfs_create_file("log_buf", 0444, cdev->dbg_dir, cdev,
- &log_buf_fops);
- debugfs_create_file("force_reset", 0444, cdev->dbg_dir, cdev,
- &force_reset_fops);
-}
-
-void cosm_delete_debug_dir(struct cosm_device *cdev)
-{
- debugfs_remove_recursive(cdev->dbg_dir);
-}
-
-void cosm_init_debugfs(void)
-{
- cosm_dbg = debugfs_create_dir(KBUILD_MODNAME, NULL);
-}
-
-void cosm_exit_debugfs(void)
-{
- debugfs_remove(cosm_dbg);
-}
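The address translation in log_buf_show() relies on the card kernel never being relocated; a condensed sketch of that computation, where aper() is the COSM bus op returning the host-side mapping of the card aperture:

/* Translate a card kernel virtual address into a host aperture pointer. */
static void __iomem *my_card_va_to_aper(struct cosm_device *cdev, u64 card_va)
{
	u64 aper_offset = card_va - __START_KERNEL_map;

	return cdev->hw_ops->aper(cdev)->va + aper_offset;
}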
diff --git a/drivers/misc/mic/cosm/cosm_main.c b/drivers/misc/mic/cosm/cosm_main.c
deleted file mode 100644
index ebb0eac43754..000000000000
--- a/drivers/misc/mic/cosm/cosm_main.c
+++ /dev/null
@@ -1,382 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2015 Intel Corporation.
- *
- * Intel MIC Coprocessor State Management (COSM) Driver
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/idr.h>
-#include <linux/slab.h>
-#include <linux/cred.h>
-#include "cosm_main.h"
-
-static const char cosm_driver_name[] = "mic";
-
-/* COSM ID allocator */
-static struct ida g_cosm_ida;
-/* Class of MIC devices for sysfs accessibility. */
-static struct class *g_cosm_class;
-/* Number of MIC devices */
-static atomic_t g_num_dev;
-
-/**
- * cosm_hw_reset - Issue a HW reset for the MIC device
- * @cdev: pointer to cosm_device instance
- * @force: force a MIC to reset even if it is already reset and ready
- */
-static void cosm_hw_reset(struct cosm_device *cdev, bool force)
-{
- int i;
-
-#define MIC_RESET_TO (45)
- if (force && cdev->hw_ops->force_reset)
- cdev->hw_ops->force_reset(cdev);
- else
- cdev->hw_ops->reset(cdev);
-
- for (i = 0; i < MIC_RESET_TO; i++) {
- if (cdev->hw_ops->ready(cdev)) {
- cosm_set_state(cdev, MIC_READY);
- return;
- }
- /*
-		 * Resets typically take tens of seconds to complete.
- * Since an MMIO read is required to check if the
- * firmware is ready or not, a 1 second delay works nicely.
- */
- msleep(1000);
- }
- cosm_set_state(cdev, MIC_RESET_FAILED);
-}
-
-/**
- * cosm_start - Start the MIC
- * @cdev: pointer to cosm_device instance
- *
- * This function prepares an MIC for boot and initiates boot.
- * RETURNS: An appropriate -ERRNO error value on error, or 0 for success.
- */
-int cosm_start(struct cosm_device *cdev)
-{
- const struct cred *orig_cred;
- struct cred *override_cred;
- int rc;
-
- mutex_lock(&cdev->cosm_mutex);
- if (!cdev->bootmode) {
- dev_err(&cdev->dev, "%s %d bootmode not set\n",
- __func__, __LINE__);
- rc = -EINVAL;
- goto unlock_ret;
- }
-retry:
- if (cdev->state != MIC_READY) {
- dev_err(&cdev->dev, "%s %d MIC state not READY\n",
- __func__, __LINE__);
- rc = -EINVAL;
- goto unlock_ret;
- }
- if (!cdev->hw_ops->ready(cdev)) {
- cosm_hw_reset(cdev, false);
- /*
- * The state will either be MIC_READY if the reset succeeded
- * or MIC_RESET_FAILED if the firmware reset failed.
- */
- goto retry;
- }
-
- /*
-	 * Set credentials to root to allow a non-root user to download
-	 * an initramfs with 600 permissions.
- */
- override_cred = prepare_creds();
- if (!override_cred) {
- dev_err(&cdev->dev, "%s %d prepare_creds failed\n",
- __func__, __LINE__);
- rc = -ENOMEM;
- goto unlock_ret;
- }
- override_cred->fsuid = GLOBAL_ROOT_UID;
- orig_cred = override_creds(override_cred);
-
- rc = cdev->hw_ops->start(cdev, cdev->index);
-
- revert_creds(orig_cred);
- put_cred(override_cred);
- if (rc)
- goto unlock_ret;
-
- /*
-	 * If Linux is being booted, the card is treated as 'online' only
-	 * when the SCIF interface on the card is up. If anything else
-	 * is booted, we set the card to 'online' immediately.
- */
- if (!strcmp(cdev->bootmode, "linux"))
- cosm_set_state(cdev, MIC_BOOTING);
- else
- cosm_set_state(cdev, MIC_ONLINE);
-unlock_ret:
- mutex_unlock(&cdev->cosm_mutex);
- if (rc)
- dev_err(&cdev->dev, "cosm_start failed rc %d\n", rc);
- return rc;
-}
-
-/**
- * cosm_stop - Prepare the MIC for reset and trigger reset
- * @cdev: pointer to cosm_device instance
- * @force: force a MIC to reset even if it is already reset and ready.
- *
- * RETURNS: None
- */
-void cosm_stop(struct cosm_device *cdev, bool force)
-{
- mutex_lock(&cdev->cosm_mutex);
- if (cdev->state != MIC_READY || force) {
- /*
- * Don't call hw_ops if they have been called previously.
- * stop(..) calls device_unregister and will crash the system if
- * called multiple times.
- */
- u8 state = cdev->state == MIC_RESETTING ?
- cdev->prev_state : cdev->state;
- bool call_hw_ops = state != MIC_RESET_FAILED &&
- state != MIC_READY;
-
- if (cdev->state != MIC_RESETTING)
- cosm_set_state(cdev, MIC_RESETTING);
- cdev->heartbeat_watchdog_enable = false;
- if (call_hw_ops)
- cdev->hw_ops->stop(cdev, force);
- cosm_hw_reset(cdev, force);
- cosm_set_shutdown_status(cdev, MIC_NOP);
- if (call_hw_ops && cdev->hw_ops->post_reset)
- cdev->hw_ops->post_reset(cdev, cdev->state);
- }
- mutex_unlock(&cdev->cosm_mutex);
- flush_work(&cdev->scif_work);
-}
-
-/**
- * cosm_reset_trigger_work - Trigger MIC reset
- * @work: The work structure
- *
- * This work is scheduled whenever the host wants to reset the MIC.
- */
-static void cosm_reset_trigger_work(struct work_struct *work)
-{
- struct cosm_device *cdev = container_of(work, struct cosm_device,
- reset_trigger_work);
- cosm_stop(cdev, false);
-}
-
-/**
- * cosm_reset - Schedule MIC reset
- * @cdev: pointer to cosm_device instance
- *
- * RETURNS: -EINVAL if the card is already READY, or 0 for success.
- */
-int cosm_reset(struct cosm_device *cdev)
-{
- int rc = 0;
-
- mutex_lock(&cdev->cosm_mutex);
- if (cdev->state != MIC_READY) {
- if (cdev->state != MIC_RESETTING) {
- cdev->prev_state = cdev->state;
- cosm_set_state(cdev, MIC_RESETTING);
- schedule_work(&cdev->reset_trigger_work);
- }
- } else {
- dev_err(&cdev->dev, "%s %d MIC is READY\n", __func__, __LINE__);
- rc = -EINVAL;
- }
- mutex_unlock(&cdev->cosm_mutex);
- return rc;
-}
-
-/**
- * cosm_shutdown - Initiate MIC shutdown.
- * @cdev: pointer to cosm_device instance
- *
- * RETURNS: An appropriate -ERRNO error value on error, or 0 for success.
- */
-int cosm_shutdown(struct cosm_device *cdev)
-{
- struct cosm_msg msg = { .id = COSM_MSG_SHUTDOWN };
- int rc = 0;
-
- mutex_lock(&cdev->cosm_mutex);
- if (cdev->state != MIC_ONLINE) {
- rc = -EINVAL;
- dev_err(&cdev->dev, "%s %d skipping shutdown in state: %s\n",
- __func__, __LINE__, cosm_state_string[cdev->state]);
- goto err;
- }
-
- if (!cdev->epd) {
- rc = -ENOTCONN;
- dev_err(&cdev->dev, "%s %d scif endpoint not connected rc %d\n",
- __func__, __LINE__, rc);
- goto err;
- }
-
- rc = scif_send(cdev->epd, &msg, sizeof(msg), SCIF_SEND_BLOCK);
- if (rc < 0) {
- dev_err(&cdev->dev, "%s %d scif_send failed rc %d\n",
- __func__, __LINE__, rc);
- goto err;
- }
- cdev->heartbeat_watchdog_enable = false;
- cosm_set_state(cdev, MIC_SHUTTING_DOWN);
- rc = 0;
-err:
- mutex_unlock(&cdev->cosm_mutex);
- return rc;
-}
-
-static int cosm_driver_probe(struct cosm_device *cdev)
-{
- int rc;
-
- /* Initialize SCIF server at first probe */
- if (atomic_add_return(1, &g_num_dev) == 1) {
- rc = cosm_scif_init();
- if (rc)
- goto scif_exit;
- }
- mutex_init(&cdev->cosm_mutex);
- INIT_WORK(&cdev->reset_trigger_work, cosm_reset_trigger_work);
- INIT_WORK(&cdev->scif_work, cosm_scif_work);
- cdev->sysfs_heartbeat_enable = true;
- cosm_sysfs_init(cdev);
- cdev->sdev = device_create_with_groups(g_cosm_class, cdev->dev.parent,
- MKDEV(0, cdev->index), cdev, cdev->attr_group,
- "mic%d", cdev->index);
- if (IS_ERR(cdev->sdev)) {
- rc = PTR_ERR(cdev->sdev);
- dev_err(&cdev->dev, "device_create_with_groups failed rc %d\n",
- rc);
- goto scif_exit;
- }
-
- cdev->state_sysfs = sysfs_get_dirent(cdev->sdev->kobj.sd,
- "state");
- if (!cdev->state_sysfs) {
- rc = -ENODEV;
- dev_err(&cdev->dev, "sysfs_get_dirent failed rc %d\n", rc);
- goto destroy_device;
- }
- cosm_create_debug_dir(cdev);
- return 0;
-destroy_device:
- device_destroy(g_cosm_class, MKDEV(0, cdev->index));
-scif_exit:
- if (atomic_dec_and_test(&g_num_dev))
- cosm_scif_exit();
- return rc;
-}
-
-static void cosm_driver_remove(struct cosm_device *cdev)
-{
- cosm_delete_debug_dir(cdev);
- sysfs_put(cdev->state_sysfs);
- device_destroy(g_cosm_class, MKDEV(0, cdev->index));
- flush_work(&cdev->reset_trigger_work);
- cosm_stop(cdev, false);
- if (atomic_dec_and_test(&g_num_dev))
- cosm_scif_exit();
-
-	/* These sysfs entries might have been allocated */
- kfree(cdev->cmdline);
- kfree(cdev->firmware);
- kfree(cdev->ramdisk);
- kfree(cdev->bootmode);
-}
-
-static int cosm_suspend(struct device *dev)
-{
- struct cosm_device *cdev = dev_to_cosm(dev);
-
- mutex_lock(&cdev->cosm_mutex);
- switch (cdev->state) {
-	/*
-	 * Suspend/freeze hooks in userspace have already shut down the card.
- * Card should be 'ready' in most cases. It is however possible that
- * some userspace application initiated a boot. In those cases, we
- * simply reset the card.
- */
- case MIC_ONLINE:
- case MIC_BOOTING:
- case MIC_SHUTTING_DOWN:
- mutex_unlock(&cdev->cosm_mutex);
- cosm_stop(cdev, false);
- break;
- default:
- mutex_unlock(&cdev->cosm_mutex);
- break;
- }
- return 0;
-}
-
-static const struct dev_pm_ops cosm_pm_ops = {
- .suspend = cosm_suspend,
- .freeze = cosm_suspend
-};
-
-static struct cosm_driver cosm_driver = {
- .driver = {
- .name = KBUILD_MODNAME,
- .owner = THIS_MODULE,
- .pm = &cosm_pm_ops,
- },
- .probe = cosm_driver_probe,
- .remove = cosm_driver_remove
-};
-
-static int __init cosm_init(void)
-{
- int ret;
-
- cosm_init_debugfs();
-
- g_cosm_class = class_create(THIS_MODULE, cosm_driver_name);
- if (IS_ERR(g_cosm_class)) {
- ret = PTR_ERR(g_cosm_class);
- pr_err("class_create failed ret %d\n", ret);
- goto cleanup_debugfs;
- }
-
- ida_init(&g_cosm_ida);
- ret = cosm_register_driver(&cosm_driver);
- if (ret) {
- pr_err("cosm_register_driver failed ret %d\n", ret);
- goto ida_destroy;
- }
- return 0;
-ida_destroy:
- ida_destroy(&g_cosm_ida);
- class_destroy(g_cosm_class);
-cleanup_debugfs:
- cosm_exit_debugfs();
- return ret;
-}
-
-static void __exit cosm_exit(void)
-{
- cosm_unregister_driver(&cosm_driver);
- ida_destroy(&g_cosm_ida);
- class_destroy(g_cosm_class);
- cosm_exit_debugfs();
-}
-
-module_init(cosm_init);
-module_exit(cosm_exit);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_DESCRIPTION("Intel(R) MIC Coprocessor State Management (COSM) Driver");
-MODULE_LICENSE("GPL v2");
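The class device created in cosm_driver_probe() exposes this state machine to userspace at /sys/class/mic/mic<N>/state, accepting the "boot", "reset" and "shutdown" commands parsed by state_store() in cosm_sysfs.c below. A small userspace sketch of driving it (path and error handling illustrative only):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int mic_set_state(const char *cmd)	/* "boot", "reset" or "shutdown" */
{
	int fd = open("/sys/class/mic/mic0/state", O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, cmd, strlen(cmd));
	close(fd);
	return n < 0 ? -1 : 0;
}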
diff --git a/drivers/misc/mic/cosm/cosm_main.h b/drivers/misc/mic/cosm/cosm_main.h
deleted file mode 100644
index 5188ad245814..000000000000
--- a/drivers/misc/mic/cosm/cosm_main.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2015 Intel Corporation.
- *
- * Intel MIC Coprocessor State Management (COSM) Driver
- */
-#ifndef _COSM_COSM_H_
-#define _COSM_COSM_H_
-
-#include <linux/scif.h>
-#include "../bus/cosm_bus.h"
-
-#define COSM_HEARTBEAT_SEND_SEC 30
-#define SCIF_COSM_LISTEN_PORT 201
-
-/**
- * enum cosm_msg_id - COSM message ids
- * @COSM_MSG_SHUTDOWN: host->card trigger shutdown
- * @COSM_MSG_SYNC_TIME: host->card send host time to card to sync time
- * @COSM_MSG_HEARTBEAT: card->host heartbeat
- * @COSM_MSG_SHUTDOWN_STATUS: card->host with shutdown status as payload
- */
-enum cosm_msg_id {
- COSM_MSG_SHUTDOWN,
- COSM_MSG_SYNC_TIME,
- COSM_MSG_HEARTBEAT,
- COSM_MSG_SHUTDOWN_STATUS,
-};
-
-struct cosm_msg {
- u64 id;
- union {
- u64 shutdown_status;
- struct {
- u64 tv_sec;
- u64 tv_nsec;
- } timespec;
- };
-};
-
-extern const char * const cosm_state_string[];
-extern const char * const cosm_shutdown_status_string[];
-
-void cosm_sysfs_init(struct cosm_device *cdev);
-int cosm_start(struct cosm_device *cdev);
-void cosm_stop(struct cosm_device *cdev, bool force);
-int cosm_reset(struct cosm_device *cdev);
-int cosm_shutdown(struct cosm_device *cdev);
-void cosm_set_state(struct cosm_device *cdev, u8 state);
-void cosm_set_shutdown_status(struct cosm_device *cdev, u8 status);
-void cosm_init_debugfs(void);
-void cosm_exit_debugfs(void);
-void cosm_create_debug_dir(struct cosm_device *cdev);
-void cosm_delete_debug_dir(struct cosm_device *cdev);
-int cosm_scif_init(void);
-void cosm_scif_exit(void);
-void cosm_scif_work(struct work_struct *work);
-
-#endif
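A sketch of how the card side would report its shutdown status with struct cosm_msg above; the endpoint is assumed to be already connected to the host's COSM server, and MIC_HALTED comes from enum mic_status in cosm_bus.h:

static int my_report_halted(scif_epd_t epd)
{
	struct cosm_msg msg = {
		.id = COSM_MSG_SHUTDOWN_STATUS,
		.shutdown_status = MIC_HALTED,	/* enum mic_status value */
	};

	return scif_send(epd, &msg, sizeof(msg), SCIF_SEND_BLOCK);
}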
diff --git a/drivers/misc/mic/cosm/cosm_scif_server.c b/drivers/misc/mic/cosm/cosm_scif_server.c
deleted file mode 100644
index 7baec9fd8756..000000000000
--- a/drivers/misc/mic/cosm/cosm_scif_server.c
+++ /dev/null
@@ -1,399 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2015 Intel Corporation.
- *
- * Intel MIC Coprocessor State Management (COSM) Driver
- */
-#include <linux/kthread.h>
-#include <linux/sched/signal.h>
-
-#include "cosm_main.h"
-
-/*
- * The COSM driver uses SCIF to communicate between the management node and the
- * MIC cards. SCIF is used to (a) Send a shutdown command to the card (b)
- * receive a shutdown status back from the card upon completion of shutdown and
- * (c) receive periodic heartbeat messages from the card used to deduce if the
- * card has crashed.
- *
- * A COSM server consisting of a SCIF listening endpoint waits for incoming
- * connections from the card. Upon acceptance of the connection, a separate
- * work-item is scheduled to handle SCIF message processing for that card. The
- * life-time of this work-item is therefore the time from which the connection
- * from a card is accepted to the time at which the connection is closed. A new
- * work-item starts each time the card boots and is alive till the card (a)
- * shuts down (b) is reset (c) crashes (d) cosm_client driver on the card is
- * unloaded.
- *
- * From the point of view of COSM interactions with SCIF during card
- * shutdown, reset and crash are as follows:
- *
- * Card shutdown
- * -------------
- * 1. COSM client on the card invokes orderly_poweroff() in response to SHUTDOWN
- * message from the host.
- * 2. Card driver shutdown callback invokes scif_unregister_device(..) resulting
- * in scif_remove(..) getting called on the card
- * 3. scif_remove -> scif_stop -> scif_handle_remove_node ->
- * scif_peer_unregister_device -> device_unregister for the host peer device
- * 4. During device_unregister remove(..) method of cosm_client is invoked which
- * closes the COSM SCIF endpoint on the card. This results in a SCIF_DISCNCT
- * message being sent to host SCIF. SCIF_DISCNCT message processing on the
- * host SCIF sets the host COSM SCIF endpoint state to DISCONNECTED and wakes
- * up the host COSM thread blocked in scif_poll(..) resulting in
- * scif_poll(..) returning EPOLLHUP.
- * 5. On the card, scif_peer_release_dev is next called which results in an
- * SCIF_EXIT message being sent to the host and after receiving the
- * SCIF_EXIT_ACK from the host the peer device teardown on the card is
- * complete.
- * 6. As part of the SCIF_EXIT message processing on the host, host sends a
- * SCIF_REMOVE_NODE to itself corresponding to the card being removed. This
- * starts a similar SCIF peer device teardown sequence on the host
- * corresponding to the card being shut down.
- *
- * Card reset
- * ----------
- * The case of interest here is when the card has not been previously shut down
- * since most of the steps below are skipped in that case:
- *
- * 1. cosm_stop(..) invokes hw_ops->stop(..) method of the base PCIe driver
- * which unregisters the SCIF HW device resulting in scif_remove(..) being
- * called on the host.
- * 2. scif_remove(..) calls scif_disconnect_node(..) which results in a
- * SCIF_EXIT message being sent to the card.
- * 3. The card executes scif_stop() as part of SCIF_EXIT message
- * processing. This results in the COSM endpoint on the card being closed and
- * the SCIF host peer device on the card getting unregistered similar to
- * steps 3, 4 and 5 for the card shutdown case above. scif_poll(..) on the
- * host returns EPOLLHUP as a result.
- * 4. On the host, card peer device unregister and SCIF HW remove(..) also
- * subsequently complete.
- *
- * Card crash
- * ----------
- * If a reset is issued after the card has crashed, there is no SCIF_DISCNCT
- * message from the card which would result in scif_poll(..) returning
- * EPOLLHUP. In this case when the host SCIF driver sends a SCIF_REMOVE_NODE
- * message to itself resulting in the card SCIF peer device being unregistered,
- * this results in a scif_peer_release_dev -> scif_cleanup_scifdev->
- * scif_invalidate_ep call sequence which sets the endpoint state to
- * DISCONNECTED and results in scif_poll(..) returning EPOLLHUP.
- */
-
-#define COSM_SCIF_BACKLOG 16
-#define COSM_HEARTBEAT_CHECK_DELTA_SEC 10
-#define COSM_HEARTBEAT_TIMEOUT_SEC \
- (COSM_HEARTBEAT_SEND_SEC + COSM_HEARTBEAT_CHECK_DELTA_SEC)
-#define COSM_HEARTBEAT_TIMEOUT_MSEC (COSM_HEARTBEAT_TIMEOUT_SEC * MSEC_PER_SEC)
-
-static struct task_struct *server_thread;
-static scif_epd_t listen_epd;
-
-/* Publish MIC card's shutdown status to user space MIC daemon */
-static void cosm_update_mic_status(struct cosm_device *cdev)
-{
- if (cdev->shutdown_status_int != MIC_NOP) {
- cosm_set_shutdown_status(cdev, cdev->shutdown_status_int);
- cdev->shutdown_status_int = MIC_NOP;
- }
-}
-
-/* Store MIC card's shutdown status internally when it is received */
-static void cosm_shutdown_status_int(struct cosm_device *cdev,
- enum mic_status shutdown_status)
-{
- switch (shutdown_status) {
- case MIC_HALTED:
- case MIC_POWER_OFF:
- case MIC_RESTART:
- case MIC_CRASHED:
- break;
- default:
- dev_err(&cdev->dev, "%s %d Unexpected shutdown_status %d\n",
- __func__, __LINE__, shutdown_status);
- return;
-	}
- cdev->shutdown_status_int = shutdown_status;
- cdev->heartbeat_watchdog_enable = false;
-
- if (cdev->state != MIC_SHUTTING_DOWN)
- cosm_set_state(cdev, MIC_SHUTTING_DOWN);
-}
-
-/* Non-blocking recv. Read and process all available messages */
-static void cosm_scif_recv(struct cosm_device *cdev)
-{
- struct cosm_msg msg;
- int rc;
-
- while (1) {
- rc = scif_recv(cdev->epd, &msg, sizeof(msg), 0);
- if (!rc) {
- break;
- } else if (rc < 0) {
- dev_dbg(&cdev->dev, "%s: %d rc %d\n",
- __func__, __LINE__, rc);
- break;
- }
- dev_dbg(&cdev->dev, "%s: %d rc %d id 0x%llx\n",
- __func__, __LINE__, rc, msg.id);
-
- switch (msg.id) {
- case COSM_MSG_SHUTDOWN_STATUS:
- cosm_shutdown_status_int(cdev, msg.shutdown_status);
- break;
- case COSM_MSG_HEARTBEAT:
- /* Nothing to do, heartbeat only unblocks scif_poll */
- break;
- default:
- dev_err(&cdev->dev, "%s: %d unknown msg.id %lld\n",
- __func__, __LINE__, msg.id);
- break;
- }
- }
-}
-
-/* Publish crashed status for this MIC card */
-static void cosm_set_crashed(struct cosm_device *cdev)
-{
- dev_err(&cdev->dev, "node alive timeout\n");
- cosm_shutdown_status_int(cdev, MIC_CRASHED);
- cosm_update_mic_status(cdev);
-}
-
-/* Send host time to the MIC card to sync system time between host and MIC */
-static void cosm_send_time(struct cosm_device *cdev)
-{
- struct cosm_msg msg = { .id = COSM_MSG_SYNC_TIME };
- struct timespec64 ts;
- int rc;
-
- ktime_get_real_ts64(&ts);
- msg.timespec.tv_sec = ts.tv_sec;
- msg.timespec.tv_nsec = ts.tv_nsec;
-
- rc = scif_send(cdev->epd, &msg, sizeof(msg), SCIF_SEND_BLOCK);
- if (rc < 0)
- dev_err(&cdev->dev, "%s %d scif_send failed rc %d\n",
- __func__, __LINE__, rc);
-}
-
-/*
- * Close this cosm_device's endpoint after its peer endpoint on the card has
- * been closed. In all cases except an MIC card crash, EPOLLHUP on the
- * host is triggered by the client's endpoint being closed.
- */
-static void cosm_scif_close(struct cosm_device *cdev)
-{
- /*
- * Because SHUTDOWN_STATUS message is sent by the MIC cards in the
- * reboot notifier when shutdown is still not complete, we notify mpssd
- * to reset the card when SCIF endpoint is closed.
- */
- cosm_update_mic_status(cdev);
- scif_close(cdev->epd);
- cdev->epd = NULL;
- dev_dbg(&cdev->dev, "%s %d\n", __func__, __LINE__);
-}
-
-/*
- * Set card state to ONLINE when a new SCIF connection from a MIC card is
- * received. Normally the state is BOOTING when the connection comes in, but can
- * be ONLINE if cosm_client driver on the card was unloaded and then reloaded.
- */
-static int cosm_set_online(struct cosm_device *cdev)
-{
- int rc = 0;
-
-	if (cdev->state == MIC_BOOTING || cdev->state == MIC_ONLINE) {
- cdev->heartbeat_watchdog_enable = cdev->sysfs_heartbeat_enable;
- cdev->epd = cdev->newepd;
- if (cdev->state == MIC_BOOTING)
- cosm_set_state(cdev, MIC_ONLINE);
- cosm_send_time(cdev);
- dev_dbg(&cdev->dev, "%s %d\n", __func__, __LINE__);
- } else {
- dev_warn(&cdev->dev, "%s %d not going online in state: %s\n",
- __func__, __LINE__, cosm_state_string[cdev->state]);
- rc = -EINVAL;
- }
- /* Drop reference acquired by bus_find_device in the server thread */
- put_device(&cdev->dev);
- return rc;
-}
-
-/*
- * Work function for handling work for a SCIF connection from a particular MIC
- * card. It first sets the card state to ONLINE and then calls scif_poll to
- * block on activity such as incoming messages on the SCIF endpoint. When the
- * endpoint is closed, the work function exits, completing its life cycle, from
- * MIC card boot to card shutdown/reset/crash.
- */
-void cosm_scif_work(struct work_struct *work)
-{
- struct cosm_device *cdev = container_of(work, struct cosm_device,
- scif_work);
- struct scif_pollepd pollepd;
- int rc;
-
- mutex_lock(&cdev->cosm_mutex);
- if (cosm_set_online(cdev))
- goto exit;
-
- while (1) {
- pollepd.epd = cdev->epd;
- pollepd.events = EPOLLIN;
-
- /* Drop the mutex before blocking in scif_poll(..) */
- mutex_unlock(&cdev->cosm_mutex);
- /* poll(..) with timeout on our endpoint */
- rc = scif_poll(&pollepd, 1, COSM_HEARTBEAT_TIMEOUT_MSEC);
- mutex_lock(&cdev->cosm_mutex);
- if (rc < 0) {
- dev_err(&cdev->dev, "%s %d scif_poll rc %d\n",
- __func__, __LINE__, rc);
- continue;
- }
-
- /* There is a message from the card */
- if (pollepd.revents & EPOLLIN)
- cosm_scif_recv(cdev);
-
- /* The peer endpoint is closed or this endpoint disconnected */
- if (pollepd.revents & EPOLLHUP) {
- cosm_scif_close(cdev);
- break;
- }
-
- /* Did we timeout from poll? */
- if (!rc && cdev->heartbeat_watchdog_enable)
- cosm_set_crashed(cdev);
- }
-exit:
- dev_dbg(&cdev->dev, "%s %d exiting\n", __func__, __LINE__);
- mutex_unlock(&cdev->cosm_mutex);
-}
-
-/*
- * COSM SCIF server thread function. Accepts incoming SCIF connections from MIC
- * cards, finds the correct cosm_device to associate that connection with and
- * schedules individual work items for each MIC card.
- */
-static int cosm_scif_server(void *unused)
-{
- struct cosm_device *cdev;
- scif_epd_t newepd;
- struct scif_port_id port_id;
- int rc;
-
- allow_signal(SIGKILL);
-
- while (!kthread_should_stop()) {
- rc = scif_accept(listen_epd, &port_id, &newepd,
- SCIF_ACCEPT_SYNC);
- if (rc < 0) {
-			if (rc != -ERESTARTSYS)
- pr_err("%s %d rc %d\n", __func__, __LINE__, rc);
- continue;
- }
-
- /*
- * Associate the incoming connection with a particular
- * cosm_device, COSM device ID == SCIF node ID - 1
- */
- cdev = cosm_find_cdev_by_id(port_id.node - 1);
- if (!cdev)
- continue;
- cdev->newepd = newepd;
- schedule_work(&cdev->scif_work);
- }
-
- pr_debug("%s %d Server thread stopped\n", __func__, __LINE__);
- return 0;
-}
-
-static int cosm_scif_listen(void)
-{
- int rc;
-
- listen_epd = scif_open();
- if (!listen_epd) {
- pr_err("%s %d scif_open failed\n", __func__, __LINE__);
- return -ENOMEM;
- }
-
- rc = scif_bind(listen_epd, SCIF_COSM_LISTEN_PORT);
- if (rc < 0) {
- pr_err("%s %d scif_bind failed rc %d\n",
- __func__, __LINE__, rc);
- goto err;
- }
-
- rc = scif_listen(listen_epd, COSM_SCIF_BACKLOG);
- if (rc < 0) {
- pr_err("%s %d scif_listen rc %d\n", __func__, __LINE__, rc);
- goto err;
- }
- pr_debug("%s %d listen_epd set up\n", __func__, __LINE__);
- return 0;
-err:
- scif_close(listen_epd);
- listen_epd = NULL;
- return rc;
-}
-
-static void cosm_scif_listen_exit(void)
-{
- pr_debug("%s %d closing listen_epd\n", __func__, __LINE__);
- if (listen_epd) {
- scif_close(listen_epd);
- listen_epd = NULL;
- }
-}
-
-/*
- * Create a listening SCIF endpoint and a server kthread which accepts incoming
- * SCIF connections from MIC cards
- */
-int cosm_scif_init(void)
-{
- int rc = cosm_scif_listen();
-
- if (rc) {
- pr_err("%s %d cosm_scif_listen rc %d\n",
- __func__, __LINE__, rc);
- goto err;
- }
-
- server_thread = kthread_run(cosm_scif_server, NULL, "cosm_server");
- if (IS_ERR(server_thread)) {
- rc = PTR_ERR(server_thread);
- pr_err("%s %d kthread_run rc %d\n", __func__, __LINE__, rc);
- goto listen_exit;
- }
- return 0;
-listen_exit:
- cosm_scif_listen_exit();
-err:
- return rc;
-}
-
-/* Stop the running server thread and close the listening SCIF endpoint */
-void cosm_scif_exit(void)
-{
- int rc;
-
- if (!IS_ERR_OR_NULL(server_thread)) {
- rc = send_sig(SIGKILL, server_thread, 0);
- if (rc) {
- pr_err("%s %d send_sig rc %d\n",
- __func__, __LINE__, rc);
- return;
- }
- kthread_stop(server_thread);
- }
-
- cosm_scif_listen_exit();
-}
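
The stop sequence above is the standard idiom for killing a kthread that blocks in an interruptible call (here scif_accept()): the thread opts in with allow_signal(SIGKILL), the stopper sends SIGKILL to break it out of the blocking call, and kthread_stop() then waits for it to exit. A minimal sketch of the idiom, with hypothetical names (worker_fn, stop_worker):

#include <linux/kthread.h>
#include <linux/sched/signal.h>

static int worker_fn(void *arg)
{
	allow_signal(SIGKILL);		/* let SIGKILL interrupt blocking calls */
	while (!kthread_should_stop()) {
		/* blocking, signal-interruptible call goes here */
	}
	return 0;
}

static void stop_worker(struct task_struct *task)
{
	send_sig(SIGKILL, task, 0);	/* knock the thread out of its wait */
	kthread_stop(task);		/* then reap it */
}
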
diff --git a/drivers/misc/mic/cosm/cosm_sysfs.c b/drivers/misc/mic/cosm/cosm_sysfs.c
deleted file mode 100644
index e6dac967c1af..000000000000
--- a/drivers/misc/mic/cosm/cosm_sysfs.c
+++ /dev/null
@@ -1,449 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2015 Intel Corporation.
- *
- * Intel MIC Coprocessor State Management (COSM) Driver
- */
-#include <linux/slab.h>
-#include "cosm_main.h"
-
-/*
- * A state-to-string lookup table, for exposing a human-readable state
- * via sysfs. Always keep in sync with enum cosm_states.
- */
-const char * const cosm_state_string[] = {
- [MIC_READY] = "ready",
- [MIC_BOOTING] = "booting",
- [MIC_ONLINE] = "online",
- [MIC_SHUTTING_DOWN] = "shutting_down",
- [MIC_RESETTING] = "resetting",
- [MIC_RESET_FAILED] = "reset_failed",
-};
-
-/*
- * A shutdown-status-to-string lookup table, for exposing a human-readable
- * status via sysfs. Always keep in sync with enum cosm_shutdown_status.
- */
-const char * const cosm_shutdown_status_string[] = {
- [MIC_NOP] = "nop",
- [MIC_CRASHED] = "crashed",
- [MIC_HALTED] = "halted",
- [MIC_POWER_OFF] = "poweroff",
- [MIC_RESTART] = "restart",
-};
-
-void cosm_set_shutdown_status(struct cosm_device *cdev, u8 shutdown_status)
-{
- dev_dbg(&cdev->dev, "Shutdown Status %s -> %s\n",
- cosm_shutdown_status_string[cdev->shutdown_status],
- cosm_shutdown_status_string[shutdown_status]);
- cdev->shutdown_status = shutdown_status;
-}
-
-void cosm_set_state(struct cosm_device *cdev, u8 state)
-{
- dev_dbg(&cdev->dev, "State %s -> %s\n",
- cosm_state_string[cdev->state],
- cosm_state_string[state]);
- cdev->state = state;
- sysfs_notify_dirent(cdev->state_sysfs);
-}
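
sysfs_notify_dirent() above is what makes the state attribute pollable: userspace can block in poll(2) and wake on every transition. A hedged userspace sketch (the /sys path is illustrative; the actual node depends on device naming):

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[32];
	int fd = open("/sys/class/mic/mic0/state", O_RDONLY); /* illustrative */
	struct pollfd pfd = { .fd = fd, .events = POLLPRI };
	ssize_t n;

	if (fd < 0)
		return 1;
	for (;;) {
		(void)pread(fd, buf, sizeof(buf) - 1, 0); /* arm notification */
		if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLPRI)) {
			n = pread(fd, buf, sizeof(buf) - 1, 0);
			if (n > 0) {
				buf[n] = '\0';
				printf("state: %s", buf);
			}
		}
	}
}
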
-
-static ssize_t
-family_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct cosm_device *cdev = dev_get_drvdata(dev);
-
- if (!cdev)
- return -EINVAL;
-
- return cdev->hw_ops->family(cdev, buf);
-}
-static DEVICE_ATTR_RO(family);
-
-static ssize_t
-stepping_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct cosm_device *cdev = dev_get_drvdata(dev);
-
- if (!cdev)
- return -EINVAL;
-
- return cdev->hw_ops->stepping(cdev, buf);
-}
-static DEVICE_ATTR_RO(stepping);
-
-static ssize_t
-state_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct cosm_device *cdev = dev_get_drvdata(dev);
-
- if (!cdev || cdev->state >= MIC_LAST)
- return -EINVAL;
-
- return scnprintf(buf, PAGE_SIZE, "%s\n",
- cosm_state_string[cdev->state]);
-}
-
-static ssize_t
-state_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cosm_device *cdev = dev_get_drvdata(dev);
- int rc;
-
- if (!cdev)
- return -EINVAL;
-
- if (sysfs_streq(buf, "boot")) {
- rc = cosm_start(cdev);
- goto done;
- }
- if (sysfs_streq(buf, "reset")) {
- rc = cosm_reset(cdev);
- goto done;
- }
-
- if (sysfs_streq(buf, "shutdown")) {
- rc = cosm_shutdown(cdev);
- goto done;
- }
- rc = -EINVAL;
-done:
- if (rc)
- count = rc;
- return count;
-}
-static DEVICE_ATTR_RW(state);
-
-static ssize_t shutdown_status_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct cosm_device *cdev = dev_get_drvdata(dev);
-
- if (!cdev || cdev->shutdown_status >= MIC_STATUS_LAST)
- return -EINVAL;
-
- return scnprintf(buf, PAGE_SIZE, "%s\n",
- cosm_shutdown_status_string[cdev->shutdown_status]);
-}
-static DEVICE_ATTR_RO(shutdown_status);
-
-static ssize_t
-heartbeat_enable_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct cosm_device *cdev = dev_get_drvdata(dev);
-
- if (!cdev)
- return -EINVAL;
-
- return scnprintf(buf, PAGE_SIZE, "%d\n", cdev->sysfs_heartbeat_enable);
-}
-
-static ssize_t
-heartbeat_enable_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cosm_device *cdev = dev_get_drvdata(dev);
- int enable;
- int ret;
-
- if (!cdev)
- return -EINVAL;
-
- mutex_lock(&cdev->cosm_mutex);
- ret = kstrtoint(buf, 10, &enable);
- if (ret)
- goto unlock;
-
- cdev->sysfs_heartbeat_enable = enable;
- /* if state is not online, cdev->heartbeat_watchdog_enable is 0 */
- if (cdev->state == MIC_ONLINE)
- cdev->heartbeat_watchdog_enable = enable;
- ret = count;
-unlock:
- mutex_unlock(&cdev->cosm_mutex);
- return ret;
-}
-static DEVICE_ATTR_RW(heartbeat_enable);
-
-static ssize_t
-cmdline_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct cosm_device *cdev = dev_get_drvdata(dev);
- char *cmdline;
-
- if (!cdev)
- return -EINVAL;
-
- cmdline = cdev->cmdline;
-
- if (cmdline)
- return scnprintf(buf, PAGE_SIZE, "%s\n", cmdline);
- return 0;
-}
-
-static ssize_t
-cmdline_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cosm_device *cdev = dev_get_drvdata(dev);
-
- if (!cdev)
- return -EINVAL;
-
- mutex_lock(&cdev->cosm_mutex);
- kfree(cdev->cmdline);
-
- cdev->cmdline = kmalloc(count + 1, GFP_KERNEL);
- if (!cdev->cmdline) {
- count = -ENOMEM;
- goto unlock;
- }
-
- strncpy(cdev->cmdline, buf, count);
-
- if (cdev->cmdline[count - 1] == '\n')
- cdev->cmdline[count - 1] = '\0';
- else
- cdev->cmdline[count] = '\0';
-unlock:
- mutex_unlock(&cdev->cosm_mutex);
- return count;
-}
-static DEVICE_ATTR_RW(cmdline);
-
-static ssize_t
-firmware_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct cosm_device *cdev = dev_get_drvdata(dev);
- char *firmware;
-
- if (!cdev)
- return -EINVAL;
-
- firmware = cdev->firmware;
-
- if (firmware)
- return scnprintf(buf, PAGE_SIZE, "%s\n", firmware);
- return 0;
-}
-
-static ssize_t
-firmware_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cosm_device *cdev = dev_get_drvdata(dev);
-
- if (!cdev)
- return -EINVAL;
-
- mutex_lock(&cdev->cosm_mutex);
- kfree(cdev->firmware);
-
- cdev->firmware = kmalloc(count + 1, GFP_KERNEL);
- if (!cdev->firmware) {
- count = -ENOMEM;
- goto unlock;
- }
- strncpy(cdev->firmware, buf, count);
-
- if (cdev->firmware[count - 1] == '\n')
- cdev->firmware[count - 1] = '\0';
- else
- cdev->firmware[count] = '\0';
-unlock:
- mutex_unlock(&cdev->cosm_mutex);
- return count;
-}
-static DEVICE_ATTR_RW(firmware);
-
-static ssize_t
-ramdisk_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct cosm_device *cdev = dev_get_drvdata(dev);
- char *ramdisk;
-
- if (!cdev)
- return -EINVAL;
-
- ramdisk = cdev->ramdisk;
-
- if (ramdisk)
- return scnprintf(buf, PAGE_SIZE, "%s\n", ramdisk);
- return 0;
-}
-
-static ssize_t
-ramdisk_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cosm_device *cdev = dev_get_drvdata(dev);
-
- if (!cdev)
- return -EINVAL;
-
- mutex_lock(&cdev->cosm_mutex);
- kfree(cdev->ramdisk);
-
- cdev->ramdisk = kmalloc(count + 1, GFP_KERNEL);
- if (!cdev->ramdisk) {
- count = -ENOMEM;
- goto unlock;
- }
-
- strncpy(cdev->ramdisk, buf, count);
-
- if (cdev->ramdisk[count - 1] == '\n')
- cdev->ramdisk[count - 1] = '\0';
- else
- cdev->ramdisk[count] = '\0';
-unlock:
- mutex_unlock(&cdev->cosm_mutex);
- return count;
-}
-static DEVICE_ATTR_RW(ramdisk);
-
-static ssize_t
-bootmode_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct cosm_device *cdev = dev_get_drvdata(dev);
- char *bootmode;
-
- if (!cdev)
- return -EINVAL;
-
- bootmode = cdev->bootmode;
-
- if (bootmode)
- return scnprintf(buf, PAGE_SIZE, "%s\n", bootmode);
- return 0;
-}
-
-static ssize_t
-bootmode_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cosm_device *cdev = dev_get_drvdata(dev);
-
- if (!cdev)
- return -EINVAL;
-
- if (!sysfs_streq(buf, "linux") && !sysfs_streq(buf, "flash"))
- return -EINVAL;
-
- mutex_lock(&cdev->cosm_mutex);
- kfree(cdev->bootmode);
-
- cdev->bootmode = kmalloc(count + 1, GFP_KERNEL);
- if (!cdev->bootmode) {
- count = -ENOMEM;
- goto unlock;
- }
-
- strncpy(cdev->bootmode, buf, count);
-
- if (cdev->bootmode[count - 1] == '\n')
- cdev->bootmode[count - 1] = '\0';
- else
- cdev->bootmode[count] = '\0';
-unlock:
- mutex_unlock(&cdev->cosm_mutex);
- return count;
-}
-static DEVICE_ATTR_RW(bootmode);
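
cmdline_store(), firmware_store(), ramdisk_store() and bootmode_store() repeat the same allocate/copy/trim sequence. A hedged consolidation sketch (cosm_store_string() is a hypothetical helper, not part of the driver) that also allocates before assigning, so the old value is only replaced when the allocation succeeds (the original simply leaves the field NULL on failure):

/* Hypothetical helper folding the repeated store pattern into one place;
 * caller holds cdev->cosm_mutex.
 */
static int cosm_store_string(struct cosm_device *cdev, char **field,
			     const char *buf, size_t count)
{
	char *str = kmalloc(count + 1, GFP_KERNEL);

	if (!str)
		return -ENOMEM;

	memcpy(str, buf, count);
	/* sysfs writes usually end in a newline; trim it */
	if (count && str[count - 1] == '\n')
		str[count - 1] = '\0';
	else
		str[count] = '\0';
	kfree(*field);
	*field = str;
	return 0;
}

Each *_store() would then reduce to taking the mutex and calling, e.g., cosm_store_string(cdev, &cdev->cmdline, buf, count).
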
-
-static ssize_t
-log_buf_addr_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct cosm_device *cdev = dev_get_drvdata(dev);
-
- if (!cdev)
- return -EINVAL;
-
- return scnprintf(buf, PAGE_SIZE, "%p\n", cdev->log_buf_addr);
-}
-
-static ssize_t
-log_buf_addr_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cosm_device *cdev = dev_get_drvdata(dev);
- int ret;
- unsigned long addr;
-
- if (!cdev)
- return -EINVAL;
-
- ret = kstrtoul(buf, 16, &addr);
- if (ret)
- goto exit;
-
- cdev->log_buf_addr = (void *)addr;
- ret = count;
-exit:
- return ret;
-}
-static DEVICE_ATTR_RW(log_buf_addr);
-
-static ssize_t
-log_buf_len_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct cosm_device *cdev = dev_get_drvdata(dev);
-
- if (!cdev)
- return -EINVAL;
-
- return scnprintf(buf, PAGE_SIZE, "%p\n", cdev->log_buf_len);
-}
-
-static ssize_t
-log_buf_len_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cosm_device *cdev = dev_get_drvdata(dev);
- int ret;
- unsigned long addr;
-
- if (!cdev)
- return -EINVAL;
-
- ret = kstrtoul(buf, 16, &addr);
- if (ret)
- goto exit;
-
- cdev->log_buf_len = (int *)addr;
- ret = count;
-exit:
- return ret;
-}
-static DEVICE_ATTR_RW(log_buf_len);
-
-static struct attribute *cosm_default_attrs[] = {
- &dev_attr_family.attr,
- &dev_attr_stepping.attr,
- &dev_attr_state.attr,
- &dev_attr_shutdown_status.attr,
- &dev_attr_heartbeat_enable.attr,
- &dev_attr_cmdline.attr,
- &dev_attr_firmware.attr,
- &dev_attr_ramdisk.attr,
- &dev_attr_bootmode.attr,
- &dev_attr_log_buf_addr.attr,
- &dev_attr_log_buf_len.attr,
-
- NULL
-};
-
-ATTRIBUTE_GROUPS(cosm_default);
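
For reference, ATTRIBUTE_GROUPS(cosm_default) builds the group wrappers from the attribute array above; it expands to roughly:

static const struct attribute_group cosm_default_group = {
	.attrs = cosm_default_attrs,
};

static const struct attribute_group *cosm_default_groups[] = {
	&cosm_default_group,
	NULL,
};
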
-
-void cosm_sysfs_init(struct cosm_device *cdev)
-{
- cdev->attr_group = cosm_default_groups;
-}
diff --git a/drivers/misc/mic/cosm_client/Makefile b/drivers/misc/mic/cosm_client/Makefile
deleted file mode 100644
index 5b62270bc2ab..000000000000
--- a/drivers/misc/mic/cosm_client/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile - Intel MIC COSM Client Driver
-# Copyright(c) 2015, Intel Corporation.
-#
-obj-$(CONFIG_MIC_COSM) += cosm_client.o
-
-cosm_client-objs += cosm_scif_client.o
diff --git a/drivers/misc/mic/cosm_client/cosm_scif_client.c b/drivers/misc/mic/cosm_client/cosm_scif_client.c
deleted file mode 100644
index a03213dd9319..000000000000
--- a/drivers/misc/mic/cosm_client/cosm_scif_client.c
+++ /dev/null
@@ -1,269 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2015 Intel Corporation.
- *
- * Intel MIC COSM Client Driver
- */
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/reboot.h>
-#include <linux/kthread.h>
-#include <linux/sched/signal.h>
-
-#include "../cosm/cosm_main.h"
-
-#define COSM_SCIF_MAX_RETRIES 10
-#define COSM_HEARTBEAT_SEND_MSEC (COSM_HEARTBEAT_SEND_SEC * MSEC_PER_SEC)
-
-static struct task_struct *client_thread;
-static scif_epd_t client_epd;
-static struct scif_peer_dev *client_spdev;
-
-/*
- * Reboot notifier: receives shutdown status from the OS and communicates it
- * back to the COSM process on the host
- */
-static int cosm_reboot_event(struct notifier_block *this, unsigned long event,
- void *ptr)
-{
- struct cosm_msg msg = { .id = COSM_MSG_SHUTDOWN_STATUS };
- int rc;
-
- event = (event == SYS_RESTART) ? SYSTEM_RESTART : event;
- dev_info(&client_spdev->dev, "%s %d received event %ld\n",
- __func__, __LINE__, event);
-
- msg.shutdown_status = event;
- rc = scif_send(client_epd, &msg, sizeof(msg), SCIF_SEND_BLOCK);
- if (rc < 0)
- dev_err(&client_spdev->dev, "%s %d scif_send rc %d\n",
- __func__, __LINE__, rc);
-
- return NOTIFY_DONE;
-}
-
-static struct notifier_block cosm_reboot = {
- .notifier_call = cosm_reboot_event,
-};
-
-/* Set system time from timespec value received from the host */
-static void cosm_set_time(struct cosm_msg *msg)
-{
- struct timespec64 ts = {
- .tv_sec = msg->timespec.tv_sec,
- .tv_nsec = msg->timespec.tv_nsec,
- };
- int rc = do_settimeofday64(&ts);
-
- if (rc)
- dev_err(&client_spdev->dev, "%s: %d settimeofday rc %d\n",
- __func__, __LINE__, rc);
-}
-
-/* COSM client receive message processing */
-static void cosm_client_recv(void)
-{
- struct cosm_msg msg;
- int rc;
-
- while (1) {
- rc = scif_recv(client_epd, &msg, sizeof(msg), 0);
- if (!rc) {
- return;
- } else if (rc < 0) {
- dev_err(&client_spdev->dev, "%s: %d rc %d\n",
- __func__, __LINE__, rc);
- return;
- }
-
- dev_dbg(&client_spdev->dev, "%s: %d rc %d id 0x%llx\n",
- __func__, __LINE__, rc, msg.id);
-
- switch (msg.id) {
- case COSM_MSG_SYNC_TIME:
- cosm_set_time(&msg);
- break;
- case COSM_MSG_SHUTDOWN:
- orderly_poweroff(true);
- break;
- default:
- dev_err(&client_spdev->dev, "%s: %d unknown id %lld\n",
- __func__, __LINE__, msg.id);
- break;
- }
- }
-}
-
-/* Initiate connection to the COSM server on the host */
-static int cosm_scif_connect(void)
-{
- struct scif_port_id port_id;
- int i, rc;
-
- client_epd = scif_open();
- if (!client_epd) {
- dev_err(&client_spdev->dev, "%s %d scif_open failed\n",
- __func__, __LINE__);
- return -ENOMEM;
- }
-
- port_id.node = 0;
- port_id.port = SCIF_COSM_LISTEN_PORT;
-
- for (i = 0; i < COSM_SCIF_MAX_RETRIES; i++) {
- rc = scif_connect(client_epd, &port_id);
- if (rc < 0)
- msleep(1000);
- else
- break;
- }
-
- if (rc < 0) {
- dev_err(&client_spdev->dev, "%s %d scif_connect rc %d\n",
- __func__, __LINE__, rc);
- scif_close(client_epd);
- client_epd = NULL;
- }
- return rc < 0 ? rc : 0;
-}
-
-/* Close host SCIF connection */
-static void cosm_scif_connect_exit(void)
-{
- if (client_epd) {
- scif_close(client_epd);
- client_epd = NULL;
- }
-}
-
-/*
- * COSM SCIF client thread function: waits for messages from the host and sends
- * a heartbeat to the host
- */
-static int cosm_scif_client(void *unused)
-{
- struct cosm_msg msg = { .id = COSM_MSG_HEARTBEAT };
- struct scif_pollepd pollepd;
- int rc;
-
- allow_signal(SIGKILL);
-
- while (!kthread_should_stop()) {
- pollepd.epd = client_epd;
- pollepd.events = EPOLLIN;
-
- rc = scif_poll(&pollepd, 1, COSM_HEARTBEAT_SEND_MSEC);
- if (rc < 0) {
-			if (rc != -EINTR)
- dev_err(&client_spdev->dev,
- "%s %d scif_poll rc %d\n",
- __func__, __LINE__, rc);
- continue;
- }
-
- if (pollepd.revents & EPOLLIN)
- cosm_client_recv();
-
- msg.id = COSM_MSG_HEARTBEAT;
- rc = scif_send(client_epd, &msg, sizeof(msg), SCIF_SEND_BLOCK);
- if (rc < 0)
- dev_err(&client_spdev->dev, "%s %d scif_send rc %d\n",
- __func__, __LINE__, rc);
- }
-
- dev_dbg(&client_spdev->dev, "%s %d Client thread stopped\n",
- __func__, __LINE__);
- return 0;
-}
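
Note the timing contract between the two sides: the client emits a heartbeat at least every COSM_HEARTBEAT_SEND_MSEC, while the server declares the card crashed after COSM_HEARTBEAT_TIMEOUT_MSEC of silence, so the timeout must exceed the send interval. A hedged compile-time check, assuming both constants are reachable from cosm_main.h:

	/* sanity check: watchdog timeout must exceed the heartbeat period */
	BUILD_BUG_ON(COSM_HEARTBEAT_TIMEOUT_MSEC <=
		     COSM_HEARTBEAT_SEND_SEC * MSEC_PER_SEC);
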
-
-static void cosm_scif_probe(struct scif_peer_dev *spdev)
-{
- int rc;
-
- dev_dbg(&spdev->dev, "%s %d: dnode %d\n",
- __func__, __LINE__, spdev->dnode);
-
- /* We are only interested in the host with spdev->dnode == 0 */
- if (spdev->dnode)
- return;
-
- client_spdev = spdev;
- rc = cosm_scif_connect();
- if (rc)
- goto exit;
-
- rc = register_reboot_notifier(&cosm_reboot);
- if (rc) {
- dev_err(&spdev->dev,
- "reboot notifier registration failed rc %d\n", rc);
- goto connect_exit;
- }
-
- client_thread = kthread_run(cosm_scif_client, NULL, "cosm_client");
- if (IS_ERR(client_thread)) {
- rc = PTR_ERR(client_thread);
- dev_err(&spdev->dev, "%s %d kthread_run rc %d\n",
- __func__, __LINE__, rc);
- goto unreg_reboot;
- }
- return;
-unreg_reboot:
- unregister_reboot_notifier(&cosm_reboot);
-connect_exit:
- cosm_scif_connect_exit();
-exit:
- client_spdev = NULL;
-}
-
-static void cosm_scif_remove(struct scif_peer_dev *spdev)
-{
- int rc;
-
- dev_dbg(&spdev->dev, "%s %d: dnode %d\n",
- __func__, __LINE__, spdev->dnode);
-
- if (spdev->dnode)
- return;
-
- if (!IS_ERR_OR_NULL(client_thread)) {
- rc = send_sig(SIGKILL, client_thread, 0);
- if (rc) {
- pr_err("%s %d send_sig rc %d\n",
- __func__, __LINE__, rc);
- return;
- }
- kthread_stop(client_thread);
- }
- unregister_reboot_notifier(&cosm_reboot);
- cosm_scif_connect_exit();
- client_spdev = NULL;
-}
-
-static struct scif_client scif_client_cosm = {
- .name = KBUILD_MODNAME,
- .probe = cosm_scif_probe,
- .remove = cosm_scif_remove,
-};
-
-static int __init cosm_client_init(void)
-{
- int rc = scif_client_register(&scif_client_cosm);
-
- if (rc)
- pr_err("scif_client_register failed rc %d\n", rc);
- return rc;
-}
-
-static void __exit cosm_client_exit(void)
-{
- scif_client_unregister(&scif_client_cosm);
-}
-
-module_init(cosm_client_init);
-module_exit(cosm_client_exit);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_DESCRIPTION("Intel(R) MIC card OS state management client driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/mic/host/Makefile b/drivers/misc/mic/host/Makefile
deleted file mode 100644
index 25f153367980..000000000000
--- a/drivers/misc/mic/host/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile - Intel MIC Linux driver.
-# Copyright(c) 2013, Intel Corporation.
-#
-obj-$(CONFIG_INTEL_MIC_HOST) += mic_host.o
-mic_host-objs := mic_main.o
-mic_host-objs += mic_x100.o
-mic_host-objs += mic_smpt.o
-mic_host-objs += mic_intr.o
-mic_host-objs += mic_boot.o
-mic_host-objs += mic_debugfs.o
diff --git a/drivers/misc/mic/host/mic_boot.c b/drivers/misc/mic/host/mic_boot.c
deleted file mode 100644
index 8cb85b8b3e19..000000000000
--- a/drivers/misc/mic/host/mic_boot.c
+++ /dev/null
@@ -1,588 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * Intel MIC Host driver.
- */
-#include <linux/delay.h>
-#include <linux/firmware.h>
-#include <linux/pci.h>
-#include <linux/kmod.h>
-#include <linux/dma-map-ops.h>
-#include <linux/mic_common.h>
-#include <linux/mic_bus.h>
-#include "../bus/scif_bus.h"
-#include "../bus/vop_bus.h"
-#include "../common/mic_dev.h"
-#include "mic_device.h"
-#include "mic_smpt.h"
-
-static inline struct mic_device *vpdev_to_mdev(struct device *dev)
-{
- return dev_get_drvdata(dev->parent);
-}
-
-static dma_addr_t
-_mic_dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir, unsigned long attrs)
-{
- void *va = phys_to_virt(page_to_phys(page)) + offset;
- struct mic_device *mdev = vpdev_to_mdev(dev);
-
- return mic_map_single(mdev, va, size);
-}
-
-static void _mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs)
-{
- struct mic_device *mdev = vpdev_to_mdev(dev);
-
- mic_unmap_single(mdev, dma_addr, size);
-}
-
-static const struct dma_map_ops _mic_dma_ops = {
- .map_page = _mic_dma_map_page,
- .unmap_page = _mic_dma_unmap_page,
-};
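
These per-device dma_map_ops are how generic DMA API calls made on the VOP child device are routed into MIC address-space mappings. A hedged sketch of a consumer call landing in _mic_dma_map_page() above (map_one_page() is illustrative):

/* dma_map_page() on the VOP device resolves to _mic_dma_map_page()
 * through the ops table above.
 */
static dma_addr_t map_one_page(struct vop_device *vpdev, struct page *page)
{
	dma_addr_t da = dma_map_page(&vpdev->dev, page, 0, PAGE_SIZE,
				     DMA_BIDIRECTIONAL);

	return dma_mapping_error(&vpdev->dev, da) ? 0 : da;
}
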
-
-static struct mic_irq *
-__mic_request_irq(struct vop_device *vpdev,
- irqreturn_t (*func)(int irq, void *data),
- const char *name, void *data, int intr_src)
-{
- struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev);
-
- return mic_request_threaded_irq(mdev, func, NULL, name, data,
- intr_src, MIC_INTR_DB);
-}
-
-static void __mic_free_irq(struct vop_device *vpdev,
- struct mic_irq *cookie, void *data)
-{
- struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev);
-
- mic_free_irq(mdev, cookie, data);
-}
-
-static void __mic_ack_interrupt(struct vop_device *vpdev, int num)
-{
- struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev);
-
- mdev->ops->intr_workarounds(mdev);
-}
-
-static int __mic_next_db(struct vop_device *vpdev)
-{
- struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev);
-
- return mic_next_db(mdev);
-}
-
-static void *__mic_get_dp(struct vop_device *vpdev)
-{
- struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev);
-
- return mdev->dp;
-}
-
-static void __iomem *__mic_get_remote_dp(struct vop_device *vpdev)
-{
- return NULL;
-}
-
-static void __mic_send_intr(struct vop_device *vpdev, int db)
-{
- struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev);
-
- mdev->ops->send_intr(mdev, db);
-}
-
-static void __iomem *__mic_ioremap(struct vop_device *vpdev,
- dma_addr_t pa, size_t len)
-{
- struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev);
-
- return mdev->aper.va + pa;
-}
-
-static void __mic_iounmap(struct vop_device *vpdev, void __iomem *va)
-{
- /* nothing to do */
-}
-
-static struct vop_hw_ops vop_hw_ops = {
- .request_irq = __mic_request_irq,
- .free_irq = __mic_free_irq,
- .ack_interrupt = __mic_ack_interrupt,
- .next_db = __mic_next_db,
- .get_dp = __mic_get_dp,
- .get_remote_dp = __mic_get_remote_dp,
- .send_intr = __mic_send_intr,
- .remap = __mic_ioremap,
- .unmap = __mic_iounmap,
-};
-
-static inline struct mic_device *scdev_to_mdev(struct scif_hw_dev *scdev)
-{
- return dev_get_drvdata(scdev->dev.parent);
-}
-
-static void *__mic_dma_alloc(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp,
- unsigned long attrs)
-{
- struct scif_hw_dev *scdev = dev_get_drvdata(dev);
- struct mic_device *mdev = scdev_to_mdev(scdev);
- dma_addr_t tmp;
- void *va = kzalloc(size, gfp);
-
- if (va) {
- tmp = mic_map_single(mdev, va, size);
- if (dma_mapping_error(dev, tmp)) {
- kfree(va);
- va = NULL;
- } else {
- *dma_handle = tmp;
- }
- }
- return va;
-}
-
-static void __mic_dma_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, unsigned long attrs)
-{
- struct scif_hw_dev *scdev = dev_get_drvdata(dev);
- struct mic_device *mdev = scdev_to_mdev(scdev);
-
- mic_unmap_single(mdev, dma_handle, size);
- kfree(vaddr);
-}
-
-static dma_addr_t
-__mic_dma_map_page(struct device *dev, struct page *page, unsigned long offset,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs)
-{
- void *va = phys_to_virt(page_to_phys(page)) + offset;
- struct scif_hw_dev *scdev = dev_get_drvdata(dev);
- struct mic_device *mdev = scdev_to_mdev(scdev);
-
- return mic_map_single(mdev, va, size);
-}
-
-static void
-__mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs)
-{
- struct scif_hw_dev *scdev = dev_get_drvdata(dev);
- struct mic_device *mdev = scdev_to_mdev(scdev);
-
- mic_unmap_single(mdev, dma_addr, size);
-}
-
-static int __mic_dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir,
- unsigned long attrs)
-{
- struct scif_hw_dev *scdev = dev_get_drvdata(dev);
- struct mic_device *mdev = scdev_to_mdev(scdev);
- struct scatterlist *s;
- int i, j, ret;
- dma_addr_t da;
-
- ret = dma_map_sg(&mdev->pdev->dev, sg, nents, dir);
- if (ret <= 0)
- return 0;
-
- for_each_sg(sg, s, nents, i) {
- da = mic_map(mdev, sg_dma_address(s) + s->offset, s->length);
- if (!da)
- goto err;
- sg_dma_address(s) = da;
- }
- return nents;
-err:
- for_each_sg(sg, s, i, j) {
- mic_unmap(mdev, sg_dma_address(s), s->length);
- sg_dma_address(s) = mic_to_dma_addr(mdev, sg_dma_address(s));
- }
- dma_unmap_sg(&mdev->pdev->dev, sg, nents, dir);
- return 0;
-}
-
-static void __mic_dma_unmap_sg(struct device *dev,
- struct scatterlist *sg, int nents,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
- struct scif_hw_dev *scdev = dev_get_drvdata(dev);
- struct mic_device *mdev = scdev_to_mdev(scdev);
- struct scatterlist *s;
- dma_addr_t da;
- int i;
-
- for_each_sg(sg, s, nents, i) {
- da = mic_to_dma_addr(mdev, sg_dma_address(s));
- mic_unmap(mdev, sg_dma_address(s), s->length);
- sg_dma_address(s) = da;
- }
- dma_unmap_sg(&mdev->pdev->dev, sg, nents, dir);
-}
-
-static const struct dma_map_ops __mic_dma_ops = {
- .alloc = __mic_dma_alloc,
- .free = __mic_dma_free,
- .map_page = __mic_dma_map_page,
- .unmap_page = __mic_dma_unmap_page,
- .map_sg = __mic_dma_map_sg,
- .unmap_sg = __mic_dma_unmap_sg,
-};
-
-static struct mic_irq *
-___mic_request_irq(struct scif_hw_dev *scdev,
- irqreturn_t (*func)(int irq, void *data),
- const char *name,
- void *data, int db)
-{
- struct mic_device *mdev = scdev_to_mdev(scdev);
-
- return mic_request_threaded_irq(mdev, func, NULL, name, data,
- db, MIC_INTR_DB);
-}
-
-static void
-___mic_free_irq(struct scif_hw_dev *scdev,
- struct mic_irq *cookie, void *data)
-{
- struct mic_device *mdev = scdev_to_mdev(scdev);
-
- mic_free_irq(mdev, cookie, data);
-}
-
-static void ___mic_ack_interrupt(struct scif_hw_dev *scdev, int num)
-{
- struct mic_device *mdev = scdev_to_mdev(scdev);
-
- mdev->ops->intr_workarounds(mdev);
-}
-
-static int ___mic_next_db(struct scif_hw_dev *scdev)
-{
- struct mic_device *mdev = scdev_to_mdev(scdev);
-
- return mic_next_db(mdev);
-}
-
-static void ___mic_send_intr(struct scif_hw_dev *scdev, int db)
-{
- struct mic_device *mdev = scdev_to_mdev(scdev);
-
- mdev->ops->send_intr(mdev, db);
-}
-
-static void __iomem *___mic_ioremap(struct scif_hw_dev *scdev,
- phys_addr_t pa, size_t len)
-{
- struct mic_device *mdev = scdev_to_mdev(scdev);
-
- return mdev->aper.va + pa;
-}
-
-static void ___mic_iounmap(struct scif_hw_dev *scdev, void __iomem *va)
-{
- /* nothing to do */
-}
-
-static struct scif_hw_ops scif_hw_ops = {
- .request_irq = ___mic_request_irq,
- .free_irq = ___mic_free_irq,
- .ack_interrupt = ___mic_ack_interrupt,
- .next_db = ___mic_next_db,
- .send_intr = ___mic_send_intr,
- .remap = ___mic_ioremap,
- .unmap = ___mic_iounmap,
-};
-
-static inline struct mic_device *mbdev_to_mdev(struct mbus_device *mbdev)
-{
- return dev_get_drvdata(mbdev->dev.parent);
-}
-
-static dma_addr_t
-mic_dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction dir,
- unsigned long attrs)
-{
- void *va = phys_to_virt(page_to_phys(page)) + offset;
- struct mic_device *mdev = dev_get_drvdata(dev->parent);
-
- return mic_map_single(mdev, va, size);
-}
-
-static void
-mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs)
-{
- struct mic_device *mdev = dev_get_drvdata(dev->parent);
- mic_unmap_single(mdev, dma_addr, size);
-}
-
-static const struct dma_map_ops mic_dma_ops = {
- .map_page = mic_dma_map_page,
- .unmap_page = mic_dma_unmap_page,
-};
-
-static struct mic_irq *
-_mic_request_threaded_irq(struct mbus_device *mbdev,
- irq_handler_t handler, irq_handler_t thread_fn,
- const char *name, void *data, int intr_src)
-{
- return mic_request_threaded_irq(mbdev_to_mdev(mbdev), handler,
- thread_fn, name, data,
- intr_src, MIC_INTR_DMA);
-}
-
-static void _mic_free_irq(struct mbus_device *mbdev,
- struct mic_irq *cookie, void *data)
-{
- mic_free_irq(mbdev_to_mdev(mbdev), cookie, data);
-}
-
-static void _mic_ack_interrupt(struct mbus_device *mbdev, int num)
-{
- struct mic_device *mdev = mbdev_to_mdev(mbdev);
- mdev->ops->intr_workarounds(mdev);
-}
-
-static struct mbus_hw_ops mbus_hw_ops = {
- .request_threaded_irq = _mic_request_threaded_irq,
- .free_irq = _mic_free_irq,
- .ack_interrupt = _mic_ack_interrupt,
-};
-
-/* Initialize the MIC bootparams */
-void mic_bootparam_init(struct mic_device *mdev)
-{
- struct mic_bootparam *bootparam = mdev->dp;
-
- bootparam->magic = cpu_to_le32(MIC_MAGIC);
- bootparam->h2c_config_db = -1;
- bootparam->node_id = mdev->id + 1;
- bootparam->scif_host_dma_addr = 0x0;
- bootparam->scif_card_dma_addr = 0x0;
- bootparam->c2h_scif_db = -1;
- bootparam->h2c_scif_db = -1;
-}
-
-static inline struct mic_device *cosmdev_to_mdev(struct cosm_device *cdev)
-{
- return dev_get_drvdata(cdev->dev.parent);
-}
-
-static void _mic_reset(struct cosm_device *cdev)
-{
- struct mic_device *mdev = cosmdev_to_mdev(cdev);
-
- mdev->ops->reset_fw_ready(mdev);
- mdev->ops->reset(mdev);
-}
-
-static bool _mic_ready(struct cosm_device *cdev)
-{
- struct mic_device *mdev = cosmdev_to_mdev(cdev);
-
- return mdev->ops->is_fw_ready(mdev);
-}
-
-/**
- * mic_request_dma_chans - Request DMA channels
- * @mdev: pointer to mic_device instance
- *
- * returns number of DMA channels acquired
- */
-static int mic_request_dma_chans(struct mic_device *mdev)
-{
- dma_cap_mask_t mask;
- struct dma_chan *chan;
-
- dma_cap_zero(mask);
- dma_cap_set(DMA_MEMCPY, mask);
-
- do {
- chan = dma_request_channel(mask, mdev->ops->dma_filter,
- &mdev->pdev->dev);
- if (chan) {
- mdev->dma_ch[mdev->num_dma_ch++] = chan;
- if (mdev->num_dma_ch >= MIC_MAX_DMA_CHAN)
- break;
- }
- } while (chan);
- dev_info(&mdev->pdev->dev, "DMA channels # %d\n", mdev->num_dma_ch);
- return mdev->num_dma_ch;
-}
-
-/**
- * mic_free_dma_chans - release DMA channels
- * @mdev: pointer to mic_device instance
- *
- * returns none
- */
-static void mic_free_dma_chans(struct mic_device *mdev)
-{
- int i = 0;
-
- for (i = 0; i < mdev->num_dma_ch; i++) {
- dma_release_channel(mdev->dma_ch[i]);
- mdev->dma_ch[i] = NULL;
- }
- mdev->num_dma_ch = 0;
-}
-
-/**
- * _mic_start - Start the MIC.
- * @cdev: pointer to cosm_device instance
- * @id: MIC device id/index provided by COSM, used in other drivers like SCIF
- *
- * This function prepares an MIC for boot and initiates boot.
- * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
- *
- * For all cosm_hw_ops the caller holds a mutex to ensure serialization.
- */
-static int _mic_start(struct cosm_device *cdev, int id)
-{
- struct mic_device *mdev = cosmdev_to_mdev(cdev);
- int rc;
-
- mic_bootparam_init(mdev);
- mdev->dma_mbdev = mbus_register_device(&mdev->pdev->dev,
- MBUS_DEV_DMA_HOST, &mic_dma_ops,
- &mbus_hw_ops, id, mdev->mmio.va);
- if (IS_ERR(mdev->dma_mbdev)) {
- rc = PTR_ERR(mdev->dma_mbdev);
- goto unlock_ret;
- }
- if (!mic_request_dma_chans(mdev)) {
- rc = -ENODEV;
- goto dma_remove;
- }
- mdev->scdev = scif_register_device(&mdev->pdev->dev, MIC_SCIF_DEV,
- &__mic_dma_ops, &scif_hw_ops,
- id + 1, 0, &mdev->mmio,
- &mdev->aper, mdev->dp, NULL,
- mdev->dma_ch, mdev->num_dma_ch,
- true);
- if (IS_ERR(mdev->scdev)) {
- rc = PTR_ERR(mdev->scdev);
- goto dma_free;
- }
-
- mdev->vpdev = vop_register_device(&mdev->pdev->dev,
- VOP_DEV_TRNSP, &_mic_dma_ops,
- &vop_hw_ops, id + 1, &mdev->aper,
- mdev->dma_ch[0]);
- if (IS_ERR(mdev->vpdev)) {
- rc = PTR_ERR(mdev->vpdev);
- goto scif_remove;
- }
-
- rc = mdev->ops->load_mic_fw(mdev, NULL);
- if (rc)
- goto vop_remove;
- mic_smpt_restore(mdev);
- mic_intr_restore(mdev);
- mdev->intr_ops->enable_interrupts(mdev);
- mdev->ops->write_spad(mdev, MIC_DPLO_SPAD, mdev->dp_dma_addr);
- mdev->ops->write_spad(mdev, MIC_DPHI_SPAD, mdev->dp_dma_addr >> 32);
- mdev->ops->send_firmware_intr(mdev);
- goto unlock_ret;
-vop_remove:
- vop_unregister_device(mdev->vpdev);
-scif_remove:
- scif_unregister_device(mdev->scdev);
-dma_free:
- mic_free_dma_chans(mdev);
-dma_remove:
- mbus_unregister_device(mdev->dma_mbdev);
-unlock_ret:
- return rc;
-}
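
_mic_start() is a textbook instance of the kernel's goto-unwind idiom: each acquired resource gets a label, and a failure jumps to the label that releases everything acquired so far, in reverse order. The shape in miniature (the acquire/release helpers are hypothetical stubs):

static int acquire_a(void) { return 0; }	/* hypothetical */
static int acquire_b(void) { return -ENODEV; }	/* hypothetical */
static void release_a(void) { }

static int start_in_miniature(void)
{
	int rc;

	rc = acquire_a();
	if (rc)
		goto out;
	rc = acquire_b();
	if (rc)
		goto undo_a;	/* unwind in reverse acquisition order */
	return 0;
undo_a:
	release_a();
out:
	return rc;
}
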
-
-/**
- * _mic_stop - Prepare the MIC for reset and trigger reset.
- * @cdev: pointer to cosm_device instance
- * @force: force a MIC to reset even if it is already offline.
- *
- * RETURNS: None.
- */
-static void _mic_stop(struct cosm_device *cdev, bool force)
-{
- struct mic_device *mdev = cosmdev_to_mdev(cdev);
-
- /*
-	 * Since SCIF handles card shutdown and reset (using COSM), it will
-	 * be the first to be registered and the last to be unregistered.
- */
- vop_unregister_device(mdev->vpdev);
- scif_unregister_device(mdev->scdev);
- mic_free_dma_chans(mdev);
- mbus_unregister_device(mdev->dma_mbdev);
- mic_bootparam_init(mdev);
-}
-
-static ssize_t _mic_family(struct cosm_device *cdev, char *buf)
-{
- struct mic_device *mdev = cosmdev_to_mdev(cdev);
- static const char *family[MIC_FAMILY_LAST] = { "x100", "Unknown" };
-
- return scnprintf(buf, PAGE_SIZE, "%s\n", family[mdev->family]);
-}
-
-static ssize_t _mic_stepping(struct cosm_device *cdev, char *buf)
-{
- struct mic_device *mdev = cosmdev_to_mdev(cdev);
- const char *string = "??";
-
- switch (mdev->stepping) {
- case MIC_A0_STEP:
- string = "A0";
- break;
- case MIC_B0_STEP:
- string = "B0";
- break;
- case MIC_B1_STEP:
- string = "B1";
- break;
- case MIC_C0_STEP:
- string = "C0";
- break;
- default:
- break;
- }
- return scnprintf(buf, PAGE_SIZE, "%s\n", string);
-}
-
-static struct mic_mw *_mic_aper(struct cosm_device *cdev)
-{
- struct mic_device *mdev = cosmdev_to_mdev(cdev);
-
- return &mdev->aper;
-}
-
-struct cosm_hw_ops cosm_hw_ops = {
- .reset = _mic_reset,
- .force_reset = _mic_reset,
- .post_reset = NULL,
- .ready = _mic_ready,
- .start = _mic_start,
- .stop = _mic_stop,
- .family = _mic_family,
- .stepping = _mic_stepping,
- .aper = _mic_aper,
-};
diff --git a/drivers/misc/mic/host/mic_debugfs.c b/drivers/misc/mic/host/mic_debugfs.c
deleted file mode 100644
index ffda740e20d5..000000000000
--- a/drivers/misc/mic/host/mic_debugfs.c
+++ /dev/null
@@ -1,149 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * Intel MIC Host driver.
- */
-#include <linux/debugfs.h>
-#include <linux/pci.h>
-#include <linux/seq_file.h>
-
-#include <linux/mic_common.h>
-#include "../common/mic_dev.h"
-#include "mic_device.h"
-#include "mic_smpt.h"
-
-/* Debugfs parent dir */
-static struct dentry *mic_dbg;
-
-static int mic_smpt_show(struct seq_file *s, void *pos)
-{
- int i;
- struct mic_device *mdev = s->private;
- unsigned long flags;
-
- seq_printf(s, "MIC %-2d |%-10s| %-14s %-10s\n",
- mdev->id, "SMPT entry", "SW DMA addr", "RefCount");
- seq_puts(s, "====================================================\n");
-
- if (mdev->smpt) {
- struct mic_smpt_info *smpt_info = mdev->smpt;
- spin_lock_irqsave(&smpt_info->smpt_lock, flags);
- for (i = 0; i < smpt_info->info.num_reg; i++) {
- seq_printf(s, "%9s|%-10d| %-#14llx %-10lld\n",
- " ", i, smpt_info->entry[i].dma_addr,
- smpt_info->entry[i].ref_count);
- }
- spin_unlock_irqrestore(&smpt_info->smpt_lock, flags);
- }
- seq_puts(s, "====================================================\n");
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(mic_smpt);
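
DEFINE_SHOW_ATTRIBUTE(mic_smpt) is the seq_file convenience macro; it generates the mic_smpt_fops used further down, roughly equivalent to:

static int mic_smpt_open(struct inode *inode, struct file *file)
{
	return single_open(file, mic_smpt_show, inode->i_private);
}

static const struct file_operations mic_smpt_fops = {
	.owner		= THIS_MODULE,
	.open		= mic_smpt_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
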
-
-static int mic_post_code_show(struct seq_file *s, void *pos)
-{
- struct mic_device *mdev = s->private;
- u32 reg = mdev->ops->get_postcode(mdev);
-
- seq_printf(s, "%c%c", reg & 0xff, (reg >> 8) & 0xff);
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(mic_post_code);
-
-static int mic_msi_irq_info_show(struct seq_file *s, void *pos)
-{
- struct mic_device *mdev = s->private;
- int reg;
- int i, j;
- u16 entry;
- u16 vector;
- struct pci_dev *pdev = mdev->pdev;
-
- if (pci_dev_msi_enabled(pdev)) {
- for (i = 0; i < mdev->irq_info.num_vectors; i++) {
- if (pdev->msix_enabled) {
- entry = mdev->irq_info.msix_entries[i].entry;
- vector = mdev->irq_info.msix_entries[i].vector;
- } else {
- entry = 0;
- vector = pdev->irq;
- }
-
- reg = mdev->intr_ops->read_msi_to_src_map(mdev, entry);
-
- seq_printf(s, "%s %-10d %s %-10d MXAR[%d]: %08X\n",
- "IRQ:", vector, "Entry:", entry, i, reg);
-
- seq_printf(s, "%-10s", "offset:");
- for (j = (MIC_NUM_OFFSETS - 1); j >= 0; j--)
- seq_printf(s, "%4d ", j);
- seq_puts(s, "\n");
-
-
- seq_printf(s, "%-10s", "count:");
- for (j = (MIC_NUM_OFFSETS - 1); j >= 0; j--)
- seq_printf(s, "%4d ",
- (mdev->irq_info.mic_msi_map[i] &
- BIT(j)) ? 1 : 0);
- seq_puts(s, "\n\n");
- }
- } else {
- seq_puts(s, "MSI/MSIx interrupts not enabled\n");
- }
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(mic_msi_irq_info);
-
-/*
- * mic_create_debug_dir - Initialize MIC debugfs entries.
- */
-void mic_create_debug_dir(struct mic_device *mdev)
-{
- char name[16];
-
- if (!mic_dbg)
- return;
-
- scnprintf(name, sizeof(name), "mic%d", mdev->id);
- mdev->dbg_dir = debugfs_create_dir(name, mic_dbg);
-
- debugfs_create_file("smpt", 0444, mdev->dbg_dir, mdev,
- &mic_smpt_fops);
-
- debugfs_create_file("post_code", 0444, mdev->dbg_dir, mdev,
- &mic_post_code_fops);
-
- debugfs_create_file("msi_irq_info", 0444, mdev->dbg_dir, mdev,
- &mic_msi_irq_info_fops);
-}
-
-/*
- * mic_delete_debug_dir - Uninitialize MIC debugfs entries.
- */
-void mic_delete_debug_dir(struct mic_device *mdev)
-{
- debugfs_remove_recursive(mdev->dbg_dir);
-}
-
-/*
- * mic_init_debugfs - Initialize global debugfs entry.
- */
-void __init mic_init_debugfs(void)
-{
- mic_dbg = debugfs_create_dir(KBUILD_MODNAME, NULL);
-}
-
-/*
- * mic_exit_debugfs - Uninitialize global debugfs entry
- */
-void mic_exit_debugfs(void)
-{
- debugfs_remove(mic_dbg);
-}
diff --git a/drivers/misc/mic/host/mic_device.h b/drivers/misc/mic/host/mic_device.h
deleted file mode 100644
index 41bcd308ae59..000000000000
--- a/drivers/misc/mic/host/mic_device.h
+++ /dev/null
@@ -1,157 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * Intel MIC Host driver.
- */
-#ifndef _MIC_DEVICE_H_
-#define _MIC_DEVICE_H_
-
-#include <linux/cdev.h>
-#include <linux/idr.h>
-#include <linux/notifier.h>
-#include <linux/irqreturn.h>
-#include <linux/dmaengine.h>
-#include <linux/miscdevice.h>
-#include <linux/mic_bus.h>
-#include "../bus/scif_bus.h"
-#include "../bus/vop_bus.h"
-#include "../bus/cosm_bus.h"
-#include "mic_intr.h"
-
-/**
- * enum mic_stepping - MIC stepping ids.
- */
-enum mic_stepping {
- MIC_A0_STEP = 0x0,
- MIC_B0_STEP = 0x10,
- MIC_B1_STEP = 0x11,
- MIC_C0_STEP = 0x20,
-};
-
-extern struct cosm_hw_ops cosm_hw_ops;
-
-/**
- * struct mic_device - MIC device information for each card.
- *
- * @mmio: MMIO bar information.
- * @aper: Aperture bar information.
- * @family: The MIC family to which this device belongs.
- * @ops: MIC HW specific operations.
- * @id: The unique device id for this MIC device.
- * @stepping: Stepping ID.
- * @pdev: Underlying PCI device.
- * @mic_mutex: Mutex for synchronizing access to mic_device.
- * @intr_ops: HW specific interrupt operations.
- * @smpt_ops: Hardware specific SMPT operations.
- * @smpt: MIC SMPT information.
- * @intr_info: H/W specific interrupt information.
- * @irq_info: The OS specific irq information
- * @dbg_dir: debugfs directory of this MIC device.
- * @bootaddr: MIC boot address.
- * @dp: virtio device page
- * @dp_dma_addr: virtio device page DMA address.
- * @dma_mbdev: MIC BUS DMA device.
- * @dma_ch: Array of DMA channels
- * @num_dma_ch: Number of DMA channels available
- * @scdev: SCIF device on the SCIF virtual bus.
- * @vpdev: Virtio over PCIe device on the VOP virtual bus.
- * @cosm_dev: COSM device
- */
-struct mic_device {
- struct mic_mw mmio;
- struct mic_mw aper;
- enum mic_hw_family family;
- struct mic_hw_ops *ops;
- int id;
- enum mic_stepping stepping;
- struct pci_dev *pdev;
- struct mutex mic_mutex;
- struct mic_hw_intr_ops *intr_ops;
- struct mic_smpt_ops *smpt_ops;
- struct mic_smpt_info *smpt;
- struct mic_intr_info *intr_info;
- struct mic_irq_info irq_info;
- struct dentry *dbg_dir;
- u32 bootaddr;
- void *dp;
- dma_addr_t dp_dma_addr;
- struct mbus_device *dma_mbdev;
- struct dma_chan *dma_ch[MIC_MAX_DMA_CHAN];
- int num_dma_ch;
- struct scif_hw_dev *scdev;
- struct vop_device *vpdev;
- struct cosm_device *cosm_dev;
-};
-
-/**
- * struct mic_hw_ops - MIC HW specific operations.
- * @aper_bar: Aperture bar resource number.
- * @mmio_bar: MMIO bar resource number.
- * @read_spad: Read from scratch pad register.
- * @write_spad: Write to scratch pad register.
- * @send_intr: Send an interrupt for a particular doorbell on the card.
- * @ack_interrupt: Hardware specific operations to ack the h/w on
- * receipt of an interrupt.
- * @intr_workarounds: Hardware specific workarounds needed after
- * handling an interrupt.
- * @reset: Reset the remote processor.
- * @reset_fw_ready: Reset firmware ready field.
- * @is_fw_ready: Check if firmware is ready for OS download.
- * @send_firmware_intr: Send an interrupt to the card firmware.
- * @load_mic_fw: Load firmware segments required to boot the card
- * into card memory. This includes the kernel, command line, ramdisk etc.
- * @get_postcode: Get post code status from firmware.
- * @dma_filter: DMA filter function to be used.
- */
-struct mic_hw_ops {
- u8 aper_bar;
- u8 mmio_bar;
- u32 (*read_spad)(struct mic_device *mdev, unsigned int idx);
- void (*write_spad)(struct mic_device *mdev, unsigned int idx, u32 val);
- void (*send_intr)(struct mic_device *mdev, int doorbell);
- u32 (*ack_interrupt)(struct mic_device *mdev);
- void (*intr_workarounds)(struct mic_device *mdev);
- void (*reset)(struct mic_device *mdev);
- void (*reset_fw_ready)(struct mic_device *mdev);
- bool (*is_fw_ready)(struct mic_device *mdev);
- void (*send_firmware_intr)(struct mic_device *mdev);
- int (*load_mic_fw)(struct mic_device *mdev, const char *buf);
- u32 (*get_postcode)(struct mic_device *mdev);
- bool (*dma_filter)(struct dma_chan *chan, void *param);
-};
-
-/**
- * mic_mmio_read - read from an MMIO register.
- * @mw: MMIO register base virtual address.
- * @offset: register offset.
- *
- * RETURNS: register value.
- */
-static inline u32 mic_mmio_read(struct mic_mw *mw, u32 offset)
-{
- return ioread32(mw->va + offset);
-}
-
-/**
- * mic_mmio_write - write to an MMIO register.
- * @mw: MMIO register base virtual address.
- * @val: the data value to put into the register
- * @offset: register offset.
- *
- * RETURNS: none.
- */
-static inline void
-mic_mmio_write(struct mic_mw *mw, u32 val, u32 offset)
-{
- iowrite32(val, mw->va + offset);
-}
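
A short usage sketch for the two accessors; the register offset and bit value are purely illustrative:

#define CTRL_REG 0x40	/* hypothetical register offset */

static void example_set_ctrl_bit(struct mic_device *mdev)
{
	u32 v = mic_mmio_read(&mdev->mmio, CTRL_REG);

	mic_mmio_write(&mdev->mmio, v | 0x1, CTRL_REG);
}
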
-
-void mic_bootparam_init(struct mic_device *mdev);
-void mic_create_debug_dir(struct mic_device *dev);
-void mic_delete_debug_dir(struct mic_device *dev);
-void __init mic_init_debugfs(void);
-void mic_exit_debugfs(void);
-#endif
diff --git a/drivers/misc/mic/host/mic_intr.c b/drivers/misc/mic/host/mic_intr.c
deleted file mode 100644
index 85b3221b5d40..000000000000
--- a/drivers/misc/mic/host/mic_intr.c
+++ /dev/null
@@ -1,635 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * Intel MIC Host driver.
- */
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-
-#include "../common/mic_dev.h"
-#include "mic_device.h"
-
-static irqreturn_t mic_thread_fn(int irq, void *dev)
-{
- struct mic_device *mdev = dev;
- struct mic_intr_info *intr_info = mdev->intr_info;
- struct mic_irq_info *irq_info = &mdev->irq_info;
- struct mic_intr_cb *intr_cb;
- struct pci_dev *pdev = mdev->pdev;
- int i;
-
- spin_lock(&irq_info->mic_thread_lock);
- for (i = intr_info->intr_start_idx[MIC_INTR_DB];
- i < intr_info->intr_len[MIC_INTR_DB]; i++)
- if (test_and_clear_bit(i, &irq_info->mask)) {
- list_for_each_entry(intr_cb, &irq_info->cb_list[i],
- list)
- if (intr_cb->thread_fn)
- intr_cb->thread_fn(pdev->irq,
- intr_cb->data);
- }
- spin_unlock(&irq_info->mic_thread_lock);
- return IRQ_HANDLED;
-}
-/**
- * mic_interrupt - Generic interrupt handler for
- * MSI and INTx based interrupts.
- * @irq: interrupt to handle (unused)
- * @dev: pointer to the mic_device instance
- */
-static irqreturn_t mic_interrupt(int irq, void *dev)
-{
- struct mic_device *mdev = dev;
- struct mic_intr_info *intr_info = mdev->intr_info;
- struct mic_irq_info *irq_info = &mdev->irq_info;
- struct mic_intr_cb *intr_cb;
- struct pci_dev *pdev = mdev->pdev;
- u32 mask;
- int i;
-
- mask = mdev->ops->ack_interrupt(mdev);
- if (!mask)
- return IRQ_NONE;
-
- spin_lock(&irq_info->mic_intr_lock);
- for (i = intr_info->intr_start_idx[MIC_INTR_DB];
- i < intr_info->intr_len[MIC_INTR_DB]; i++)
- if (mask & BIT(i)) {
- list_for_each_entry(intr_cb, &irq_info->cb_list[i],
- list)
- if (intr_cb->handler)
- intr_cb->handler(pdev->irq,
- intr_cb->data);
- set_bit(i, &irq_info->mask);
- }
- spin_unlock(&irq_info->mic_intr_lock);
- return IRQ_WAKE_THREAD;
-}
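
mic_interrupt() and mic_thread_fn() together form the standard two-half threaded-IRQ pattern: the hard handler only acks the hardware and records pending sources in irq_info->mask, returning IRQ_WAKE_THREAD so the sleepable callbacks run in the thread. The pattern in miniature (hw_ack/hw_work and device_irq_pending() are hypothetical):

static irqreturn_t hw_ack(int irq, void *data)
{
	if (!device_irq_pending(data))	/* hypothetical ack-and-test */
		return IRQ_NONE;	/* not ours (shared line) */
	return IRQ_WAKE_THREAD;		/* defer the real work */
}

static irqreturn_t hw_work(int irq, void *data)
{
	/* runs in a kernel thread, so it may sleep */
	return IRQ_HANDLED;
}

/* paired up via: request_threaded_irq(irq, hw_ack, hw_work, 0, "ex", dev); */
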
-
-/* Return the interrupt offset from the index. Index is 0 based. */
-static u16 mic_map_src_to_offset(struct mic_device *mdev,
- int intr_src, enum mic_intr_type type)
-{
- if (type >= MIC_NUM_INTR_TYPES)
- return MIC_NUM_OFFSETS;
- if (intr_src >= mdev->intr_info->intr_len[type])
- return MIC_NUM_OFFSETS;
-
- return mdev->intr_info->intr_start_idx[type] + intr_src;
-}
-
-/* Return next available msix_entry. */
-static struct msix_entry *mic_get_available_vector(struct mic_device *mdev)
-{
- int i;
- struct mic_irq_info *info = &mdev->irq_info;
-
- for (i = 0; i < info->num_vectors; i++)
- if (!info->mic_msi_map[i])
- return &info->msix_entries[i];
- return NULL;
-}
-
-/**
- * mic_register_intr_callback - Register a callback handler for the
- * given source id.
- *
- * @mdev: pointer to the mic_device instance
- * @idx: The source id to be registered.
- * @handler: The function to be called when the source id receives
- * the interrupt.
- * @thread_fn: thread fn. corresponding to the handler
- * @data: Private data of the requester.
- * Return the callback structure that was registered or an
- * appropriate error on failure.
- */
-static struct mic_intr_cb *mic_register_intr_callback(struct mic_device *mdev,
- u8 idx, irq_handler_t handler, irq_handler_t thread_fn,
- void *data)
-{
- struct mic_intr_cb *intr_cb;
- unsigned long flags;
- int rc;
- intr_cb = kmalloc(sizeof(*intr_cb), GFP_KERNEL);
-
- if (!intr_cb)
- return ERR_PTR(-ENOMEM);
-
- intr_cb->handler = handler;
- intr_cb->thread_fn = thread_fn;
- intr_cb->data = data;
- intr_cb->cb_id = ida_simple_get(&mdev->irq_info.cb_ida,
- 0, 0, GFP_KERNEL);
- if (intr_cb->cb_id < 0) {
- rc = intr_cb->cb_id;
- goto ida_fail;
- }
-
- spin_lock(&mdev->irq_info.mic_thread_lock);
- spin_lock_irqsave(&mdev->irq_info.mic_intr_lock, flags);
- list_add_tail(&intr_cb->list, &mdev->irq_info.cb_list[idx]);
- spin_unlock_irqrestore(&mdev->irq_info.mic_intr_lock, flags);
- spin_unlock(&mdev->irq_info.mic_thread_lock);
-
- return intr_cb;
-ida_fail:
- kfree(intr_cb);
- return ERR_PTR(rc);
-}
-
-/**
- * mic_unregister_intr_callback - Unregister the callback handler
- * identified by its callback id.
- *
- * @mdev: pointer to the mic_device instance
- * @idx: The callback structure id to be unregistered.
- * Return the source id that was unregistered or MIC_NUM_OFFSETS if no
- * such callback handler was found.
- */
-static u8 mic_unregister_intr_callback(struct mic_device *mdev, u32 idx)
-{
- struct list_head *pos, *tmp;
- struct mic_intr_cb *intr_cb;
- unsigned long flags;
- int i;
-
- spin_lock(&mdev->irq_info.mic_thread_lock);
- spin_lock_irqsave(&mdev->irq_info.mic_intr_lock, flags);
- for (i = 0; i < MIC_NUM_OFFSETS; i++) {
- list_for_each_safe(pos, tmp, &mdev->irq_info.cb_list[i]) {
- intr_cb = list_entry(pos, struct mic_intr_cb, list);
- if (intr_cb->cb_id == idx) {
- list_del(pos);
- ida_simple_remove(&mdev->irq_info.cb_ida,
- intr_cb->cb_id);
- kfree(intr_cb);
- spin_unlock_irqrestore(
- &mdev->irq_info.mic_intr_lock, flags);
- spin_unlock(&mdev->irq_info.mic_thread_lock);
- return i;
- }
- }
- }
- spin_unlock_irqrestore(&mdev->irq_info.mic_intr_lock, flags);
- spin_unlock(&mdev->irq_info.mic_thread_lock);
- return MIC_NUM_OFFSETS;
-}
-
-/**
- * mic_setup_msix - Initializes MSIx interrupts.
- *
- * @mdev: pointer to mic_device instance
- * @pdev: PCI device structure
- *
- * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
- */
-static int mic_setup_msix(struct mic_device *mdev, struct pci_dev *pdev)
-{
- int rc, i;
- int entry_size = sizeof(*mdev->irq_info.msix_entries);
-
- mdev->irq_info.msix_entries = kmalloc_array(MIC_MIN_MSIX,
- entry_size, GFP_KERNEL);
- if (!mdev->irq_info.msix_entries) {
- rc = -ENOMEM;
- goto err_nomem1;
- }
-
- for (i = 0; i < MIC_MIN_MSIX; i++)
- mdev->irq_info.msix_entries[i].entry = i;
-
- rc = pci_enable_msix_exact(pdev, mdev->irq_info.msix_entries,
- MIC_MIN_MSIX);
- if (rc) {
- dev_dbg(&pdev->dev, "Error enabling MSIx. rc = %d\n", rc);
- goto err_enable_msix;
- }
-
- mdev->irq_info.num_vectors = MIC_MIN_MSIX;
- mdev->irq_info.mic_msi_map = kzalloc((sizeof(u32) *
- mdev->irq_info.num_vectors), GFP_KERNEL);
-
- if (!mdev->irq_info.mic_msi_map) {
- rc = -ENOMEM;
- goto err_nomem2;
- }
-
- dev_dbg(&mdev->pdev->dev,
- "%d MSIx irqs setup\n", mdev->irq_info.num_vectors);
- return 0;
-err_nomem2:
- pci_disable_msix(pdev);
-err_enable_msix:
- kfree(mdev->irq_info.msix_entries);
-err_nomem1:
- mdev->irq_info.num_vectors = 0;
- return rc;
-}
-
-/**
- * mic_setup_callbacks - Initialize data structures needed
- * to handle callbacks.
- *
- * @mdev: pointer to mic_device instance
- */
-static int mic_setup_callbacks(struct mic_device *mdev)
-{
- int i;
-
- mdev->irq_info.cb_list = kmalloc_array(MIC_NUM_OFFSETS,
- sizeof(*mdev->irq_info.cb_list),
- GFP_KERNEL);
- if (!mdev->irq_info.cb_list)
- return -ENOMEM;
-
- for (i = 0; i < MIC_NUM_OFFSETS; i++)
- INIT_LIST_HEAD(&mdev->irq_info.cb_list[i]);
- ida_init(&mdev->irq_info.cb_ida);
- spin_lock_init(&mdev->irq_info.mic_intr_lock);
- spin_lock_init(&mdev->irq_info.mic_thread_lock);
- return 0;
-}
-
-/**
- * mic_release_callbacks - Uninitialize data structures needed
- * to handle callbacks.
- *
- * @mdev: pointer to mic_device instance
- */
-static void mic_release_callbacks(struct mic_device *mdev)
-{
- unsigned long flags;
- struct list_head *pos, *tmp;
- struct mic_intr_cb *intr_cb;
- int i;
-
- spin_lock(&mdev->irq_info.mic_thread_lock);
- spin_lock_irqsave(&mdev->irq_info.mic_intr_lock, flags);
- for (i = 0; i < MIC_NUM_OFFSETS; i++) {
- if (list_empty(&mdev->irq_info.cb_list[i]))
- break;
-
- list_for_each_safe(pos, tmp, &mdev->irq_info.cb_list[i]) {
- intr_cb = list_entry(pos, struct mic_intr_cb, list);
- list_del(pos);
- ida_simple_remove(&mdev->irq_info.cb_ida,
- intr_cb->cb_id);
- kfree(intr_cb);
- }
- }
- spin_unlock_irqrestore(&mdev->irq_info.mic_intr_lock, flags);
- spin_unlock(&mdev->irq_info.mic_thread_lock);
- ida_destroy(&mdev->irq_info.cb_ida);
- kfree(mdev->irq_info.cb_list);
-}
-
-/**
- * mic_setup_msi - Initializes MSI interrupts.
- *
- * @mdev: pointer to mic_device instance
- * @pdev: PCI device structure
- *
- * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
- */
-static int mic_setup_msi(struct mic_device *mdev, struct pci_dev *pdev)
-{
- int rc;
-
- rc = pci_enable_msi(pdev);
- if (rc) {
- dev_dbg(&pdev->dev, "Error enabling MSI. rc = %d\n", rc);
- return rc;
- }
-
- mdev->irq_info.num_vectors = 1;
- mdev->irq_info.mic_msi_map = kzalloc((sizeof(u32) *
- mdev->irq_info.num_vectors), GFP_KERNEL);
-
- if (!mdev->irq_info.mic_msi_map) {
- rc = -ENOMEM;
- goto err_nomem1;
- }
-
- rc = mic_setup_callbacks(mdev);
- if (rc) {
- dev_err(&pdev->dev, "Error setting up callbacks\n");
- goto err_nomem2;
- }
-
- rc = request_threaded_irq(pdev->irq, mic_interrupt, mic_thread_fn,
- 0, "mic-msi", mdev);
- if (rc) {
- dev_err(&pdev->dev, "Error allocating MSI interrupt\n");
- goto err_irq_req_fail;
- }
-
- dev_dbg(&pdev->dev, "%d MSI irqs setup\n", mdev->irq_info.num_vectors);
- return 0;
-err_irq_req_fail:
- mic_release_callbacks(mdev);
-err_nomem2:
- kfree(mdev->irq_info.mic_msi_map);
-err_nomem1:
- pci_disable_msi(pdev);
- mdev->irq_info.num_vectors = 0;
- return rc;
-}
-
-/**
- * mic_setup_intx - Initializes legacy interrupts.
- *
- * @mdev: pointer to mic_device instance
- * @pdev: PCI device structure
- *
- * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
- */
-static int mic_setup_intx(struct mic_device *mdev, struct pci_dev *pdev)
-{
- int rc;
-
- /* Enable intx */
- pci_intx(pdev, 1);
- rc = mic_setup_callbacks(mdev);
- if (rc) {
- dev_err(&pdev->dev, "Error setting up callbacks\n");
- goto err_nomem;
- }
-
- rc = request_threaded_irq(pdev->irq, mic_interrupt, mic_thread_fn,
- IRQF_SHARED, "mic-intx", mdev);
- if (rc)
- goto err;
-
- dev_dbg(&pdev->dev, "intx irq setup\n");
- return 0;
-err:
- mic_release_callbacks(mdev);
-err_nomem:
- return rc;
-}
-
-/**
- * mic_next_db - Retrieve the next doorbell interrupt source id.
- * The id is picked sequentially from the available pool of
- * doorbell ids.
- *
- * @mdev: pointer to the mic_device instance.
- *
- * Returns the next doorbell interrupt source.
- */
-int mic_next_db(struct mic_device *mdev)
-{
- int next_db;
-
- next_db = mdev->irq_info.next_avail_src %
- mdev->intr_info->intr_len[MIC_INTR_DB];
- mdev->irq_info.next_avail_src++;
- return next_db;
-}
-
-#define COOKIE_ID_SHIFT 16
-#define GET_ENTRY(cookie) ((cookie) & 0xFFFF)
-#define GET_OFFSET(cookie) ((cookie) >> COOKIE_ID_SHIFT)
-#define MK_COOKIE(x, y) ((x) | (y) << COOKIE_ID_SHIFT)
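
A quick worked example of the cookie packing:

/* entry 2, offset 5 */
unsigned long cookie = MK_COOKIE(2, 5);	/* == 0x00050002 */
/* GET_ENTRY(cookie)  == 0x00050002 & 0xFFFF == 2 */
/* GET_OFFSET(cookie) == 0x00050002 >> 16    == 5 */
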
-
-/**
- * mic_request_threaded_irq - request an irq. mic_mutex needs
- * to be held before calling this function.
- *
- * @mdev: pointer to mic_device instance
- * @handler: The callback function that handles the interrupt.
- * The function needs to call ack_interrupt
- * (mdev->ops->ack_interrupt(mdev)) when handling the interrupts.
- * @thread_fn: thread fn required by request_threaded_irq.
- * @name: The ASCII name of the callee requesting the irq.
- * @data: private data that is passed back when calling the
- * function handler.
- * @intr_src: The source id of the requester. It's the doorbell id
- * for Doorbell interrupts and the DMA channel id for DMA interrupts.
- * @type: The type of interrupt. Values defined in mic_intr_type
- *
- * returns: The cookie that is transparent to the caller. Passed
- * back when calling mic_free_irq. An appropriate error code
- * is returned on failure. Caller needs to use IS_ERR(return_val)
- * to check for failure and PTR_ERR(return_val) to obtain the
- * error code.
- *
- */
-struct mic_irq *
-mic_request_threaded_irq(struct mic_device *mdev,
- irq_handler_t handler, irq_handler_t thread_fn,
- const char *name, void *data, int intr_src,
- enum mic_intr_type type)
-{
- u16 offset;
- int rc = 0;
- struct msix_entry *msix = NULL;
- unsigned long cookie = 0;
- u16 entry;
- struct mic_intr_cb *intr_cb;
- struct pci_dev *pdev = mdev->pdev;
-
- offset = mic_map_src_to_offset(mdev, intr_src, type);
- if (offset >= MIC_NUM_OFFSETS) {
- dev_err(&mdev->pdev->dev,
- "Error mapping index %d to a valid source id.\n",
- intr_src);
- rc = -EINVAL;
- goto err;
- }
-
- if (mdev->irq_info.num_vectors > 1) {
- msix = mic_get_available_vector(mdev);
- if (!msix) {
- dev_err(&mdev->pdev->dev,
- "No MSIx vectors available for use.\n");
- rc = -ENOSPC;
- goto err;
- }
-
- rc = request_threaded_irq(msix->vector, handler, thread_fn,
- 0, name, data);
- if (rc) {
- dev_dbg(&mdev->pdev->dev,
- "request irq failed rc = %d\n", rc);
- goto err;
- }
- entry = msix->entry;
- mdev->irq_info.mic_msi_map[entry] |= BIT(offset);
- mdev->intr_ops->program_msi_to_src_map(mdev,
- entry, offset, true);
- cookie = MK_COOKIE(entry, offset);
- dev_dbg(&mdev->pdev->dev, "irq: %d assigned for src: %d\n",
- msix->vector, intr_src);
- } else {
- intr_cb = mic_register_intr_callback(mdev, offset, handler,
- thread_fn, data);
- if (IS_ERR(intr_cb)) {
- dev_err(&mdev->pdev->dev,
- "No available callback entries for use\n");
- rc = PTR_ERR(intr_cb);
- goto err;
- }
-
- entry = 0;
- if (pci_dev_msi_enabled(pdev)) {
- mdev->irq_info.mic_msi_map[entry] |= (1 << offset);
- mdev->intr_ops->program_msi_to_src_map(mdev,
- entry, offset, true);
- }
- cookie = MK_COOKIE(entry, intr_cb->cb_id);
- dev_dbg(&mdev->pdev->dev, "callback %d registered for src: %d\n",
- intr_cb->cb_id, intr_src);
- }
- return (struct mic_irq *)cookie;
-err:
- return ERR_PTR(rc);
-}
-
-/**
- * mic_free_irq - free irq. mic_mutex
- * needs to be held before calling this function.
- *
- * @mdev: pointer to mic_device instance
- * @cookie: cookie obtained during a successful call to mic_request_threaded_irq
- * @data: private data specified by the calling function during the
- * mic_request_threaded_irq
- *
- * returns: none.
- */
-void mic_free_irq(struct mic_device *mdev,
- struct mic_irq *cookie, void *data)
-{
- u32 offset;
- u32 entry;
- u8 src_id;
- unsigned int irq;
- struct pci_dev *pdev = mdev->pdev;
-
- entry = GET_ENTRY((unsigned long)cookie);
- offset = GET_OFFSET((unsigned long)cookie);
- if (mdev->irq_info.num_vectors > 1) {
- if (entry >= mdev->irq_info.num_vectors) {
- dev_warn(&mdev->pdev->dev,
- "entry %d should be < num_irq %d\n",
- entry, mdev->irq_info.num_vectors);
- return;
- }
- irq = mdev->irq_info.msix_entries[entry].vector;
- free_irq(irq, data);
- mdev->irq_info.mic_msi_map[entry] &= ~(BIT(offset));
- mdev->intr_ops->program_msi_to_src_map(mdev,
- entry, offset, false);
-
- dev_dbg(&mdev->pdev->dev, "irq: %d freed\n", irq);
- } else {
- irq = pdev->irq;
- src_id = mic_unregister_intr_callback(mdev, offset);
- if (src_id >= MIC_NUM_OFFSETS) {
- dev_warn(&mdev->pdev->dev, "Error unregistering callback\n");
- return;
- }
- if (pci_dev_msi_enabled(pdev)) {
- mdev->irq_info.mic_msi_map[entry] &= ~(BIT(src_id));
- mdev->intr_ops->program_msi_to_src_map(mdev,
- entry, src_id, false);
- }
- dev_dbg(&mdev->pdev->dev, "callback %d unregistered for src: %d\n",
- offset, src_id);
- }
-}
-
-/**
- * mic_setup_interrupts - Initializes interrupts.
- *
- * @mdev: pointer to mic_device instance
- * @pdev: PCI device structure
- *
- * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
- */
-int mic_setup_interrupts(struct mic_device *mdev, struct pci_dev *pdev)
-{
- int rc;
-
- rc = mic_setup_msix(mdev, pdev);
- if (!rc)
- goto done;
-
- rc = mic_setup_msi(mdev, pdev);
- if (!rc)
- goto done;
-
- rc = mic_setup_intx(mdev, pdev);
- if (rc) {
- dev_err(&mdev->pdev->dev, "no usable interrupts\n");
- return rc;
- }
-done:
- mdev->intr_ops->enable_interrupts(mdev);
- return 0;
-}
-
-/**
- * mic_free_interrupts - Frees interrupts setup by mic_setup_interrupts
- *
- * @mdev: pointer to mic_device instance
- * @pdev: PCI device structure
- *
- * returns none.
- */
-void mic_free_interrupts(struct mic_device *mdev, struct pci_dev *pdev)
-{
- int i;
-
- mdev->intr_ops->disable_interrupts(mdev);
- if (mdev->irq_info.num_vectors > 1) {
- for (i = 0; i < mdev->irq_info.num_vectors; i++) {
- if (mdev->irq_info.mic_msi_map[i])
- dev_warn(&pdev->dev, "irq %d may still be in use.\n",
- mdev->irq_info.msix_entries[i].vector);
- }
- kfree(mdev->irq_info.mic_msi_map);
- kfree(mdev->irq_info.msix_entries);
- pci_disable_msix(pdev);
- } else {
- if (pci_dev_msi_enabled(pdev)) {
- free_irq(pdev->irq, mdev);
- kfree(mdev->irq_info.mic_msi_map);
- pci_disable_msi(pdev);
- } else {
- free_irq(pdev->irq, mdev);
- }
- mic_release_callbacks(mdev);
- }
-}
-
-/**
- * mic_intr_restore - Restore MIC interrupt registers.
- *
- * @mdev: pointer to mic_device instance.
- *
- * Restore the interrupt registers to values previously
- * stored in the SW data structures. mic_mutex needs to
- * be held before calling this function.
- *
- * returns None.
- */
-void mic_intr_restore(struct mic_device *mdev)
-{
- int entry, offset;
- struct pci_dev *pdev = mdev->pdev;
-
- if (!pci_dev_msi_enabled(pdev))
- return;
-
- for (entry = 0; entry < mdev->irq_info.num_vectors; entry++) {
- for (offset = 0; offset < MIC_NUM_OFFSETS; offset++) {
- if (mdev->irq_info.mic_msi_map[entry] & BIT(offset))
- mdev->intr_ops->program_msi_to_src_map(mdev,
- entry, offset, true);
- }
- }
-}
diff --git a/drivers/misc/mic/host/mic_intr.h b/drivers/misc/mic/host/mic_intr.h
deleted file mode 100644
index b14ba818006f..000000000000
--- a/drivers/misc/mic/host/mic_intr.h
+++ /dev/null
@@ -1,137 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * Intel MIC Host driver.
- */
-#ifndef _MIC_INTR_H_
-#define _MIC_INTR_H_
-
-#include <linux/bitops.h>
-#include <linux/interrupt.h>
-/*
- * The minimum number of msix vectors required for normal operation.
- * 3 for virtio network, console and block devices.
- * 1 for card shutdown notifications.
- * 4 for host owned DMA channels.
- * 1 for SCIF
- */
-#define MIC_MIN_MSIX 9
-#define MIC_NUM_OFFSETS 32
-
-/**
- * enum mic_intr_type - The type of source that will generate
- * the interrupt. The number of types needs to be in sync with
- * MIC_NUM_INTR_TYPES.
- *
- * MIC_INTR_DB: The source is a doorbell
- * MIC_INTR_DMA: The source is a DMA channel
- * MIC_INTR_ERR: The source is an error interrupt e.g. SBOX ERR
- * MIC_NUM_INTR_TYPES: Total number of interrupt sources.
- */
-enum mic_intr_type {
- MIC_INTR_DB = 0,
- MIC_INTR_DMA,
- MIC_INTR_ERR,
- MIC_NUM_INTR_TYPES
-};
-
-/**
- * struct mic_intr_info - Contains h/w specific interrupt sources
- * information.
- *
- * @intr_start_idx: Contains the starting indexes of the
- * interrupt types.
- * @intr_len: Contains the length of the interrupt types.
- */
-struct mic_intr_info {
- u16 intr_start_idx[MIC_NUM_INTR_TYPES];
- u16 intr_len[MIC_NUM_INTR_TYPES];
-};
-
-/**
- * struct mic_irq_info - OS specific irq information
- *
- * @next_avail_src: next available doorbell that can be assigned.
- * @msix_entries: msix entries allocated while setting up MSI-x
- * @mic_msi_map: The MSI/MSI-x mapping information.
- * @num_vectors: The number of MSI/MSI-x vectors that have been allocated.
- * @cb_ida: callback ID allocator to track the callbacks registered.
- * @mic_intr_lock: spinlock to protect the interrupt callback list.
- * @mic_thread_lock: spinlock to protect the thread callback list.
- * This lock is used to protect against thread_fn while
- * mic_intr_lock is used to protect against interrupt handler.
- * @cb_list: Array of callback lists one for each source.
- * @mask: Mask used by the main thread fn to call the underlying thread fns.
- */
-struct mic_irq_info {
- int next_avail_src;
- struct msix_entry *msix_entries;
- u32 *mic_msi_map;
- u16 num_vectors;
- struct ida cb_ida;
- spinlock_t mic_intr_lock;
- spinlock_t mic_thread_lock;
- struct list_head *cb_list;
- unsigned long mask;
-};
-
-/**
- * struct mic_intr_cb - Interrupt callback structure.
- *
- * @handler: The callback function
- * @thread_fn: The thread_fn.
- * @data: Private data of the requester.
- * @cb_id: The callback id. Identifies this callback.
- * @list: list head pointing to the next callback structure.
- */
-struct mic_intr_cb {
- irq_handler_t handler;
- irq_handler_t thread_fn;
- void *data;
- int cb_id;
- struct list_head list;
-};
-
-/**
- * struct mic_irq - opaque pointer used as cookie
- */
-struct mic_irq;
-
-/* Forward declaration */
-struct mic_device;
-
-/**
- * struct mic_hw_intr_ops: MIC HW specific interrupt operations
- * @intr_init: Initialize H/W specific interrupt information.
- * @enable_interrupts: Enable interrupts from the hardware.
- * @disable_interrupts: Disable interrupts from the hardware.
- * @program_msi_to_src_map: Update MSI mapping registers with
- * irq information.
- * @read_msi_to_src_map: Read MSI mapping registers containing
- * irq information.
- */
-struct mic_hw_intr_ops {
- void (*intr_init)(struct mic_device *mdev);
- void (*enable_interrupts)(struct mic_device *mdev);
- void (*disable_interrupts)(struct mic_device *mdev);
- void (*program_msi_to_src_map) (struct mic_device *mdev,
- int idx, int intr_src, bool set);
- u32 (*read_msi_to_src_map) (struct mic_device *mdev,
- int idx);
-};
-
-int mic_next_db(struct mic_device *mdev);
-struct mic_irq *
-mic_request_threaded_irq(struct mic_device *mdev,
- irq_handler_t handler, irq_handler_t thread_fn,
- const char *name, void *data, int intr_src,
- enum mic_intr_type type);
-void mic_free_irq(struct mic_device *mdev,
- struct mic_irq *cookie, void *data);
-int mic_setup_interrupts(struct mic_device *mdev, struct pci_dev *pdev);
-void mic_free_interrupts(struct mic_device *mdev, struct pci_dev *pdev);
-void mic_intr_restore(struct mic_device *mdev);
-#endif
diff --git a/drivers/misc/mic/host/mic_main.c b/drivers/misc/mic/host/mic_main.c
deleted file mode 100644
index ea4608527ea0..000000000000
--- a/drivers/misc/mic/host/mic_main.c
+++ /dev/null
@@ -1,335 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * Intel MIC Host driver.
- */
-#include <linux/fs.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/poll.h>
-
-#include <linux/mic_common.h>
-#include "../common/mic_dev.h"
-#include "mic_device.h"
-#include "mic_x100.h"
-#include "mic_smpt.h"
-
-static const char mic_driver_name[] = "mic";
-
-static const struct pci_device_id mic_pci_tbl[] = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2250)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2251)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2252)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2253)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2254)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2255)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2256)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2257)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2258)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2259)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_225a)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_225b)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_225c)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_225d)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_225e)},
-
- /* required last entry */
- { 0, }
-};
-
-MODULE_DEVICE_TABLE(pci, mic_pci_tbl);
-
-/* ID allocator for MIC devices */
-static struct ida g_mic_ida;
-
-/* Initialize the device page */
-static int mic_dp_init(struct mic_device *mdev)
-{
- mdev->dp = kzalloc(MIC_DP_SIZE, GFP_KERNEL);
- if (!mdev->dp)
- return -ENOMEM;
-
- mdev->dp_dma_addr = mic_map_single(mdev,
- mdev->dp, MIC_DP_SIZE);
- if (mic_map_error(mdev->dp_dma_addr)) {
- kfree(mdev->dp);
- dev_err(&mdev->pdev->dev, "%s %d err %d\n",
- __func__, __LINE__, -ENOMEM);
- return -ENOMEM;
- }
- mdev->ops->write_spad(mdev, MIC_DPLO_SPAD, mdev->dp_dma_addr);
- mdev->ops->write_spad(mdev, MIC_DPHI_SPAD, mdev->dp_dma_addr >> 32);
- return 0;
-}
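The 64-bit device page DMA address is published to the card as two 32-bit scratchpad writes (low word in MIC_DPLO_SPAD, high word in MIC_DPHI_SPAD). A sketch of the card-side recombination, assuming a hypothetical mic_card_read_spad() accessor:

/* Card side (sketch): rebuild the 64-bit device page address. */
static u64 example_read_dp_addr(void)
{
	u64 lo = mic_card_read_spad(MIC_DPLO_SPAD);
	u64 hi = mic_card_read_spad(MIC_DPHI_SPAD);

	return lo | (hi << 32);
}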
-
-/* Uninitialize the device page */
-static void mic_dp_uninit(struct mic_device *mdev)
-{
- mic_unmap_single(mdev, mdev->dp_dma_addr, MIC_DP_SIZE);
- kfree(mdev->dp);
-}
-
-/**
- * mic_ops_init: Initialize HW specific operation tables.
- *
- * @mdev: pointer to mic_device instance
- *
- * returns none.
- */
-static void mic_ops_init(struct mic_device *mdev)
-{
- switch (mdev->family) {
- case MIC_FAMILY_X100:
- mdev->ops = &mic_x100_ops;
- mdev->intr_ops = &mic_x100_intr_ops;
- mdev->smpt_ops = &mic_x100_smpt_ops;
- break;
- default:
- break;
- }
-}
-
-/**
- * mic_get_family - Determine hardware family to which this MIC belongs.
- *
- * @pdev: The pci device structure
- *
- * returns family.
- */
-static enum mic_hw_family mic_get_family(struct pci_dev *pdev)
-{
- enum mic_hw_family family;
-
- switch (pdev->device) {
- case MIC_X100_PCI_DEVICE_2250:
- case MIC_X100_PCI_DEVICE_2251:
- case MIC_X100_PCI_DEVICE_2252:
- case MIC_X100_PCI_DEVICE_2253:
- case MIC_X100_PCI_DEVICE_2254:
- case MIC_X100_PCI_DEVICE_2255:
- case MIC_X100_PCI_DEVICE_2256:
- case MIC_X100_PCI_DEVICE_2257:
- case MIC_X100_PCI_DEVICE_2258:
- case MIC_X100_PCI_DEVICE_2259:
- case MIC_X100_PCI_DEVICE_225a:
- case MIC_X100_PCI_DEVICE_225b:
- case MIC_X100_PCI_DEVICE_225c:
- case MIC_X100_PCI_DEVICE_225d:
- case MIC_X100_PCI_DEVICE_225e:
- family = MIC_FAMILY_X100;
- break;
- default:
- family = MIC_FAMILY_UNKNOWN;
- break;
- }
- return family;
-}
-
-/**
- * mic_device_init - Initializes the MIC device structure
- *
- * @mdev: pointer to mic_device instance
- * @pdev: The pci device structure
- *
- * returns none.
- */
-static void
-mic_device_init(struct mic_device *mdev, struct pci_dev *pdev)
-{
- mdev->pdev = pdev;
- mdev->family = mic_get_family(pdev);
- mdev->stepping = pdev->revision;
- mic_ops_init(mdev);
- mutex_init(&mdev->mic_mutex);
- mdev->irq_info.next_avail_src = 0;
-}
-
-/**
- * mic_probe - Device Initialization Routine
- *
- * @pdev: PCI device structure
- * @ent: entry in mic_pci_tbl
- *
- * returns 0 on success, < 0 on failure.
- */
-static int mic_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- int rc;
- struct mic_device *mdev;
-
- mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
- if (!mdev) {
- rc = -ENOMEM;
- goto mdev_alloc_fail;
- }
- mdev->id = ida_simple_get(&g_mic_ida, 0, MIC_MAX_NUM_DEVS, GFP_KERNEL);
- if (mdev->id < 0) {
- rc = mdev->id;
- dev_err(&pdev->dev, "ida_simple_get failed rc %d\n", rc);
- goto ida_fail;
- }
-
- mic_device_init(mdev, pdev);
-
- rc = pci_enable_device(pdev);
- if (rc) {
- dev_err(&pdev->dev, "failed to enable pci device.\n");
- goto ida_remove;
- }
-
- pci_set_master(pdev);
-
- rc = pci_request_regions(pdev, mic_driver_name);
- if (rc) {
- dev_err(&pdev->dev, "failed to get pci regions.\n");
- goto disable_device;
- }
-
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
- if (rc) {
- dev_err(&pdev->dev, "Cannot set DMA mask\n");
- goto release_regions;
- }
-
- mdev->mmio.pa = pci_resource_start(pdev, mdev->ops->mmio_bar);
- mdev->mmio.len = pci_resource_len(pdev, mdev->ops->mmio_bar);
- mdev->mmio.va = pci_ioremap_bar(pdev, mdev->ops->mmio_bar);
- if (!mdev->mmio.va) {
- dev_err(&pdev->dev, "Cannot remap MMIO BAR\n");
- rc = -EIO;
- goto release_regions;
- }
-
- mdev->aper.pa = pci_resource_start(pdev, mdev->ops->aper_bar);
- mdev->aper.len = pci_resource_len(pdev, mdev->ops->aper_bar);
- mdev->aper.va = ioremap_wc(mdev->aper.pa, mdev->aper.len);
- if (!mdev->aper.va) {
- dev_err(&pdev->dev, "Cannot remap Aperture BAR\n");
- rc = -EIO;
- goto unmap_mmio;
- }
-
- mdev->intr_ops->intr_init(mdev);
- rc = mic_setup_interrupts(mdev, pdev);
- if (rc) {
- dev_err(&pdev->dev, "mic_setup_interrupts failed %d\n", rc);
- goto unmap_aper;
- }
- rc = mic_smpt_init(mdev);
- if (rc) {
- dev_err(&pdev->dev, "smpt_init failed %d\n", rc);
- goto free_interrupts;
- }
-
- pci_set_drvdata(pdev, mdev);
-
- rc = mic_dp_init(mdev);
- if (rc) {
- dev_err(&pdev->dev, "mic_dp_init failed rc %d\n", rc);
- goto smpt_uninit;
- }
- mic_bootparam_init(mdev);
- mic_create_debug_dir(mdev);
-
- mdev->cosm_dev = cosm_register_device(&mdev->pdev->dev, &cosm_hw_ops);
- if (IS_ERR(mdev->cosm_dev)) {
- rc = PTR_ERR(mdev->cosm_dev);
- dev_err(&pdev->dev, "cosm_add_device failed rc %d\n", rc);
- goto cleanup_debug_dir;
- }
- return 0;
-cleanup_debug_dir:
- mic_delete_debug_dir(mdev);
- mic_dp_uninit(mdev);
-smpt_uninit:
- mic_smpt_uninit(mdev);
-free_interrupts:
- mic_free_interrupts(mdev, pdev);
-unmap_aper:
- iounmap(mdev->aper.va);
-unmap_mmio:
- iounmap(mdev->mmio.va);
-release_regions:
- pci_release_regions(pdev);
-disable_device:
- pci_disable_device(pdev);
-ida_remove:
- ida_simple_remove(&g_mic_ida, mdev->id);
-ida_fail:
- kfree(mdev);
-mdev_alloc_fail:
- dev_err(&pdev->dev, "Probe failed rc %d\n", rc);
- return rc;
-}
-
-/**
- * mic_remove - Device Removal Routine
- * mic_remove is called by the PCI subsystem to alert the driver
- * that it should release a PCI device.
- *
- * @pdev: PCI device structure
- */
-static void mic_remove(struct pci_dev *pdev)
-{
- struct mic_device *mdev;
-
- mdev = pci_get_drvdata(pdev);
- if (!mdev)
- return;
-
- cosm_unregister_device(mdev->cosm_dev);
- mic_delete_debug_dir(mdev);
- mic_dp_uninit(mdev);
- mic_smpt_uninit(mdev);
- mic_free_interrupts(mdev, pdev);
- iounmap(mdev->aper.va);
- iounmap(mdev->mmio.va);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
- ida_simple_remove(&g_mic_ida, mdev->id);
- kfree(mdev);
-}
-
-static struct pci_driver mic_driver = {
- .name = mic_driver_name,
- .id_table = mic_pci_tbl,
- .probe = mic_probe,
- .remove = mic_remove
-};
-
-static int __init mic_init(void)
-{
- int ret;
-
- request_module("mic_x100_dma");
- mic_init_debugfs();
- ida_init(&g_mic_ida);
- ret = pci_register_driver(&mic_driver);
- if (ret) {
- pr_err("pci_register_driver failed ret %d\n", ret);
- goto cleanup_debugfs;
- }
- return 0;
-cleanup_debugfs:
- ida_destroy(&g_mic_ida);
- mic_exit_debugfs();
- return ret;
-}
-
-static void __exit mic_exit(void)
-{
- pci_unregister_driver(&mic_driver);
- ida_destroy(&g_mic_ida);
- mic_exit_debugfs();
-}
-
-module_init(mic_init);
-module_exit(mic_exit);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_DESCRIPTION("Intel(R) MIC X100 Host driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/mic/host/mic_smpt.c b/drivers/misc/mic/host/mic_smpt.c
deleted file mode 100644
index 50d1bebecd54..000000000000
--- a/drivers/misc/mic/host/mic_smpt.c
+++ /dev/null
@@ -1,427 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * Intel MIC Host driver.
- */
-#include <linux/pci.h>
-
-#include "../common/mic_dev.h"
-#include "mic_device.h"
-#include "mic_smpt.h"
-
-static inline u64 mic_system_page_mask(struct mic_device *mdev)
-{
- return (1ULL << mdev->smpt->info.page_shift) - 1ULL;
-}
-
-static inline u8 mic_sys_addr_to_smpt(struct mic_device *mdev, dma_addr_t pa)
-{
- return (pa - mdev->smpt->info.base) >> mdev->smpt->info.page_shift;
-}
-
-static inline u64 mic_smpt_to_pa(struct mic_device *mdev, u8 index)
-{
- return mdev->smpt->info.base + (index * mdev->smpt->info.page_size);
-}
-
-static inline u64 mic_smpt_offset(struct mic_device *mdev, dma_addr_t pa)
-{
- return pa & mic_system_page_mask(mdev);
-}
-
-static inline u64 mic_smpt_align_low(struct mic_device *mdev, dma_addr_t pa)
-{
- return ALIGN(pa - mic_system_page_mask(mdev),
- mdev->smpt->info.page_size);
-}
-
-static inline u64 mic_smpt_align_high(struct mic_device *mdev, dma_addr_t pa)
-{
- return ALIGN(pa, mdev->smpt->info.page_size);
-}
-
-/* Total Cumulative system memory accessible by MIC across all SMPT entries */
-static inline u64 mic_max_system_memory(struct mic_device *mdev)
-{
- return mdev->smpt->info.num_reg * mdev->smpt->info.page_size;
-}
-
-/* Maximum system memory address accessible by MIC */
-static inline u64 mic_max_system_addr(struct mic_device *mdev)
-{
- return mdev->smpt->info.base + mic_max_system_memory(mdev) - 1ULL;
-}
-
-/* Check if the DMA address is a MIC system memory address */
-static inline bool
-mic_is_system_addr(struct mic_device *mdev, dma_addr_t pa)
-{
- return pa >= mdev->smpt->info.base && pa <= mic_max_system_addr(mdev);
-}
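A worked example using the X100 values programmed by mic_x100_smpt_hw_init() later in this patch (num_reg = 32, page_shift = 34, base = 0x8000000000):

/*
 * page_size             = 1ULL << 34                 = 16 GiB
 * mic_max_system_memory = 32 * 16 GiB                = 512 GiB
 * mic_max_system_addr   = 0x8000000000 + 512 GiB - 1 = 0xffffffffff
 *
 * So MIC addresses 0x8000000000..0xffffffffff reference host system
 * memory through the SMPT window; lower addresses are card-local.
 */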
-
-/* Populate an SMPT entry and update the reference counts. */
-static void mic_add_smpt_entry(int spt, s64 *ref, u64 addr,
- int entries, struct mic_device *mdev)
-{
- struct mic_smpt_info *smpt_info = mdev->smpt;
- int i;
-
- for (i = spt; i < spt + entries; i++,
- addr += smpt_info->info.page_size) {
- if (!smpt_info->entry[i].ref_count &&
- (smpt_info->entry[i].dma_addr != addr)) {
- mdev->smpt_ops->set(mdev, addr, i);
- smpt_info->entry[i].dma_addr = addr;
- }
- smpt_info->entry[i].ref_count += ref[i - spt];
- }
-}
-
-/*
- * Find an available MIC address in MIC SMPT address space
- * for a given DMA address and size.
- */
-static dma_addr_t mic_smpt_op(struct mic_device *mdev, u64 dma_addr,
- int entries, s64 *ref, size_t size)
-{
- int spt;
- int ae = 0;
- int i;
- unsigned long flags;
- dma_addr_t mic_addr = 0;
- dma_addr_t addr = dma_addr;
- struct mic_smpt_info *smpt_info = mdev->smpt;
-
- spin_lock_irqsave(&smpt_info->smpt_lock, flags);
-
- /* find existing entries */
- for (i = 0; i < smpt_info->info.num_reg; i++) {
- if (smpt_info->entry[i].dma_addr == addr) {
- ae++;
- addr += smpt_info->info.page_size;
- } else if (ae) /* cannot find contiguous entries */
- goto not_found;
-
- if (ae == entries)
- goto found;
- }
-
- /* find free entry */
- for (ae = 0, i = 0; i < smpt_info->info.num_reg; i++) {
- ae = (smpt_info->entry[i].ref_count == 0) ? ae + 1 : 0;
- if (ae == entries)
- goto found;
- }
-
-not_found:
- spin_unlock_irqrestore(&smpt_info->smpt_lock, flags);
- return mic_addr;
-
-found:
- spt = i - entries + 1;
- mic_addr = mic_smpt_to_pa(mdev, spt);
- mic_add_smpt_entry(spt, ref, dma_addr, entries, mdev);
- smpt_info->map_count++;
- smpt_info->ref_count += (s64)size;
- spin_unlock_irqrestore(&smpt_info->smpt_lock, flags);
- return mic_addr;
-}
-
-/*
- * Returns the number of smpt entries needed to map dma_addr through
- * dma_addr + size. Also fills in the reference count array for each of
- * those entries and, if requested, the starting smpt address.
- */
-static int mic_get_smpt_ref_count(struct mic_device *mdev, dma_addr_t dma_addr,
- size_t size, s64 *ref, u64 *smpt_start)
-{
- u64 start = dma_addr;
- u64 end = dma_addr + size;
- int i = 0;
-
- while (start < end) {
- ref[i++] = min(mic_smpt_align_high(mdev, start + 1),
- end) - start;
- start = mic_smpt_align_high(mdev, start + 1);
- }
-
- if (smpt_start)
- *smpt_start = mic_smpt_align_low(mdev, dma_addr);
-
- return i;
-}
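Worked example (page_size = 16 GiB): a 2 GiB mapping starting at dma_addr = 15 GiB crosses one page boundary, so:

/*
 * ref[0] = 16 GiB - 15 GiB = 1 GiB   bytes covered by the first page
 * ref[1] = 17 GiB - 16 GiB = 1 GiB   bytes covered by the second page
 *
 * The function returns 2, and *smpt_start is dma_addr rounded down
 * to the 16 GiB boundary (0 in this example).
 */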
-
-/*
- * mic_to_dma_addr - Converts a MIC address to a DMA address.
- *
- * @mdev: pointer to mic_device instance.
- * @mic_addr: MIC address.
- *
- * returns a DMA address.
- */
-dma_addr_t mic_to_dma_addr(struct mic_device *mdev, dma_addr_t mic_addr)
-{
- struct mic_smpt_info *smpt_info = mdev->smpt;
- int spt;
- dma_addr_t dma_addr;
-
- if (!mic_is_system_addr(mdev, mic_addr)) {
- dev_err(&mdev->pdev->dev,
- "mic_addr is invalid. mic_addr = 0x%llx\n", mic_addr);
- return -EINVAL;
- }
- spt = mic_sys_addr_to_smpt(mdev, mic_addr);
- dma_addr = smpt_info->entry[spt].dma_addr +
- mic_smpt_offset(mdev, mic_addr);
- return dma_addr;
-}
-
-/**
- * mic_map - Maps a DMA address to a MIC physical address.
- *
- * @mdev: pointer to mic_device instance.
- * @dma_addr: DMA address.
- * @size: Size of the region to be mapped.
- *
- * This API converts the DMA address provided to a DMA address understood
- * by MIC. Caller should check for errors by calling mic_map_error(..).
- *
- * returns DMA address as required by MIC.
- */
-dma_addr_t mic_map(struct mic_device *mdev, dma_addr_t dma_addr, size_t size)
-{
- dma_addr_t mic_addr = 0;
- int num_entries;
- s64 *ref;
- u64 smpt_start;
-
- if (!size || size > mic_max_system_memory(mdev))
- return mic_addr;
-
- ref = kmalloc_array(mdev->smpt->info.num_reg, sizeof(s64), GFP_ATOMIC);
- if (!ref)
- return mic_addr;
-
- num_entries = mic_get_smpt_ref_count(mdev, dma_addr, size,
- ref, &smpt_start);
-
- /* Set the smpt table appropriately and get 16G aligned mic address */
- mic_addr = mic_smpt_op(mdev, smpt_start, num_entries, ref, size);
-
- kfree(ref);
-
- /*
- * If mic_addr is zero then it's an error, since a valid
- * mic_addr can never be zero.
- * Otherwise generate the final mic_addr by adding the offset
- * of dma_addr within its 16G page.
- */
- if (!mic_addr && MIC_FAMILY_X100 == mdev->family) {
- dev_err(&mdev->pdev->dev,
- "mic_map failed dma_addr 0x%llx size 0x%lx\n",
- dma_addr, size);
- return mic_addr;
- } else {
- return mic_addr + mic_smpt_offset(mdev, dma_addr);
- }
-}
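An illustrative (hypothetical) caller; dma_addr and size are assumed to come from a prior streaming DMA mapping, and mic_map_error() is declared in mic_smpt.h below:

static int example_map_for_card(struct mic_device *mdev,
				dma_addr_t dma_addr, size_t size)
{
	dma_addr_t mic_addr = mic_map(mdev, dma_addr, size);

	if (mic_map_error(mic_addr))	/* i.e. mic_addr == 0 */
		return -ENOMEM;
	/* ... hand mic_addr to the card, then later ... */
	mic_unmap(mdev, mic_addr, size);
	return 0;
}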
-
-/**
- * mic_unmap - Unmaps a MIC physical address.
- *
- * @mdev: pointer to mic_device instance.
- * @mic_addr: MIC physical address.
- * @size: Size of the region to be unmapped.
- *
- * This API unmaps the mappings created by mic_map(..).
- *
- * returns None.
- */
-void mic_unmap(struct mic_device *mdev, dma_addr_t mic_addr, size_t size)
-{
- struct mic_smpt_info *smpt_info = mdev->smpt;
- s64 *ref;
- int num_smpt;
- int spt;
- int i;
- unsigned long flags;
-
- if (!size)
- return;
-
- if (!mic_is_system_addr(mdev, mic_addr)) {
- dev_err(&mdev->pdev->dev,
- "invalid address: 0x%llx\n", mic_addr);
- return;
- }
-
- spt = mic_sys_addr_to_smpt(mdev, mic_addr);
- ref = kmalloc_array(mdev->smpt->info.num_reg, sizeof(s64), GFP_ATOMIC);
- if (!ref)
- return;
-
- /* Get number of smpt entries to be mapped, ref count array */
- num_smpt = mic_get_smpt_ref_count(mdev, mic_addr, size, ref, NULL);
-
- spin_lock_irqsave(&smpt_info->smpt_lock, flags);
- smpt_info->unmap_count++;
- smpt_info->ref_count -= (s64)size;
-
- for (i = spt; i < spt + num_smpt; i++) {
- smpt_info->entry[i].ref_count -= ref[i - spt];
- if (smpt_info->entry[i].ref_count < 0)
- dev_warn(&mdev->pdev->dev,
- "ref count for entry %d is negative\n", i);
- }
- spin_unlock_irqrestore(&smpt_info->smpt_lock, flags);
- kfree(ref);
-}
-
-/**
- * mic_map_single - Maps a virtual address to a MIC physical address.
- *
- * @mdev: pointer to mic_device instance.
- * @va: Kernel direct mapped virtual address.
- * @size: Size of the region to be mapped.
- *
- * This API calls pci_map_single(..) for the direct mapped virtual address
- * and then converts the DMA address provided to a DMA address understood
- * by MIC. Caller should check for errors by calling mic_map_error(..).
- *
- * returns DMA address as required by MIC.
- */
-dma_addr_t mic_map_single(struct mic_device *mdev, void *va, size_t size)
-{
- dma_addr_t mic_addr = 0;
- struct pci_dev *pdev = mdev->pdev;
- dma_addr_t dma_addr =
- pci_map_single(pdev, va, size, PCI_DMA_BIDIRECTIONAL);
-
- if (!pci_dma_mapping_error(pdev, dma_addr)) {
- mic_addr = mic_map(mdev, dma_addr, size);
- if (!mic_addr) {
- dev_err(&mdev->pdev->dev,
- "mic_map failed dma_addr 0x%llx size 0x%lx\n",
- dma_addr, size);
- pci_unmap_single(pdev, dma_addr,
- size, PCI_DMA_BIDIRECTIONAL);
- }
- }
- return mic_addr;
-}
-
-/**
- * mic_unmap_single - Unmaps a MIC physical address.
- *
- * @mdev: pointer to mic_device instance.
- * @mic_addr: MIC physical address.
- * @size: Size of the region to be unmapped.
- *
- * This API unmaps the mappings created by mic_map_single(..).
- *
- * returns None.
- */
-void
-mic_unmap_single(struct mic_device *mdev, dma_addr_t mic_addr, size_t size)
-{
- struct pci_dev *pdev = mdev->pdev;
- dma_addr_t dma_addr = mic_to_dma_addr(mdev, mic_addr);
- mic_unmap(mdev, mic_addr, size);
- pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
-}
-
-/**
- * mic_smpt_init - Initialize MIC System Memory Page Tables.
- *
- * @mdev: pointer to mic_device instance.
- *
- * returns 0 for success and -errno for error.
- */
-int mic_smpt_init(struct mic_device *mdev)
-{
- int i, err = 0;
- dma_addr_t dma_addr;
- struct mic_smpt_info *smpt_info;
-
- mdev->smpt = kmalloc(sizeof(*mdev->smpt), GFP_KERNEL);
- if (!mdev->smpt)
- return -ENOMEM;
-
- smpt_info = mdev->smpt;
- mdev->smpt_ops->init(mdev);
- smpt_info->entry = kmalloc_array(smpt_info->info.num_reg,
- sizeof(*smpt_info->entry), GFP_KERNEL);
- if (!smpt_info->entry) {
- err = -ENOMEM;
- goto free_smpt;
- }
- spin_lock_init(&smpt_info->smpt_lock);
- for (i = 0; i < smpt_info->info.num_reg; i++) {
- dma_addr = i * smpt_info->info.page_size;
- smpt_info->entry[i].dma_addr = dma_addr;
- smpt_info->entry[i].ref_count = 0;
- mdev->smpt_ops->set(mdev, dma_addr, i);
- }
- smpt_info->ref_count = 0;
- smpt_info->map_count = 0;
- smpt_info->unmap_count = 0;
- return 0;
-free_smpt:
- kfree(smpt_info);
- return err;
-}
-
-/**
- * mic_smpt_uninit - Uninitialize MIC System Memory Page Tables.
- *
- * @mdev: pointer to mic_device instance.
- *
- * returns None.
- */
-void mic_smpt_uninit(struct mic_device *mdev)
-{
- struct mic_smpt_info *smpt_info = mdev->smpt;
- int i;
-
- dev_dbg(&mdev->pdev->dev,
- "nodeid %d SMPT ref count %lld map %lld unmap %lld\n",
- mdev->id, smpt_info->ref_count,
- smpt_info->map_count, smpt_info->unmap_count);
-
- for (i = 0; i < smpt_info->info.num_reg; i++) {
- dev_dbg(&mdev->pdev->dev,
- "SMPT entry[%d] dma_addr = 0x%llx ref_count = %lld\n",
- i, smpt_info->entry[i].dma_addr,
- smpt_info->entry[i].ref_count);
- if (smpt_info->entry[i].ref_count)
- dev_warn(&mdev->pdev->dev,
- "ref count for entry %d is not zero\n", i);
- }
- kfree(smpt_info->entry);
- kfree(smpt_info);
-}
-
-/**
- * mic_smpt_restore - Restore MIC System Memory Page Tables.
- *
- * @mdev: pointer to mic_device instance.
- *
- * Restore the SMPT registers to values previously stored in the
- * SW data structures. Some MIC steppings lose register state
- * across resets and this API should be called for performing
- * a restore operation if required.
- *
- * returns None.
- */
-void mic_smpt_restore(struct mic_device *mdev)
-{
- int i;
- dma_addr_t dma_addr;
-
- for (i = 0; i < mdev->smpt->info.num_reg; i++) {
- dma_addr = mdev->smpt->entry[i].dma_addr;
- mdev->smpt_ops->set(mdev, dma_addr, i);
- }
-}
diff --git a/drivers/misc/mic/host/mic_smpt.h b/drivers/misc/mic/host/mic_smpt.h
deleted file mode 100644
index 3b1ec14a9d81..000000000000
--- a/drivers/misc/mic/host/mic_smpt.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * Intel MIC Host driver.
- */
-#ifndef MIC_SMPT_H
-#define MIC_SMPT_H
-/**
- * struct mic_smpt_ops - MIC HW specific SMPT operations.
- * @init: Initialize hardware specific SMPT information in mic_smpt_hw_info.
- * @set: Set the value for a particular SMPT entry.
- */
-struct mic_smpt_ops {
- void (*init)(struct mic_device *mdev);
- void (*set)(struct mic_device *mdev, dma_addr_t dma_addr, u8 index);
-};
-
-/**
- * struct mic_smpt - MIC SMPT entry information.
- * @dma_addr: Base DMA address for this SMPT entry.
- * @ref_count: Number of active mappings for this SMPT entry in bytes.
- */
-struct mic_smpt {
- dma_addr_t dma_addr;
- s64 ref_count;
-};
-
-/**
- * struct mic_smpt_hw_info - MIC SMPT hardware specific information.
- * @num_reg: Number of SMPT registers.
- * @page_shift: System memory page shift.
- * @page_size: System memory page size.
- * @base: System address base.
- */
-struct mic_smpt_hw_info {
- u8 num_reg;
- u8 page_shift;
- u64 page_size;
- u64 base;
-};
-
-/**
- * struct mic_smpt_info - MIC SMPT information.
- * @entry: Array of SMPT entries.
- * @smpt_lock: Spin lock protecting access to SMPT data structures.
- * @info: Hardware specific SMPT information.
- * @ref_count: Number of active SMPT mappings (for debug).
- * @map_count: Number of SMPT mappings created (for debug).
- * @unmap_count: Number of SMPT mappings destroyed (for debug).
- */
-struct mic_smpt_info {
- struct mic_smpt *entry;
- spinlock_t smpt_lock;
- struct mic_smpt_hw_info info;
- s64 ref_count;
- s64 map_count;
- s64 unmap_count;
-};
-
-dma_addr_t mic_map_single(struct mic_device *mdev, void *va, size_t size);
-void mic_unmap_single(struct mic_device *mdev,
- dma_addr_t mic_addr, size_t size);
-dma_addr_t mic_map(struct mic_device *mdev,
- dma_addr_t dma_addr, size_t size);
-void mic_unmap(struct mic_device *mdev, dma_addr_t mic_addr, size_t size);
-dma_addr_t mic_to_dma_addr(struct mic_device *mdev, dma_addr_t mic_addr);
-
-/**
- * mic_map_error - Check a MIC address for errors.
- *
- * @mic_addr: MIC physical address returned by a mic_map..(..) API.
- *
- * returns whether there was an error during the mic_map..(..) APIs.
- */
-static inline bool mic_map_error(dma_addr_t mic_addr)
-{
- return !mic_addr;
-}
-
-int mic_smpt_init(struct mic_device *mdev);
-void mic_smpt_uninit(struct mic_device *mdev);
-void mic_smpt_restore(struct mic_device *mdev);
-
-#endif
diff --git a/drivers/misc/mic/host/mic_x100.c b/drivers/misc/mic/host/mic_x100.c
deleted file mode 100644
index f5536c1ad607..000000000000
--- a/drivers/misc/mic/host/mic_x100.c
+++ /dev/null
@@ -1,585 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * Intel MIC Host driver.
- */
-#include <linux/fs.h>
-#include <linux/pci.h>
-#include <linux/sched.h>
-#include <linux/firmware.h>
-#include <linux/delay.h>
-
-#include "../common/mic_dev.h"
-#include "mic_device.h"
-#include "mic_x100.h"
-#include "mic_smpt.h"
-
-static const u16 mic_x100_intr_init[] = {
- MIC_X100_DOORBELL_IDX_START,
- MIC_X100_DMA_IDX_START,
- MIC_X100_ERR_IDX_START,
- MIC_X100_NUM_DOORBELL,
- MIC_X100_NUM_DMA,
- MIC_X100_NUM_ERR,
-};
-
-/**
- * mic_x100_write_spad - write to the scratchpad register
- * @mdev: pointer to mic_device instance
- * @idx: index to the scratchpad register, 0 based
- * @val: the data value to put into the register
- *
- * This function allows writing of a 32bit value to the indexed scratchpad
- * register.
- *
- * RETURNS: none.
- */
-static void
-mic_x100_write_spad(struct mic_device *mdev, unsigned int idx, u32 val)
-{
- dev_dbg(&mdev->pdev->dev, "Writing 0x%x to scratch pad index %d\n",
- val, idx);
- mic_mmio_write(&mdev->mmio, val,
- MIC_X100_SBOX_BASE_ADDRESS +
- MIC_X100_SBOX_SPAD0 + idx * 4);
-}
-
-/**
- * mic_x100_read_spad - read from the scratchpad register
- * @mdev: pointer to mic_device instance
- * @idx: index to scratchpad register, 0 based
- *
- * This function allows reading of the 32bit scratchpad register.
- *
- * RETURNS: The value read from the scratchpad register.
- */
-static u32
-mic_x100_read_spad(struct mic_device *mdev, unsigned int idx)
-{
- u32 val = mic_mmio_read(&mdev->mmio,
- MIC_X100_SBOX_BASE_ADDRESS +
- MIC_X100_SBOX_SPAD0 + idx * 4);
-
- dev_dbg(&mdev->pdev->dev,
- "Reading 0x%x from scratch pad index %d\n", val, idx);
- return val;
-}
-
-/**
- * mic_x100_enable_interrupts - Enable interrupts.
- * @mdev: pointer to mic_device instance
- */
-static void mic_x100_enable_interrupts(struct mic_device *mdev)
-{
- u32 reg;
- struct mic_mw *mw = &mdev->mmio;
- u32 sice0 = MIC_X100_SBOX_BASE_ADDRESS + MIC_X100_SBOX_SICE0;
- u32 siac0 = MIC_X100_SBOX_BASE_ADDRESS + MIC_X100_SBOX_SIAC0;
-
- reg = mic_mmio_read(mw, sice0);
- reg |= MIC_X100_SBOX_DBR_BITS(0xf) | MIC_X100_SBOX_DMA_BITS(0xff);
- mic_mmio_write(mw, reg, sice0);
-
- /*
- * Enable auto-clear when enabling interrupts. Applicable only for
- * MSI-x. Legacy and MSI mode cannot have auto-clear enabled.
- */
- if (mdev->irq_info.num_vectors > 1) {
- reg = mic_mmio_read(mw, siac0);
- reg |= MIC_X100_SBOX_DBR_BITS(0xf) |
- MIC_X100_SBOX_DMA_BITS(0xff);
- mic_mmio_write(mw, reg, siac0);
- }
-}
-
-/**
- * mic_x100_disable_interrupts - Disable interrupts.
- * @mdev: pointer to mic_device instance
- */
-static void mic_x100_disable_interrupts(struct mic_device *mdev)
-{
- u32 reg;
- struct mic_mw *mw = &mdev->mmio;
- u32 sice0 = MIC_X100_SBOX_BASE_ADDRESS + MIC_X100_SBOX_SICE0;
- u32 siac0 = MIC_X100_SBOX_BASE_ADDRESS + MIC_X100_SBOX_SIAC0;
- u32 sicc0 = MIC_X100_SBOX_BASE_ADDRESS + MIC_X100_SBOX_SICC0;
-
- reg = mic_mmio_read(mw, sice0);
- mic_mmio_write(mw, reg, sicc0);
-
- if (mdev->irq_info.num_vectors > 1) {
- reg = mic_mmio_read(mw, siac0);
- reg &= ~(MIC_X100_SBOX_DBR_BITS(0xf) |
- MIC_X100_SBOX_DMA_BITS(0xff));
- mic_mmio_write(mw, reg, siac0);
- }
-}
-
-/**
- * mic_x100_send_sbox_intr - Send an MIC_X100_SBOX interrupt to MIC.
- * @mdev: pointer to mic_device instance
- * @doorbell: doorbell number
- */
-static void mic_x100_send_sbox_intr(struct mic_device *mdev,
- int doorbell)
-{
- struct mic_mw *mw = &mdev->mmio;
- u64 apic_icr_offset = MIC_X100_SBOX_APICICR0 + doorbell * 8;
- u32 apicicr_low = mic_mmio_read(mw, MIC_X100_SBOX_BASE_ADDRESS +
- apic_icr_offset);
-
- /* for MIC we need to make sure we "hit" the send_icr bit (13) */
- apicicr_low = (apicicr_low | (1 << 13));
-
- /* Ensure that the interrupt is ordered w.r.t. previous stores. */
- wmb();
- mic_mmio_write(mw, apicicr_low,
- MIC_X100_SBOX_BASE_ADDRESS + apic_icr_offset);
-}
-
-/**
- * mic_x100_send_rdmasr_intr - Send an RDMASR interrupt to MIC.
- * @mdev: pointer to mic_device instance
- * @doorbell: doorbell number
- */
-static void mic_x100_send_rdmasr_intr(struct mic_device *mdev,
- int doorbell)
-{
- int rdmasr_offset = MIC_X100_SBOX_RDMASR0 + (doorbell << 2);
- /* Ensure that the interrupt is ordered w.r.t. previous stores. */
- wmb();
- mic_mmio_write(&mdev->mmio, 0,
- MIC_X100_SBOX_BASE_ADDRESS + rdmasr_offset);
-}
-
-/**
- * mic_x100_send_intr - Send interrupt to MIC.
- * @mdev: pointer to mic_device instance
- * @doorbell: doorbell number.
- */
-static void mic_x100_send_intr(struct mic_device *mdev, int doorbell)
-{
- int rdmasr_db;
-
- if (doorbell < MIC_X100_NUM_SBOX_IRQ) {
- mic_x100_send_sbox_intr(mdev, doorbell);
- } else {
- rdmasr_db = doorbell - MIC_X100_NUM_SBOX_IRQ;
- mic_x100_send_rdmasr_intr(mdev, rdmasr_db);
- }
-}
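Example routing, using MIC_X100_NUM_SBOX_IRQ == 8 from mic_x100.h below:

/*
 * doorbell 3 -> SBOX path:   write APICICR0 + 3 * 8
 * doorbell 9 -> RDMASR path: write RDMASR0 + (9 - 8) * 4
 */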
-
-/**
- * mic_x100_ack_interrupt - Read the interrupt sources register and
- * clear it. This function will be called in the MSI/INTx case.
- * @mdev: Pointer to mic_device instance.
- *
- * Returns: bitmask of interrupt sources triggered.
- */
-static u32 mic_x100_ack_interrupt(struct mic_device *mdev)
-{
- u32 sicr0 = MIC_X100_SBOX_BASE_ADDRESS + MIC_X100_SBOX_SICR0;
- u32 reg = mic_mmio_read(&mdev->mmio, sicr0);
- mic_mmio_write(&mdev->mmio, reg, sicr0);
- return reg;
-}
-
-/**
- * mic_x100_intr_workarounds - These hardware specific workarounds are
- * to be invoked every time an interrupt is handled.
- * @mdev: Pointer to mic_device instance.
- *
- * Returns: none
- */
-static void mic_x100_intr_workarounds(struct mic_device *mdev)
-{
- struct mic_mw *mw = &mdev->mmio;
-
- /* Clear pending bit array. */
- if (MIC_A0_STEP == mdev->stepping)
- mic_mmio_write(mw, 1, MIC_X100_SBOX_BASE_ADDRESS +
- MIC_X100_SBOX_MSIXPBACR);
-
- if (mdev->stepping >= MIC_B0_STEP)
- mdev->intr_ops->enable_interrupts(mdev);
-}
-
-/**
- * mic_x100_hw_intr_init - Initialize h/w specific interrupt
- * information.
- * @mdev: pointer to mic_device instance
- */
-static void mic_x100_hw_intr_init(struct mic_device *mdev)
-{
- mdev->intr_info = (struct mic_intr_info *)mic_x100_intr_init;
-}
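The cast works because mic_x100_intr_init[] has exactly the layout of struct mic_intr_info from mic_intr.h: the first three u16 values land in intr_start_idx[] and the next three in intr_len[]. mic_map_src_to_offset() (not shown in this hunk) presumably resolves (intr_src, type) to intr_start_idx[type] + intr_src:

/*
 * intr_start_idx[] = { 0, 8, 30 }   DB, DMA and ERR start offsets
 * intr_len[]       = { 4, 8,  1 }   MIC_X100_NUM_DOORBELL/DMA/ERR
 */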
-
-/**
- * mic_x100_read_msi_to_src_map - read from the MSI mapping registers
- * @mdev: pointer to mic_device instance
- * @idx: index to the mapping register, 0 based
- *
- * This function allows reading of the 32bit MSI mapping register.
- *
- * RETURNS: The value in the register.
- */
-static u32
-mic_x100_read_msi_to_src_map(struct mic_device *mdev, int idx)
-{
- return mic_mmio_read(&mdev->mmio,
- MIC_X100_SBOX_BASE_ADDRESS +
- MIC_X100_SBOX_MXAR0 + idx * 4);
-}
-
-/**
- * mic_x100_program_msi_to_src_map - program the MSI mapping registers
- * @mdev: pointer to mic_device instance
- * @idx: index to the mapping register, 0 based
- * @offset: The bit offset in the register that needs to be updated.
- * @set: boolean specifying if the bit in the specified offset needs
- * to be set or cleared.
- *
- * RETURNS: None.
- */
-static void
-mic_x100_program_msi_to_src_map(struct mic_device *mdev,
- int idx, int offset, bool set)
-{
- unsigned long reg;
- struct mic_mw *mw = &mdev->mmio;
- u32 mxar = MIC_X100_SBOX_BASE_ADDRESS +
- MIC_X100_SBOX_MXAR0 + idx * 4;
-
- reg = mic_mmio_read(mw, mxar);
- if (set)
- __set_bit(offset, &reg);
- else
- __clear_bit(offset, &reg);
- mic_mmio_write(mw, reg, mxar);
-}
-
-/**
- * mic_x100_reset_fw_ready - Reset Firmware ready status field.
- * @mdev: pointer to mic_device instance
- */
-static void mic_x100_reset_fw_ready(struct mic_device *mdev)
-{
- mdev->ops->write_spad(mdev, MIC_X100_DOWNLOAD_INFO, 0);
-}
-
-/**
- * mic_x100_is_fw_ready - Check if firmware is ready.
- * @mdev: pointer to mic_device instance
- */
-static bool mic_x100_is_fw_ready(struct mic_device *mdev)
-{
- u32 scratch2 = mdev->ops->read_spad(mdev, MIC_X100_DOWNLOAD_INFO);
-
- return MIC_X100_SPAD2_DOWNLOAD_STATUS(scratch2);
-}
-
-/**
- * mic_x100_get_apic_id - Get bootstrap APIC ID.
- * @mdev: pointer to mic_device instance
- */
-static u32 mic_x100_get_apic_id(struct mic_device *mdev)
-{
- u32 scratch2 = mdev->ops->read_spad(mdev, MIC_X100_DOWNLOAD_INFO);
-
- return MIC_X100_SPAD2_APIC_ID(scratch2);
-}
-
-/**
- * mic_x100_send_firmware_intr - Send an interrupt to the firmware on MIC.
- * @mdev: pointer to mic_device instance
- */
-static void mic_x100_send_firmware_intr(struct mic_device *mdev)
-{
- u32 apicicr_low;
- u64 apic_icr_offset = MIC_X100_SBOX_APICICR7;
- int vector = MIC_X100_BSP_INTERRUPT_VECTOR;
- struct mic_mw *mw = &mdev->mmio;
-
- /*
- * For MIC we need to make sure we "hit"
- * the send_icr bit (13).
- */
- apicicr_low = (vector | (1 << 13));
-
- mic_mmio_write(mw, mic_x100_get_apic_id(mdev),
- MIC_X100_SBOX_BASE_ADDRESS + apic_icr_offset + 4);
-
- /* Ensure that the interrupt is ordered w.r.t. previous stores. */
- wmb();
- mic_mmio_write(mw, apicicr_low,
- MIC_X100_SBOX_BASE_ADDRESS + apic_icr_offset);
-}
-
-/**
- * mic_x100_hw_reset - Reset the MIC device.
- * @mdev: pointer to mic_device instance
- */
-static void mic_x100_hw_reset(struct mic_device *mdev)
-{
- u32 reset_reg;
- u32 rgcr = MIC_X100_SBOX_BASE_ADDRESS + MIC_X100_SBOX_RGCR;
- struct mic_mw *mw = &mdev->mmio;
-
- /* Ensure that the reset is ordered w.r.t. previous loads and stores */
- mb();
- /* Trigger reset */
- reset_reg = mic_mmio_read(mw, rgcr);
- reset_reg |= 0x1;
- mic_mmio_write(mw, reset_reg, rgcr);
- /*
- * Empirically, a delay of at least 1 second after triggering
- * reset is needed to avoid a range of stability problems.
- */
- msleep(1000);
-}
-
-/**
- * mic_x100_load_command_line - Load command line to MIC.
- * @mdev: pointer to mic_device instance
- * @fw: the firmware image
- *
- * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
- */
-static int
-mic_x100_load_command_line(struct mic_device *mdev, const struct firmware *fw)
-{
- u32 len = 0;
- u32 boot_mem;
- char *buf;
- void __iomem *cmd_line_va = mdev->aper.va + mdev->bootaddr + fw->size;
-#define CMDLINE_SIZE 2048
-
- boot_mem = mdev->aper.len >> 20;
- buf = kzalloc(CMDLINE_SIZE, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- len += scnprintf(buf, CMDLINE_SIZE - len,
- " mem=%dM", boot_mem);
- if (mdev->cosm_dev->cmdline)
- scnprintf(buf + len, CMDLINE_SIZE - len, " %s",
- mdev->cosm_dev->cmdline);
- memcpy_toio(cmd_line_va, buf, strlen(buf) + 1);
- kfree(buf);
- return 0;
-}
-
-/**
- * mic_x100_load_ramdisk - Load ramdisk to MIC.
- * @mdev: pointer to mic_device instance
- *
- * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
- */
-static int
-mic_x100_load_ramdisk(struct mic_device *mdev)
-{
- const struct firmware *fw;
- int rc;
- struct boot_params __iomem *bp = mdev->aper.va + mdev->bootaddr;
-
- rc = request_firmware(&fw, mdev->cosm_dev->ramdisk, &mdev->pdev->dev);
- if (rc < 0) {
- dev_err(&mdev->pdev->dev,
- "ramdisk request_firmware failed: %d %s\n",
- rc, mdev->cosm_dev->ramdisk);
- goto error;
- }
- /*
- * Typically the bootaddr for the card OS is 64M, so copy
- * the ramdisk over at 128M (bootaddr << 1).
- */
- memcpy_toio(mdev->aper.va + (mdev->bootaddr << 1), fw->data, fw->size);
- iowrite32(mdev->bootaddr << 1, &bp->hdr.ramdisk_image);
- iowrite32(fw->size, &bp->hdr.ramdisk_size);
- release_firmware(fw);
-error:
- return rc;
-}
-
-/**
- * mic_x100_get_boot_addr - Get MIC boot address.
- * @mdev: pointer to mic_device instance
- *
- * This function is called during firmware load to determine
- * the address at which the OS should be downloaded in card
- * memory i.e. GDDR.
- * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
- */
-static int
-mic_x100_get_boot_addr(struct mic_device *mdev)
-{
- u32 scratch2, boot_addr;
- int rc = 0;
-
- scratch2 = mdev->ops->read_spad(mdev, MIC_X100_DOWNLOAD_INFO);
- boot_addr = MIC_X100_SPAD2_DOWNLOAD_ADDR(scratch2);
- dev_dbg(&mdev->pdev->dev, "%s %d boot_addr 0x%x\n",
- __func__, __LINE__, boot_addr);
- if (boot_addr > (1 << 31)) {
- dev_err(&mdev->pdev->dev,
- "incorrect bootaddr 0x%x\n",
- boot_addr);
- rc = -EINVAL;
- goto error;
- }
- mdev->bootaddr = boot_addr;
-error:
- return rc;
-}
-
-/**
- * mic_x100_load_firmware - Load firmware to MIC.
- * @mdev: pointer to mic_device instance
- * @buf: buffer containing boot string including firmware/ramdisk path.
- *
- * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
- */
-static int
-mic_x100_load_firmware(struct mic_device *mdev, const char *buf)
-{
- int rc;
- const struct firmware *fw;
-
- rc = mic_x100_get_boot_addr(mdev);
- if (rc)
- return rc;
- /* load OS */
- rc = request_firmware(&fw, mdev->cosm_dev->firmware, &mdev->pdev->dev);
- if (rc < 0) {
- dev_err(&mdev->pdev->dev,
- "ramdisk request_firmware failed: %d %s\n",
- rc, mdev->cosm_dev->firmware);
- return rc;
- }
- if (mdev->bootaddr > mdev->aper.len - fw->size) {
- rc = -EINVAL;
- dev_err(&mdev->pdev->dev, "%s %d rc %d bootaddr 0x%x\n",
- __func__, __LINE__, rc, mdev->bootaddr);
- goto error;
- }
- memcpy_toio(mdev->aper.va + mdev->bootaddr, fw->data, fw->size);
- mdev->ops->write_spad(mdev, MIC_X100_FW_SIZE, fw->size);
- if (!strcmp(mdev->cosm_dev->bootmode, "flash")) {
- rc = -EINVAL;
- dev_err(&mdev->pdev->dev, "%s %d rc %d\n",
- __func__, __LINE__, rc);
- goto error;
- }
- /* load command line */
- rc = mic_x100_load_command_line(mdev, fw);
- if (rc) {
- dev_err(&mdev->pdev->dev, "%s %d rc %d\n",
- __func__, __LINE__, rc);
- goto error;
- }
- release_firmware(fw);
- /* load ramdisk */
- if (mdev->cosm_dev->ramdisk)
- rc = mic_x100_load_ramdisk(mdev);
-
- return rc;
-
-error:
- release_firmware(fw);
- return rc;
-}
-
-/**
- * mic_x100_get_postcode - Get postcode status from firmware.
- * @mdev: pointer to mic_device instance
- *
- * RETURNS: postcode.
- */
-static u32 mic_x100_get_postcode(struct mic_device *mdev)
-{
- return mic_mmio_read(&mdev->mmio, MIC_X100_POSTCODE);
-}
-
-/**
- * mic_x100_smpt_set - Update an SMPT entry with a DMA address.
- * @mdev: pointer to mic_device instance
- * @dma_addr: DMA address to use
- * @index: entry to write to
- *
- * RETURNS: none.
- */
-static void
-mic_x100_smpt_set(struct mic_device *mdev, dma_addr_t dma_addr, u8 index)
-{
-#define SNOOP_ON (0 << 0)
-#define SNOOP_OFF (1 << 0)
-/*
- * Sbox Smpt Reg Bits:
- * Bits 31:2 Host address
- * Bits 1 RSVD
- * Bits 0 No snoop
- */
-#define BUILD_SMPT(NO_SNOOP, HOST_ADDR) \
- (u32)(((HOST_ADDR) << 2) | ((NO_SNOOP) & 0x01))
-
- uint32_t smpt_reg_val = BUILD_SMPT(SNOOP_ON,
- dma_addr >> mdev->smpt->info.page_shift);
- mic_mmio_write(&mdev->mmio, smpt_reg_val,
- MIC_X100_SBOX_BASE_ADDRESS +
- MIC_X100_SBOX_SMPT00 + (4 * index));
-}
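A worked encoding with the X100 page_shift of 34, mapping host address 0x800000000 (32 GiB):

/*
 * dma_addr >> 34          = 2
 * BUILD_SMPT(SNOOP_ON, 2) = (2 << 2) | 0 = 0x8
 *
 * Bits 31:2 carry the host address in 16 GiB units; setting bit 0
 * (SNOOP_OFF) would disable snooping for the entry.
 */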
-
-/**
- * mic_x100_smpt_hw_init - Initialize SMPT X100 specific fields.
- * @mdev: pointer to mic_device instance
- *
- * RETURNS: none.
- */
-static void mic_x100_smpt_hw_init(struct mic_device *mdev)
-{
- struct mic_smpt_hw_info *info = &mdev->smpt->info;
-
- info->num_reg = 32;
- info->page_shift = 34;
- info->page_size = (1ULL << info->page_shift);
- info->base = 0x8000000000ULL;
-}
-
-struct mic_smpt_ops mic_x100_smpt_ops = {
- .init = mic_x100_smpt_hw_init,
- .set = mic_x100_smpt_set,
-};
-
-static bool mic_x100_dma_filter(struct dma_chan *chan, void *param)
-{
- if (chan->device->dev->parent == (struct device *)param)
- return true;
- return false;
-}
-
-struct mic_hw_ops mic_x100_ops = {
- .aper_bar = MIC_X100_APER_BAR,
- .mmio_bar = MIC_X100_MMIO_BAR,
- .read_spad = mic_x100_read_spad,
- .write_spad = mic_x100_write_spad,
- .send_intr = mic_x100_send_intr,
- .ack_interrupt = mic_x100_ack_interrupt,
- .intr_workarounds = mic_x100_intr_workarounds,
- .reset = mic_x100_hw_reset,
- .reset_fw_ready = mic_x100_reset_fw_ready,
- .is_fw_ready = mic_x100_is_fw_ready,
- .send_firmware_intr = mic_x100_send_firmware_intr,
- .load_mic_fw = mic_x100_load_firmware,
- .get_postcode = mic_x100_get_postcode,
- .dma_filter = mic_x100_dma_filter,
-};
-
-struct mic_hw_intr_ops mic_x100_intr_ops = {
- .intr_init = mic_x100_hw_intr_init,
- .enable_interrupts = mic_x100_enable_interrupts,
- .disable_interrupts = mic_x100_disable_interrupts,
- .program_msi_to_src_map = mic_x100_program_msi_to_src_map,
- .read_msi_to_src_map = mic_x100_read_msi_to_src_map,
-};
diff --git a/drivers/misc/mic/host/mic_x100.h b/drivers/misc/mic/host/mic_x100.h
deleted file mode 100644
index aebcaed6fa72..000000000000
--- a/drivers/misc/mic/host/mic_x100.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * Intel MIC Host driver.
- */
-#ifndef _MIC_X100_HW_H_
-#define _MIC_X100_HW_H_
-
-#define MIC_X100_PCI_DEVICE_2250 0x2250
-#define MIC_X100_PCI_DEVICE_2251 0x2251
-#define MIC_X100_PCI_DEVICE_2252 0x2252
-#define MIC_X100_PCI_DEVICE_2253 0x2253
-#define MIC_X100_PCI_DEVICE_2254 0x2254
-#define MIC_X100_PCI_DEVICE_2255 0x2255
-#define MIC_X100_PCI_DEVICE_2256 0x2256
-#define MIC_X100_PCI_DEVICE_2257 0x2257
-#define MIC_X100_PCI_DEVICE_2258 0x2258
-#define MIC_X100_PCI_DEVICE_2259 0x2259
-#define MIC_X100_PCI_DEVICE_225a 0x225a
-#define MIC_X100_PCI_DEVICE_225b 0x225b
-#define MIC_X100_PCI_DEVICE_225c 0x225c
-#define MIC_X100_PCI_DEVICE_225d 0x225d
-#define MIC_X100_PCI_DEVICE_225e 0x225e
-
-#define MIC_X100_APER_BAR 0
-#define MIC_X100_MMIO_BAR 4
-
-#define MIC_X100_SBOX_BASE_ADDRESS 0x00010000
-#define MIC_X100_SBOX_SPAD0 0x0000AB20
-#define MIC_X100_SBOX_SICR0_DBR(x) ((x) & 0xf)
-#define MIC_X100_SBOX_SICR0_DMA(x) (((x) >> 8) & 0xff)
-#define MIC_X100_SBOX_SICE0_DBR(x) ((x) & 0xf)
-#define MIC_X100_SBOX_DBR_BITS(x) ((x) & 0xf)
-#define MIC_X100_SBOX_SICE0_DMA(x) (((x) >> 8) & 0xff)
-#define MIC_X100_SBOX_DMA_BITS(x) (((x) & 0xff) << 8)
-
-#define MIC_X100_SBOX_APICICR0 0x0000A9D0
-#define MIC_X100_SBOX_SICR0 0x00009004
-#define MIC_X100_SBOX_SICE0 0x0000900C
-#define MIC_X100_SBOX_SICC0 0x00009010
-#define MIC_X100_SBOX_SIAC0 0x00009014
-#define MIC_X100_SBOX_MSIXPBACR 0x00009084
-#define MIC_X100_SBOX_MXAR0 0x00009044
-#define MIC_X100_SBOX_SMPT00 0x00003100
-#define MIC_X100_SBOX_RDMASR0 0x0000B180
-
-#define MIC_X100_DOORBELL_IDX_START 0
-#define MIC_X100_NUM_DOORBELL 4
-#define MIC_X100_DMA_IDX_START 8
-#define MIC_X100_NUM_DMA 8
-#define MIC_X100_ERR_IDX_START 30
-#define MIC_X100_NUM_ERR 1
-
-#define MIC_X100_NUM_SBOX_IRQ 8
-#define MIC_X100_NUM_RDMASR_IRQ 8
-#define MIC_X100_RDMASR_IRQ_BASE 17
-#define MIC_X100_SPAD2_DOWNLOAD_STATUS(x) ((x) & 0x1)
-#define MIC_X100_SPAD2_APIC_ID(x) (((x) >> 1) & 0x1ff)
-#define MIC_X100_SPAD2_DOWNLOAD_ADDR(x) ((x) & 0xfffff000)
-#define MIC_X100_SBOX_APICICR7 0x0000AA08
-#define MIC_X100_SBOX_RGCR 0x00004010
-#define MIC_X100_SBOX_SDBIC0 0x0000CC90
-#define MIC_X100_DOWNLOAD_INFO 2
-#define MIC_X100_FW_SIZE 5
-#define MIC_X100_POSTCODE 0x242c
-
-/* Host->Card(bootstrap) Interrupt Vector */
-#define MIC_X100_BSP_INTERRUPT_VECTOR 229
-
-extern struct mic_hw_ops mic_x100_ops;
-extern struct mic_smpt_ops mic_x100_smpt_ops;
-extern struct mic_hw_intr_ops mic_x100_intr_ops;
-
-#endif
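A worked decode of the scratchpad 2 layout above, for a hypothetical value 0x04000001:

/*
 * MIC_X100_SPAD2_DOWNLOAD_STATUS(0x04000001) = 0x1        firmware ready
 * MIC_X100_SPAD2_APIC_ID(0x04000001)         = 0x0        bits 9:1
 * MIC_X100_SPAD2_DOWNLOAD_ADDR(0x04000001)   = 0x04000000 boot at 64M
 */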
diff --git a/drivers/misc/mic/scif/Makefile b/drivers/misc/mic/scif/Makefile
deleted file mode 100644
index ff372555d118..000000000000
--- a/drivers/misc/mic/scif/Makefile
+++ /dev/null
@@ -1,21 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile - SCIF driver.
-# Copyright(c) 2014, Intel Corporation.
-#
-obj-$(CONFIG_SCIF) += scif.o
-scif-objs := scif_main.o
-scif-objs += scif_peer_bus.o
-scif-objs += scif_ports.o
-scif-objs += scif_debugfs.o
-scif-objs += scif_fd.o
-scif-objs += scif_api.o
-scif-objs += scif_epd.o
-scif-objs += scif_rb.o
-scif-objs += scif_nodeqp.o
-scif-objs += scif_nm.o
-scif-objs += scif_dma.o
-scif-objs += scif_fence.o
-scif-objs += scif_mmap.o
-scif-objs += scif_rma.o
-scif-objs += scif_rma_list.o
diff --git a/drivers/misc/mic/scif/scif_api.c b/drivers/misc/mic/scif/scif_api.c
deleted file mode 100644
index 304d6c833712..000000000000
--- a/drivers/misc/mic/scif/scif_api.c
+++ /dev/null
@@ -1,1485 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Intel SCIF driver.
- */
-#include <linux/scif.h>
-#include "scif_main.h"
-#include "scif_map.h"
-
-static const char * const scif_ep_states[] = {
- "Unbound",
- "Bound",
- "Listening",
- "Connected",
- "Connecting",
- "Mapping",
- "Closing",
- "Close Listening",
- "Disconnected",
- "Zombie"};
-
-enum conn_async_state {
- ASYNC_CONN_IDLE = 1, /* ep setup for async connect */
- ASYNC_CONN_INPROGRESS, /* async connect in progress */
- ASYNC_CONN_FLUSH_WORK /* async work flush in progress */
-};
-
-/*
- * File operations for anonymous inode file associated with a SCIF endpoint,
- * used in kernel mode SCIF poll. Kernel mode SCIF poll calls portions of the
- * poll API in the kernel and these take in a struct file *. Since a struct
- * file is not available to kernel mode SCIF, it uses an anonymous file for
- * this purpose.
- */
-const struct file_operations scif_anon_fops = {
- .owner = THIS_MODULE,
-};
-
-scif_epd_t scif_open(void)
-{
- struct scif_endpt *ep;
- int err;
-
- might_sleep();
- ep = kzalloc(sizeof(*ep), GFP_KERNEL);
- if (!ep)
- goto err_ep_alloc;
-
- ep->qp_info.qp = kzalloc(sizeof(*ep->qp_info.qp), GFP_KERNEL);
- if (!ep->qp_info.qp)
- goto err_qp_alloc;
-
- err = scif_anon_inode_getfile(ep);
- if (err)
- goto err_anon_inode;
-
- spin_lock_init(&ep->lock);
- mutex_init(&ep->sendlock);
- mutex_init(&ep->recvlock);
-
- scif_rma_ep_init(ep);
- ep->state = SCIFEP_UNBOUND;
- dev_dbg(scif_info.mdev.this_device,
- "SCIFAPI open: ep %p success\n", ep);
- return ep;
-
-err_anon_inode:
- kfree(ep->qp_info.qp);
-err_qp_alloc:
- kfree(ep);
-err_ep_alloc:
- return NULL;
-}
-EXPORT_SYMBOL_GPL(scif_open);
-
-/*
- * scif_disconnect_ep - Disconnects the endpoint if found
- * @epd: The end point returned from scif_open()
- */
-static struct scif_endpt *scif_disconnect_ep(struct scif_endpt *ep)
-{
- struct scifmsg msg;
- struct scif_endpt *fep = NULL;
- struct scif_endpt *tmpep;
- struct list_head *pos, *tmpq;
- int err;
-
- /*
- * Wake up any threads blocked in send()/recv() before closing
- * out the connection. Grabbing and releasing the send/recv lock
- * will ensure that any blocked senders/receivers have exited for
- * Ring 0 endpoints. It is a Ring 0 bug to call send/recv after
- * close. Ring 3 endpoints are not affected since close will not
- * be called while there are IOCTLs executing.
- */
- wake_up_interruptible(&ep->sendwq);
- wake_up_interruptible(&ep->recvwq);
- mutex_lock(&ep->sendlock);
- mutex_unlock(&ep->sendlock);
- mutex_lock(&ep->recvlock);
- mutex_unlock(&ep->recvlock);
-
- /* Remove from the connected list */
- mutex_lock(&scif_info.connlock);
- list_for_each_safe(pos, tmpq, &scif_info.connected) {
- tmpep = list_entry(pos, struct scif_endpt, list);
- if (tmpep == ep) {
- list_del(pos);
- fep = tmpep;
- spin_lock(&ep->lock);
- break;
- }
- }
-
- if (!fep) {
- /*
- * The other side has completed the disconnect before
- * the end point can be removed from the list. Therefore
- * the ep lock is not locked, traverse the disconnected
- * list to find the endpoint and release the conn lock.
- */
- list_for_each_safe(pos, tmpq, &scif_info.disconnected) {
- tmpep = list_entry(pos, struct scif_endpt, list);
- if (tmpep == ep) {
- list_del(pos);
- break;
- }
- }
- mutex_unlock(&scif_info.connlock);
- return NULL;
- }
-
- init_completion(&ep->discon);
- msg.uop = SCIF_DISCNCT;
- msg.src = ep->port;
- msg.dst = ep->peer;
- msg.payload[0] = (u64)ep;
- msg.payload[1] = ep->remote_ep;
-
- err = scif_nodeqp_send(ep->remote_dev, &msg);
- spin_unlock(&ep->lock);
- mutex_unlock(&scif_info.connlock);
-
- if (!err)
- /* Wait for the remote node to respond with SCIF_DISCNT_ACK */
- wait_for_completion_timeout(&ep->discon,
- SCIF_NODE_ALIVE_TIMEOUT);
- return ep;
-}
-
-int scif_close(scif_epd_t epd)
-{
- struct scif_endpt *ep = (struct scif_endpt *)epd;
- struct scif_endpt *tmpep;
- struct list_head *pos, *tmpq;
- enum scif_epd_state oldstate;
- bool flush_conn;
-
- dev_dbg(scif_info.mdev.this_device, "SCIFAPI close: ep %p %s\n",
- ep, scif_ep_states[ep->state]);
- might_sleep();
- spin_lock(&ep->lock);
- flush_conn = (ep->conn_async_state == ASYNC_CONN_INPROGRESS);
- spin_unlock(&ep->lock);
-
- if (flush_conn)
- flush_work(&scif_info.conn_work);
-
- spin_lock(&ep->lock);
- oldstate = ep->state;
-
- ep->state = SCIFEP_CLOSING;
-
- switch (oldstate) {
- case SCIFEP_ZOMBIE:
- dev_err(scif_info.mdev.this_device,
- "SCIFAPI close: zombie state unexpected\n");
- fallthrough;
- case SCIFEP_DISCONNECTED:
- spin_unlock(&ep->lock);
- scif_unregister_all_windows(epd);
- /* Remove from the disconnected list */
- mutex_lock(&scif_info.connlock);
- list_for_each_safe(pos, tmpq, &scif_info.disconnected) {
- tmpep = list_entry(pos, struct scif_endpt, list);
- if (tmpep == ep) {
- list_del(pos);
- break;
- }
- }
- mutex_unlock(&scif_info.connlock);
- break;
- case SCIFEP_UNBOUND:
- case SCIFEP_BOUND:
- case SCIFEP_CONNECTING:
- spin_unlock(&ep->lock);
- break;
- case SCIFEP_MAPPING:
- case SCIFEP_CONNECTED:
- case SCIFEP_CLOSING:
- {
- spin_unlock(&ep->lock);
- scif_unregister_all_windows(epd);
- scif_disconnect_ep(ep);
- break;
- }
- case SCIFEP_LISTENING:
- case SCIFEP_CLLISTEN:
- {
- struct scif_conreq *conreq;
- struct scifmsg msg;
- struct scif_endpt *aep;
-
- spin_unlock(&ep->lock);
- mutex_lock(&scif_info.eplock);
-
- /* remove from listen list */
- list_for_each_safe(pos, tmpq, &scif_info.listen) {
- tmpep = list_entry(pos, struct scif_endpt, list);
- if (tmpep == ep)
- list_del(pos);
- }
- /* Remove any dangling accepts */
- while (ep->acceptcnt) {
- aep = list_first_entry(&ep->li_accept,
- struct scif_endpt, liacceptlist);
- list_del(&aep->liacceptlist);
- scif_put_port(aep->port.port);
- list_for_each_safe(pos, tmpq, &scif_info.uaccept) {
- tmpep = list_entry(pos, struct scif_endpt,
- miacceptlist);
- if (tmpep == aep) {
- list_del(pos);
- break;
- }
- }
- mutex_unlock(&scif_info.eplock);
- mutex_lock(&scif_info.connlock);
- list_for_each_safe(pos, tmpq, &scif_info.connected) {
- tmpep = list_entry(pos,
- struct scif_endpt, list);
- if (tmpep == aep) {
- list_del(pos);
- break;
- }
- }
- list_for_each_safe(pos, tmpq, &scif_info.disconnected) {
- tmpep = list_entry(pos,
- struct scif_endpt, list);
- if (tmpep == aep) {
- list_del(pos);
- break;
- }
- }
- mutex_unlock(&scif_info.connlock);
- scif_teardown_ep(aep);
- mutex_lock(&scif_info.eplock);
- scif_add_epd_to_zombie_list(aep, SCIF_EPLOCK_HELD);
- ep->acceptcnt--;
- }
-
- spin_lock(&ep->lock);
- mutex_unlock(&scif_info.eplock);
-
- /* Remove and reject any pending connection requests. */
- while (ep->conreqcnt) {
- conreq = list_first_entry(&ep->conlist,
- struct scif_conreq, list);
- list_del(&conreq->list);
-
- msg.uop = SCIF_CNCT_REJ;
- msg.dst.node = conreq->msg.src.node;
- msg.dst.port = conreq->msg.src.port;
- msg.payload[0] = conreq->msg.payload[0];
- msg.payload[1] = conreq->msg.payload[1];
- /*
- * No error handling on purpose for scif_nodeqp_send().
- * If the remote node is lost we still want to free the
- * connection requests on the self node.
- */
- scif_nodeqp_send(&scif_dev[conreq->msg.src.node],
- &msg);
- ep->conreqcnt--;
- kfree(conreq);
- }
-
- spin_unlock(&ep->lock);
- /* If a kSCIF accept is waiting wake it up */
- wake_up_interruptible(&ep->conwq);
- break;
- }
- }
- scif_put_port(ep->port.port);
- scif_anon_inode_fput(ep);
- scif_teardown_ep(ep);
- scif_add_epd_to_zombie_list(ep, !SCIF_EPLOCK_HELD);
- return 0;
-}
-EXPORT_SYMBOL_GPL(scif_close);
-
-/**
- * __scif_flush() - Wakes up any blocking accepts. The endpoint will no longer
- * accept new connections.
- * @epd: The end point returned from scif_open()
- */
-int __scif_flush(scif_epd_t epd)
-{
- struct scif_endpt *ep = (struct scif_endpt *)epd;
-
- switch (ep->state) {
- case SCIFEP_LISTENING:
- {
- ep->state = SCIFEP_CLLISTEN;
-
- /* If an accept is waiting wake it up */
- wake_up_interruptible(&ep->conwq);
- break;
- }
- default:
- break;
- }
- return 0;
-}
-
-int scif_bind(scif_epd_t epd, u16 pn)
-{
- struct scif_endpt *ep = (struct scif_endpt *)epd;
- int ret = 0;
- int tmp;
-
- dev_dbg(scif_info.mdev.this_device,
- "SCIFAPI bind: ep %p %s requested port number %d\n",
- ep, scif_ep_states[ep->state], pn);
- if (pn) {
- /*
- * Similar to IETF RFC 1700, SCIF ports below
- * SCIF_ADMIN_PORT_END can only be bound by system (or root)
- * processes or by processes executed by privileged users.
- */
- if (pn < SCIF_ADMIN_PORT_END && !capable(CAP_SYS_ADMIN)) {
- ret = -EACCES;
- goto scif_bind_admin_exit;
- }
- }
-
- spin_lock(&ep->lock);
- if (ep->state == SCIFEP_BOUND) {
- ret = -EINVAL;
- goto scif_bind_exit;
- } else if (ep->state != SCIFEP_UNBOUND) {
- ret = -EISCONN;
- goto scif_bind_exit;
- }
-
- if (pn) {
- tmp = scif_rsrv_port(pn);
- if (tmp != pn) {
- ret = -EINVAL;
- goto scif_bind_exit;
- }
- } else {
- ret = scif_get_new_port();
- if (ret < 0)
- goto scif_bind_exit;
- pn = ret;
- }
-
- ep->state = SCIFEP_BOUND;
- ep->port.node = scif_info.nodeid;
- ep->port.port = pn;
- ep->conn_async_state = ASYNC_CONN_IDLE;
- ret = pn;
- dev_dbg(scif_info.mdev.this_device,
- "SCIFAPI bind: bound to port number %d\n", pn);
-scif_bind_exit:
- spin_unlock(&ep->lock);
-scif_bind_admin_exit:
- return ret;
-}
-EXPORT_SYMBOL_GPL(scif_bind);
-
-int scif_listen(scif_epd_t epd, int backlog)
-{
- struct scif_endpt *ep = (struct scif_endpt *)epd;
-
- dev_dbg(scif_info.mdev.this_device,
- "SCIFAPI listen: ep %p %s\n", ep, scif_ep_states[ep->state]);
- spin_lock(&ep->lock);
- switch (ep->state) {
- case SCIFEP_ZOMBIE:
- case SCIFEP_CLOSING:
- case SCIFEP_CLLISTEN:
- case SCIFEP_UNBOUND:
- case SCIFEP_DISCONNECTED:
- spin_unlock(&ep->lock);
- return -EINVAL;
- case SCIFEP_LISTENING:
- case SCIFEP_CONNECTED:
- case SCIFEP_CONNECTING:
- case SCIFEP_MAPPING:
- spin_unlock(&ep->lock);
- return -EISCONN;
- case SCIFEP_BOUND:
- break;
- }
-
- ep->state = SCIFEP_LISTENING;
- ep->backlog = backlog;
-
- ep->conreqcnt = 0;
- ep->acceptcnt = 0;
- INIT_LIST_HEAD(&ep->conlist);
- init_waitqueue_head(&ep->conwq);
- INIT_LIST_HEAD(&ep->li_accept);
- spin_unlock(&ep->lock);
-
- /*
- * Listen setup is complete, so delete the qp information that is not
- * needed on a listening endpoint before placing it on the list of
- * listening ep's.
- */
- scif_teardown_ep(ep);
- ep->qp_info.qp = NULL;
-
- mutex_lock(&scif_info.eplock);
- list_add_tail(&ep->list, &scif_info.listen);
- mutex_unlock(&scif_info.eplock);
- return 0;
-}
-EXPORT_SYMBOL_GPL(scif_listen);
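-
-/*
- * Illustrative sketch, not part of the original driver: a minimal
- * kernel-mode listener built on the exported calls above.  The port
- * number (2000) and backlog (16) are arbitrary example values, and
- * scif_open() is assumed to return NULL on allocation failure.
- */
-static scif_epd_t example_listener_setup(void)
-{
- scif_epd_t epd;
- int err;
-
- epd = scif_open(); /* allocate a new endpoint */
- if (!epd)
- return NULL;
- err = scif_bind(epd, 2000); /* bind to example port 2000 */
- if (err < 0)
- goto err_close;
- err = scif_listen(epd, 16); /* transition to SCIFEP_LISTENING */
- if (err)
- goto err_close;
- return epd;
-err_close:
- scif_close(epd);
- return NULL;
-}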
-
-/*
- ************************************************************************
- * SCIF connection flow:
- *
- * 1) A SCIF listening endpoint can call scif_accept(..) to wait for SCIF
- * connections via a SCIF_CNCT_REQ message
- * 2) A SCIF endpoint can initiate a SCIF connection by calling
- * scif_connect(..) which calls scif_setup_qp_connect(..) which
- * allocates the local qp for the endpoint ring buffer and then sends
- * a SCIF_CNCT_REQ to the remote node and waits for a SCIF_CNCT_GNT or
- * a SCIF_CNCT_REJ message
- * 3) The peer node handles a SCIF_CNCT_REQ via scif_cnctreq_resp(..) which
- * wakes up any threads blocked in step 1 or sends a SCIF_CNCT_REJ
- * message otherwise
- * 4) A thread blocked waiting for incoming connections allocates its local
- * endpoint QP and ring buffer following which it sends a SCIF_CNCT_GNT
- * and waits for a SCIF_CNCT_GNT(N)ACK. If the allocation fails then
- * the node sends a SCIF_CNCT_REJ message
- * 5) Upon receipt of a SCIF_CNCT_GNT or a SCIF_CNCT_REJ message the
- * connecting endpoint is woken up as part of handling
- * scif_cnctgnt_resp(..) following which it maps the remote endpoints'
- * QP, updates its outbound QP and sends a SCIF_CNCT_GNTACK message on
- * success or a SCIF_CNCT_GNTNACK message on failure and completes
- * the scif_connect(..) API
- * 6) Upon receipt of a SCIF_CNCT_GNT(N)ACK the accepting endpoint blocked
- * in step 4 is woken up and completes the scif_accept(..) API
- * 7) The SCIF connection is now established between the two SCIF endpoints.
- */
-static int scif_conn_func(struct scif_endpt *ep)
-{
- int err = 0;
- struct scifmsg msg;
- struct device *spdev;
-
- err = scif_reserve_dma_chan(ep);
- if (err) {
- dev_err(&ep->remote_dev->sdev->dev,
- "%s %d err %d\n", __func__, __LINE__, err);
- ep->state = SCIFEP_BOUND;
- goto connect_error_simple;
- }
- /* Initiate the first part of the endpoint QP setup */
- err = scif_setup_qp_connect(ep->qp_info.qp, &ep->qp_info.qp_offset,
- SCIF_ENDPT_QP_SIZE, ep->remote_dev);
- if (err) {
- dev_err(&ep->remote_dev->sdev->dev,
- "%s err %d qp_offset 0x%llx\n",
- __func__, err, ep->qp_info.qp_offset);
- ep->state = SCIFEP_BOUND;
- goto connect_error_simple;
- }
-
- spdev = scif_get_peer_dev(ep->remote_dev);
- if (IS_ERR(spdev)) {
- err = PTR_ERR(spdev);
- goto cleanup_qp;
- }
- /* Format connect message and send it */
- msg.src = ep->port;
- msg.dst = ep->conn_port;
- msg.uop = SCIF_CNCT_REQ;
- msg.payload[0] = (u64)ep;
- msg.payload[1] = ep->qp_info.qp_offset;
- err = _scif_nodeqp_send(ep->remote_dev, &msg);
- if (err)
- goto connect_error_dec;
- scif_put_peer_dev(spdev);
- /*
- * Wait for the remote node to respond with SCIF_CNCT_GNT or
- * SCIF_CNCT_REJ message.
- */
- err = wait_event_timeout(ep->conwq, ep->state != SCIFEP_CONNECTING,
- SCIF_NODE_ALIVE_TIMEOUT);
- if (!err) {
- dev_err(&ep->remote_dev->sdev->dev,
- "%s %d timeout\n", __func__, __LINE__);
- ep->state = SCIFEP_BOUND;
- }
- spdev = scif_get_peer_dev(ep->remote_dev);
- if (IS_ERR(spdev)) {
- err = PTR_ERR(spdev);
- goto cleanup_qp;
- }
- if (ep->state == SCIFEP_MAPPING) {
- err = scif_setup_qp_connect_response(ep->remote_dev,
- ep->qp_info.qp,
- ep->qp_info.gnt_pld);
- /*
- * If the resources to map the queue are not available then
- * we need to tell the other side to terminate the accept.
- */
- if (err) {
- dev_err(&ep->remote_dev->sdev->dev,
- "%s %d err %d\n", __func__, __LINE__, err);
- msg.uop = SCIF_CNCT_GNTNACK;
- msg.payload[0] = ep->remote_ep;
- _scif_nodeqp_send(ep->remote_dev, &msg);
- ep->state = SCIFEP_BOUND;
- goto connect_error_dec;
- }
-
- msg.uop = SCIF_CNCT_GNTACK;
- msg.payload[0] = ep->remote_ep;
- err = _scif_nodeqp_send(ep->remote_dev, &msg);
- if (err) {
- ep->state = SCIFEP_BOUND;
- goto connect_error_dec;
- }
- ep->state = SCIFEP_CONNECTED;
- mutex_lock(&scif_info.connlock);
- list_add_tail(&ep->list, &scif_info.connected);
- mutex_unlock(&scif_info.connlock);
- dev_dbg(&ep->remote_dev->sdev->dev,
- "SCIFAPI connect: ep %p connected\n", ep);
- } else if (ep->state == SCIFEP_BOUND) {
- dev_dbg(&ep->remote_dev->sdev->dev,
- "SCIFAPI connect: ep %p connection refused\n", ep);
- err = -ECONNREFUSED;
- goto connect_error_dec;
- }
- scif_put_peer_dev(spdev);
- return err;
-connect_error_dec:
- scif_put_peer_dev(spdev);
-cleanup_qp:
- scif_cleanup_ep_qp(ep);
-connect_error_simple:
- return err;
-}
-
-/*
- * scif_conn_handler:
- *
- * Workqueue handler for servicing non-blocking SCIF connect
- *
- */
-void scif_conn_handler(struct work_struct *work)
-{
- struct scif_endpt *ep;
-
- do {
- ep = NULL;
- spin_lock(&scif_info.nb_connect_lock);
- if (!list_empty(&scif_info.nb_connect_list)) {
- ep = list_first_entry(&scif_info.nb_connect_list,
- struct scif_endpt, conn_list);
- list_del(&ep->conn_list);
- }
- spin_unlock(&scif_info.nb_connect_lock);
- if (ep) {
- ep->conn_err = scif_conn_func(ep);
- wake_up_interruptible(&ep->conn_pend_wq);
- }
- } while (ep);
-}
-
-int __scif_connect(scif_epd_t epd, struct scif_port_id *dst, bool non_block)
-{
- struct scif_endpt *ep = (struct scif_endpt *)epd;
- int err = 0;
- struct scif_dev *remote_dev;
- struct device *spdev;
-
- dev_dbg(scif_info.mdev.this_device, "SCIFAPI connect: ep %p %s\n", ep,
- scif_ep_states[ep->state]);
-
- if (!scif_dev || dst->node > scif_info.maxid)
- return -ENODEV;
-
- might_sleep();
-
- remote_dev = &scif_dev[dst->node];
- spdev = scif_get_peer_dev(remote_dev);
- if (IS_ERR(spdev)) {
- err = PTR_ERR(spdev);
- return err;
- }
-
- spin_lock(&ep->lock);
- switch (ep->state) {
- case SCIFEP_ZOMBIE:
- case SCIFEP_CLOSING:
- err = -EINVAL;
- break;
- case SCIFEP_DISCONNECTED:
- if (ep->conn_async_state == ASYNC_CONN_INPROGRESS)
- ep->conn_async_state = ASYNC_CONN_FLUSH_WORK;
- else
- err = -EINVAL;
- break;
- case SCIFEP_LISTENING:
- case SCIFEP_CLLISTEN:
- err = -EOPNOTSUPP;
- break;
- case SCIFEP_CONNECTING:
- case SCIFEP_MAPPING:
- if (ep->conn_async_state == ASYNC_CONN_INPROGRESS)
- err = -EINPROGRESS;
- else
- err = -EISCONN;
- break;
- case SCIFEP_CONNECTED:
- if (ep->conn_async_state == ASYNC_CONN_INPROGRESS)
- ep->conn_async_state = ASYNC_CONN_FLUSH_WORK;
- else
- err = -EISCONN;
- break;
- case SCIFEP_UNBOUND:
- err = scif_get_new_port();
- if (err < 0)
- break;
- ep->port.port = err;
- ep->port.node = scif_info.nodeid;
- ep->conn_async_state = ASYNC_CONN_IDLE;
- fallthrough;
- case SCIFEP_BOUND:
- /*
- * If a non-blocking connect has already been initiated
- * (conn_async_state is either ASYNC_CONN_INPROGRESS or
- * ASYNC_CONN_FLUSH_WORK), the endpoint could end up in
- * SCIFEP_BOUND due to an error in the connection process
- * (e.g., connection refused). If conn_async_state is
- * ASYNC_CONN_INPROGRESS, transition to ASYNC_CONN_FLUSH_WORK
- * so that the error status can be collected. If the state is
- * already ASYNC_CONN_FLUSH_WORK, then set the error to
- * -EINPROGRESS since some other thread is waiting to collect
- * the error status.
- */
- if (ep->conn_async_state == ASYNC_CONN_INPROGRESS) {
- ep->conn_async_state = ASYNC_CONN_FLUSH_WORK;
- } else if (ep->conn_async_state == ASYNC_CONN_FLUSH_WORK) {
- err = -EINPROGRESS;
- } else {
- ep->conn_port = *dst;
- init_waitqueue_head(&ep->sendwq);
- init_waitqueue_head(&ep->recvwq);
- init_waitqueue_head(&ep->conwq);
- ep->conn_async_state = 0;
-
- if (unlikely(non_block))
- ep->conn_async_state = ASYNC_CONN_INPROGRESS;
- }
- break;
- }
-
- if (err || ep->conn_async_state == ASYNC_CONN_FLUSH_WORK)
- goto connect_simple_unlock1;
-
- ep->state = SCIFEP_CONNECTING;
- ep->remote_dev = &scif_dev[dst->node];
- ep->qp_info.qp->magic = SCIFEP_MAGIC;
- if (ep->conn_async_state == ASYNC_CONN_INPROGRESS) {
- init_waitqueue_head(&ep->conn_pend_wq);
- spin_lock(&scif_info.nb_connect_lock);
- list_add_tail(&ep->conn_list, &scif_info.nb_connect_list);
- spin_unlock(&scif_info.nb_connect_lock);
- err = -EINPROGRESS;
- schedule_work(&scif_info.conn_work);
- }
-connect_simple_unlock1:
- spin_unlock(&ep->lock);
- scif_put_peer_dev(spdev);
- if (err) {
- return err;
- } else if (ep->conn_async_state == ASYNC_CONN_FLUSH_WORK) {
- flush_work(&scif_info.conn_work);
- err = ep->conn_err;
- spin_lock(&ep->lock);
- ep->conn_async_state = ASYNC_CONN_IDLE;
- spin_unlock(&ep->lock);
- } else {
- err = scif_conn_func(ep);
- }
- return err;
-}
-
-int scif_connect(scif_epd_t epd, struct scif_port_id *dst)
-{
- return __scif_connect(epd, dst, false);
-}
-EXPORT_SYMBOL_GPL(scif_connect);
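-
-/*
- * Illustrative sketch, not part of the original driver: the connect
- * side of the flow documented above scif_conn_func().  The node id is
- * assumed to come from scif_get_node_ids() and the port (2000) is an
- * arbitrary example value matching a remote listener.
- */
-static int example_connect(scif_epd_t epd, u16 node)
-{
- struct scif_port_id dst = {
- .node = node, /* remote node to connect to */
- .port = 2000, /* example port the peer listens on */
- };
-
- /* Blocks through steps 2 and 5 of the connection flow above */
- return scif_connect(epd, &dst); /* 0 on success, negative errno on failure */
-}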
-
-/*
- * scif_accept() - Accept a connection request from the remote node
- *
- * The function accepts a connection request from the remote node. Successful
- * completion is indicated by a new end point being created and passed back
- * to the caller for future reference.
- *
- * Upon successful completion zero is returned and the peer information
- * is filled in.
- *
- * If the end point is not in the listening state -EINVAL is returned.
- *
- * If resource allocation fails during the connection sequence then -ENOMEM
- * is returned.
- *
- * If the function is called without the SCIF_ACCEPT_SYNC flag set and no
- * connection requests are pending it returns -EAGAIN.
- *
- * If the remote side is not sending any connection requests the caller may
- * terminate this function with a signal. If so -EINTR is returned.
- */
-int scif_accept(scif_epd_t epd, struct scif_port_id *peer,
- scif_epd_t *newepd, int flags)
-{
- struct scif_endpt *lep = (struct scif_endpt *)epd;
- struct scif_endpt *cep;
- struct scif_conreq *conreq;
- struct scifmsg msg;
- int err;
- struct device *spdev;
-
- dev_dbg(scif_info.mdev.this_device,
- "SCIFAPI accept: ep %p %s\n", lep, scif_ep_states[lep->state]);
-
- if (flags & ~SCIF_ACCEPT_SYNC)
- return -EINVAL;
-
- if (!peer || !newepd)
- return -EINVAL;
-
- might_sleep();
- spin_lock(&lep->lock);
- if (lep->state != SCIFEP_LISTENING) {
- spin_unlock(&lep->lock);
- return -EINVAL;
- }
-
- if (!lep->conreqcnt && !(flags & SCIF_ACCEPT_SYNC)) {
- /* No connection request present and we do not want to wait */
- spin_unlock(&lep->lock);
- return -EAGAIN;
- }
-
- lep->files = current->files;
-retry_connection:
- spin_unlock(&lep->lock);
- /* Wait for the remote node to send us a SCIF_CNCT_REQ */
- err = wait_event_interruptible(lep->conwq,
- (lep->conreqcnt ||
- (lep->state != SCIFEP_LISTENING)));
- if (err)
- return err;
-
- if (lep->state != SCIFEP_LISTENING)
- return -EINTR;
-
- spin_lock(&lep->lock);
-
- if (!lep->conreqcnt)
- goto retry_connection;
-
- /* Get the first connect request off the list */
- conreq = list_first_entry(&lep->conlist, struct scif_conreq, list);
- list_del(&conreq->list);
- lep->conreqcnt--;
- spin_unlock(&lep->lock);
-
- /* Fill in the peer information */
- peer->node = conreq->msg.src.node;
- peer->port = conreq->msg.src.port;
-
- cep = kzalloc(sizeof(*cep), GFP_KERNEL);
- if (!cep) {
- err = -ENOMEM;
- goto scif_accept_error_epalloc;
- }
- spin_lock_init(&cep->lock);
- mutex_init(&cep->sendlock);
- mutex_init(&cep->recvlock);
- cep->state = SCIFEP_CONNECTING;
- cep->remote_dev = &scif_dev[peer->node];
- cep->remote_ep = conreq->msg.payload[0];
-
- scif_rma_ep_init(cep);
-
- err = scif_reserve_dma_chan(cep);
- if (err) {
- dev_err(scif_info.mdev.this_device,
- "%s %d err %d\n", __func__, __LINE__, err);
- goto scif_accept_error_qpalloc;
- }
-
- cep->qp_info.qp = kzalloc(sizeof(*cep->qp_info.qp), GFP_KERNEL);
- if (!cep->qp_info.qp) {
- err = -ENOMEM;
- goto scif_accept_error_qpalloc;
- }
-
- err = scif_anon_inode_getfile(cep);
- if (err)
- goto scif_accept_error_anon_inode;
-
- cep->qp_info.qp->magic = SCIFEP_MAGIC;
- spdev = scif_get_peer_dev(cep->remote_dev);
- if (IS_ERR(spdev)) {
- err = PTR_ERR(spdev);
- goto scif_accept_error_map;
- }
- err = scif_setup_qp_accept(cep->qp_info.qp, &cep->qp_info.qp_offset,
- conreq->msg.payload[1], SCIF_ENDPT_QP_SIZE,
- cep->remote_dev);
- if (err) {
- dev_dbg(&cep->remote_dev->sdev->dev,
- "SCIFAPI accept: ep %p new %p scif_setup_qp_accept %d qp_offset 0x%llx\n",
- lep, cep, err, cep->qp_info.qp_offset);
- scif_put_peer_dev(spdev);
- goto scif_accept_error_map;
- }
-
- cep->port.node = lep->port.node;
- cep->port.port = lep->port.port;
- cep->peer.node = peer->node;
- cep->peer.port = peer->port;
- init_waitqueue_head(&cep->sendwq);
- init_waitqueue_head(&cep->recvwq);
- init_waitqueue_head(&cep->conwq);
-
- msg.uop = SCIF_CNCT_GNT;
- msg.src = cep->port;
- msg.payload[0] = cep->remote_ep;
- msg.payload[1] = cep->qp_info.qp_offset;
- msg.payload[2] = (u64)cep;
-
- err = _scif_nodeqp_send(cep->remote_dev, &msg);
- scif_put_peer_dev(spdev);
- if (err)
- goto scif_accept_error_map;
-retry:
- /* Wait for the remote node to respond with SCIF_CNCT_GNT(N)ACK */
- err = wait_event_timeout(cep->conwq, cep->state != SCIFEP_CONNECTING,
- SCIF_NODE_ACCEPT_TIMEOUT);
- if (!err && scifdev_alive(cep))
- goto retry;
- err = !err ? -ENODEV : 0;
- if (err)
- goto scif_accept_error_map;
- kfree(conreq);
-
- spin_lock(&cep->lock);
-
- if (cep->state == SCIFEP_CLOSING) {
- /*
- * Remote failed to allocate resources and NAKed the grant.
- * There is at this point nothing referencing the new end point.
- */
- spin_unlock(&cep->lock);
- scif_teardown_ep(cep);
- kfree(cep);
-
- /* If call with sync flag then go back and wait. */
- if (flags & SCIF_ACCEPT_SYNC) {
- spin_lock(&lep->lock);
- goto retry_connection;
- }
- return -EAGAIN;
- }
-
- scif_get_port(cep->port.port);
- *newepd = (scif_epd_t)cep;
- spin_unlock(&cep->lock);
- return 0;
-scif_accept_error_map:
- scif_anon_inode_fput(cep);
-scif_accept_error_anon_inode:
- scif_teardown_ep(cep);
-scif_accept_error_qpalloc:
- kfree(cep);
-scif_accept_error_epalloc:
- msg.uop = SCIF_CNCT_REJ;
- msg.dst.node = conreq->msg.src.node;
- msg.dst.port = conreq->msg.src.port;
- msg.payload[0] = conreq->msg.payload[0];
- msg.payload[1] = conreq->msg.payload[1];
- scif_nodeqp_send(&scif_dev[conreq->msg.src.node], &msg);
- kfree(conreq);
- return err;
-}
-EXPORT_SYMBOL_GPL(scif_accept);
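-
-/*
- * Illustrative sketch, not part of the original driver: a blocking
- * accept loop a kernel client might run on a listening endpoint.
- * example_handle_peer() is a hypothetical per-connection handler that
- * is expected to close newepd when it is done with it.
- */
-static void example_handle_peer(scif_epd_t epd); /* hypothetical handler */
-
-static int example_accept_loop(scif_epd_t lepd)
-{
- struct scif_port_id peer;
- scif_epd_t newepd;
- int err;
-
- for (;;) {
- /* SCIF_ACCEPT_SYNC blocks until a SCIF_CNCT_REQ arrives */
- err = scif_accept(lepd, &peer, &newepd, SCIF_ACCEPT_SYNC);
- if (err)
- return err; /* e.g. -EINTR on a signal, -EINVAL if not listening */
- pr_info("accepted connection from node %u port %u\n",
- peer.node, peer.port);
- example_handle_peer(newepd);
- }
-}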
-
-/*
- * scif_msg_param_check:
- * @epd: The end point returned from scif_open()
- * @len: Length to receive
- * @flags: blocking or non blocking
- *
- * Validate parameters for messaging APIs scif_send(..)/scif_recv(..).
- */
-static inline int scif_msg_param_check(scif_epd_t epd, int len, int flags)
-{
- int ret = -EINVAL;
-
- if (len < 0)
- goto err_ret;
- if (flags && (!(flags & SCIF_RECV_BLOCK)))
- goto err_ret;
- ret = 0;
-err_ret:
- return ret;
-}
-
-static int _scif_send(scif_epd_t epd, void *msg, int len, int flags)
-{
- struct scif_endpt *ep = (struct scif_endpt *)epd;
- struct scifmsg notif_msg;
- int curr_xfer_len = 0, sent_len = 0, write_count;
- int ret = 0;
- struct scif_qp *qp = ep->qp_info.qp;
-
- if (flags & SCIF_SEND_BLOCK)
- might_sleep();
-
- spin_lock(&ep->lock);
- while (sent_len != len && SCIFEP_CONNECTED == ep->state) {
- write_count = scif_rb_space(&qp->outbound_q);
- if (write_count) {
- /* Best effort to send as much data as possible */
- curr_xfer_len = min(len - sent_len, write_count);
- ret = scif_rb_write(&qp->outbound_q, msg,
- curr_xfer_len);
- if (ret < 0)
- break;
- /* Success. Update write pointer */
- scif_rb_commit(&qp->outbound_q);
- /*
- * Send a notification to the peer about the
- * produced data message.
- */
- notif_msg.src = ep->port;
- notif_msg.uop = SCIF_CLIENT_SENT;
- notif_msg.payload[0] = ep->remote_ep;
- ret = _scif_nodeqp_send(ep->remote_dev, &notif_msg);
- if (ret)
- break;
- sent_len += curr_xfer_len;
- msg = msg + curr_xfer_len;
- continue;
- }
- curr_xfer_len = min(len - sent_len, SCIF_ENDPT_QP_SIZE - 1);
- /* Not enough RB space. Return for the non-blocking case */
- if (!(flags & SCIF_SEND_BLOCK))
- break;
-
- spin_unlock(&ep->lock);
- /* Wait for a SCIF_CLIENT_RCVD message in the Blocking case */
- ret =
- wait_event_interruptible(ep->sendwq,
- (SCIFEP_CONNECTED != ep->state) ||
- (scif_rb_space(&qp->outbound_q) >=
- curr_xfer_len));
- spin_lock(&ep->lock);
- if (ret)
- break;
- }
- if (sent_len)
- ret = sent_len;
- else if (!ret && SCIFEP_CONNECTED != ep->state)
- ret = SCIFEP_DISCONNECTED == ep->state ?
- -ECONNRESET : -ENOTCONN;
- spin_unlock(&ep->lock);
- return ret;
-}
-
-static int _scif_recv(scif_epd_t epd, void *msg, int len, int flags)
-{
- struct scif_endpt *ep = (struct scif_endpt *)epd;
- struct scifmsg notif_msg;
- int curr_recv_len = 0, remaining_len = len, read_count;
- int ret = 0;
- struct scif_qp *qp = ep->qp_info.qp;
-
- if (flags & SCIF_RECV_BLOCK)
- might_sleep();
- spin_lock(&ep->lock);
- while (remaining_len && (SCIFEP_CONNECTED == ep->state ||
- SCIFEP_DISCONNECTED == ep->state)) {
- read_count = scif_rb_count(&qp->inbound_q, remaining_len);
- if (read_count) {
- /*
- * Best effort to recv as much data as there
- * are bytes to read in the RB; this is particularly
- * important for the non-blocking case.
- */
- curr_recv_len = min(remaining_len, read_count);
- scif_rb_get_next(&qp->inbound_q, msg, curr_recv_len);
- if (ep->state == SCIFEP_CONNECTED) {
- /*
- * Update the read pointer only if the endpoint
- * is still connected else the read pointer
- * might no longer exist since the peer has
- * freed resources!
- */
- scif_rb_update_read_ptr(&qp->inbound_q);
- /*
- * Send a notification to the peer about the
- * consumed data message only if the EP is in
- * SCIFEP_CONNECTED state.
- */
- notif_msg.src = ep->port;
- notif_msg.uop = SCIF_CLIENT_RCVD;
- notif_msg.payload[0] = ep->remote_ep;
- ret = _scif_nodeqp_send(ep->remote_dev,
- &notif_msg);
- if (ret)
- break;
- }
- remaining_len -= curr_recv_len;
- msg = msg + curr_recv_len;
- continue;
- }
- /*
- * Bail out now if the EP is in SCIFEP_DISCONNECTED state else
- * we will keep looping forever.
- */
- if (ep->state == SCIFEP_DISCONNECTED)
- break;
- /*
- * Return in the Non Blocking case if there is no data
- * to read in this iteration.
- */
- if (!(flags & SCIF_RECV_BLOCK))
- break;
- curr_recv_len = min(remaining_len, SCIF_ENDPT_QP_SIZE - 1);
- spin_unlock(&ep->lock);
- /*
- * Wait for a SCIF_CLIENT_SEND message in the blocking case
- * or until other side disconnects.
- */
- ret =
- wait_event_interruptible(ep->recvwq,
- SCIFEP_CONNECTED != ep->state ||
- scif_rb_count(&qp->inbound_q,
- curr_recv_len)
- >= curr_recv_len);
- spin_lock(&ep->lock);
- if (ret)
- break;
- }
- if (len - remaining_len)
- ret = len - remaining_len;
- else if (!ret && ep->state != SCIFEP_CONNECTED)
- ret = ep->state == SCIFEP_DISCONNECTED ?
- -ECONNRESET : -ENOTCONN;
- spin_unlock(&ep->lock);
- return ret;
-}
-
-/**
- * scif_user_send() - Send data to connection queue
- * @epd: The end point returned from scif_open()
- * @msg: Address of the data to send
- * @len: Length to send
- * @flags: blocking or non blocking
- *
- * This function is called from the driver IOCTL entry point
- * only and is a wrapper for _scif_send().
- */
-int scif_user_send(scif_epd_t epd, void __user *msg, int len, int flags)
-{
- struct scif_endpt *ep = (struct scif_endpt *)epd;
- int err = 0;
- int sent_len = 0;
- char *tmp;
- int loop_len;
- int chunk_len = min(len, (1 << (MAX_ORDER + PAGE_SHIFT - 1)));
-
- dev_dbg(scif_info.mdev.this_device,
- "SCIFAPI send (U): ep %p %s\n", ep, scif_ep_states[ep->state]);
- if (!len)
- return 0;
-
- err = scif_msg_param_check(epd, len, flags);
- if (err)
- goto send_err;
-
- tmp = kmalloc(chunk_len, GFP_KERNEL);
- if (!tmp) {
- err = -ENOMEM;
- goto send_err;
- }
- /*
- * Grabbing the lock before breaking up the transfer in
- * multiple chunks is required to ensure that messages do
- * not get fragmented and reordered.
- */
- mutex_lock(&ep->sendlock);
- while (sent_len != len) {
- loop_len = len - sent_len;
- loop_len = min(chunk_len, loop_len);
- if (copy_from_user(tmp, msg, loop_len)) {
- err = -EFAULT;
- goto send_free_err;
- }
- err = _scif_send(epd, tmp, loop_len, flags);
- if (err < 0)
- goto send_free_err;
- sent_len += err;
- msg += err;
- if (err != loop_len)
- goto send_free_err;
- }
-send_free_err:
- mutex_unlock(&ep->sendlock);
- kfree(tmp);
-send_err:
- return err < 0 ? err : sent_len;
-}
-
-/**
- * scif_user_recv() - Receive data from connection queue
- * @epd: The end point returned from scif_open()
- * @msg: Address to place data
- * @len: Length to receive
- * @flags: blocking or non blocking
- *
- * This function is called from the driver IOCTL entry point
- * only and is a wrapper for _scif_recv().
- */
-int scif_user_recv(scif_epd_t epd, void __user *msg, int len, int flags)
-{
- struct scif_endpt *ep = (struct scif_endpt *)epd;
- int err = 0;
- int recv_len = 0;
- char *tmp;
- int loop_len;
- int chunk_len = min(len, (1 << (MAX_ORDER + PAGE_SHIFT - 1)));
-
- dev_dbg(scif_info.mdev.this_device,
- "SCIFAPI recv (U): ep %p %s\n", ep, scif_ep_states[ep->state]);
- if (!len)
- return 0;
-
- err = scif_msg_param_check(epd, len, flags);
- if (err)
- goto recv_err;
-
- tmp = kmalloc(chunk_len, GFP_KERNEL);
- if (!tmp) {
- err = -ENOMEM;
- goto recv_err;
- }
- /*
- * Grabbing the lock before breaking up the transfer in
- * multiple chunks is required to ensure that messages do
- * not get fragmented and reordered.
- */
- mutex_lock(&ep->recvlock);
- while (recv_len != len) {
- loop_len = len - recv_len;
- loop_len = min(chunk_len, loop_len);
- err = _scif_recv(epd, tmp, loop_len, flags);
- if (err < 0)
- goto recv_free_err;
- if (copy_to_user(msg, tmp, err)) {
- err = -EFAULT;
- goto recv_free_err;
- }
- recv_len += err;
- msg += err;
- if (err != loop_len)
- goto recv_free_err;
- }
-recv_free_err:
- mutex_unlock(&ep->recvlock);
- kfree(tmp);
-recv_err:
- return err < 0 ? err : recv_len;
-}
-
-/**
- * scif_send() - Send data to connection queue
- * @epd: The end point returned from scif_open()
- * @msg: Address of the data to send
- * @len: Length to send
- * @flags: blocking or non blocking
- *
- * This function is called from the kernel mode only and is
- * a wrapper for _scif_send().
- */
-int scif_send(scif_epd_t epd, void *msg, int len, int flags)
-{
- struct scif_endpt *ep = (struct scif_endpt *)epd;
- int ret;
-
- dev_dbg(scif_info.mdev.this_device,
- "SCIFAPI send (K): ep %p %s\n", ep, scif_ep_states[ep->state]);
- if (!len)
- return 0;
-
- ret = scif_msg_param_check(epd, len, flags);
- if (ret)
- return ret;
- if (!ep->remote_dev)
- return -ENOTCONN;
- /*
- * Grab the mutex lock in the blocking case only
- * to ensure messages do not get fragmented/reordered.
- * The non blocking mode is protected using spin locks
- * in _scif_send().
- */
- if (flags & SCIF_SEND_BLOCK)
- mutex_lock(&ep->sendlock);
-
- ret = _scif_send(epd, msg, len, flags);
-
- if (flags & SCIF_SEND_BLOCK)
- mutex_unlock(&ep->sendlock);
- return ret;
-}
-EXPORT_SYMBOL_GPL(scif_send);
-
-/**
- * scif_recv() - Receive data from connection queue
- * @epd: The end point returned from scif_open()
- * @msg: Address to place data
- * @len: Length to receive
- * @flags: blocking or non blocking
- *
- * This function is called from the kernel mode only and is
- * a wrapper for _scif_recv().
- */
-int scif_recv(scif_epd_t epd, void *msg, int len, int flags)
-{
- struct scif_endpt *ep = (struct scif_endpt *)epd;
- int ret;
-
- dev_dbg(scif_info.mdev.this_device,
- "SCIFAPI recv (K): ep %p %s\n", ep, scif_ep_states[ep->state]);
- if (!len)
- return 0;
-
- ret = scif_msg_param_check(epd, len, flags);
- if (ret)
- return ret;
- /*
- * Grab the mutex lock in the blocking case only
- * to ensure messages do not get fragmented/reordered.
- * The non blocking mode is protected using spin locks
- * in _scif_recv().
- */
- if (flags & SCIF_RECV_BLOCK)
- mutex_lock(&ep->recvlock);
-
- ret = _scif_recv(epd, msg, len, flags);
-
- if (flags & SCIF_RECV_BLOCK)
- mutex_unlock(&ep->recvlock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(scif_recv);
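-
-/*
- * Illustrative sketch, not part of the original driver: a blocking
- * request/response exchange over a connected endpoint.  With
- * SCIF_SEND_BLOCK/SCIF_RECV_BLOCK the calls normally return only once
- * all len bytes have moved, but a signal or a disconnect can still
- * produce a short count, which this sketch treats as an error.
- */
-static int example_exchange(scif_epd_t epd, void *buf, int len)
-{
- int ret;
-
- ret = scif_send(epd, buf, len, SCIF_SEND_BLOCK);
- if (ret < 0)
- return ret; /* e.g. -ECONNRESET if the peer went away */
- if (ret != len)
- return -EINTR; /* short send: interrupted by a signal */
- ret = scif_recv(epd, buf, len, SCIF_RECV_BLOCK);
- if (ret < 0)
- return ret;
- return ret == len ? 0 : -EINTR;
-}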
-
-static inline void _scif_poll_wait(struct file *f, wait_queue_head_t *wq,
- poll_table *p, struct scif_endpt *ep)
-{
- /*
- * Because poll_wait makes a GFP_KERNEL allocation, give up the lock
- * and regrab it afterwards. Because the endpoint state might have
- * changed while the lock was given up, the state must be checked
- * again after re-acquiring the lock. The code in __scif_pollfd(..)
- * does this.
- */
- spin_unlock(&ep->lock);
- poll_wait(f, wq, p);
- spin_lock(&ep->lock);
-}
-
-__poll_t
-__scif_pollfd(struct file *f, poll_table *wait, struct scif_endpt *ep)
-{
- __poll_t mask = 0;
-
- dev_dbg(scif_info.mdev.this_device,
- "SCIFAPI pollfd: ep %p %s\n", ep, scif_ep_states[ep->state]);
-
- spin_lock(&ep->lock);
-
- /* Endpoint is waiting for a non-blocking connect to complete */
- if (ep->conn_async_state == ASYNC_CONN_INPROGRESS) {
- _scif_poll_wait(f, &ep->conn_pend_wq, wait, ep);
- if (ep->conn_async_state == ASYNC_CONN_INPROGRESS) {
- if (ep->state == SCIFEP_CONNECTED ||
- ep->state == SCIFEP_DISCONNECTED ||
- ep->conn_err)
- mask |= EPOLLOUT;
- goto exit;
- }
- }
-
- /* Endpoint is listening for incoming connection requests */
- if (ep->state == SCIFEP_LISTENING) {
- _scif_poll_wait(f, &ep->conwq, wait, ep);
- if (ep->state == SCIFEP_LISTENING) {
- if (ep->conreqcnt)
- mask |= EPOLLIN;
- goto exit;
- }
- }
-
- /* Endpoint is connected or disconnected */
- if (ep->state == SCIFEP_CONNECTED || ep->state == SCIFEP_DISCONNECTED) {
- if (poll_requested_events(wait) & EPOLLIN)
- _scif_poll_wait(f, &ep->recvwq, wait, ep);
- if (poll_requested_events(wait) & EPOLLOUT)
- _scif_poll_wait(f, &ep->sendwq, wait, ep);
- if (ep->state == SCIFEP_CONNECTED ||
- ep->state == SCIFEP_DISCONNECTED) {
- /* Data can be read without blocking */
- if (scif_rb_count(&ep->qp_info.qp->inbound_q, 1))
- mask |= EPOLLIN;
- /* Data can be written without blocking */
- if (scif_rb_space(&ep->qp_info.qp->outbound_q))
- mask |= EPOLLOUT;
- /* Return EPOLLHUP if endpoint is disconnected */
- if (ep->state == SCIFEP_DISCONNECTED)
- mask |= EPOLLHUP;
- goto exit;
- }
- }
-
- /* Return EPOLLERR if the endpoint is in none of the above states */
- mask |= EPOLLERR;
-exit:
- spin_unlock(&ep->lock);
- return mask;
-}
-
-/**
- * scif_poll() - Kernel mode SCIF poll
- * @ufds: Array of scif_pollepd structures containing the end points
- * and events to poll on
- * @nfds: Size of the ufds array
- * @timeout_msecs: Timeout in msecs; a negative value implies an infinite timeout
- *
- * The code flow in this function is based on do_poll(..) in select.c
- *
- * Returns the number of endpoints which have pending events or 0 in
- * the event of a timeout. If a signal is used for wake up, -EINTR is
- * returned.
- */
-int
-scif_poll(struct scif_pollepd *ufds, unsigned int nfds, long timeout_msecs)
-{
- struct poll_wqueues table;
- poll_table *pt;
- int i, count = 0, timed_out = timeout_msecs == 0;
- __poll_t mask;
- u64 timeout = timeout_msecs < 0 ? MAX_SCHEDULE_TIMEOUT
- : msecs_to_jiffies(timeout_msecs);
-
- poll_initwait(&table);
- pt = &table.pt;
- while (1) {
- for (i = 0; i < nfds; i++) {
- pt->_key = ufds[i].events | EPOLLERR | EPOLLHUP;
- mask = __scif_pollfd(ufds[i].epd->anon,
- pt, ufds[i].epd);
- mask &= ufds[i].events | EPOLLERR | EPOLLHUP;
- if (mask) {
- count++;
- pt->_qproc = NULL;
- }
- ufds[i].revents = mask;
- }
- pt->_qproc = NULL;
- if (!count) {
- count = table.error;
- if (signal_pending(current))
- count = -EINTR;
- }
- if (count || timed_out)
- break;
-
- if (!schedule_timeout_interruptible(timeout))
- timed_out = 1;
- }
- poll_freewait(&table);
- return count;
-}
-EXPORT_SYMBOL_GPL(scif_poll);
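-
-/*
- * Illustrative sketch, not part of the original driver: waiting for
- * either of two endpoints to become readable via scif_poll().  The
- * struct scif_pollepd epd/events/revents layout mirrors struct pollfd.
- */
-static int example_wait_readable(scif_epd_t a, scif_epd_t b)
-{
- struct scif_pollepd epds[2] = {
- { .epd = a, .events = EPOLLIN },
- { .epd = b, .events = EPOLLIN },
- };
- int ready;
-
- ready = scif_poll(epds, 2, 1000); /* 1000 ms timeout */
- if (ready < 0)
- return ready; /* -EINTR if a signal woke us up */
- if (!ready)
- return -ETIMEDOUT; /* scif_poll() returns 0 on timeout */
- return (epds[0].revents & EPOLLIN) ? 0 : 1; /* index of a ready epd */
-}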
-
-int scif_get_node_ids(u16 *nodes, int len, u16 *self)
-{
- int online = 0;
- int offset = 0;
- int node;
-
- if (!scif_is_mgmt_node())
- scif_get_node_info();
-
- *self = scif_info.nodeid;
- mutex_lock(&scif_info.conflock);
- len = min_t(int, len, scif_info.total);
- for (node = 0; node <= scif_info.maxid; node++) {
- if (_scifdev_alive(&scif_dev[node])) {
- online++;
- if (offset < len)
- nodes[offset++] = node;
- }
- }
- dev_dbg(scif_info.mdev.this_device,
- "SCIFAPI get_node_ids total %d online %d filled in %d nodes\n",
- scif_info.total, online, offset);
- mutex_unlock(&scif_info.conflock);
-
- return online;
-}
-EXPORT_SYMBOL_GPL(scif_get_node_ids);
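-
-/*
- * Illustrative sketch, not part of the original driver: enumerating the
- * online SCIF nodes.  The array size of 32 is an arbitrary bound chosen
- * for this sketch; scif_get_node_ids() fills in at most len entries and
- * returns the total number of online nodes.
- */
-static void example_list_nodes(void)
-{
- u16 nodes[32];
- u16 self;
- int online, i;
-
- online = scif_get_node_ids(nodes, ARRAY_SIZE(nodes), &self);
- pr_info("self node %u, %d node(s) online\n", self, online);
- for (i = 0; i < min_t(int, online, ARRAY_SIZE(nodes)); i++)
- pr_info(" node %u\n", nodes[i]);
-}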
-
-static int scif_add_client_dev(struct device *dev, struct subsys_interface *si)
-{
- struct scif_client *client =
- container_of(si, struct scif_client, si);
- struct scif_peer_dev *spdev =
- container_of(dev, struct scif_peer_dev, dev);
-
- if (client->probe)
- client->probe(spdev);
- return 0;
-}
-
-static void scif_remove_client_dev(struct device *dev,
- struct subsys_interface *si)
-{
- struct scif_client *client =
- container_of(si, struct scif_client, si);
- struct scif_peer_dev *spdev =
- container_of(dev, struct scif_peer_dev, dev);
-
- if (client->remove)
- client->remove(spdev);
-}
-
-void scif_client_unregister(struct scif_client *client)
-{
- subsys_interface_unregister(&client->si);
-}
-EXPORT_SYMBOL_GPL(scif_client_unregister);
-
-int scif_client_register(struct scif_client *client)
-{
- struct subsys_interface *si = &client->si;
-
- si->name = client->name;
- si->subsys = &scif_peer_bus;
- si->add_dev = scif_add_client_dev;
- si->remove_dev = scif_remove_client_dev;
-
- return subsys_interface_register(&client->si);
-}
-EXPORT_SYMBOL_GPL(scif_client_register);
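-
-/*
- * Illustrative sketch, not part of the original driver: registering a
- * SCIF client to be notified as peer devices come and go.  The dnode
- * field of struct scif_peer_dev is assumed to hold the peer node id;
- * the probe/remove bodies are placeholders.
- */
-static void example_probe(struct scif_peer_dev *spdev)
-{
- dev_info(&spdev->dev, "peer node %u came online\n", spdev->dnode);
-}
-
-static void example_remove(struct scif_peer_dev *spdev)
-{
- dev_info(&spdev->dev, "peer node %u went away\n", spdev->dnode);
-}
-
-static struct scif_client example_client = {
- .name = "example_scif_client",
- .probe = example_probe,
- .remove = example_remove,
-};
-
-/* Typically called from module_init(): scif_client_register(&example_client); */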
diff --git a/drivers/misc/mic/scif/scif_debugfs.c b/drivers/misc/mic/scif/scif_debugfs.c
deleted file mode 100644
index 8fe38e7ca6e6..000000000000
--- a/drivers/misc/mic/scif/scif_debugfs.c
+++ /dev/null
@@ -1,116 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Intel SCIF driver.
- */
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-
-#include "../common/mic_dev.h"
-#include "scif_main.h"
-
-/* Debugfs parent dir */
-static struct dentry *scif_dbg;
-
-static int scif_dev_show(struct seq_file *s, void *unused)
-{
- int node;
-
- seq_printf(s, "Total Nodes %d Self Node Id %d Maxid %d\n",
- scif_info.total, scif_info.nodeid,
- scif_info.maxid);
-
- if (!scif_dev)
- return 0;
-
- seq_printf(s, "%-16s\t%-16s\n", "node_id", "state");
-
- for (node = 0; node <= scif_info.maxid; node++)
- seq_printf(s, "%-16d\t%-16s\n", scif_dev[node].node,
- _scifdev_alive(&scif_dev[node]) ?
- "Running" : "Offline");
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(scif_dev);
-
-static void scif_display_window(struct scif_window *window, struct seq_file *s)
-{
- int j;
- struct scatterlist *sg;
- scif_pinned_pages_t pin = window->pinned_pages;
-
- seq_printf(s, "window %p type %d temp %d offset 0x%llx ",
- window, window->type, window->temp, window->offset);
- seq_printf(s, "nr_pages 0x%llx nr_contig_chunks 0x%x prot %d ",
- window->nr_pages, window->nr_contig_chunks, window->prot);
- seq_printf(s, "ref_count %d magic 0x%llx peer_window 0x%llx ",
- window->ref_count, window->magic, window->peer_window);
- seq_printf(s, "unreg_state 0x%x va_for_temp 0x%lx\n",
- window->unreg_state, window->va_for_temp);
-
- for (j = 0; j < window->nr_contig_chunks; j++)
- seq_printf(s, "page[%d] dma_addr 0x%llx num_pages 0x%llx\n", j,
- window->dma_addr[j], window->num_pages[j]);
-
- if (window->type == SCIF_WINDOW_SELF && pin)
- for (j = 0; j < window->nr_pages; j++)
- seq_printf(s, "page[%d] = pinned_pages %p address %p\n",
- j, pin->pages[j],
- page_address(pin->pages[j]));
-
- if (window->st)
- for_each_sg(window->st->sgl, sg, window->st->nents, j)
- seq_printf(s, "sg[%d] dma addr 0x%llx length 0x%x\n",
- j, sg_dma_address(sg), sg_dma_len(sg));
-}
-
-static void scif_display_all_windows(struct list_head *head, struct seq_file *s)
-{
- struct list_head *item;
- struct scif_window *window;
-
- list_for_each(item, head) {
- window = list_entry(item, struct scif_window, list);
- scif_display_window(window, s);
- }
-}
-
-static int scif_rma_show(struct seq_file *s, void *unused)
-{
- struct scif_endpt *ep;
- struct list_head *pos;
-
- mutex_lock(&scif_info.connlock);
- list_for_each(pos, &scif_info.connected) {
- ep = list_entry(pos, struct scif_endpt, list);
- seq_printf(s, "ep %p self windows\n", ep);
- mutex_lock(&ep->rma_info.rma_lock);
- scif_display_all_windows(&ep->rma_info.reg_list, s);
- seq_printf(s, "ep %p remote windows\n", ep);
- scif_display_all_windows(&ep->rma_info.remote_reg_list, s);
- mutex_unlock(&ep->rma_info.rma_lock);
- }
- mutex_unlock(&scif_info.connlock);
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(scif_rma);
-
-void __init scif_init_debugfs(void)
-{
- scif_dbg = debugfs_create_dir(KBUILD_MODNAME, NULL);
-
- debugfs_create_file("scif_dev", 0444, scif_dbg, NULL, &scif_dev_fops);
- debugfs_create_file("scif_rma", 0444, scif_dbg, NULL, &scif_rma_fops);
- debugfs_create_u8("en_msg_log", 0666, scif_dbg, &scif_info.en_msg_log);
- debugfs_create_u8("p2p_enable", 0666, scif_dbg, &scif_info.p2p_enable);
-}
-
-void scif_exit_debugfs(void)
-{
- debugfs_remove_recursive(scif_dbg);
-}
diff --git a/drivers/misc/mic/scif/scif_dma.c b/drivers/misc/mic/scif/scif_dma.c
deleted file mode 100644
index 401b98e5ad79..000000000000
--- a/drivers/misc/mic/scif/scif_dma.c
+++ /dev/null
@@ -1,1940 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2015 Intel Corporation.
- *
- * Intel SCIF driver.
- */
-#include "scif_main.h"
-#include "scif_map.h"
-
-/*
- * struct scif_dma_comp_cb - SCIF DMA completion callback
- *
- * @dma_completion_func: DMA completion callback
- * @cb_cookie: DMA completion callback cookie
- * @temp_buf: Temporary buffer
- * @temp_buf_to_free: Temporary buffer to be freed
- * @is_cache: Is a kmem_cache allocated buffer
- * @dst_offset: Destination registration offset
- * @dst_window: Destination registration window
- * @len: Length of the temp buffer
- * @temp_phys: DMA address of the temp buffer
- * @sdev: The SCIF device
- * @header_padding: padding for cache line alignment
- */
-struct scif_dma_comp_cb {
- void (*dma_completion_func)(void *cookie);
- void *cb_cookie;
- u8 *temp_buf;
- u8 *temp_buf_to_free;
- bool is_cache;
- s64 dst_offset;
- struct scif_window *dst_window;
- size_t len;
- dma_addr_t temp_phys;
- struct scif_dev *sdev;
- int header_padding;
-};
-
-/**
- * struct scif_copy_work - Work for DMA copy
- *
- * @src_offset: Starting source offset
- * @dst_offset: Starting destination offset
- * @src_window: Starting src registered window
- * @dst_window: Starting dst registered window
- * @loopback: true if this is a loopback DMA transfer
- * @len: Length of the transfer
- * @comp_cb: DMA copy completion callback
- * @remote_dev: The remote SCIF peer device
- * @fence_type: polling or interrupt based
- * @ordered: is this a tail byte ordered DMA transfer
- */
-struct scif_copy_work {
- s64 src_offset;
- s64 dst_offset;
- struct scif_window *src_window;
- struct scif_window *dst_window;
- int loopback;
- size_t len;
- struct scif_dma_comp_cb *comp_cb;
- struct scif_dev *remote_dev;
- int fence_type;
- bool ordered;
-};
-
-/**
- * scif_reserve_dma_chan:
- * @ep: Endpoint Descriptor.
- *
- * This routine reserves a DMA channel for a particular
- * endpoint. All DMA transfers for an endpoint are always
- * programmed on the same DMA channel.
- */
-int scif_reserve_dma_chan(struct scif_endpt *ep)
-{
- int err = 0;
- struct scif_dev *scifdev;
- struct scif_hw_dev *sdev;
- struct dma_chan *chan;
-
- /* Loopback DMAs are not supported on the management node */
- if (!scif_info.nodeid && scifdev_self(ep->remote_dev))
- return 0;
- if (scif_info.nodeid)
- scifdev = &scif_dev[0];
- else
- scifdev = ep->remote_dev;
- sdev = scifdev->sdev;
- if (!sdev->num_dma_ch)
- return -ENODEV;
- chan = sdev->dma_ch[scifdev->dma_ch_idx];
- scifdev->dma_ch_idx = (scifdev->dma_ch_idx + 1) % sdev->num_dma_ch;
- mutex_lock(&ep->rma_info.rma_lock);
- ep->rma_info.dma_chan = chan;
- mutex_unlock(&ep->rma_info.rma_lock);
- return err;
-}
-
-#ifdef CONFIG_MMU_NOTIFIER
-/*
- * __scif_rma_destroy_tcw:
- *
- * This routine destroys temporary cached windows
- */
-static
-void __scif_rma_destroy_tcw(struct scif_mmu_notif *mmn,
- u64 start, u64 len)
-{
- struct list_head *item, *tmp;
- struct scif_window *window;
- u64 start_va, end_va;
- u64 end = start + len;
-
- if (end <= start)
- return;
-
- list_for_each_safe(item, tmp, &mmn->tc_reg_list) {
- window = list_entry(item, struct scif_window, list);
- if (!len)
- break;
- start_va = window->va_for_temp;
- end_va = start_va + (window->nr_pages << PAGE_SHIFT);
- if (start < start_va && end <= start_va)
- break;
- if (start >= end_va)
- continue;
- __scif_rma_destroy_tcw_helper(window);
- }
-}
-
-static void scif_rma_destroy_tcw(struct scif_mmu_notif *mmn, u64 start, u64 len)
-{
- struct scif_endpt *ep = mmn->ep;
-
- spin_lock(&ep->rma_info.tc_lock);
- __scif_rma_destroy_tcw(mmn, start, len);
- spin_unlock(&ep->rma_info.tc_lock);
-}
-
-static void scif_rma_destroy_tcw_ep(struct scif_endpt *ep)
-{
- struct list_head *item, *tmp;
- struct scif_mmu_notif *mmn;
-
- list_for_each_safe(item, tmp, &ep->rma_info.mmn_list) {
- mmn = list_entry(item, struct scif_mmu_notif, list);
- scif_rma_destroy_tcw(mmn, 0, ULONG_MAX);
- }
-}
-
-static void __scif_rma_destroy_tcw_ep(struct scif_endpt *ep)
-{
- struct list_head *item, *tmp;
- struct scif_mmu_notif *mmn;
-
- spin_lock(&ep->rma_info.tc_lock);
- list_for_each_safe(item, tmp, &ep->rma_info.mmn_list) {
- mmn = list_entry(item, struct scif_mmu_notif, list);
- __scif_rma_destroy_tcw(mmn, 0, ULONG_MAX);
- }
- spin_unlock(&ep->rma_info.tc_lock);
-}
-
-static bool scif_rma_tc_can_cache(struct scif_endpt *ep, size_t cur_bytes)
-{
- if ((cur_bytes >> PAGE_SHIFT) > scif_info.rma_tc_limit)
- return false;
- if ((atomic_read(&ep->rma_info.tcw_total_pages)
- + (cur_bytes >> PAGE_SHIFT)) >
- scif_info.rma_tc_limit) {
- dev_info(scif_info.mdev.this_device,
- "%s %d total=%d, current=%zu reached max\n",
- __func__, __LINE__,
- atomic_read(&ep->rma_info.tcw_total_pages),
- (1 + (cur_bytes >> PAGE_SHIFT)));
- scif_rma_destroy_tcw_invalid();
- __scif_rma_destroy_tcw_ep(ep);
- }
- return true;
-}
-
-static void scif_mmu_notifier_release(struct mmu_notifier *mn,
- struct mm_struct *mm)
-{
- struct scif_mmu_notif *mmn;
-
- mmn = container_of(mn, struct scif_mmu_notif, ep_mmu_notifier);
- scif_rma_destroy_tcw(mmn, 0, ULONG_MAX);
- schedule_work(&scif_info.misc_work);
-}
-
-static int scif_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
- const struct mmu_notifier_range *range)
-{
- struct scif_mmu_notif *mmn;
-
- mmn = container_of(mn, struct scif_mmu_notif, ep_mmu_notifier);
- scif_rma_destroy_tcw(mmn, range->start, range->end - range->start);
-
- return 0;
-}
-
-static void scif_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
- const struct mmu_notifier_range *range)
-{
- /*
- * Nothing to do here, everything needed was done in
- * invalidate_range_start.
- */
-}
-
-static const struct mmu_notifier_ops scif_mmu_notifier_ops = {
- .release = scif_mmu_notifier_release,
- .clear_flush_young = NULL,
- .invalidate_range_start = scif_mmu_notifier_invalidate_range_start,
- .invalidate_range_end = scif_mmu_notifier_invalidate_range_end};
-
-static void scif_ep_unregister_mmu_notifier(struct scif_endpt *ep)
-{
- struct scif_endpt_rma_info *rma = &ep->rma_info;
- struct scif_mmu_notif *mmn = NULL;
- struct list_head *item, *tmp;
-
- mutex_lock(&ep->rma_info.mmn_lock);
- list_for_each_safe(item, tmp, &rma->mmn_list) {
- mmn = list_entry(item, struct scif_mmu_notif, list);
- mmu_notifier_unregister(&mmn->ep_mmu_notifier, mmn->mm);
- list_del(item);
- kfree(mmn);
- }
- mutex_unlock(&ep->rma_info.mmn_lock);
-}
-
-static void scif_init_mmu_notifier(struct scif_mmu_notif *mmn,
- struct mm_struct *mm, struct scif_endpt *ep)
-{
- mmn->ep = ep;
- mmn->mm = mm;
- mmn->ep_mmu_notifier.ops = &scif_mmu_notifier_ops;
- INIT_LIST_HEAD(&mmn->list);
- INIT_LIST_HEAD(&mmn->tc_reg_list);
-}
-
-static struct scif_mmu_notif *
-scif_find_mmu_notifier(struct mm_struct *mm, struct scif_endpt_rma_info *rma)
-{
- struct scif_mmu_notif *mmn;
-
- list_for_each_entry(mmn, &rma->mmn_list, list)
- if (mmn->mm == mm)
- return mmn;
- return NULL;
-}
-
-static struct scif_mmu_notif *
-scif_add_mmu_notifier(struct mm_struct *mm, struct scif_endpt *ep)
-{
- struct scif_mmu_notif *mmn
- = kzalloc(sizeof(*mmn), GFP_KERNEL);
-
- if (!mmn)
- return ERR_PTR(-ENOMEM);
-
- scif_init_mmu_notifier(mmn, current->mm, ep);
- if (mmu_notifier_register(&mmn->ep_mmu_notifier, current->mm)) {
- kfree(mmn);
- return ERR_PTR(-EBUSY);
- }
- list_add(&mmn->list, &ep->rma_info.mmn_list);
- return mmn;
-}
-
-/*
- * Called from the misc thread to destroy temporary cached windows and
- * unregister the MMU notifier for the SCIF endpoint.
- */
-void scif_mmu_notif_handler(struct work_struct *work)
-{
- struct list_head *pos, *tmpq;
- struct scif_endpt *ep;
-restart:
- scif_rma_destroy_tcw_invalid();
- spin_lock(&scif_info.rmalock);
- list_for_each_safe(pos, tmpq, &scif_info.mmu_notif_cleanup) {
- ep = list_entry(pos, struct scif_endpt, mmu_list);
- list_del(&ep->mmu_list);
- spin_unlock(&scif_info.rmalock);
- scif_rma_destroy_tcw_ep(ep);
- scif_ep_unregister_mmu_notifier(ep);
- goto restart;
- }
- spin_unlock(&scif_info.rmalock);
-}
-
-static bool scif_is_set_reg_cache(int flags)
-{
- return !!(flags & SCIF_RMA_USECACHE);
-}
-#else
-static struct scif_mmu_notif *
-scif_find_mmu_notifier(struct mm_struct *mm,
- struct scif_endpt_rma_info *rma)
-{
- return NULL;
-}
-
-static struct scif_mmu_notif *
-scif_add_mmu_notifier(struct mm_struct *mm, struct scif_endpt *ep)
-{
- return NULL;
-}
-
-void scif_mmu_notif_handler(struct work_struct *work)
-{
-}
-
-static bool scif_is_set_reg_cache(int flags)
-{
- return false;
-}
-
-static bool scif_rma_tc_can_cache(struct scif_endpt *ep, size_t cur_bytes)
-{
- return false;
-}
-#endif
-
-/**
- * scif_register_temp:
- * @epd: End Point Descriptor.
- * @addr: virtual address to/from which to copy
- * @len: length of range to copy
- * @prot: read/write protection
- * @out_offset: computed offset returned by reference.
- * @out_window: allocated registered window returned by reference.
- *
- * Create a temporary registered window. The peer will not know about this
- * window. This API is used for scif_vreadfrom()/scif_vwriteto() API's.
- */
-static int
-scif_register_temp(scif_epd_t epd, unsigned long addr, size_t len, int prot,
- off_t *out_offset, struct scif_window **out_window)
-{
- struct scif_endpt *ep = (struct scif_endpt *)epd;
- int err;
- scif_pinned_pages_t pinned_pages;
- size_t aligned_len;
-
- aligned_len = ALIGN(len, PAGE_SIZE);
-
- err = __scif_pin_pages((void *)(addr & PAGE_MASK),
- aligned_len, &prot, 0, &pinned_pages);
- if (err)
- return err;
-
- pinned_pages->prot = prot;
-
- /* Compute the offset for this registration */
- err = scif_get_window_offset(ep, 0, 0,
- aligned_len >> PAGE_SHIFT,
- (s64 *)out_offset);
- if (err)
- goto error_unpin;
-
- /* Allocate and prepare self registration window */
- *out_window = scif_create_window(ep, aligned_len >> PAGE_SHIFT,
- *out_offset, true);
- if (!*out_window) {
- scif_free_window_offset(ep, NULL, *out_offset);
- err = -ENOMEM;
- goto error_unpin;
- }
-
- (*out_window)->pinned_pages = pinned_pages;
- (*out_window)->nr_pages = pinned_pages->nr_pages;
- (*out_window)->prot = pinned_pages->prot;
-
- (*out_window)->va_for_temp = addr & PAGE_MASK;
- err = scif_map_window(ep->remote_dev, *out_window);
- if (err) {
- /* Something went wrong! Rollback */
- scif_destroy_window(ep, *out_window);
- *out_window = NULL;
- } else {
- *out_offset |= (addr - (*out_window)->va_for_temp);
- }
- return err;
-error_unpin:
- if (err)
- dev_err(&ep->remote_dev->sdev->dev,
- "%s %d err %d\n", __func__, __LINE__, err);
- scif_unpin_pages(pinned_pages);
- return err;
-}
-
-#define SCIF_DMA_TO (3 * HZ)
-
-/*
- * scif_sync_dma - Program a DMA without an interrupt descriptor
- *
- * @sdev - The SCIF hardware device instance used
- * for DMA registration.
- * @chan - DMA channel to be used.
- * @sync_wait: Wait for DMA to complete?
- *
- * Return 0 on success and -errno on error.
- */
-static int scif_sync_dma(struct scif_hw_dev *sdev, struct dma_chan *chan,
- bool sync_wait)
-{
- int err = 0;
- struct dma_async_tx_descriptor *tx = NULL;
- enum dma_ctrl_flags flags = DMA_PREP_FENCE;
- dma_cookie_t cookie;
- struct dma_device *ddev;
-
- if (!chan) {
- err = -EIO;
- dev_err(&sdev->dev, "%s %d err %d\n",
- __func__, __LINE__, err);
- return err;
- }
- ddev = chan->device;
-
- tx = ddev->device_prep_dma_memcpy(chan, 0, 0, 0, flags);
- if (!tx) {
- err = -ENOMEM;
- dev_err(&sdev->dev, "%s %d err %d\n",
- __func__, __LINE__, err);
- goto release;
- }
- cookie = tx->tx_submit(tx);
-
- if (dma_submit_error(cookie)) {
- err = -ENOMEM;
- dev_err(&sdev->dev, "%s %d err %d\n",
- __func__, __LINE__, err);
- goto release;
- }
- if (!sync_wait) {
- dma_async_issue_pending(chan);
- } else {
- if (dma_sync_wait(chan, cookie) == DMA_COMPLETE) {
- err = 0;
- } else {
- err = -EIO;
- dev_err(&sdev->dev, "%s %d err %d\n",
- __func__, __LINE__, err);
- }
- }
-release:
- return err;
-}
-
-static void scif_dma_callback(void *arg)
-{
- struct completion *done = (struct completion *)arg;
-
- complete(done);
-}
-
-#define SCIF_DMA_SYNC_WAIT true
-#define SCIF_DMA_POLL BIT(0)
-#define SCIF_DMA_INTR BIT(1)
-
-/*
- * scif_async_dma - Program a DMA with an interrupt descriptor
- *
- * @sdev - The SCIF hardware device instance used
- * for DMA registration.
- * @chan - DMA channel to be used.
- * Return 0 on success and -errno on error.
- */
-static int scif_async_dma(struct scif_hw_dev *sdev, struct dma_chan *chan)
-{
- int err = 0;
- struct dma_device *ddev;
- struct dma_async_tx_descriptor *tx = NULL;
- enum dma_ctrl_flags flags = DMA_PREP_INTERRUPT | DMA_PREP_FENCE;
- DECLARE_COMPLETION_ONSTACK(done_wait);
- dma_cookie_t cookie;
- enum dma_status status;
-
- if (!chan) {
- err = -EIO;
- dev_err(&sdev->dev, "%s %d err %d\n",
- __func__, __LINE__, err);
- return err;
- }
- ddev = chan->device;
-
- tx = ddev->device_prep_dma_memcpy(chan, 0, 0, 0, flags);
- if (!tx) {
- err = -ENOMEM;
- dev_err(&sdev->dev, "%s %d err %d\n",
- __func__, __LINE__, err);
- goto release;
- }
- reinit_completion(&done_wait);
- tx->callback = scif_dma_callback;
- tx->callback_param = &done_wait;
- cookie = tx->tx_submit(tx);
-
- if (dma_submit_error(cookie)) {
- err = -ENOMEM;
- dev_err(&sdev->dev, "%s %d err %d\n",
- __func__, __LINE__, err);
- goto release;
- }
- dma_async_issue_pending(chan);
-
- err = wait_for_completion_timeout(&done_wait, SCIF_DMA_TO);
- if (!err) {
- err = -EIO;
- dev_err(&sdev->dev, "%s %d err %d\n",
- __func__, __LINE__, err);
- goto release;
- }
- err = 0;
- status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
- if (status != DMA_COMPLETE) {
- err = -EIO;
- dev_err(&sdev->dev, "%s %d err %d\n",
- __func__, __LINE__, err);
- goto release;
- }
-release:
- return err;
-}
-
-/*
- * scif_drain_dma_poll - Drain all outstanding DMA operations for a particular
- * DMA channel via polling.
- *
- * @sdev - The SCIF device
- * @chan - DMA channel
- * Return 0 on success and -errno on error.
- */
-static int scif_drain_dma_poll(struct scif_hw_dev *sdev, struct dma_chan *chan)
-{
- if (!chan)
- return -EINVAL;
- return scif_sync_dma(sdev, chan, SCIF_DMA_SYNC_WAIT);
-}
-
-/*
- * scif_drain_dma_intr - Drain all outstanding DMA operations for a particular
- * DMA channel via interrupt based blocking wait.
- *
- * @sdev - The SCIF device
- * @chan - DMA channel
- * Return 0 on success and -errno on error.
- */
-int scif_drain_dma_intr(struct scif_hw_dev *sdev, struct dma_chan *chan)
-{
- if (!chan)
- return -EINVAL;
- return scif_async_dma(sdev, chan);
-}
-
-/**
- * scif_rma_destroy_windows:
- *
- * This routine destroys all windows queued for cleanup
- */
-void scif_rma_destroy_windows(void)
-{
- struct list_head *item, *tmp;
- struct scif_window *window;
- struct scif_endpt *ep;
- struct dma_chan *chan;
-
- might_sleep();
-restart:
- spin_lock(&scif_info.rmalock);
- list_for_each_safe(item, tmp, &scif_info.rma) {
- window = list_entry(item, struct scif_window,
- list);
- ep = (struct scif_endpt *)window->ep;
- chan = ep->rma_info.dma_chan;
-
- list_del_init(&window->list);
- spin_unlock(&scif_info.rmalock);
- if (!chan || !scifdev_alive(ep) ||
- !scif_drain_dma_intr(ep->remote_dev->sdev,
- ep->rma_info.dma_chan))
- /* Remove window from global list */
- window->unreg_state = OP_COMPLETED;
- else
- dev_warn(&ep->remote_dev->sdev->dev,
- "DMA engine hung?\n");
- if (window->unreg_state == OP_COMPLETED) {
- if (window->type == SCIF_WINDOW_SELF)
- scif_destroy_window(ep, window);
- else
- scif_destroy_remote_window(window);
- atomic_dec(&ep->rma_info.tw_refcount);
- }
- goto restart;
- }
- spin_unlock(&scif_info.rmalock);
-}
-
-/**
- * scif_rma_destroy_tcw_invalid:
- *
- * This routine destroys temporary cached registered windows
- * which have been queued for cleanup.
- */
-void scif_rma_destroy_tcw_invalid(void)
-{
- struct list_head *item, *tmp;
- struct scif_window *window;
- struct scif_endpt *ep;
- struct dma_chan *chan;
-
- might_sleep();
-restart:
- spin_lock(&scif_info.rmalock);
- list_for_each_safe(item, tmp, &scif_info.rma_tc) {
- window = list_entry(item, struct scif_window, list);
- ep = (struct scif_endpt *)window->ep;
- chan = ep->rma_info.dma_chan;
- list_del_init(&window->list);
- spin_unlock(&scif_info.rmalock);
- mutex_lock(&ep->rma_info.rma_lock);
- if (!chan || !scifdev_alive(ep) ||
- !scif_drain_dma_intr(ep->remote_dev->sdev,
- ep->rma_info.dma_chan)) {
- atomic_sub(window->nr_pages,
- &ep->rma_info.tcw_total_pages);
- scif_destroy_window(ep, window);
- atomic_dec(&ep->rma_info.tcw_refcount);
- } else {
- dev_warn(&ep->remote_dev->sdev->dev,
- "DMA engine hung?\n");
- }
- mutex_unlock(&ep->rma_info.rma_lock);
- goto restart;
- }
- spin_unlock(&scif_info.rmalock);
-}
-
-static inline
-void *_get_local_va(off_t off, struct scif_window *window, size_t len)
-{
- int page_nr = (off - window->offset) >> PAGE_SHIFT;
- off_t page_off = off & ~PAGE_MASK;
- void *va = NULL;
-
- if (window->type == SCIF_WINDOW_SELF) {
- struct page **pages = window->pinned_pages->pages;
-
- va = page_address(pages[page_nr]) + page_off;
- }
- return va;
-}
-
-static inline
-void *ioremap_remote(off_t off, struct scif_window *window,
- size_t len, struct scif_dev *dev,
- struct scif_window_iter *iter)
-{
- dma_addr_t phys = scif_off_to_dma_addr(window, off, NULL, iter);
-
- /*
- * If the DMA address is not card relative then we need the DMA
- * addresses to be an offset into the BAR. The aperture base was already
- * added, so subtract it here since scif_ioremap is going to add it again.
- */
- if (!scifdev_self(dev) && window->type == SCIF_WINDOW_PEER &&
- dev->sdev->aper && !dev->sdev->card_rel_da)
- phys = phys - dev->sdev->aper->pa;
- return scif_ioremap(phys, len, dev);
-}
-
-static inline void
-iounmap_remote(void *virt, size_t size, struct scif_copy_work *work)
-{
- scif_iounmap(virt, size, work->remote_dev);
-}
-
-/*
- * Takes care of ordering issues caused by:
- * 1. Hardware: Only in the case of cpu copy from mgmt node to card
- * because of WC memory.
- * 2. Software: If memcpy reorders copy instructions for optimization.
- * This could happen at both mgmt node and card.
- */
-static inline void
-scif_ordered_memcpy_toio(char *dst, const char *src, size_t count)
-{
- if (!count)
- return;
-
- memcpy_toio((void __iomem __force *)dst, src, --count);
- /* Order the last byte with the previous stores */
- wmb();
- *(dst + count) = *(src + count);
-}
-
-static inline void scif_unaligned_cpy_toio(char *dst, const char *src,
- size_t count, bool ordered)
-{
- if (ordered)
- scif_ordered_memcpy_toio(dst, src, count);
- else
- memcpy_toio((void __iomem __force *)dst, src, count);
-}
-
-static inline
-void scif_ordered_memcpy_fromio(char *dst, const char *src, size_t count)
-{
- if (!count)
- return;
-
- memcpy_fromio(dst, (void __iomem __force *)src, --count);
- /* Order the last byte with the previous loads */
- rmb();
- *(dst + count) = *(src + count);
-}
-
-static inline void scif_unaligned_cpy_fromio(char *dst, const char *src,
- size_t count, bool ordered)
-{
- if (ordered)
- scif_ordered_memcpy_fromio(dst, src, count);
- else
- memcpy_fromio(dst, (void __iomem __force *)src, count);
-}
-
-#define SCIF_RMA_ERROR_CODE (~(dma_addr_t)0x0)
-
-/*
- * scif_off_to_dma_addr:
- * Obtain the dma_addr given the window and the offset.
- * @window: Registered window.
- * @off: Window offset.
- * @nr_bytes: Return the number of contiguous bytes till next DMA addr index.
- * @iter: Window iterator whose index/offset fields are updated to the
- * dma_addr array entry found.
- * The nr_bytes value provides the caller an estimate of the maximum
- * contiguous DMA transfer possible, while the iterator's index/offset
- * provide faster lookups for the next iteration.
- */
-dma_addr_t scif_off_to_dma_addr(struct scif_window *window, s64 off,
- size_t *nr_bytes, struct scif_window_iter *iter)
-{
- int i, page_nr;
- s64 start, end;
- off_t page_off;
-
- if (window->nr_pages == window->nr_contig_chunks) {
- page_nr = (off - window->offset) >> PAGE_SHIFT;
- page_off = off & ~PAGE_MASK;
-
- if (nr_bytes)
- *nr_bytes = PAGE_SIZE - page_off;
- return window->dma_addr[page_nr] | page_off;
- }
- if (iter) {
- i = iter->index;
- start = iter->offset;
- } else {
- i = 0;
- start = window->offset;
- }
- for (; i < window->nr_contig_chunks; i++) {
- end = start + (window->num_pages[i] << PAGE_SHIFT);
- if (off >= start && off < end) {
- if (iter) {
- iter->index = i;
- iter->offset = start;
- }
- if (nr_bytes)
- *nr_bytes = end - off;
- return (window->dma_addr[i] + (off - start));
- }
- start += (window->num_pages[i] << PAGE_SHIFT);
- }
- dev_err(scif_info.mdev.this_device,
- "%s %d BUG. Addr not found? window %p off 0x%llx\n",
- __func__, __LINE__, window, off);
- return SCIF_RMA_ERROR_CODE;
-}
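-
-/*
- * Illustrative sketch (editor's addition, not part of the original file):
- * walking a multi-chunk registered window with a scif_window_iter so each
- * lookup resumes from the previous chunk instead of rescanning from chunk
- * 0. The window/off/span names are hypothetical.
- */
-static inline void scif_example_walk_window(struct scif_window *window,
-					    s64 off, size_t span)
-{
-	struct scif_window_iter iter;
-	size_t nr_contig;
-	dma_addr_t da;
-
-	scif_init_window_iter(window, &iter);
-	while (span) {
-		da = scif_off_to_dma_addr(window, off, &nr_contig, &iter);
-		if (da == SCIF_RMA_ERROR_CODE)
-			return;
-		nr_contig = min(nr_contig, span);
-		/* a real caller would program a DMA descriptor here */
-		off += nr_contig;
-		span -= nr_contig;
-	}
-}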
-
-/*
- * Copy between rma window and temporary buffer
- */
-static void scif_rma_local_cpu_copy(s64 offset, struct scif_window *window,
- u8 *temp, size_t rem_len, bool to_temp)
-{
- void *window_virt;
- size_t loop_len;
- int offset_in_page;
- s64 end_offset;
-
- offset_in_page = offset & ~PAGE_MASK;
- loop_len = PAGE_SIZE - offset_in_page;
-
- if (rem_len < loop_len)
- loop_len = rem_len;
-
- window_virt = _get_local_va(offset, window, loop_len);
- if (!window_virt)
- return;
- if (to_temp)
- memcpy(temp, window_virt, loop_len);
- else
- memcpy(window_virt, temp, loop_len);
-
- offset += loop_len;
- temp += loop_len;
- rem_len -= loop_len;
-
- end_offset = window->offset +
- (window->nr_pages << PAGE_SHIFT);
- while (rem_len) {
- if (offset == end_offset) {
- window = list_next_entry(window, list);
- end_offset = window->offset +
- (window->nr_pages << PAGE_SHIFT);
- }
- loop_len = min(PAGE_SIZE, rem_len);
- window_virt = _get_local_va(offset, window, loop_len);
- if (!window_virt)
- return;
- if (to_temp)
- memcpy(temp, window_virt, loop_len);
- else
- memcpy(window_virt, temp, loop_len);
- offset += loop_len;
- temp += loop_len;
- rem_len -= loop_len;
- }
-}
-
-/**
- * scif_rma_completion_cb:
- * @data: RMA cookie
- *
- * RMA interrupt completion callback.
- */
-static void scif_rma_completion_cb(void *data)
-{
- struct scif_dma_comp_cb *comp_cb = data;
-
- /* Free DMA Completion CB. */
- if (comp_cb->dst_window)
- scif_rma_local_cpu_copy(comp_cb->dst_offset,
- comp_cb->dst_window,
- comp_cb->temp_buf +
- comp_cb->header_padding,
- comp_cb->len, false);
- scif_unmap_single(comp_cb->temp_phys, comp_cb->sdev,
- SCIF_KMEM_UNALIGNED_BUF_SIZE);
- if (comp_cb->is_cache)
- kmem_cache_free(unaligned_cache,
- comp_cb->temp_buf_to_free);
- else
- kfree(comp_cb->temp_buf_to_free);
-}
-
-/* Copies between temporary buffer and offsets provided in work */
-static int
-scif_rma_list_dma_copy_unaligned(struct scif_copy_work *work,
- u8 *temp, struct dma_chan *chan,
- bool src_local)
-{
- struct scif_dma_comp_cb *comp_cb = work->comp_cb;
- dma_addr_t window_dma_addr, temp_dma_addr;
- dma_addr_t temp_phys = comp_cb->temp_phys;
- size_t loop_len, nr_contig_bytes = 0, remaining_len = work->len;
- int offset_in_ca, ret = 0;
- s64 end_offset, offset;
- struct scif_window *window;
- void *window_virt_addr;
- size_t tail_len;
- struct dma_async_tx_descriptor *tx;
- struct dma_device *dev = chan->device;
- dma_cookie_t cookie;
-
- if (src_local) {
- offset = work->dst_offset;
- window = work->dst_window;
- } else {
- offset = work->src_offset;
- window = work->src_window;
- }
-
- offset_in_ca = offset & (L1_CACHE_BYTES - 1);
- if (offset_in_ca) {
- loop_len = L1_CACHE_BYTES - offset_in_ca;
- loop_len = min(loop_len, remaining_len);
- window_virt_addr = ioremap_remote(offset, window,
- loop_len,
- work->remote_dev,
- NULL);
- if (!window_virt_addr)
- return -ENOMEM;
- if (src_local)
- scif_unaligned_cpy_toio(window_virt_addr, temp,
- loop_len,
- work->ordered &&
- !(remaining_len - loop_len));
- else
- scif_unaligned_cpy_fromio(temp, window_virt_addr,
- loop_len, work->ordered &&
- !(remaining_len - loop_len));
- iounmap_remote(window_virt_addr, loop_len, work);
-
- offset += loop_len;
- temp += loop_len;
- temp_phys += loop_len;
- remaining_len -= loop_len;
- }
-
- offset_in_ca = offset & ~PAGE_MASK;
- end_offset = window->offset +
- (window->nr_pages << PAGE_SHIFT);
-
- tail_len = remaining_len & (L1_CACHE_BYTES - 1);
- remaining_len -= tail_len;
- while (remaining_len) {
- if (offset == end_offset) {
- window = list_next_entry(window, list);
- end_offset = window->offset +
- (window->nr_pages << PAGE_SHIFT);
- }
- if (scif_is_mgmt_node())
- temp_dma_addr = temp_phys;
- else
- /* Fix if we ever enable IOMMU on the card */
- temp_dma_addr = (dma_addr_t)virt_to_phys(temp);
- window_dma_addr = scif_off_to_dma_addr(window, offset,
- &nr_contig_bytes,
- NULL);
- loop_len = min(nr_contig_bytes, remaining_len);
- if (src_local) {
- if (work->ordered && !tail_len &&
- !(remaining_len - loop_len) &&
- loop_len != L1_CACHE_BYTES) {
-				/*
-				 * Break up the last chunk of the transfer into
-				 * two steps, if there is no tail, to guarantee
-				 * DMA ordering. SCIF_DMA_POLLING inserts
-				 * a status update descriptor in step 1 which
-				 * acts as a double-sided synchronization fence
-				 * for the DMA engine to ensure that the last
-				 * cache line in step 2 is updated last.
-				 */
- /* Step 1) DMA: Body Length - L1_CACHE_BYTES. */
- tx =
- dev->device_prep_dma_memcpy(chan,
- window_dma_addr,
- temp_dma_addr,
- loop_len -
- L1_CACHE_BYTES,
- DMA_PREP_FENCE);
- if (!tx) {
- ret = -ENOMEM;
- goto err;
- }
- cookie = tx->tx_submit(tx);
- if (dma_submit_error(cookie)) {
- ret = -ENOMEM;
- goto err;
- }
- dma_async_issue_pending(chan);
- offset += (loop_len - L1_CACHE_BYTES);
- temp_dma_addr += (loop_len - L1_CACHE_BYTES);
- window_dma_addr += (loop_len - L1_CACHE_BYTES);
- remaining_len -= (loop_len - L1_CACHE_BYTES);
- loop_len = remaining_len;
-
- /* Step 2) DMA: L1_CACHE_BYTES */
- tx =
- dev->device_prep_dma_memcpy(chan,
- window_dma_addr,
- temp_dma_addr,
- loop_len, 0);
- if (!tx) {
- ret = -ENOMEM;
- goto err;
- }
- cookie = tx->tx_submit(tx);
- if (dma_submit_error(cookie)) {
- ret = -ENOMEM;
- goto err;
- }
- dma_async_issue_pending(chan);
- } else {
- tx =
- dev->device_prep_dma_memcpy(chan,
- window_dma_addr,
- temp_dma_addr,
- loop_len, 0);
- if (!tx) {
- ret = -ENOMEM;
- goto err;
- }
- cookie = tx->tx_submit(tx);
- if (dma_submit_error(cookie)) {
- ret = -ENOMEM;
- goto err;
- }
- dma_async_issue_pending(chan);
- }
- } else {
- tx = dev->device_prep_dma_memcpy(chan, temp_dma_addr,
- window_dma_addr, loop_len, 0);
- if (!tx) {
- ret = -ENOMEM;
- goto err;
- }
- cookie = tx->tx_submit(tx);
- if (dma_submit_error(cookie)) {
- ret = -ENOMEM;
- goto err;
- }
- dma_async_issue_pending(chan);
- }
- offset += loop_len;
- temp += loop_len;
- temp_phys += loop_len;
- remaining_len -= loop_len;
- offset_in_ca = 0;
- }
- if (tail_len) {
- if (offset == end_offset) {
- window = list_next_entry(window, list);
- end_offset = window->offset +
- (window->nr_pages << PAGE_SHIFT);
- }
- window_virt_addr = ioremap_remote(offset, window, tail_len,
- work->remote_dev,
- NULL);
- if (!window_virt_addr)
- return -ENOMEM;
- /*
- * The CPU copy for the tail bytes must be initiated only once
- * previous DMA transfers for this endpoint have completed
- * to guarantee ordering.
- */
- if (work->ordered) {
- struct scif_dev *rdev = work->remote_dev;
-
- ret = scif_drain_dma_intr(rdev->sdev, chan);
- if (ret)
- return ret;
- }
- if (src_local)
- scif_unaligned_cpy_toio(window_virt_addr, temp,
- tail_len, work->ordered);
- else
- scif_unaligned_cpy_fromio(temp, window_virt_addr,
- tail_len, work->ordered);
- iounmap_remote(window_virt_addr, tail_len, work);
- }
- tx = dev->device_prep_dma_memcpy(chan, 0, 0, 0, DMA_PREP_INTERRUPT);
- if (!tx) {
- ret = -ENOMEM;
- return ret;
- }
- tx->callback = &scif_rma_completion_cb;
- tx->callback_param = comp_cb;
- cookie = tx->tx_submit(tx);
-
- if (dma_submit_error(cookie)) {
- ret = -ENOMEM;
- return ret;
- }
- dma_async_issue_pending(chan);
- return 0;
-err:
- dev_err(scif_info.mdev.this_device,
- "%s %d Desc Prog Failed ret %d\n",
- __func__, __LINE__, ret);
- return ret;
-}
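-
-/*
- * Editor's sketch (not part of the original file), distilling the
- * two-step fenced pattern used above and in the aligned paths below:
- * program all but the last cache line with DMA_PREP_FENCE, then the
- * final cache line separately, so the hardware writes the tail last.
- * Assumes len > L1_CACHE_BYTES.
- */
-static int scif_example_fenced_memcpy(struct dma_chan *chan,
-				      dma_addr_t dst, dma_addr_t src,
-				      size_t len)
-{
-	struct dma_device *dev = chan->device;
-	struct dma_async_tx_descriptor *tx;
-	size_t body = len - L1_CACHE_BYTES;
-
-	/* Step 1: the body, fenced against the descriptor that follows */
-	tx = dev->device_prep_dma_memcpy(chan, dst, src, body,
-					 DMA_PREP_FENCE);
-	if (!tx || dma_submit_error(tx->tx_submit(tx)))
-		return -ENOMEM;
-	/* Step 2: the last cache line, guaranteed to land last */
-	tx = dev->device_prep_dma_memcpy(chan, dst + body, src + body,
-					 L1_CACHE_BYTES, 0);
-	if (!tx || dma_submit_error(tx->tx_submit(tx)))
-		return -ENOMEM;
-	dma_async_issue_pending(chan);
-	return 0;
-}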
-
-/*
- * _scif_rma_list_dma_copy_aligned:
- *
- * Traverse all the windows and perform DMA copy.
- */
-static int _scif_rma_list_dma_copy_aligned(struct scif_copy_work *work,
- struct dma_chan *chan)
-{
- dma_addr_t src_dma_addr, dst_dma_addr;
- size_t loop_len, remaining_len, src_contig_bytes = 0;
- size_t dst_contig_bytes = 0;
- struct scif_window_iter src_win_iter;
- struct scif_window_iter dst_win_iter;
- s64 end_src_offset, end_dst_offset;
- struct scif_window *src_window = work->src_window;
- struct scif_window *dst_window = work->dst_window;
- s64 src_offset = work->src_offset, dst_offset = work->dst_offset;
- int ret = 0;
- struct dma_async_tx_descriptor *tx;
- struct dma_device *dev = chan->device;
- dma_cookie_t cookie;
-
- remaining_len = work->len;
-
- scif_init_window_iter(src_window, &src_win_iter);
- scif_init_window_iter(dst_window, &dst_win_iter);
- end_src_offset = src_window->offset +
- (src_window->nr_pages << PAGE_SHIFT);
- end_dst_offset = dst_window->offset +
- (dst_window->nr_pages << PAGE_SHIFT);
- while (remaining_len) {
- if (src_offset == end_src_offset) {
- src_window = list_next_entry(src_window, list);
- end_src_offset = src_window->offset +
- (src_window->nr_pages << PAGE_SHIFT);
- scif_init_window_iter(src_window, &src_win_iter);
- }
- if (dst_offset == end_dst_offset) {
- dst_window = list_next_entry(dst_window, list);
- end_dst_offset = dst_window->offset +
- (dst_window->nr_pages << PAGE_SHIFT);
- scif_init_window_iter(dst_window, &dst_win_iter);
- }
-
- /* compute dma addresses for transfer */
- src_dma_addr = scif_off_to_dma_addr(src_window, src_offset,
- &src_contig_bytes,
- &src_win_iter);
- dst_dma_addr = scif_off_to_dma_addr(dst_window, dst_offset,
- &dst_contig_bytes,
- &dst_win_iter);
- loop_len = min(src_contig_bytes, dst_contig_bytes);
- loop_len = min(loop_len, remaining_len);
- if (work->ordered && !(remaining_len - loop_len)) {
- /*
- * Break up the last chunk of the transfer into two
- * steps to ensure that the last byte in step 2 is
- * updated last.
- */
- /* Step 1) DMA: Body Length - 1 */
- tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
- src_dma_addr,
- loop_len - 1,
- DMA_PREP_FENCE);
- if (!tx) {
- ret = -ENOMEM;
- goto err;
- }
- cookie = tx->tx_submit(tx);
- if (dma_submit_error(cookie)) {
- ret = -ENOMEM;
- goto err;
- }
- src_offset += (loop_len - 1);
- dst_offset += (loop_len - 1);
- src_dma_addr += (loop_len - 1);
- dst_dma_addr += (loop_len - 1);
- remaining_len -= (loop_len - 1);
- loop_len = remaining_len;
-
-			/* Step 2) DMA: 1 BYTE */
- tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
- src_dma_addr, loop_len, 0);
- if (!tx) {
- ret = -ENOMEM;
- goto err;
- }
- cookie = tx->tx_submit(tx);
- if (dma_submit_error(cookie)) {
- ret = -ENOMEM;
- goto err;
- }
- dma_async_issue_pending(chan);
- } else {
- tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
- src_dma_addr, loop_len, 0);
- if (!tx) {
- ret = -ENOMEM;
- goto err;
- }
- cookie = tx->tx_submit(tx);
- if (dma_submit_error(cookie)) {
- ret = -ENOMEM;
- goto err;
- }
- }
- src_offset += loop_len;
- dst_offset += loop_len;
- remaining_len -= loop_len;
- }
- return ret;
-err:
- dev_err(scif_info.mdev.this_device,
- "%s %d Desc Prog Failed ret %d\n",
- __func__, __LINE__, ret);
- return ret;
-}
-
-/*
- * scif_rma_list_dma_copy_aligned:
- *
- * Traverse all the windows and perform DMA copy.
- */
-static int scif_rma_list_dma_copy_aligned(struct scif_copy_work *work,
- struct dma_chan *chan)
-{
- dma_addr_t src_dma_addr, dst_dma_addr;
- size_t loop_len, remaining_len, tail_len, src_contig_bytes = 0;
- size_t dst_contig_bytes = 0;
- int src_cache_off;
- s64 end_src_offset, end_dst_offset;
- struct scif_window_iter src_win_iter;
- struct scif_window_iter dst_win_iter;
- void *src_virt, *dst_virt;
- struct scif_window *src_window = work->src_window;
- struct scif_window *dst_window = work->dst_window;
- s64 src_offset = work->src_offset, dst_offset = work->dst_offset;
- int ret = 0;
- struct dma_async_tx_descriptor *tx;
- struct dma_device *dev = chan->device;
- dma_cookie_t cookie;
-
- remaining_len = work->len;
- scif_init_window_iter(src_window, &src_win_iter);
- scif_init_window_iter(dst_window, &dst_win_iter);
-
- src_cache_off = src_offset & (L1_CACHE_BYTES - 1);
- if (src_cache_off != 0) {
- /* Head */
- loop_len = L1_CACHE_BYTES - src_cache_off;
- loop_len = min(loop_len, remaining_len);
- src_dma_addr = __scif_off_to_dma_addr(src_window, src_offset);
- dst_dma_addr = __scif_off_to_dma_addr(dst_window, dst_offset);
- if (src_window->type == SCIF_WINDOW_SELF)
- src_virt = _get_local_va(src_offset, src_window,
- loop_len);
- else
- src_virt = ioremap_remote(src_offset, src_window,
- loop_len,
- work->remote_dev, NULL);
- if (!src_virt)
- return -ENOMEM;
- if (dst_window->type == SCIF_WINDOW_SELF)
- dst_virt = _get_local_va(dst_offset, dst_window,
- loop_len);
- else
- dst_virt = ioremap_remote(dst_offset, dst_window,
- loop_len,
- work->remote_dev, NULL);
- if (!dst_virt) {
- if (src_window->type != SCIF_WINDOW_SELF)
- iounmap_remote(src_virt, loop_len, work);
- return -ENOMEM;
- }
- if (src_window->type == SCIF_WINDOW_SELF)
- scif_unaligned_cpy_toio(dst_virt, src_virt, loop_len,
- remaining_len == loop_len ?
- work->ordered : false);
- else
- scif_unaligned_cpy_fromio(dst_virt, src_virt, loop_len,
- remaining_len == loop_len ?
- work->ordered : false);
- if (src_window->type != SCIF_WINDOW_SELF)
- iounmap_remote(src_virt, loop_len, work);
- if (dst_window->type != SCIF_WINDOW_SELF)
- iounmap_remote(dst_virt, loop_len, work);
- src_offset += loop_len;
- dst_offset += loop_len;
- remaining_len -= loop_len;
- }
-
- end_src_offset = src_window->offset +
- (src_window->nr_pages << PAGE_SHIFT);
- end_dst_offset = dst_window->offset +
- (dst_window->nr_pages << PAGE_SHIFT);
- tail_len = remaining_len & (L1_CACHE_BYTES - 1);
- remaining_len -= tail_len;
- while (remaining_len) {
- if (src_offset == end_src_offset) {
- src_window = list_next_entry(src_window, list);
- end_src_offset = src_window->offset +
- (src_window->nr_pages << PAGE_SHIFT);
- scif_init_window_iter(src_window, &src_win_iter);
- }
- if (dst_offset == end_dst_offset) {
- dst_window = list_next_entry(dst_window, list);
- end_dst_offset = dst_window->offset +
- (dst_window->nr_pages << PAGE_SHIFT);
- scif_init_window_iter(dst_window, &dst_win_iter);
- }
-
- /* compute dma addresses for transfer */
- src_dma_addr = scif_off_to_dma_addr(src_window, src_offset,
- &src_contig_bytes,
- &src_win_iter);
- dst_dma_addr = scif_off_to_dma_addr(dst_window, dst_offset,
- &dst_contig_bytes,
- &dst_win_iter);
- loop_len = min(src_contig_bytes, dst_contig_bytes);
- loop_len = min(loop_len, remaining_len);
- if (work->ordered && !tail_len &&
- !(remaining_len - loop_len)) {
-			/*
-			 * Break up the last chunk of the transfer into two
-			 * steps, if there is no tail, to guarantee DMA
-			 * ordering. Passing SCIF_DMA_POLLING inserts a status
-			 * update descriptor in step 1 which acts as a
-			 * double-sided synchronization fence for the DMA
-			 * engine to ensure that the last cache line in step 2
-			 * is updated last.
-			 */
- /* Step 1) DMA: Body Length - L1_CACHE_BYTES. */
- tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
- src_dma_addr,
- loop_len -
- L1_CACHE_BYTES,
- DMA_PREP_FENCE);
- if (!tx) {
- ret = -ENOMEM;
- goto err;
- }
- cookie = tx->tx_submit(tx);
- if (dma_submit_error(cookie)) {
- ret = -ENOMEM;
- goto err;
- }
- dma_async_issue_pending(chan);
- src_offset += (loop_len - L1_CACHE_BYTES);
- dst_offset += (loop_len - L1_CACHE_BYTES);
- src_dma_addr += (loop_len - L1_CACHE_BYTES);
- dst_dma_addr += (loop_len - L1_CACHE_BYTES);
- remaining_len -= (loop_len - L1_CACHE_BYTES);
- loop_len = remaining_len;
-
- /* Step 2) DMA: L1_CACHE_BYTES */
- tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
- src_dma_addr,
- loop_len, 0);
- if (!tx) {
- ret = -ENOMEM;
- goto err;
- }
- cookie = tx->tx_submit(tx);
- if (dma_submit_error(cookie)) {
- ret = -ENOMEM;
- goto err;
- }
- dma_async_issue_pending(chan);
- } else {
- tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
- src_dma_addr,
- loop_len, 0);
- if (!tx) {
- ret = -ENOMEM;
- goto err;
- }
- cookie = tx->tx_submit(tx);
- if (dma_submit_error(cookie)) {
- ret = -ENOMEM;
- goto err;
- }
- dma_async_issue_pending(chan);
- }
- src_offset += loop_len;
- dst_offset += loop_len;
- remaining_len -= loop_len;
- }
- remaining_len = tail_len;
- if (remaining_len) {
- loop_len = remaining_len;
- if (src_offset == end_src_offset)
- src_window = list_next_entry(src_window, list);
- if (dst_offset == end_dst_offset)
- dst_window = list_next_entry(dst_window, list);
-
- src_dma_addr = __scif_off_to_dma_addr(src_window, src_offset);
- dst_dma_addr = __scif_off_to_dma_addr(dst_window, dst_offset);
- /*
- * The CPU copy for the tail bytes must be initiated only once
- * previous DMA transfers for this endpoint have completed to
- * guarantee ordering.
- */
- if (work->ordered) {
- struct scif_dev *rdev = work->remote_dev;
-
- ret = scif_drain_dma_poll(rdev->sdev, chan);
- if (ret)
- return ret;
- }
- if (src_window->type == SCIF_WINDOW_SELF)
- src_virt = _get_local_va(src_offset, src_window,
- loop_len);
- else
- src_virt = ioremap_remote(src_offset, src_window,
- loop_len,
- work->remote_dev, NULL);
- if (!src_virt)
- return -ENOMEM;
-
- if (dst_window->type == SCIF_WINDOW_SELF)
- dst_virt = _get_local_va(dst_offset, dst_window,
- loop_len);
- else
- dst_virt = ioremap_remote(dst_offset, dst_window,
- loop_len,
- work->remote_dev, NULL);
- if (!dst_virt) {
- if (src_window->type != SCIF_WINDOW_SELF)
- iounmap_remote(src_virt, loop_len, work);
- return -ENOMEM;
- }
-
- if (src_window->type == SCIF_WINDOW_SELF)
- scif_unaligned_cpy_toio(dst_virt, src_virt, loop_len,
- work->ordered);
- else
- scif_unaligned_cpy_fromio(dst_virt, src_virt,
- loop_len, work->ordered);
- if (src_window->type != SCIF_WINDOW_SELF)
- iounmap_remote(src_virt, loop_len, work);
-
- if (dst_window->type != SCIF_WINDOW_SELF)
- iounmap_remote(dst_virt, loop_len, work);
- remaining_len -= loop_len;
- }
- return ret;
-err:
- dev_err(scif_info.mdev.this_device,
- "%s %d Desc Prog Failed ret %d\n",
- __func__, __LINE__, ret);
- return ret;
-}
-
-/*
- * scif_rma_list_cpu_copy:
- *
- * Traverse all the windows and perform CPU copy.
- */
-static int scif_rma_list_cpu_copy(struct scif_copy_work *work)
-{
- void *src_virt, *dst_virt;
- size_t loop_len, remaining_len;
- int src_page_off, dst_page_off;
- s64 src_offset = work->src_offset, dst_offset = work->dst_offset;
- struct scif_window *src_window = work->src_window;
- struct scif_window *dst_window = work->dst_window;
- s64 end_src_offset, end_dst_offset;
- int ret = 0;
- struct scif_window_iter src_win_iter;
- struct scif_window_iter dst_win_iter;
-
- remaining_len = work->len;
-
- scif_init_window_iter(src_window, &src_win_iter);
- scif_init_window_iter(dst_window, &dst_win_iter);
- while (remaining_len) {
- src_page_off = src_offset & ~PAGE_MASK;
- dst_page_off = dst_offset & ~PAGE_MASK;
- loop_len = min(PAGE_SIZE -
- max(src_page_off, dst_page_off),
- remaining_len);
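-		/*
-		 * Editor's note: taking the larger of the two intra-page
-		 * offsets guarantees that the chunk crosses neither the
-		 * source nor the destination page boundary.
-		 */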
-
- if (src_window->type == SCIF_WINDOW_SELF)
- src_virt = _get_local_va(src_offset, src_window,
- loop_len);
- else
- src_virt = ioremap_remote(src_offset, src_window,
- loop_len,
- work->remote_dev,
- &src_win_iter);
- if (!src_virt) {
- ret = -ENOMEM;
- goto error;
- }
-
- if (dst_window->type == SCIF_WINDOW_SELF)
- dst_virt = _get_local_va(dst_offset, dst_window,
- loop_len);
- else
- dst_virt = ioremap_remote(dst_offset, dst_window,
- loop_len,
- work->remote_dev,
- &dst_win_iter);
- if (!dst_virt) {
- if (src_window->type == SCIF_WINDOW_PEER)
- iounmap_remote(src_virt, loop_len, work);
- ret = -ENOMEM;
- goto error;
- }
-
- if (work->loopback) {
- memcpy(dst_virt, src_virt, loop_len);
- } else {
- if (src_window->type == SCIF_WINDOW_SELF)
- memcpy_toio((void __iomem __force *)dst_virt,
- src_virt, loop_len);
- else
- memcpy_fromio(dst_virt,
- (void __iomem __force *)src_virt,
- loop_len);
- }
- if (src_window->type == SCIF_WINDOW_PEER)
- iounmap_remote(src_virt, loop_len, work);
-
- if (dst_window->type == SCIF_WINDOW_PEER)
- iounmap_remote(dst_virt, loop_len, work);
-
- src_offset += loop_len;
- dst_offset += loop_len;
- remaining_len -= loop_len;
- if (remaining_len) {
- end_src_offset = src_window->offset +
- (src_window->nr_pages << PAGE_SHIFT);
- end_dst_offset = dst_window->offset +
- (dst_window->nr_pages << PAGE_SHIFT);
- if (src_offset == end_src_offset) {
- src_window = list_next_entry(src_window, list);
- scif_init_window_iter(src_window,
- &src_win_iter);
- }
- if (dst_offset == end_dst_offset) {
- dst_window = list_next_entry(dst_window, list);
- scif_init_window_iter(dst_window,
- &dst_win_iter);
- }
- }
- }
-error:
- return ret;
-}
-
-static int scif_rma_list_dma_copy_wrapper(struct scif_endpt *epd,
- struct scif_copy_work *work,
- struct dma_chan *chan, off_t loffset)
-{
- int src_cache_off, dst_cache_off;
- s64 src_offset = work->src_offset, dst_offset = work->dst_offset;
- u8 *temp = NULL;
- bool src_local = true;
- struct scif_dma_comp_cb *comp_cb;
- int err;
-
- if (is_dma_copy_aligned(chan->device, 1, 1, 1))
- return _scif_rma_list_dma_copy_aligned(work, chan);
-
- src_cache_off = src_offset & (L1_CACHE_BYTES - 1);
- dst_cache_off = dst_offset & (L1_CACHE_BYTES - 1);
-
- if (dst_cache_off == src_cache_off)
- return scif_rma_list_dma_copy_aligned(work, chan);
-
- if (work->loopback)
- return scif_rma_list_cpu_copy(work);
- src_local = work->src_window->type == SCIF_WINDOW_SELF;
-
- /* Allocate dma_completion cb */
- comp_cb = kzalloc(sizeof(*comp_cb), GFP_KERNEL);
- if (!comp_cb)
- goto error;
-
- work->comp_cb = comp_cb;
- comp_cb->cb_cookie = comp_cb;
- comp_cb->dma_completion_func = &scif_rma_completion_cb;
-
- if (work->len + (L1_CACHE_BYTES << 1) < SCIF_KMEM_UNALIGNED_BUF_SIZE) {
- comp_cb->is_cache = false;
- /* Allocate padding bytes to align to a cache line */
- temp = kmalloc(work->len + (L1_CACHE_BYTES << 1),
- GFP_KERNEL);
- if (!temp)
- goto free_comp_cb;
- comp_cb->temp_buf_to_free = temp;
- /* kmalloc(..) does not guarantee cache line alignment */
- if (!IS_ALIGNED((u64)temp, L1_CACHE_BYTES))
- temp = PTR_ALIGN(temp, L1_CACHE_BYTES);
- } else {
- comp_cb->is_cache = true;
- temp = kmem_cache_alloc(unaligned_cache, GFP_KERNEL);
- if (!temp)
- goto free_comp_cb;
- comp_cb->temp_buf_to_free = temp;
- }
-
- if (src_local) {
- temp += dst_cache_off;
- scif_rma_local_cpu_copy(work->src_offset, work->src_window,
- temp, work->len, true);
- } else {
- comp_cb->dst_window = work->dst_window;
- comp_cb->dst_offset = work->dst_offset;
- work->src_offset = work->src_offset - src_cache_off;
- comp_cb->len = work->len;
- work->len = ALIGN(work->len + src_cache_off, L1_CACHE_BYTES);
- comp_cb->header_padding = src_cache_off;
- }
- comp_cb->temp_buf = temp;
-
- err = scif_map_single(&comp_cb->temp_phys, temp,
- work->remote_dev, SCIF_KMEM_UNALIGNED_BUF_SIZE);
- if (err)
- goto free_temp_buf;
- comp_cb->sdev = work->remote_dev;
- if (scif_rma_list_dma_copy_unaligned(work, temp, chan, src_local) < 0)
- goto free_temp_buf;
- if (!src_local)
- work->fence_type = SCIF_DMA_INTR;
- return 0;
-free_temp_buf:
- if (comp_cb->is_cache)
- kmem_cache_free(unaligned_cache, comp_cb->temp_buf_to_free);
- else
- kfree(comp_cb->temp_buf_to_free);
-free_comp_cb:
- kfree(comp_cb);
-error:
- return -ENOMEM;
-}
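-
-/*
- * Editor's note on the bounce-buffer layout used above (sketch): for a
- * remote -> local copy the temporary buffer is over-allocated by two
- * cache lines and the transfer is widened to cache-line boundaries:
- *
- *	temp_buf: [ header_padding | payload (comp_cb->len) | tail pad ]
- *
- * so the DMA engine only moves cache-line-aligned spans; the completion
- * callback then copies just the payload, skipping header_padding, into
- * the destination window.
- */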
-
-/**
- * scif_rma_copy:
- * @epd: end point descriptor.
- * @loffset: offset in local registered address space to/from which to copy
- * @addr: user virtual address to/from which to copy
- * @len: length of range to copy
- * @roffset: offset in remote registered address space to/from which to copy
- * @flags: flags
- * @dir: LOCAL->REMOTE or vice versa.
- * @last_chunk: true if this is the last chunk of a larger transfer
- *
- * Validate parameters, check if src/dst registered ranges requested for copy
- * are valid and initiate either CPU or DMA copy.
- */
-static int scif_rma_copy(scif_epd_t epd, off_t loffset, unsigned long addr,
- size_t len, off_t roffset, int flags,
- enum scif_rma_dir dir, bool last_chunk)
-{
- struct scif_endpt *ep = (struct scif_endpt *)epd;
- struct scif_rma_req remote_req;
- struct scif_rma_req req;
- struct scif_window *local_window = NULL;
- struct scif_window *remote_window = NULL;
- struct scif_copy_work copy_work;
- bool loopback;
- int err = 0;
- struct dma_chan *chan;
- struct scif_mmu_notif *mmn = NULL;
- bool cache = false;
- struct device *spdev;
-
- err = scif_verify_epd(ep);
- if (err)
- return err;
-
- if (flags && !(flags & (SCIF_RMA_USECPU | SCIF_RMA_USECACHE |
- SCIF_RMA_SYNC | SCIF_RMA_ORDERED)))
- return -EINVAL;
-
- loopback = scifdev_self(ep->remote_dev) ? true : false;
- copy_work.fence_type = ((flags & SCIF_RMA_SYNC) && last_chunk) ?
- SCIF_DMA_POLL : 0;
- copy_work.ordered = !!((flags & SCIF_RMA_ORDERED) && last_chunk);
-
- /* Use CPU for Mgmt node <-> Mgmt node copies */
- if (loopback && scif_is_mgmt_node()) {
- flags |= SCIF_RMA_USECPU;
- copy_work.fence_type = 0x0;
- }
-
- cache = scif_is_set_reg_cache(flags);
-
- remote_req.out_window = &remote_window;
- remote_req.offset = roffset;
- remote_req.nr_bytes = len;
- /*
- * If transfer is from local to remote then the remote window
- * must be writeable and vice versa.
- */
- remote_req.prot = dir == SCIF_LOCAL_TO_REMOTE ? VM_WRITE : VM_READ;
- remote_req.type = SCIF_WINDOW_PARTIAL;
- remote_req.head = &ep->rma_info.remote_reg_list;
-
- spdev = scif_get_peer_dev(ep->remote_dev);
- if (IS_ERR(spdev)) {
- err = PTR_ERR(spdev);
- return err;
- }
-
- if (addr && cache) {
- mutex_lock(&ep->rma_info.mmn_lock);
- mmn = scif_find_mmu_notifier(current->mm, &ep->rma_info);
- if (!mmn)
- mmn = scif_add_mmu_notifier(current->mm, ep);
- mutex_unlock(&ep->rma_info.mmn_lock);
- if (IS_ERR(mmn)) {
- scif_put_peer_dev(spdev);
- return PTR_ERR(mmn);
- }
- cache = cache && !scif_rma_tc_can_cache(ep, len);
- }
- mutex_lock(&ep->rma_info.rma_lock);
- if (addr) {
- req.out_window = &local_window;
- req.nr_bytes = ALIGN(len + (addr & ~PAGE_MASK),
- PAGE_SIZE);
- req.va_for_temp = addr & PAGE_MASK;
- req.prot = (dir == SCIF_LOCAL_TO_REMOTE ?
- VM_READ : VM_WRITE | VM_READ);
- /* Does a valid local window exist? */
- if (mmn) {
- spin_lock(&ep->rma_info.tc_lock);
- req.head = &mmn->tc_reg_list;
- err = scif_query_tcw(ep, &req);
- spin_unlock(&ep->rma_info.tc_lock);
- }
- if (!mmn || err) {
- err = scif_register_temp(epd, req.va_for_temp,
- req.nr_bytes, req.prot,
- &loffset, &local_window);
- if (err) {
- mutex_unlock(&ep->rma_info.rma_lock);
- goto error;
- }
- if (!cache)
- goto skip_cache;
- atomic_inc(&ep->rma_info.tcw_refcount);
- atomic_add_return(local_window->nr_pages,
- &ep->rma_info.tcw_total_pages);
- if (mmn) {
- spin_lock(&ep->rma_info.tc_lock);
- scif_insert_tcw(local_window,
- &mmn->tc_reg_list);
- spin_unlock(&ep->rma_info.tc_lock);
- }
- }
-skip_cache:
- loffset = local_window->offset +
- (addr - local_window->va_for_temp);
- } else {
- req.out_window = &local_window;
- req.offset = loffset;
- /*
- * If transfer is from local to remote then the self window
- * must be readable and vice versa.
- */
- req.prot = dir == SCIF_LOCAL_TO_REMOTE ? VM_READ : VM_WRITE;
- req.nr_bytes = len;
- req.type = SCIF_WINDOW_PARTIAL;
- req.head = &ep->rma_info.reg_list;
- /* Does a valid local window exist? */
- err = scif_query_window(&req);
- if (err) {
- mutex_unlock(&ep->rma_info.rma_lock);
- goto error;
- }
- }
-
- /* Does a valid remote window exist? */
- err = scif_query_window(&remote_req);
- if (err) {
- mutex_unlock(&ep->rma_info.rma_lock);
- goto error;
- }
-
- /*
- * Prepare copy_work for submitting work to the DMA kernel thread
- * or CPU copy routine.
- */
- copy_work.len = len;
- copy_work.loopback = loopback;
- copy_work.remote_dev = ep->remote_dev;
- if (dir == SCIF_LOCAL_TO_REMOTE) {
- copy_work.src_offset = loffset;
- copy_work.src_window = local_window;
- copy_work.dst_offset = roffset;
- copy_work.dst_window = remote_window;
- } else {
- copy_work.src_offset = roffset;
- copy_work.src_window = remote_window;
- copy_work.dst_offset = loffset;
- copy_work.dst_window = local_window;
- }
-
- if (flags & SCIF_RMA_USECPU) {
- scif_rma_list_cpu_copy(&copy_work);
- } else {
- chan = ep->rma_info.dma_chan;
- err = scif_rma_list_dma_copy_wrapper(epd, &copy_work,
- chan, loffset);
- }
- if (addr && !cache)
- atomic_inc(&ep->rma_info.tw_refcount);
-
- mutex_unlock(&ep->rma_info.rma_lock);
-
- if (last_chunk) {
- struct scif_dev *rdev = ep->remote_dev;
-
- if (copy_work.fence_type == SCIF_DMA_POLL)
- err = scif_drain_dma_poll(rdev->sdev,
- ep->rma_info.dma_chan);
- else if (copy_work.fence_type == SCIF_DMA_INTR)
- err = scif_drain_dma_intr(rdev->sdev,
- ep->rma_info.dma_chan);
- }
-
- if (addr && !cache)
- scif_queue_for_cleanup(local_window, &scif_info.rma);
- scif_put_peer_dev(spdev);
- return err;
-error:
- if (err) {
- if (addr && local_window && !cache)
- scif_destroy_window(ep, local_window);
- dev_err(scif_info.mdev.this_device,
- "%s %d err %d len 0x%lx\n",
- __func__, __LINE__, err, len);
- }
- scif_put_peer_dev(spdev);
- return err;
-}
-
-int scif_readfrom(scif_epd_t epd, off_t loffset, size_t len,
- off_t roffset, int flags)
-{
- int err;
-
- dev_dbg(scif_info.mdev.this_device,
- "SCIFAPI readfrom: ep %p loffset 0x%lx len 0x%lx offset 0x%lx flags 0x%x\n",
- epd, loffset, len, roffset, flags);
- if (scif_unaligned(loffset, roffset)) {
- while (len > SCIF_MAX_UNALIGNED_BUF_SIZE) {
- err = scif_rma_copy(epd, loffset, 0x0,
- SCIF_MAX_UNALIGNED_BUF_SIZE,
- roffset, flags,
- SCIF_REMOTE_TO_LOCAL, false);
- if (err)
- goto readfrom_err;
- loffset += SCIF_MAX_UNALIGNED_BUF_SIZE;
- roffset += SCIF_MAX_UNALIGNED_BUF_SIZE;
- len -= SCIF_MAX_UNALIGNED_BUF_SIZE;
- }
- }
- err = scif_rma_copy(epd, loffset, 0x0, len,
- roffset, flags, SCIF_REMOTE_TO_LOCAL, true);
-readfrom_err:
- return err;
-}
-EXPORT_SYMBOL_GPL(scif_readfrom);
-
-int scif_writeto(scif_epd_t epd, off_t loffset, size_t len,
- off_t roffset, int flags)
-{
- int err;
-
- dev_dbg(scif_info.mdev.this_device,
- "SCIFAPI writeto: ep %p loffset 0x%lx len 0x%lx roffset 0x%lx flags 0x%x\n",
- epd, loffset, len, roffset, flags);
- if (scif_unaligned(loffset, roffset)) {
- while (len > SCIF_MAX_UNALIGNED_BUF_SIZE) {
- err = scif_rma_copy(epd, loffset, 0x0,
- SCIF_MAX_UNALIGNED_BUF_SIZE,
- roffset, flags,
- SCIF_LOCAL_TO_REMOTE, false);
- if (err)
- goto writeto_err;
- loffset += SCIF_MAX_UNALIGNED_BUF_SIZE;
- roffset += SCIF_MAX_UNALIGNED_BUF_SIZE;
- len -= SCIF_MAX_UNALIGNED_BUF_SIZE;
- }
- }
- err = scif_rma_copy(epd, loffset, 0x0, len,
- roffset, flags, SCIF_LOCAL_TO_REMOTE, true);
-writeto_err:
- return err;
-}
-EXPORT_SYMBOL_GPL(scif_writeto);
-
-int scif_vreadfrom(scif_epd_t epd, void *addr, size_t len,
- off_t roffset, int flags)
-{
- int err;
-
- dev_dbg(scif_info.mdev.this_device,
- "SCIFAPI vreadfrom: ep %p addr %p len 0x%lx roffset 0x%lx flags 0x%x\n",
- epd, addr, len, roffset, flags);
- if (scif_unaligned((off_t __force)addr, roffset)) {
- if (len > SCIF_MAX_UNALIGNED_BUF_SIZE)
- flags &= ~SCIF_RMA_USECACHE;
-
- while (len > SCIF_MAX_UNALIGNED_BUF_SIZE) {
- err = scif_rma_copy(epd, 0, (u64)addr,
- SCIF_MAX_UNALIGNED_BUF_SIZE,
- roffset, flags,
- SCIF_REMOTE_TO_LOCAL, false);
- if (err)
- goto vreadfrom_err;
- addr += SCIF_MAX_UNALIGNED_BUF_SIZE;
- roffset += SCIF_MAX_UNALIGNED_BUF_SIZE;
- len -= SCIF_MAX_UNALIGNED_BUF_SIZE;
- }
- }
- err = scif_rma_copy(epd, 0, (u64)addr, len,
- roffset, flags, SCIF_REMOTE_TO_LOCAL, true);
-vreadfrom_err:
- return err;
-}
-EXPORT_SYMBOL_GPL(scif_vreadfrom);
-
-int scif_vwriteto(scif_epd_t epd, void *addr, size_t len,
- off_t roffset, int flags)
-{
- int err;
-
- dev_dbg(scif_info.mdev.this_device,
- "SCIFAPI vwriteto: ep %p addr %p len 0x%lx roffset 0x%lx flags 0x%x\n",
- epd, addr, len, roffset, flags);
- if (scif_unaligned((off_t __force)addr, roffset)) {
- if (len > SCIF_MAX_UNALIGNED_BUF_SIZE)
- flags &= ~SCIF_RMA_USECACHE;
-
- while (len > SCIF_MAX_UNALIGNED_BUF_SIZE) {
- err = scif_rma_copy(epd, 0, (u64)addr,
- SCIF_MAX_UNALIGNED_BUF_SIZE,
- roffset, flags,
- SCIF_LOCAL_TO_REMOTE, false);
- if (err)
- goto vwriteto_err;
- addr += SCIF_MAX_UNALIGNED_BUF_SIZE;
- roffset += SCIF_MAX_UNALIGNED_BUF_SIZE;
- len -= SCIF_MAX_UNALIGNED_BUF_SIZE;
- }
- }
- err = scif_rma_copy(epd, 0, (u64)addr, len,
- roffset, flags, SCIF_LOCAL_TO_REMOTE, true);
-vwriteto_err:
- return err;
-}
-EXPORT_SYMBOL_GPL(scif_vwriteto);
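-
-/*
- * Illustrative sketch (editor's addition, not part of the original file):
- * how an in-kernel SCIF client might drive the exported RMA calls above.
- * The endpoint, offsets and length are hypothetical.
- */
-static int scif_example_client(scif_epd_t epd, off_t loff, off_t roff,
-			       size_t len)
-{
-	int err;
-
-	/* pull len bytes from the peer's registered window */
-	err = scif_readfrom(epd, loff, len, roff, 0);
-	if (err)
-		return err;
-	/* ordered push: the last byte becomes visible last on the peer */
-	return scif_writeto(epd, loff, len, roff, SCIF_RMA_ORDERED);
-}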
diff --git a/drivers/misc/mic/scif/scif_epd.c b/drivers/misc/mic/scif/scif_epd.c
deleted file mode 100644
index 426687f6696b..000000000000
--- a/drivers/misc/mic/scif/scif_epd.c
+++ /dev/null
@@ -1,357 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Intel SCIF driver.
- */
-#include "scif_main.h"
-#include "scif_map.h"
-
-void scif_cleanup_ep_qp(struct scif_endpt *ep)
-{
- struct scif_qp *qp = ep->qp_info.qp;
-
- if (qp->outbound_q.rb_base) {
- scif_iounmap((void *)qp->outbound_q.rb_base,
- qp->outbound_q.size, ep->remote_dev);
- qp->outbound_q.rb_base = NULL;
- }
- if (qp->remote_qp) {
- scif_iounmap((void *)qp->remote_qp,
- sizeof(struct scif_qp), ep->remote_dev);
- qp->remote_qp = NULL;
- }
- if (qp->local_qp) {
- scif_unmap_single(qp->local_qp, ep->remote_dev,
- sizeof(struct scif_qp));
- qp->local_qp = 0x0;
- }
- if (qp->local_buf) {
- scif_unmap_single(qp->local_buf, ep->remote_dev,
- SCIF_ENDPT_QP_SIZE);
- qp->local_buf = 0;
- }
-}
-
-void scif_teardown_ep(void *endpt)
-{
- struct scif_endpt *ep = endpt;
- struct scif_qp *qp = ep->qp_info.qp;
-
- if (qp) {
- spin_lock(&ep->lock);
- scif_cleanup_ep_qp(ep);
- spin_unlock(&ep->lock);
- kfree(qp->inbound_q.rb_base);
- kfree(qp);
- }
-}
-
-/*
- * Enqueue the endpoint to the zombie list for cleanup.
- * The endpoint should not be accessed once this API returns.
- */
-void scif_add_epd_to_zombie_list(struct scif_endpt *ep, bool eplock_held)
-{
- if (!eplock_held)
- mutex_lock(&scif_info.eplock);
- spin_lock(&ep->lock);
- ep->state = SCIFEP_ZOMBIE;
- spin_unlock(&ep->lock);
- list_add_tail(&ep->list, &scif_info.zombie);
- scif_info.nr_zombies++;
- if (!eplock_held)
- mutex_unlock(&scif_info.eplock);
- schedule_work(&scif_info.misc_work);
-}
-
-static struct scif_endpt *scif_find_listen_ep(u16 port)
-{
- struct scif_endpt *ep = NULL;
- struct list_head *pos, *tmpq;
-
- mutex_lock(&scif_info.eplock);
- list_for_each_safe(pos, tmpq, &scif_info.listen) {
- ep = list_entry(pos, struct scif_endpt, list);
- if (ep->port.port == port) {
- mutex_unlock(&scif_info.eplock);
- return ep;
- }
- }
- mutex_unlock(&scif_info.eplock);
- return NULL;
-}
-
-void scif_cleanup_zombie_epd(void)
-{
- struct list_head *pos, *tmpq;
- struct scif_endpt *ep;
-
- mutex_lock(&scif_info.eplock);
- list_for_each_safe(pos, tmpq, &scif_info.zombie) {
- ep = list_entry(pos, struct scif_endpt, list);
- if (scif_rma_ep_can_uninit(ep)) {
- list_del(pos);
- scif_info.nr_zombies--;
- put_iova_domain(&ep->rma_info.iovad);
- kfree(ep);
- }
- }
- mutex_unlock(&scif_info.eplock);
-}
-
-/**
- * scif_cnctreq() - Respond to SCIF_CNCT_REQ interrupt message
- * @scifdev: SCIF device
- * @msg: Interrupt message
- *
- * This message is initiated by the remote node to request a connection
- * to the local node. This function looks for an end point in the
- * listen state on the requested port id.
- *
- * If it finds a listening port it places the connect request on the
- * listening end points queue and wakes up any pending accept calls.
- *
- * If it does not find a listening end point it sends a connection
- * reject message to the remote node.
- */
-void scif_cnctreq(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_endpt *ep = NULL;
- struct scif_conreq *conreq;
-
- conreq = kmalloc(sizeof(*conreq), GFP_KERNEL);
- if (!conreq)
- /* Lack of resources so reject the request. */
- goto conreq_sendrej;
-
- ep = scif_find_listen_ep(msg->dst.port);
- if (!ep)
- /* Send reject due to no listening ports */
- goto conreq_sendrej_free;
- else
- spin_lock(&ep->lock);
-
- if (ep->backlog <= ep->conreqcnt) {
- /* Send reject due to too many pending requests */
- spin_unlock(&ep->lock);
- goto conreq_sendrej_free;
- }
-
- conreq->msg = *msg;
- list_add_tail(&conreq->list, &ep->conlist);
- ep->conreqcnt++;
- wake_up_interruptible(&ep->conwq);
- spin_unlock(&ep->lock);
- return;
-
-conreq_sendrej_free:
- kfree(conreq);
-conreq_sendrej:
- msg->uop = SCIF_CNCT_REJ;
- scif_nodeqp_send(&scif_dev[msg->src.node], msg);
-}
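-
-/*
- * Editor's note (sketch): the connection handshake at a glance. A
- * connect() on the peer sends SCIF_CNCT_REQ (handled above); the local
- * accept() side answers with SCIF_CNCT_GNT; the peer maps the granted
- * resources and replies SCIF_CNCT_GNTACK on success or SCIF_CNCT_GNTNACK
- * on failure, while a missing or overloaded listener is answered directly
- * with SCIF_CNCT_REJ.
- */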
-
-/**
- * scif_cnctgnt() - Respond to SCIF_CNCT_GNT interrupt message
- * @scifdev: SCIF device
- * @msg: Interrupt message
- *
- * An accept() on the remote node has occurred and sent this message
- * to indicate success. Place the end point in the MAPPING state and
- * save the remote nodes memory information. Then wake up the connect
- * request so it can finish.
- */
-void scif_cnctgnt(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
-
- spin_lock(&ep->lock);
- if (SCIFEP_CONNECTING == ep->state) {
- ep->peer.node = msg->src.node;
- ep->peer.port = msg->src.port;
- ep->qp_info.gnt_pld = msg->payload[1];
- ep->remote_ep = msg->payload[2];
- ep->state = SCIFEP_MAPPING;
-
- wake_up(&ep->conwq);
- }
- spin_unlock(&ep->lock);
-}
-
-/**
- * scif_cnctgnt_ack() - Respond to SCIF_CNCT_GNTACK interrupt message
- * @scifdev: SCIF device
- * @msg: Interrupt message
- *
- * The remote connection request has finished mapping the local memory.
- * Place the connection in the connected state and wake up the pending
- * accept() call.
- */
-void scif_cnctgnt_ack(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
-
- mutex_lock(&scif_info.connlock);
- spin_lock(&ep->lock);
- /* New ep is now connected with all resources set. */
- ep->state = SCIFEP_CONNECTED;
- list_add_tail(&ep->list, &scif_info.connected);
- wake_up(&ep->conwq);
- spin_unlock(&ep->lock);
- mutex_unlock(&scif_info.connlock);
-}
-
-/**
- * scif_cnctgnt_nack() - Respond to SCIF_CNCT_GNTNACK interrupt message
- * @scifdev: SCIF device
- * @msg: Interrupt message
- *
- * The remote connection request failed to map the local memory it was sent.
- * Place the end point in the CLOSING state to indicate the failure and wake
- * up the pending accept().
- */
-void scif_cnctgnt_nack(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
-
- spin_lock(&ep->lock);
- ep->state = SCIFEP_CLOSING;
- wake_up(&ep->conwq);
- spin_unlock(&ep->lock);
-}
-
-/**
- * scif_cnctrej() - Respond to SCIF_CNCT_REJ interrupt message
- * @scifdev: SCIF device
- * @msg: Interrupt message
- *
- * The remote end has rejected the connection request. Set the end
- * point back to the bound state and wake up the pending connect().
- */
-void scif_cnctrej(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
-
- spin_lock(&ep->lock);
- if (SCIFEP_CONNECTING == ep->state) {
- ep->state = SCIFEP_BOUND;
- wake_up(&ep->conwq);
- }
- spin_unlock(&ep->lock);
-}
-
-/**
- * scif_discnct() - Respond to SCIF_DISCNCT interrupt message
- * @scifdev: SCIF device
- * @msg: Interrupt message
- *
- * The remote node has indicated close() has been called on its end
- * point. Remove the local end point from the connected list, set its
- * state to disconnected and ensure accesses to the remote node are
- * shutdown.
- *
- * When all accesses to the remote end have completed then send a
- * DISCNT_ACK to indicate it can remove its resources and complete
- * the close routine.
- */
-void scif_discnct(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_endpt *ep = NULL;
- struct scif_endpt *tmpep;
- struct list_head *pos, *tmpq;
-
- mutex_lock(&scif_info.connlock);
- list_for_each_safe(pos, tmpq, &scif_info.connected) {
- tmpep = list_entry(pos, struct scif_endpt, list);
-		/*
-		 * The local ep may have sent a disconnect and been closed
-		 * due to a message response timeout. It may have been
-		 * allocated again and formed a new connection, so we want
-		 * to check if the remote ep matches.
-		 */
- if (((u64)tmpep == msg->payload[1]) &&
- ((u64)tmpep->remote_ep == msg->payload[0])) {
- list_del(pos);
- ep = tmpep;
- spin_lock(&ep->lock);
- break;
- }
- }
-
- /*
- * If the terminated end is not found then this side started closing
- * before the other side sent the disconnect. If so the ep will no
- * longer be on the connected list. Regardless the other side
- * needs to be acked to let it know close is complete.
- */
- if (!ep) {
- mutex_unlock(&scif_info.connlock);
- goto discnct_ack;
- }
-
- ep->state = SCIFEP_DISCONNECTED;
- list_add_tail(&ep->list, &scif_info.disconnected);
-
- wake_up_interruptible(&ep->sendwq);
- wake_up_interruptible(&ep->recvwq);
- spin_unlock(&ep->lock);
- mutex_unlock(&scif_info.connlock);
-
-discnct_ack:
- msg->uop = SCIF_DISCNT_ACK;
- scif_nodeqp_send(&scif_dev[msg->src.node], msg);
-}
-
-/**
- * scif_discnt_ack() - Respond to SCIF_DISCNT_ACK interrupt message
- * @scifdev: SCIF device
- * @msg: Interrupt message
- *
- * Remote side has indicated it has no more references to local resources.
- */
-void scif_discnt_ack(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
-
- spin_lock(&ep->lock);
- ep->state = SCIFEP_DISCONNECTED;
- spin_unlock(&ep->lock);
- complete(&ep->discon);
-}
-
-/**
- * scif_clientsend() - Respond to SCIF_CLIENT_SEND interrupt message
- * @scifdev: SCIF device
- * @msg: Interrupt message
- *
- * Remote side is confirming send or receive interrupt handling is complete.
- */
-void scif_clientsend(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
-
- spin_lock(&ep->lock);
- if (SCIFEP_CONNECTED == ep->state)
- wake_up_interruptible(&ep->recvwq);
- spin_unlock(&ep->lock);
-}
-
-/**
- * scif_clientrcvd() - Respond to SCIF_CLIENT_RCVD interrupt message
- * @scifdev: SCIF device
- * @msg: Interrupt message
- *
- * Remote side is confirming send or receive interrupt handling is complete.
- */
-void scif_clientrcvd(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
-
- spin_lock(&ep->lock);
- if (SCIFEP_CONNECTED == ep->state)
- wake_up_interruptible(&ep->sendwq);
- spin_unlock(&ep->lock);
-}
diff --git a/drivers/misc/mic/scif/scif_epd.h b/drivers/misc/mic/scif/scif_epd.h
deleted file mode 100644
index 0b9dfe1cc06c..000000000000
--- a/drivers/misc/mic/scif/scif_epd.h
+++ /dev/null
@@ -1,200 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Intel SCIF driver.
- */
-#ifndef SCIF_EPD_H
-#define SCIF_EPD_H
-
-#include <linux/delay.h>
-#include <linux/scif.h>
-#include <linux/scif_ioctl.h>
-
-#define SCIF_EPLOCK_HELD true
-
-enum scif_epd_state {
- SCIFEP_UNBOUND,
- SCIFEP_BOUND,
- SCIFEP_LISTENING,
- SCIFEP_CONNECTED,
- SCIFEP_CONNECTING,
- SCIFEP_MAPPING,
- SCIFEP_CLOSING,
- SCIFEP_CLLISTEN,
- SCIFEP_DISCONNECTED,
- SCIFEP_ZOMBIE
-};
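-
-/*
- * Editor's note (sketch): the typical lifecycle through these states is
- * UNBOUND -> BOUND -> { LISTENING | CONNECTING -> MAPPING -> CONNECTED }
- * -> CLOSING/DISCONNECTED -> ZOMBIE, with ZOMBIE endpoints reaped later
- * by scif_cleanup_zombie_epd().
- */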
-
-/*
- * struct scif_conreq - Data structure added to the connection list.
- *
- * @msg: connection request message received
- * @list: link to list of connection requests
- */
-struct scif_conreq {
- struct scifmsg msg;
- struct list_head list;
-};
-
-/* Size of the RB for the Endpoint QP */
-#define SCIF_ENDPT_QP_SIZE 0x1000
-
-/*
- * scif_endpt_qp_info - SCIF endpoint queue pair
- *
- * @qp - Qpair for this endpoint
- * @qp_offset - DMA address of the QP
- * @gnt_pld - Payload in a SCIF_CNCT_GNT message containing the
- * physical address of the remote_qp.
- */
-struct scif_endpt_qp_info {
- struct scif_qp *qp;
- dma_addr_t qp_offset;
- dma_addr_t gnt_pld;
-};
-
-/*
- * struct scif_endpt - The SCIF endpoint data structure
- *
- * @state: end point state
- * @lock: lock synchronizing access to endpoint fields like state etc
- * @port: self port information
- * @peer: peer port information
- * @backlog: maximum pending connection requests
- * @qp_info: Endpoint QP information for SCIF messaging
- * @remote_dev: scifdev used by this endpt to communicate with remote node.
- * @remote_ep: remote endpoint
- * @conreqcnt: Keep track of number of connection requests.
- * @files: Open file information used to match the id passed in with
- * the flush routine.
- * @conlist: list of connection requests
- * @conwq: waitqueue for connection processing
- * @discon: completion used during disconnection
- * @sendwq: waitqueue used during sending messages
- * @recvwq: waitqueue used during message receipt
- * @sendlock: Synchronize ordering of messages sent
- * @recvlock: Synchronize ordering of messages received
- * @list: link to list of various endpoints like connected, listening etc
- * @li_accept: pending ACCEPTREG
- * @acceptcnt: pending ACCEPTREG cnt
- * @liacceptlist: link to listen accept
- * @miacceptlist: link to uaccept
- * @listenep: associated listen ep
- * @conn_work: Non blocking connect work
- * @conn_port: Connection port
- * @conn_err: Errors during connection
- * @conn_async_state: Async connection
- * @conn_pend_wq: Used by poll while waiting for incoming connections
- * @conn_list: List of async connection requests
- * @rma_info: Information for triggering SCIF RMA and DMA operations
- * @mmu_list: link to list of MMU notifier cleanup work
- * @anon: anonymous file for use in kernel mode scif poll
- */
-struct scif_endpt {
- enum scif_epd_state state;
- spinlock_t lock;
- struct scif_port_id port;
- struct scif_port_id peer;
- int backlog;
- struct scif_endpt_qp_info qp_info;
- struct scif_dev *remote_dev;
- u64 remote_ep;
- int conreqcnt;
- struct files_struct *files;
- struct list_head conlist;
- wait_queue_head_t conwq;
- struct completion discon;
- wait_queue_head_t sendwq;
- wait_queue_head_t recvwq;
- struct mutex sendlock;
- struct mutex recvlock;
- struct list_head list;
- struct list_head li_accept;
- int acceptcnt;
- struct list_head liacceptlist;
- struct list_head miacceptlist;
- struct scif_endpt *listenep;
- struct scif_port_id conn_port;
- int conn_err;
- int conn_async_state;
- wait_queue_head_t conn_pend_wq;
- struct list_head conn_list;
- struct scif_endpt_rma_info rma_info;
- struct list_head mmu_list;
- struct file *anon;
-};
-
-static inline int scifdev_alive(struct scif_endpt *ep)
-{
- return _scifdev_alive(ep->remote_dev);
-}
-
-/*
- * scif_verify_epd:
- * ep: SCIF endpoint
- *
- * Checks several generic error conditions and returns the
- * appropriate error.
- */
-static inline int scif_verify_epd(struct scif_endpt *ep)
-{
- if (ep->state == SCIFEP_DISCONNECTED)
- return -ECONNRESET;
-
- if (ep->state != SCIFEP_CONNECTED)
- return -ENOTCONN;
-
- if (!scifdev_alive(ep))
- return -ENODEV;
-
- return 0;
-}
-
-static inline int scif_anon_inode_getfile(scif_epd_t epd)
-{
- epd->anon = anon_inode_getfile("scif", &scif_anon_fops, NULL, 0);
-
- return PTR_ERR_OR_ZERO(epd->anon);
-}
-
-static inline void scif_anon_inode_fput(scif_epd_t epd)
-{
- if (epd->anon) {
- fput(epd->anon);
- epd->anon = NULL;
- }
-}
-
-void scif_cleanup_zombie_epd(void);
-void scif_teardown_ep(void *endpt);
-void scif_cleanup_ep_qp(struct scif_endpt *ep);
-void scif_add_epd_to_zombie_list(struct scif_endpt *ep, bool eplock_held);
-void scif_get_node_info(void);
-void scif_send_acks(struct scif_dev *dev);
-void scif_conn_handler(struct work_struct *work);
-int scif_rsrv_port(u16 port);
-void scif_get_port(u16 port);
-int scif_get_new_port(void);
-void scif_put_port(u16 port);
-int scif_user_send(scif_epd_t epd, void __user *msg, int len, int flags);
-int scif_user_recv(scif_epd_t epd, void __user *msg, int len, int flags);
-void scif_cnctreq(struct scif_dev *scifdev, struct scifmsg *msg);
-void scif_cnctgnt(struct scif_dev *scifdev, struct scifmsg *msg);
-void scif_cnctgnt_ack(struct scif_dev *scifdev, struct scifmsg *msg);
-void scif_cnctgnt_nack(struct scif_dev *scifdev, struct scifmsg *msg);
-void scif_cnctrej(struct scif_dev *scifdev, struct scifmsg *msg);
-void scif_discnct(struct scif_dev *scifdev, struct scifmsg *msg);
-void scif_discnt_ack(struct scif_dev *scifdev, struct scifmsg *msg);
-void scif_clientsend(struct scif_dev *scifdev, struct scifmsg *msg);
-void scif_clientrcvd(struct scif_dev *scifdev, struct scifmsg *msg);
-int __scif_connect(scif_epd_t epd, struct scif_port_id *dst, bool non_block);
-int __scif_flush(scif_epd_t epd);
-int scif_mmap(struct vm_area_struct *vma, scif_epd_t epd);
-__poll_t __scif_pollfd(struct file *f, poll_table *wait,
- struct scif_endpt *ep);
-int __scif_pin_pages(void *addr, size_t len, int *out_prot,
- int map_flags, scif_pinned_pages_t *pages);
-#endif /* SCIF_EPD_H */
diff --git a/drivers/misc/mic/scif/scif_fd.c b/drivers/misc/mic/scif/scif_fd.c
deleted file mode 100644
index 3f08646cd78a..000000000000
--- a/drivers/misc/mic/scif/scif_fd.c
+++ /dev/null
@@ -1,462 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Intel SCIF driver.
- */
-#include "scif_main.h"
-
-static int scif_fdopen(struct inode *inode, struct file *f)
-{
- struct scif_endpt *priv = scif_open();
-
- if (!priv)
- return -ENOMEM;
- f->private_data = priv;
- return 0;
-}
-
-static int scif_fdclose(struct inode *inode, struct file *f)
-{
- struct scif_endpt *priv = f->private_data;
-
- return scif_close(priv);
-}
-
-static int scif_fdmmap(struct file *f, struct vm_area_struct *vma)
-{
- struct scif_endpt *priv = f->private_data;
-
- return scif_mmap(vma, priv);
-}
-
-static __poll_t scif_fdpoll(struct file *f, poll_table *wait)
-{
- struct scif_endpt *priv = f->private_data;
-
- return __scif_pollfd(f, wait, priv);
-}
-
-static int scif_fdflush(struct file *f, fl_owner_t id)
-{
- struct scif_endpt *ep = f->private_data;
-
- spin_lock(&ep->lock);
-	/*
-	 * The listening endpoint stashes the open file information before
-	 * waiting for incoming connections. The release callback would never
-	 * be called if the application closed the endpoint while waiting for
-	 * incoming connections from a separate thread, since the file
-	 * descriptor reference count is bumped up in the accept IOCTL. Call
-	 * the flush routine if the id matches the endpoint open file
-	 * information so that the listening endpoint can be woken up and the
-	 * fd released.
-	 */
- if (ep->files == id)
- __scif_flush(ep);
- spin_unlock(&ep->lock);
- return 0;
-}
-
-static __always_inline void scif_err_debug(int err, const char *str)
-{
-	/*
-	 * ENOTCONN is a common, uninteresting error which would otherwise
-	 * flood the console with debug messages unnecessarily.
-	 */
- if (err < 0 && err != -ENOTCONN)
- dev_dbg(scif_info.mdev.this_device, "%s err %d\n", str, err);
-}
-
-static long scif_fdioctl(struct file *f, unsigned int cmd, unsigned long arg)
-{
- struct scif_endpt *priv = f->private_data;
- void __user *argp = (void __user *)arg;
- int err = 0;
- struct scifioctl_msg request;
- bool non_block = false;
-
- non_block = !!(f->f_flags & O_NONBLOCK);
-
- switch (cmd) {
- case SCIF_BIND:
- {
- int pn;
-
- if (copy_from_user(&pn, argp, sizeof(pn)))
- return -EFAULT;
-
- pn = scif_bind(priv, pn);
- if (pn < 0)
- return pn;
-
- if (copy_to_user(argp, &pn, sizeof(pn)))
- return -EFAULT;
-
- return 0;
- }
- case SCIF_LISTEN:
- return scif_listen(priv, arg);
- case SCIF_CONNECT:
- {
- struct scifioctl_connect req;
- struct scif_endpt *ep = (struct scif_endpt *)priv;
-
- if (copy_from_user(&req, argp, sizeof(req)))
- return -EFAULT;
-
- err = __scif_connect(priv, &req.peer, non_block);
- if (err < 0)
- return err;
-
- req.self.node = ep->port.node;
- req.self.port = ep->port.port;
-
- if (copy_to_user(argp, &req, sizeof(req)))
- return -EFAULT;
-
- return 0;
- }
-	/*
-	 * Accept is done in two halves. The request ioctl does the basic
-	 * work of accepting the request and returning information about it,
-	 * including the internal ID of the end point. The register half is
-	 * done with the internal ID on a new file descriptor opened by the
-	 * requesting process (see the sketch after the SCIF_ACCEPTREG case
-	 * below).
-	 */
- case SCIF_ACCEPTREQ:
- {
- struct scifioctl_accept request;
- scif_epd_t *ep = (scif_epd_t *)&request.endpt;
-
- if (copy_from_user(&request, argp, sizeof(request)))
- return -EFAULT;
-
- err = scif_accept(priv, &request.peer, ep, request.flags);
- if (err < 0)
- return err;
-
- if (copy_to_user(argp, &request, sizeof(request))) {
- scif_close(*ep);
- return -EFAULT;
- }
- /*
- * Add to the list of user mode eps where the second half
- * of the accept is not yet completed.
- */
- mutex_lock(&scif_info.eplock);
- list_add_tail(&((*ep)->miacceptlist), &scif_info.uaccept);
- list_add_tail(&((*ep)->liacceptlist), &priv->li_accept);
- (*ep)->listenep = priv;
- priv->acceptcnt++;
- mutex_unlock(&scif_info.eplock);
-
- return 0;
- }
- case SCIF_ACCEPTREG:
- {
- struct scif_endpt *priv = f->private_data;
- struct scif_endpt *newep;
- struct scif_endpt *lisep;
- struct scif_endpt *fep = NULL;
- struct scif_endpt *tmpep;
- struct list_head *pos, *tmpq;
-
- /* Finally replace the pointer to the accepted endpoint */
- if (copy_from_user(&newep, argp, sizeof(void *)))
- return -EFAULT;
-
-		/* Remove from the user accept queue */
- mutex_lock(&scif_info.eplock);
- list_for_each_safe(pos, tmpq, &scif_info.uaccept) {
- tmpep = list_entry(pos,
- struct scif_endpt, miacceptlist);
- if (tmpep == newep) {
- list_del(pos);
- fep = tmpep;
- break;
- }
- }
-
- if (!fep) {
- mutex_unlock(&scif_info.eplock);
- return -ENOENT;
- }
-
- lisep = newep->listenep;
- list_for_each_safe(pos, tmpq, &lisep->li_accept) {
- tmpep = list_entry(pos,
- struct scif_endpt, liacceptlist);
- if (tmpep == newep) {
- list_del(pos);
- lisep->acceptcnt--;
- break;
- }
- }
-
- mutex_unlock(&scif_info.eplock);
-
- /* Free the resources automatically created from the open. */
- scif_anon_inode_fput(priv);
- scif_teardown_ep(priv);
- scif_add_epd_to_zombie_list(priv, !SCIF_EPLOCK_HELD);
- f->private_data = newep;
- return 0;
- }
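-	/*
-	 * Editor's sketch of the matching user-space sequence (hypothetical
-	 * fd names, error handling omitted):
-	 *
-	 *	struct scifioctl_accept req = { 0 };
-	 *	ioctl(listen_fd, SCIF_ACCEPTREQ, &req);
-	 *	new_fd = open("/dev/scif", O_RDWR);
-	 *	ioctl(new_fd, SCIF_ACCEPTREG, &req.endpt);
-	 */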
- case SCIF_SEND:
- {
- struct scif_endpt *priv = f->private_data;
-
- if (copy_from_user(&request, argp,
- sizeof(struct scifioctl_msg))) {
- err = -EFAULT;
- goto send_err;
- }
- err = scif_user_send(priv, (void __user *)request.msg,
- request.len, request.flags);
- if (err < 0)
- goto send_err;
- if (copy_to_user(&
- ((struct scifioctl_msg __user *)argp)->out_len,
- &err, sizeof(err))) {
- err = -EFAULT;
- goto send_err;
- }
- err = 0;
-send_err:
- scif_err_debug(err, "scif_send");
- return err;
- }
- case SCIF_RECV:
- {
- struct scif_endpt *priv = f->private_data;
-
- if (copy_from_user(&request, argp,
- sizeof(struct scifioctl_msg))) {
- err = -EFAULT;
- goto recv_err;
- }
-
- err = scif_user_recv(priv, (void __user *)request.msg,
- request.len, request.flags);
- if (err < 0)
- goto recv_err;
-
- if (copy_to_user(&
- ((struct scifioctl_msg __user *)argp)->out_len,
- &err, sizeof(err))) {
- err = -EFAULT;
- goto recv_err;
- }
- err = 0;
-recv_err:
- scif_err_debug(err, "scif_recv");
- return err;
- }
- case SCIF_GET_NODEIDS:
- {
- struct scifioctl_node_ids node_ids;
- int entries;
- u16 *nodes;
- void __user *unodes, *uself;
- u16 self;
-
- if (copy_from_user(&node_ids, argp, sizeof(node_ids))) {
- err = -EFAULT;
- goto getnodes_err2;
- }
-
- entries = min_t(int, scif_info.maxid, node_ids.len);
- nodes = kmalloc_array(entries, sizeof(u16), GFP_KERNEL);
- if (entries && !nodes) {
- err = -ENOMEM;
- goto getnodes_err2;
- }
- node_ids.len = scif_get_node_ids(nodes, entries, &self);
-
- unodes = (void __user *)node_ids.nodes;
- if (copy_to_user(unodes, nodes, sizeof(u16) * entries)) {
- err = -EFAULT;
- goto getnodes_err1;
- }
-
- uself = (void __user *)node_ids.self;
- if (copy_to_user(uself, &self, sizeof(u16))) {
- err = -EFAULT;
- goto getnodes_err1;
- }
-
- if (copy_to_user(argp, &node_ids, sizeof(node_ids))) {
- err = -EFAULT;
- goto getnodes_err1;
- }
-getnodes_err1:
- kfree(nodes);
-getnodes_err2:
- return err;
- }
- case SCIF_REG:
- {
- struct scif_endpt *priv = f->private_data;
- struct scifioctl_reg reg;
- off_t ret;
-
- if (copy_from_user(&reg, argp, sizeof(reg))) {
- err = -EFAULT;
- goto reg_err;
- }
- if (reg.flags & SCIF_MAP_KERNEL) {
- err = -EINVAL;
- goto reg_err;
- }
- ret = scif_register(priv, (void *)reg.addr, reg.len,
- reg.offset, reg.prot, reg.flags);
- if (ret < 0) {
- err = (int)ret;
- goto reg_err;
- }
-
- if (copy_to_user(&((struct scifioctl_reg __user *)argp)
- ->out_offset, &ret, sizeof(reg.out_offset))) {
- err = -EFAULT;
- goto reg_err;
- }
- err = 0;
-reg_err:
- scif_err_debug(err, "scif_register");
- return err;
- }
- case SCIF_UNREG:
- {
- struct scif_endpt *priv = f->private_data;
- struct scifioctl_unreg unreg;
-
- if (copy_from_user(&unreg, argp, sizeof(unreg))) {
- err = -EFAULT;
- goto unreg_err;
- }
- err = scif_unregister(priv, unreg.offset, unreg.len);
-unreg_err:
- scif_err_debug(err, "scif_unregister");
- return err;
- }
- case SCIF_READFROM:
- {
- struct scif_endpt *priv = f->private_data;
- struct scifioctl_copy copy;
-
- if (copy_from_user(&copy, argp, sizeof(copy))) {
- err = -EFAULT;
- goto readfrom_err;
- }
- err = scif_readfrom(priv, copy.loffset, copy.len, copy.roffset,
- copy.flags);
-readfrom_err:
- scif_err_debug(err, "scif_readfrom");
- return err;
- }
- case SCIF_WRITETO:
- {
- struct scif_endpt *priv = f->private_data;
- struct scifioctl_copy copy;
-
- if (copy_from_user(&copy, argp, sizeof(copy))) {
- err = -EFAULT;
- goto writeto_err;
- }
- err = scif_writeto(priv, copy.loffset, copy.len, copy.roffset,
- copy.flags);
-writeto_err:
- scif_err_debug(err, "scif_writeto");
- return err;
- }
- case SCIF_VREADFROM:
- {
- struct scif_endpt *priv = f->private_data;
- struct scifioctl_copy copy;
-
- if (copy_from_user(&copy, argp, sizeof(copy))) {
- err = -EFAULT;
- goto vreadfrom_err;
- }
- err = scif_vreadfrom(priv, (void __force *)copy.addr, copy.len,
- copy.roffset, copy.flags);
-vreadfrom_err:
- scif_err_debug(err, "scif_vreadfrom");
- return err;
- }
- case SCIF_VWRITETO:
- {
- struct scif_endpt *priv = f->private_data;
- struct scifioctl_copy copy;
-
- if (copy_from_user(&copy, argp, sizeof(copy))) {
- err = -EFAULT;
- goto vwriteto_err;
- }
- err = scif_vwriteto(priv, (void __force *)copy.addr, copy.len,
- copy.roffset, copy.flags);
-vwriteto_err:
- scif_err_debug(err, "scif_vwriteto");
- return err;
- }
- case SCIF_FENCE_MARK:
- {
- struct scif_endpt *priv = f->private_data;
- struct scifioctl_fence_mark mark;
- int tmp_mark = 0;
-
- if (copy_from_user(&mark, argp, sizeof(mark))) {
- err = -EFAULT;
- goto fence_mark_err;
- }
- err = scif_fence_mark(priv, mark.flags, &tmp_mark);
- if (err)
- goto fence_mark_err;
- if (copy_to_user((void __user *)mark.mark, &tmp_mark,
- sizeof(tmp_mark))) {
- err = -EFAULT;
- goto fence_mark_err;
- }
-fence_mark_err:
- scif_err_debug(err, "scif_fence_mark");
- return err;
- }
- case SCIF_FENCE_WAIT:
- {
- struct scif_endpt *priv = f->private_data;
-
- err = scif_fence_wait(priv, arg);
- scif_err_debug(err, "scif_fence_wait");
- return err;
- }
- case SCIF_FENCE_SIGNAL:
- {
- struct scif_endpt *priv = f->private_data;
- struct scifioctl_fence_signal signal;
-
- if (copy_from_user(&signal, argp, sizeof(signal))) {
- err = -EFAULT;
- goto fence_signal_err;
- }
-
- err = scif_fence_signal(priv, signal.loff, signal.lval,
- signal.roff, signal.rval, signal.flags);
-fence_signal_err:
- scif_err_debug(err, "scif_fence_signal");
- return err;
- }
- }
- return -EINVAL;
-}
-
-const struct file_operations scif_fops = {
- .open = scif_fdopen,
- .release = scif_fdclose,
- .unlocked_ioctl = scif_fdioctl,
- .mmap = scif_fdmmap,
- .poll = scif_fdpoll,
- .flush = scif_fdflush,
- .owner = THIS_MODULE,
-};
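For reference, the ioctl cases above are reached from user space through the "scif" misc device this table is attached to. A minimal, hypothetical user-space sketch of the SCIF_SEND path, assuming the uapi structures from <linux/scif_ioctl.h> and an endpoint already set up via the earlier connect ioctls (error handling elided):

	#include <fcntl.h>
	#include <stdint.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/scif_ioctl.h>	/* uapi definitions assumed to match the structs above */

	int scif_send_hello(void)
	{
		int fd = open("/dev/scif", O_RDWR);	/* misc device registered by the driver */
		char buf[] = "hello";
		struct scifioctl_msg msg;
		int err;

		memset(&msg, 0, sizeof(msg));
		msg.msg = (uintptr_t)buf;	/* user pointer consumed by scif_user_send() */
		msg.len = sizeof(buf);
		msg.flags = 0;
		err = ioctl(fd, SCIF_SEND, &msg);	/* on success, msg.out_len holds bytes sent */
		close(fd);
		return err;
	}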
diff --git a/drivers/misc/mic/scif/scif_fence.c b/drivers/misc/mic/scif/scif_fence.c
deleted file mode 100644
index 4fedf6183951..000000000000
--- a/drivers/misc/mic/scif/scif_fence.c
+++ /dev/null
@@ -1,783 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2015 Intel Corporation.
- *
- * Intel SCIF driver.
- */
-
-#include "scif_main.h"
-
-/**
- * scif_recv_mark: Handle SCIF_MARK request
- * @scifdev: SCIF device
- * @msg: Interrupt message
- *
- * The peer has requested a mark.
- */
-void scif_recv_mark(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
- int mark = 0;
- int err;
-
- err = _scif_fence_mark(ep, &mark);
- if (err)
- msg->uop = SCIF_MARK_NACK;
- else
- msg->uop = SCIF_MARK_ACK;
- msg->payload[0] = ep->remote_ep;
- msg->payload[2] = mark;
- scif_nodeqp_send(ep->remote_dev, msg);
-}
-
-/**
- * scif_recv_mark_resp: Handle SCIF_MARK_(N)ACK messages.
- * @scifdev: SCIF device
- * @msg: Interrupt message
- *
- * The peer has responded to a SCIF_MARK message.
- */
-void scif_recv_mark_resp(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
- struct scif_fence_info *fence_req =
- (struct scif_fence_info *)msg->payload[1];
-
- mutex_lock(&ep->rma_info.rma_lock);
- if (msg->uop == SCIF_MARK_ACK) {
- fence_req->state = OP_COMPLETED;
- fence_req->dma_mark = (int)msg->payload[2];
- } else {
- fence_req->state = OP_FAILED;
- }
- mutex_unlock(&ep->rma_info.rma_lock);
- complete(&fence_req->comp);
-}
-
-/**
- * scif_recv_wait: Handle SCIF_WAIT request
- * @scifdev: SCIF device
- * @msg: Interrupt message
- *
- * The peer has requested waiting on a fence.
- */
-void scif_recv_wait(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
- struct scif_remote_fence_info *fence;
-
- /*
- * Allocate structure for remote fence information and
- * send a NACK if the allocation fails. The requesting peer
- * then returns -ENOMEM upon receiving the NACK.
- */
- fence = kmalloc(sizeof(*fence), GFP_KERNEL);
- if (!fence) {
- msg->payload[0] = ep->remote_ep;
- msg->uop = SCIF_WAIT_NACK;
- scif_nodeqp_send(ep->remote_dev, msg);
- return;
- }
-
- /* Prepare the fence request */
- memcpy(&fence->msg, msg, sizeof(struct scifmsg));
- INIT_LIST_HEAD(&fence->list);
-
- /* Insert to the global remote fence request list */
- mutex_lock(&scif_info.fencelock);
- atomic_inc(&ep->rma_info.fence_refcount);
- list_add_tail(&fence->list, &scif_info.fence);
- mutex_unlock(&scif_info.fencelock);
-
- schedule_work(&scif_info.misc_work);
-}
-
-/**
- * scif_recv_wait_resp: Handle SCIF_WAIT_(N)ACK messages.
- * @scifdev: SCIF device
- * @msg: Interrupt message
- *
- * The peer has responded to a SCIF_WAIT message.
- */
-void scif_recv_wait_resp(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
- struct scif_fence_info *fence_req =
- (struct scif_fence_info *)msg->payload[1];
-
- mutex_lock(&ep->rma_info.rma_lock);
- if (msg->uop == SCIF_WAIT_ACK)
- fence_req->state = OP_COMPLETED;
- else
- fence_req->state = OP_FAILED;
- mutex_unlock(&ep->rma_info.rma_lock);
- complete(&fence_req->comp);
-}
-
-/**
- * scif_recv_sig_local: Handle SCIF_SIG_LOCAL request
- * @scifdev: SCIF device
- * @msg: Interrupt message
- *
- * The peer has requested a signal on a local offset.
- */
-void scif_recv_sig_local(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
- int err;
-
- err = scif_prog_signal(ep, msg->payload[1], msg->payload[2],
- SCIF_WINDOW_SELF);
- if (err)
- msg->uop = SCIF_SIG_NACK;
- else
- msg->uop = SCIF_SIG_ACK;
- msg->payload[0] = ep->remote_ep;
- scif_nodeqp_send(ep->remote_dev, msg);
-}
-
-/**
- * scif_recv_sig_remote: Handle SCIF_SIGNAL_REMOTE request
- * @scifdev: SCIF device
- * @msg: Interrupt message
- *
- * The peer has requested a signal on a remote offset.
- */
-void scif_recv_sig_remote(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
- int err;
-
- err = scif_prog_signal(ep, msg->payload[1], msg->payload[2],
- SCIF_WINDOW_PEER);
- if (err)
- msg->uop = SCIF_SIG_NACK;
- else
- msg->uop = SCIF_SIG_ACK;
- msg->payload[0] = ep->remote_ep;
- scif_nodeqp_send(ep->remote_dev, msg);
-}
-
-/**
- * scif_recv_sig_resp: Handle SCIF_SIG_(N)ACK messages.
- * @scifdev: SCIF device
- * @msg: Interrupt message
- *
- * The peer has responded to a signal request.
- */
-void scif_recv_sig_resp(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
- struct scif_fence_info *fence_req =
- (struct scif_fence_info *)msg->payload[3];
-
- mutex_lock(&ep->rma_info.rma_lock);
- if (msg->uop == SCIF_SIG_ACK)
- fence_req->state = OP_COMPLETED;
- else
- fence_req->state = OP_FAILED;
- mutex_unlock(&ep->rma_info.rma_lock);
- complete(&fence_req->comp);
-}
-
-static inline void *scif_get_local_va(off_t off, struct scif_window *window)
-{
- struct page **pages = window->pinned_pages->pages;
- int page_nr = (off - window->offset) >> PAGE_SHIFT;
- off_t page_off = off & ~PAGE_MASK;
-
- return page_address(pages[page_nr]) + page_off;
-}
-
-static void scif_prog_signal_cb(void *arg)
-{
- struct scif_cb_arg *cb_arg = arg;
-
- dma_pool_free(cb_arg->ep->remote_dev->signal_pool, cb_arg->status,
- cb_arg->src_dma_addr);
- kfree(cb_arg);
-}
-
-static int _scif_prog_signal(scif_epd_t epd, dma_addr_t dst, u64 val)
-{
- struct scif_endpt *ep = (struct scif_endpt *)epd;
- struct dma_chan *chan = ep->rma_info.dma_chan;
- struct dma_device *ddev = chan->device;
- bool x100 = !is_dma_copy_aligned(chan->device, 1, 1, 1);
- struct dma_async_tx_descriptor *tx;
- struct scif_status *status = NULL;
- struct scif_cb_arg *cb_arg = NULL;
- dma_addr_t src;
- dma_cookie_t cookie;
- int err;
-
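- /*
- * A zero-length memcpy flagged DMA_PREP_FENCE is submitted first so
- * that the signal write prepared below is ordered behind any DMA
- * already queued on this channel.
- */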
- tx = ddev->device_prep_dma_memcpy(chan, 0, 0, 0, DMA_PREP_FENCE);
- if (!tx) {
- err = -ENOMEM;
- dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
- __func__, __LINE__, err);
- goto alloc_fail;
- }
- cookie = tx->tx_submit(tx);
- if (dma_submit_error(cookie)) {
- err = (int)cookie;
- dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
- __func__, __LINE__, err);
- goto alloc_fail;
- }
- dma_async_issue_pending(chan);
- if (x100) {
- /*
- * For X100 use the status descriptor to write the value to
- * the destination.
- */
- tx = ddev->device_prep_dma_imm_data(chan, dst, val, 0);
- } else {
- status = dma_pool_alloc(ep->remote_dev->signal_pool, GFP_KERNEL,
- &src);
- if (!status) {
- err = -ENOMEM;
- dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
- __func__, __LINE__, err);
- goto alloc_fail;
- }
- status->val = val;
- status->src_dma_addr = src;
- status->ep = ep;
- src += offsetof(struct scif_status, val);
- tx = ddev->device_prep_dma_memcpy(chan, dst, src, sizeof(val),
- DMA_PREP_INTERRUPT);
- }
- if (!tx) {
- err = -ENOMEM;
- dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
- __func__, __LINE__, err);
- goto dma_fail;
- }
- if (!x100) {
- cb_arg = kmalloc(sizeof(*cb_arg), GFP_KERNEL);
- if (!cb_arg) {
- err = -ENOMEM;
- goto dma_fail;
- }
- cb_arg->src_dma_addr = src;
- cb_arg->status = status;
- cb_arg->ep = ep;
- tx->callback = scif_prog_signal_cb;
- tx->callback_param = cb_arg;
- }
- cookie = tx->tx_submit(tx);
- if (dma_submit_error(cookie)) {
- err = -EIO;
- dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
- __func__, __LINE__, err);
- goto dma_fail;
- }
- dma_async_issue_pending(chan);
- return 0;
-dma_fail:
- if (!x100) {
- dma_pool_free(ep->remote_dev->signal_pool, status,
- src - offsetof(struct scif_status, val));
- kfree(cb_arg);
- }
-alloc_fail:
- return err;
-}
-
-/**
- * scif_prog_signal:
- * @epd: Endpoint Descriptor
- * @offset: registered address to write @val to
- * @val: Value to be written at @offset
- * @type: Type of the window.
- *
- * Arrange to write a value to the registered offset after ensuring that the
- * offset provided is indeed valid.
- */
-int scif_prog_signal(scif_epd_t epd, off_t offset, u64 val,
- enum scif_window_type type)
-{
- struct scif_endpt *ep = (struct scif_endpt *)epd;
- struct scif_window *window = NULL;
- struct scif_rma_req req;
- dma_addr_t dst_dma_addr;
- int err;
-
- mutex_lock(&ep->rma_info.rma_lock);
- req.out_window = &window;
- req.offset = offset;
- req.nr_bytes = sizeof(u64);
- req.prot = SCIF_PROT_WRITE;
- req.type = SCIF_WINDOW_SINGLE;
- if (type == SCIF_WINDOW_SELF)
- req.head = &ep->rma_info.reg_list;
- else
- req.head = &ep->rma_info.remote_reg_list;
- /* Does a valid window exist? */
- err = scif_query_window(&req);
- if (err) {
- dev_err(scif_info.mdev.this_device,
- "%s %d err %d\n", __func__, __LINE__, err);
- goto unlock_ret;
- }
-
- if (scif_is_mgmt_node() && scifdev_self(ep->remote_dev)) {
- u64 *dst_virt;
-
- if (type == SCIF_WINDOW_SELF)
- dst_virt = scif_get_local_va(offset, window);
- else
- dst_virt =
- scif_get_local_va(offset, (struct scif_window *)
- window->peer_window);
- *dst_virt = val;
- } else {
- dst_dma_addr = __scif_off_to_dma_addr(window, offset);
- err = _scif_prog_signal(epd, dst_dma_addr, val);
- }
-unlock_ret:
- mutex_unlock(&ep->rma_info.rma_lock);
- return err;
-}
-
-static int _scif_fence_wait(scif_epd_t epd, int mark)
-{
- struct scif_endpt *ep = (struct scif_endpt *)epd;
- dma_cookie_t cookie = mark & ~SCIF_REMOTE_FENCE;
- int err;
-
- /* Wait for DMA callback in scif_fence_mark_cb(..) */
- err = wait_event_interruptible_timeout(ep->rma_info.markwq,
- dma_async_is_tx_complete(
- ep->rma_info.dma_chan,
- cookie, NULL, NULL) ==
- DMA_COMPLETE,
- SCIF_NODE_ALIVE_TIMEOUT);
- if (!err)
- err = -ETIMEDOUT;
- else if (err > 0)
- err = 0;
- return err;
-}
-
-/**
- * scif_rma_handle_remote_fences:
- *
- * This routine services remote fence requests.
- */
-void scif_rma_handle_remote_fences(void)
-{
- struct list_head *item, *tmp;
- struct scif_remote_fence_info *fence;
- struct scif_endpt *ep;
- int mark, err;
-
- might_sleep();
- mutex_lock(&scif_info.fencelock);
- list_for_each_safe(item, tmp, &scif_info.fence) {
- fence = list_entry(item, struct scif_remote_fence_info,
- list);
- /* Remove fence from global list */
- list_del(&fence->list);
-
- /* Initiate the fence operation */
- ep = (struct scif_endpt *)fence->msg.payload[0];
- mark = fence->msg.payload[2];
- err = _scif_fence_wait(ep, mark);
- if (err)
- fence->msg.uop = SCIF_WAIT_NACK;
- else
- fence->msg.uop = SCIF_WAIT_ACK;
- fence->msg.payload[0] = ep->remote_ep;
- scif_nodeqp_send(ep->remote_dev, &fence->msg);
- kfree(fence);
- if (!atomic_sub_return(1, &ep->rma_info.fence_refcount))
- schedule_work(&scif_info.misc_work);
- }
- mutex_unlock(&scif_info.fencelock);
-}
-
-static int _scif_send_fence(scif_epd_t epd, int uop, int mark, int *out_mark)
-{
- int err;
- struct scifmsg msg;
- struct scif_fence_info *fence_req;
- struct scif_endpt *ep = (struct scif_endpt *)epd;
-
- fence_req = kmalloc(sizeof(*fence_req), GFP_KERNEL);
- if (!fence_req) {
- err = -ENOMEM;
- goto error;
- }
-
- fence_req->state = OP_IN_PROGRESS;
- init_completion(&fence_req->comp);
-
- msg.src = ep->port;
- msg.uop = uop;
- msg.payload[0] = ep->remote_ep;
- msg.payload[1] = (u64)fence_req;
- if (uop == SCIF_WAIT)
- msg.payload[2] = mark;
- spin_lock(&ep->lock);
- if (ep->state == SCIFEP_CONNECTED)
- err = scif_nodeqp_send(ep->remote_dev, &msg);
- else
- err = -ENOTCONN;
- spin_unlock(&ep->lock);
- if (err)
- goto error_free;
-retry:
- /* Wait for a SCIF_WAIT_(N)ACK message */
- err = wait_for_completion_timeout(&fence_req->comp,
- SCIF_NODE_ALIVE_TIMEOUT);
- if (!err && scifdev_alive(ep))
- goto retry;
- if (!err)
- err = -ENODEV;
- if (err > 0)
- err = 0;
- mutex_lock(&ep->rma_info.rma_lock);
- if (err < 0) {
- if (fence_req->state == OP_IN_PROGRESS)
- fence_req->state = OP_FAILED;
- }
- if (fence_req->state == OP_FAILED && !err)
- err = -ENOMEM;
- if (uop == SCIF_MARK && fence_req->state == OP_COMPLETED)
- *out_mark = SCIF_REMOTE_FENCE | fence_req->dma_mark;
- mutex_unlock(&ep->rma_info.rma_lock);
-error_free:
- kfree(fence_req);
-error:
- return err;
-}
-
-/**
- * scif_send_fence_mark:
- * @epd: end point descriptor.
- * @out_mark: Output DMA mark reported by peer.
- *
- * Send a remote fence mark request.
- */
-static int scif_send_fence_mark(scif_epd_t epd, int *out_mark)
-{
- return _scif_send_fence(epd, SCIF_MARK, 0, out_mark);
-}
-
-/**
- * scif_send_fence_wait:
- * @epd: end point descriptor.
- * @mark: DMA mark to wait for.
- *
- * Send a remote fence wait request.
- */
-static int scif_send_fence_wait(scif_epd_t epd, int mark)
-{
- return _scif_send_fence(epd, SCIF_WAIT, mark, NULL);
-}
-
-static int _scif_send_fence_signal_wait(struct scif_endpt *ep,
- struct scif_fence_info *fence_req)
-{
- int err;
-
-retry:
- /* Wait for a SCIF_SIG_(N)ACK message */
- err = wait_for_completion_timeout(&fence_req->comp,
- SCIF_NODE_ALIVE_TIMEOUT);
- if (!err && scifdev_alive(ep))
- goto retry;
- if (!err)
- err = -ENODEV;
- if (err > 0)
- err = 0;
- if (err < 0) {
- mutex_lock(&ep->rma_info.rma_lock);
- if (fence_req->state == OP_IN_PROGRESS)
- fence_req->state = OP_FAILED;
- mutex_unlock(&ep->rma_info.rma_lock);
- }
- if (fence_req->state == OP_FAILED && !err)
- err = -ENXIO;
- return err;
-}
-
-/**
- * scif_send_fence_signal:
- * @epd: endpoint descriptor
- * @roff: remote offset
- * @rval: remote value to write at @roff
- * @loff: local offset
- * @lval: local value to write at @loff
- * @flags: flags
- *
- * Sends a remote fence signal request.
- */
-static int scif_send_fence_signal(scif_epd_t epd, off_t roff, u64 rval,
- off_t loff, u64 lval, int flags)
-{
- int err = 0;
- struct scifmsg msg;
- struct scif_fence_info *fence_req;
- struct scif_endpt *ep = (struct scif_endpt *)epd;
-
- fence_req = kmalloc(sizeof(*fence_req), GFP_KERNEL);
- if (!fence_req) {
- err = -ENOMEM;
- goto error;
- }
-
- fence_req->state = OP_IN_PROGRESS;
- init_completion(&fence_req->comp);
- msg.src = ep->port;
- if (flags & SCIF_SIGNAL_LOCAL) {
- msg.uop = SCIF_SIG_LOCAL;
- msg.payload[0] = ep->remote_ep;
- msg.payload[1] = roff;
- msg.payload[2] = rval;
- msg.payload[3] = (u64)fence_req;
- spin_lock(&ep->lock);
- if (ep->state == SCIFEP_CONNECTED)
- err = scif_nodeqp_send(ep->remote_dev, &msg);
- else
- err = -ENOTCONN;
- spin_unlock(&ep->lock);
- if (err)
- goto error_free;
- err = _scif_send_fence_signal_wait(ep, fence_req);
- if (err)
- goto error_free;
- }
- fence_req->state = OP_IN_PROGRESS;
-
- if (flags & SCIF_SIGNAL_REMOTE) {
- msg.uop = SCIF_SIG_REMOTE;
- msg.payload[0] = ep->remote_ep;
- msg.payload[1] = loff;
- msg.payload[2] = lval;
- msg.payload[3] = (u64)fence_req;
- spin_lock(&ep->lock);
- if (ep->state == SCIFEP_CONNECTED)
- err = scif_nodeqp_send(ep->remote_dev, &msg);
- else
- err = -ENOTCONN;
- spin_unlock(&ep->lock);
- if (err)
- goto error_free;
- err = _scif_send_fence_signal_wait(ep, fence_req);
- }
-error_free:
- kfree(fence_req);
-error:
- return err;
-}
-
-static void scif_fence_mark_cb(void *arg)
-{
- struct scif_endpt *ep = (struct scif_endpt *)arg;
-
- wake_up_interruptible(&ep->rma_info.markwq);
- atomic_dec(&ep->rma_info.fence_refcount);
-}
-
-/**
- * _scif_fence_mark:
- * @epd: endpoint descriptor
- * @mark: DMA mark to set up
- *
- * Set up a mark for this endpoint and return the value of the mark.
- */
-int _scif_fence_mark(scif_epd_t epd, int *mark)
-{
- struct scif_endpt *ep = (struct scif_endpt *)epd;
- struct dma_chan *chan = ep->rma_info.dma_chan;
- struct dma_device *ddev = chan->device;
- struct dma_async_tx_descriptor *tx;
- dma_cookie_t cookie;
- int err;
-
- tx = ddev->device_prep_dma_memcpy(chan, 0, 0, 0, DMA_PREP_FENCE);
- if (!tx) {
- err = -ENOMEM;
- dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
- __func__, __LINE__, err);
- return err;
- }
- cookie = tx->tx_submit(tx);
- if (dma_submit_error(cookie)) {
- err = (int)cookie;
- dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
- __func__, __LINE__, err);
- return err;
- }
- dma_async_issue_pending(chan);
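- /*
- * The cookie of the interrupt descriptor prepared below becomes the
- * mark; its callback, scif_fence_mark_cb(), wakes up waiters on
- * markwq in _scif_fence_wait().
- */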
- tx = ddev->device_prep_dma_interrupt(chan, DMA_PREP_INTERRUPT);
- if (!tx) {
- err = -ENOMEM;
- dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
- __func__, __LINE__, err);
- return err;
- }
- tx->callback = scif_fence_mark_cb;
- tx->callback_param = ep;
- *mark = cookie = tx->tx_submit(tx);
- if (dma_submit_error(cookie)) {
- err = (int)cookie;
- dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
- __func__, __LINE__, err);
- return err;
- }
- atomic_inc(&ep->rma_info.fence_refcount);
- dma_async_issue_pending(chan);
- return 0;
-}
-
-#define SCIF_LOOPB_MAGIC_MARK 0xdead
-
-int scif_fence_mark(scif_epd_t epd, int flags, int *mark)
-{
- struct scif_endpt *ep = (struct scif_endpt *)epd;
- int err = 0;
-
- dev_dbg(scif_info.mdev.this_device,
- "SCIFAPI fence_mark: ep %p flags 0x%x mark 0x%x\n",
- ep, flags, *mark);
- err = scif_verify_epd(ep);
- if (err)
- return err;
-
- /* Invalid flags? */
- if (flags & ~(SCIF_FENCE_INIT_SELF | SCIF_FENCE_INIT_PEER))
- return -EINVAL;
-
- /* At least one of init self or peer RMA should be set */
- if (!(flags & (SCIF_FENCE_INIT_SELF | SCIF_FENCE_INIT_PEER)))
- return -EINVAL;
-
- /* Exactly one of init self or peer RMA should be set */
- if ((flags & SCIF_FENCE_INIT_SELF) && (flags & SCIF_FENCE_INIT_PEER))
- return -EINVAL;
-
- /*
- * Management node loopback does not need to use DMA.
- * Return a valid mark to be symmetric.
- */
- if (scifdev_self(ep->remote_dev) && scif_is_mgmt_node()) {
- *mark = SCIF_LOOPB_MAGIC_MARK;
- return 0;
- }
-
- if (flags & SCIF_FENCE_INIT_SELF)
- err = _scif_fence_mark(epd, mark);
- else
- err = scif_send_fence_mark(ep, mark);
-
- if (err)
- dev_err(scif_info.mdev.this_device,
- "%s %d err %d\n", __func__, __LINE__, err);
- dev_dbg(scif_info.mdev.this_device,
- "SCIFAPI fence_mark: ep %p flags 0x%x mark 0x%x err %d\n",
- ep, flags, *mark, err);
- return err;
-}
-EXPORT_SYMBOL_GPL(scif_fence_mark);
-
-int scif_fence_wait(scif_epd_t epd, int mark)
-{
- struct scif_endpt *ep = (struct scif_endpt *)epd;
- int err = 0;
-
- dev_dbg(scif_info.mdev.this_device,
- "SCIFAPI fence_wait: ep %p mark 0x%x\n",
- ep, mark);
- err = scif_verify_epd(ep);
- if (err)
- return err;
- /*
- * Management node loopback does not need to use DMA.
- * The only valid mark provided is SCIF_LOOPB_MAGIC_MARK
- * so simply return success if the mark matches it.
- */
- if (scifdev_self(ep->remote_dev) && scif_is_mgmt_node()) {
- if (mark == SCIF_LOOPB_MAGIC_MARK)
- return 0;
- else
- return -EINVAL;
- }
- if (mark & SCIF_REMOTE_FENCE)
- err = scif_send_fence_wait(epd, mark);
- else
- err = _scif_fence_wait(epd, mark);
- if (err < 0)
- dev_err(scif_info.mdev.this_device,
- "%s %d err %d\n", __func__, __LINE__, err);
- return err;
-}
-EXPORT_SYMBOL_GPL(scif_fence_wait);
-
-int scif_fence_signal(scif_epd_t epd, off_t loff, u64 lval,
- off_t roff, u64 rval, int flags)
-{
- struct scif_endpt *ep = (struct scif_endpt *)epd;
- int err = 0;
-
- dev_dbg(scif_info.mdev.this_device,
- "SCIFAPI fence_signal: ep %p loff 0x%lx lval 0x%llx roff 0x%lx rval 0x%llx flags 0x%x\n",
- ep, loff, lval, roff, rval, flags);
- err = scif_verify_epd(ep);
- if (err)
- return err;
-
- /* Invalid flags? */
- if (flags & ~(SCIF_FENCE_INIT_SELF | SCIF_FENCE_INIT_PEER |
- SCIF_SIGNAL_LOCAL | SCIF_SIGNAL_REMOTE))
- return -EINVAL;
-
- /* At least one of init self or peer RMA should be set */
- if (!(flags & (SCIF_FENCE_INIT_SELF | SCIF_FENCE_INIT_PEER)))
- return -EINVAL;
-
- /* Exactly one of init self or peer RMA should be set */
- if ((flags & SCIF_FENCE_INIT_SELF) && (flags & SCIF_FENCE_INIT_PEER))
- return -EINVAL;
-
- /* At least one of SCIF_SIGNAL_LOCAL or SCIF_SIGNAL_REMOTE required */
- if (!(flags & (SCIF_SIGNAL_LOCAL | SCIF_SIGNAL_REMOTE)))
- return -EINVAL;
-
- /* Only Dword aligned offsets allowed */
- if ((flags & SCIF_SIGNAL_LOCAL) && (loff & (sizeof(u32) - 1)))
- return -EINVAL;
-
- /* Only Dword aligned offsets allowed */
- if ((flags & SCIF_SIGNAL_REMOTE) && (roff & (sizeof(u32) - 1)))
- return -EINVAL;
-
- if (flags & SCIF_FENCE_INIT_PEER) {
- err = scif_send_fence_signal(epd, roff, rval, loff,
- lval, flags);
- } else {
- /* Local Signal in Local RAS */
- if (flags & SCIF_SIGNAL_LOCAL) {
- err = scif_prog_signal(epd, loff, lval,
- SCIF_WINDOW_SELF);
- if (err)
- goto error_ret;
- }
-
- /* Signal in Remote RAS */
- if (flags & SCIF_SIGNAL_REMOTE)
- err = scif_prog_signal(epd, roff,
- rval, SCIF_WINDOW_PEER);
- }
-error_ret:
- if (err)
- dev_err(scif_info.mdev.this_device,
- "%s %d err %d\n", __func__, __LINE__, err);
- return err;
-}
-EXPORT_SYMBOL_GPL(scif_fence_signal);
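Taken together, the three exports above form the public fence API. A minimal kernel-side sketch of the self-initiated flow, with a hypothetical connected endpoint epd and a dword-aligned registered offset loff with value lval (error handling abbreviated):

	int mark, err;

	err = scif_fence_mark(epd, SCIF_FENCE_INIT_SELF, &mark);
	if (!err)	/* block until RMAs issued before the mark complete */
		err = scif_fence_wait(epd, mark);
	if (!err)	/* then write lval at loff in the local registered AS */
		err = scif_fence_signal(epd, loff, lval, 0, 0,
					SCIF_FENCE_INIT_SELF | SCIF_SIGNAL_LOCAL);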
diff --git a/drivers/misc/mic/scif/scif_main.c b/drivers/misc/mic/scif/scif_main.c
deleted file mode 100644
index e2278bf9f11d..000000000000
--- a/drivers/misc/mic/scif/scif_main.c
+++ /dev/null
@@ -1,351 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Intel SCIF driver.
- */
-#include <linux/module.h>
-#include <linux/idr.h>
-
-#include <linux/mic_common.h>
-#include "../common/mic_dev.h"
-#include "../bus/scif_bus.h"
-#include "scif_peer_bus.h"
-#include "scif_main.h"
-#include "scif_map.h"
-
-struct scif_info scif_info = {
- .mdev = {
- .minor = MISC_DYNAMIC_MINOR,
- .name = "scif",
- .fops = &scif_fops,
- }
-};
-
-struct scif_dev *scif_dev;
-struct kmem_cache *unaligned_cache;
-static atomic_t g_loopb_cnt;
-
-/* Runs in the context of intr_wq */
-static void scif_intr_bh_handler(struct work_struct *work)
-{
- struct scif_dev *scifdev =
- container_of(work, struct scif_dev, intr_bh);
-
- if (scifdev_self(scifdev))
- scif_loopb_msg_handler(scifdev, scifdev->qpairs);
- else
- scif_nodeqp_intrhandler(scifdev, scifdev->qpairs);
-}
-
-int scif_setup_intr_wq(struct scif_dev *scifdev)
-{
- if (!scifdev->intr_wq) {
- snprintf(scifdev->intr_wqname, sizeof(scifdev->intr_wqname),
- "SCIF INTR %d", scifdev->node);
- scifdev->intr_wq =
- alloc_ordered_workqueue(scifdev->intr_wqname, 0);
- if (!scifdev->intr_wq)
- return -ENOMEM;
- INIT_WORK(&scifdev->intr_bh, scif_intr_bh_handler);
- }
- return 0;
-}
-
-void scif_destroy_intr_wq(struct scif_dev *scifdev)
-{
- if (scifdev->intr_wq) {
- destroy_workqueue(scifdev->intr_wq);
- scifdev->intr_wq = NULL;
- }
-}
-
-irqreturn_t scif_intr_handler(int irq, void *data)
-{
- struct scif_dev *scifdev = data;
- struct scif_hw_dev *sdev = scifdev->sdev;
-
- sdev->hw_ops->ack_interrupt(sdev, scifdev->db);
- queue_work(scifdev->intr_wq, &scifdev->intr_bh);
- return IRQ_HANDLED;
-}
-
-static void scif_qp_setup_handler(struct work_struct *work)
-{
- struct scif_dev *scifdev = container_of(work, struct scif_dev,
- qp_dwork.work);
- struct scif_hw_dev *sdev = scifdev->sdev;
- dma_addr_t da = 0;
- int err;
-
- if (scif_is_mgmt_node()) {
- struct mic_bootparam *bp = sdev->dp;
-
- da = bp->scif_card_dma_addr;
- scifdev->rdb = bp->h2c_scif_db;
- } else {
- struct mic_bootparam __iomem *bp = sdev->rdp;
-
- da = readq(&bp->scif_host_dma_addr);
- scifdev->rdb = ioread8(&bp->c2h_scif_db);
- }
- if (da) {
- err = scif_qp_response(da, scifdev);
- if (err)
- dev_err(&scifdev->sdev->dev,
- "scif_qp_response err %d\n", err);
- } else {
- schedule_delayed_work(&scifdev->qp_dwork,
- msecs_to_jiffies(1000));
- }
-}
-
-static int scif_setup_scifdev(void)
-{
- /* We support a maximum of 129 SCIF nodes including the mgmt node */
-#define MAX_SCIF_NODES 129
- int i;
- u8 num_nodes = MAX_SCIF_NODES;
-
- scif_dev = kcalloc(num_nodes, sizeof(*scif_dev), GFP_KERNEL);
- if (!scif_dev)
- return -ENOMEM;
- for (i = 0; i < num_nodes; i++) {
- struct scif_dev *scifdev = &scif_dev[i];
-
- scifdev->node = i;
- scifdev->exit = OP_IDLE;
- init_waitqueue_head(&scifdev->disconn_wq);
- mutex_init(&scifdev->lock);
- INIT_WORK(&scifdev->peer_add_work, scif_add_peer_device);
- INIT_DELAYED_WORK(&scifdev->p2p_dwork,
- scif_poll_qp_state);
- INIT_DELAYED_WORK(&scifdev->qp_dwork,
- scif_qp_setup_handler);
- INIT_LIST_HEAD(&scifdev->p2p);
- RCU_INIT_POINTER(scifdev->spdev, NULL);
- }
- return 0;
-}
-
-static void scif_destroy_scifdev(void)
-{
- kfree(scif_dev);
- scif_dev = NULL;
-}
-
-static int scif_probe(struct scif_hw_dev *sdev)
-{
- struct scif_dev *scifdev = &scif_dev[sdev->dnode];
- int rc;
-
- dev_set_drvdata(&sdev->dev, sdev);
- scifdev->sdev = sdev;
-
- if (atomic_add_return(1, &g_loopb_cnt) == 1) {
- struct scif_dev *loopb_dev = &scif_dev[sdev->snode];
-
- loopb_dev->sdev = sdev;
- rc = scif_setup_loopback_qp(loopb_dev);
- if (rc)
- goto exit;
- }
-
- rc = scif_setup_intr_wq(scifdev);
- if (rc)
- goto destroy_loopb;
- rc = scif_setup_qp(scifdev);
- if (rc)
- goto destroy_intr;
- scifdev->db = sdev->hw_ops->next_db(sdev);
- scifdev->cookie = sdev->hw_ops->request_irq(sdev, scif_intr_handler,
- "SCIF_INTR", scifdev,
- scifdev->db);
- if (IS_ERR(scifdev->cookie)) {
- rc = PTR_ERR(scifdev->cookie);
- goto free_qp;
- }
- if (scif_is_mgmt_node()) {
- struct mic_bootparam *bp = sdev->dp;
-
- bp->c2h_scif_db = scifdev->db;
- bp->scif_host_dma_addr = scifdev->qp_dma_addr;
- } else {
- struct mic_bootparam __iomem *bp = sdev->rdp;
-
- iowrite8(scifdev->db, &bp->h2c_scif_db);
- writeq(scifdev->qp_dma_addr, &bp->scif_card_dma_addr);
- }
- schedule_delayed_work(&scifdev->qp_dwork,
- msecs_to_jiffies(1000));
- return rc;
-free_qp:
- scif_free_qp(scifdev);
-destroy_intr:
- scif_destroy_intr_wq(scifdev);
-destroy_loopb:
- if (atomic_dec_and_test(&g_loopb_cnt))
- scif_destroy_loopback_qp(&scif_dev[sdev->snode]);
-exit:
- return rc;
-}
-
-void scif_stop(struct scif_dev *scifdev)
-{
- struct scif_dev *dev;
- int i;
-
- for (i = scif_info.maxid; i >= 0; i--) {
- dev = &scif_dev[i];
- if (scifdev_self(dev))
- continue;
- scif_handle_remove_node(i);
- }
-}
-
-static void scif_remove(struct scif_hw_dev *sdev)
-{
- struct scif_dev *scifdev = &scif_dev[sdev->dnode];
-
- if (scif_is_mgmt_node()) {
- struct mic_bootparam *bp = sdev->dp;
-
- bp->c2h_scif_db = -1;
- bp->scif_host_dma_addr = 0x0;
- } else {
- struct mic_bootparam __iomem *bp = sdev->rdp;
-
- iowrite8(-1, &bp->h2c_scif_db);
- writeq(0x0, &bp->scif_card_dma_addr);
- }
- if (scif_is_mgmt_node()) {
- scif_disconnect_node(scifdev->node, true);
- } else {
- scif_info.card_initiated_exit = true;
- scif_stop(scifdev);
- }
- if (atomic_dec_and_test(&g_loopb_cnt))
- scif_destroy_loopback_qp(&scif_dev[sdev->snode]);
- if (scifdev->cookie) {
- sdev->hw_ops->free_irq(sdev, scifdev->cookie, scifdev);
- scifdev->cookie = NULL;
- }
- scif_destroy_intr_wq(scifdev);
- cancel_delayed_work(&scifdev->qp_dwork);
- scif_free_qp(scifdev);
- scifdev->rdb = -1;
- scifdev->sdev = NULL;
-}
-
-static struct scif_hw_dev_id id_table[] = {
- { MIC_SCIF_DEV, SCIF_DEV_ANY_ID },
- { 0 },
-};
-
-static struct scif_driver scif_driver = {
- .driver.name = KBUILD_MODNAME,
- .driver.owner = THIS_MODULE,
- .id_table = id_table,
- .probe = scif_probe,
- .remove = scif_remove,
-};
-
-static int _scif_init(void)
-{
- int rc;
-
- mutex_init(&scif_info.eplock);
- spin_lock_init(&scif_info.rmalock);
- spin_lock_init(&scif_info.nb_connect_lock);
- spin_lock_init(&scif_info.port_lock);
- mutex_init(&scif_info.conflock);
- mutex_init(&scif_info.connlock);
- mutex_init(&scif_info.fencelock);
- INIT_LIST_HEAD(&scif_info.uaccept);
- INIT_LIST_HEAD(&scif_info.listen);
- INIT_LIST_HEAD(&scif_info.zombie);
- INIT_LIST_HEAD(&scif_info.connected);
- INIT_LIST_HEAD(&scif_info.disconnected);
- INIT_LIST_HEAD(&scif_info.rma);
- INIT_LIST_HEAD(&scif_info.rma_tc);
- INIT_LIST_HEAD(&scif_info.mmu_notif_cleanup);
- INIT_LIST_HEAD(&scif_info.fence);
- INIT_LIST_HEAD(&scif_info.nb_connect_list);
- init_waitqueue_head(&scif_info.exitwq);
- scif_info.rma_tc_limit = SCIF_RMA_TEMP_CACHE_LIMIT;
- scif_info.en_msg_log = 0;
- scif_info.p2p_enable = 1;
- rc = scif_setup_scifdev();
- if (rc)
- goto error;
- unaligned_cache = kmem_cache_create("Unaligned_DMA",
- SCIF_KMEM_UNALIGNED_BUF_SIZE,
- 0, SLAB_HWCACHE_ALIGN, NULL);
- if (!unaligned_cache) {
- rc = -ENOMEM;
- goto free_sdev;
- }
- INIT_WORK(&scif_info.misc_work, scif_misc_handler);
- INIT_WORK(&scif_info.mmu_notif_work, scif_mmu_notif_handler);
- INIT_WORK(&scif_info.conn_work, scif_conn_handler);
- idr_init(&scif_ports);
- return 0;
-free_sdev:
- scif_destroy_scifdev();
-error:
- return rc;
-}
-
-static void _scif_exit(void)
-{
- idr_destroy(&scif_ports);
- kmem_cache_destroy(unaligned_cache);
- scif_destroy_scifdev();
-}
-
-static int __init scif_init(void)
-{
- struct miscdevice *mdev = &scif_info.mdev;
- int rc;
-
- rc = _scif_init();
- if (rc)
- return rc;
- iova_cache_get();
- rc = scif_peer_bus_init();
- if (rc)
- goto exit;
- rc = scif_register_driver(&scif_driver);
- if (rc)
- goto peer_bus_exit;
- rc = misc_register(mdev);
- if (rc)
- goto unreg_scif;
- scif_init_debugfs();
- return 0;
-unreg_scif:
- scif_unregister_driver(&scif_driver);
-peer_bus_exit:
- scif_peer_bus_exit();
-exit:
- _scif_exit();
- return rc;
-}
-
-static void __exit scif_exit(void)
-{
- scif_exit_debugfs();
- misc_deregister(&scif_info.mdev);
- scif_unregister_driver(&scif_driver);
- scif_peer_bus_exit();
- iova_cache_put();
- _scif_exit();
-}
-
-module_init(scif_init);
-module_exit(scif_exit);
-
-MODULE_DEVICE_TABLE(scif, id_table);
-MODULE_AUTHOR("Intel Corporation");
-MODULE_DESCRIPTION("Intel(R) SCIF driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/mic/scif/scif_main.h b/drivers/misc/mic/scif/scif_main.h
deleted file mode 100644
index bb3ab97d5b35..000000000000
--- a/drivers/misc/mic/scif/scif_main.h
+++ /dev/null
@@ -1,274 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Intel SCIF driver.
- */
-#ifndef SCIF_MAIN_H
-#define SCIF_MAIN_H
-
-#include <linux/sched/signal.h>
-#include <linux/pci.h>
-#include <linux/miscdevice.h>
-#include <linux/dmaengine.h>
-#include <linux/iova.h>
-#include <linux/anon_inodes.h>
-#include <linux/file.h>
-#include <linux/vmalloc.h>
-#include <linux/scif.h>
-#include "../common/mic_dev.h"
-
-#define SCIF_MGMT_NODE 0
-#define SCIF_DEFAULT_WATCHDOG_TO 30
-#define SCIF_NODE_ACCEPT_TIMEOUT (3 * HZ)
-#define SCIF_NODE_ALIVE_TIMEOUT (SCIF_DEFAULT_WATCHDOG_TO * HZ)
-#define SCIF_RMA_TEMP_CACHE_LIMIT 0x20000
-
-/*
- * Generic state used for certain node QP message exchanges
- * like Unregister, Alloc etc.
- */
-enum scif_msg_state {
- OP_IDLE = 1,
- OP_IN_PROGRESS,
- OP_COMPLETED,
- OP_FAILED
-};
-
-/*
- * struct scif_info - Global SCIF information
- *
- * @nodeid: Node ID by which this node is known to others
- * @maxid: Max known node ID
- * @total: Total number of SCIF nodes
- * @nr_zombies: number of zombie endpoints
- * @eplock: Lock to synchronize listening, zombie endpoint lists
- * @connlock: Lock to synchronize connected and disconnected lists
- * @nb_connect_lock: Synchronize non blocking connect operations
- * @port_lock: Synchronize access to SCIF ports
- * @uaccept: List of user acceptreq waiting for acceptreg
- * @listen: List of listening end points
- * @zombie: List of zombie end points with pending RMA's
- * @connected: List of end points in connected state
- * @disconnected: List of end points in disconnected state
- * @nb_connect_list: List for non blocking connections
- * @misc_work: miscellaneous SCIF tasks
- * @conflock: Lock to synchronize SCIF node configuration changes
- * @en_msg_log: Enable debug message logging
- * @p2p_enable: Enable P2P SCIF network
- * @mdev: The MISC device
- * @conn_work: Work for workqueue handling all connections
- * @exitwq: Wait queue for waiting for an EXIT node QP message response
- * @loopb_dev: Dummy SCIF device used for loopback
- * @loopb_wq: Workqueue used for handling loopback messages
- * @loopb_wqname: Name of loopback workqueue
- * @loopb_work: Used for submitting work to loopb_wq
- * @loopb_recv_q: List of messages received on the loopb_wq
- * @card_initiated_exit: set when the card has initiated the exit
- * @rmalock: Synchronize access to RMA operations
- * @fencelock: Synchronize access to list of remote fences requested.
- * @rma: List of temporary registered windows to be destroyed.
- * @rma_tc: List of temporary registered & cached Windows to be destroyed
- * @fence: List of remote fence requests
- * @mmu_notif_work: Work for registration caching MMU notifier workqueue
- * @mmu_notif_cleanup: List of temporary cached windows for reg cache
- * @rma_tc_limit: RMA temporary cache limit
- */
-struct scif_info {
- u8 nodeid;
- u8 maxid;
- u8 total;
- u32 nr_zombies;
- struct mutex eplock;
- struct mutex connlock;
- spinlock_t nb_connect_lock;
- spinlock_t port_lock;
- struct list_head uaccept;
- struct list_head listen;
- struct list_head zombie;
- struct list_head connected;
- struct list_head disconnected;
- struct list_head nb_connect_list;
- struct work_struct misc_work;
- struct mutex conflock;
- u8 en_msg_log;
- u8 p2p_enable;
- struct miscdevice mdev;
- struct work_struct conn_work;
- wait_queue_head_t exitwq;
- struct scif_dev *loopb_dev;
- struct workqueue_struct *loopb_wq;
- char loopb_wqname[16];
- struct work_struct loopb_work;
- struct list_head loopb_recv_q;
- bool card_initiated_exit;
- spinlock_t rmalock;
- struct mutex fencelock;
- struct list_head rma;
- struct list_head rma_tc;
- struct list_head fence;
- struct work_struct mmu_notif_work;
- struct list_head mmu_notif_cleanup;
- unsigned long rma_tc_limit;
-};
-
-/*
- * struct scif_p2p_info - SCIF mapping information used for P2P
- *
- * @ppi_peer_id: SCIF peer node id
- * @ppi_sg: Scatter list for bar information (one for mmio and one for aper)
- * @sg_nentries: Number of entries in the scatterlist
- * @ppi_da: DMA address for MMIO and APER bars
- * @ppi_len: Length of MMIO and APER bars
- * @ppi_list: Link in list of mapping information
- */
-struct scif_p2p_info {
- u8 ppi_peer_id;
- struct scatterlist *ppi_sg[2];
- u64 sg_nentries[2];
- dma_addr_t ppi_da[2];
- u64 ppi_len[2];
-#define SCIF_PPI_MMIO 0
-#define SCIF_PPI_APER 1
- struct list_head ppi_list;
-};
-
-/*
- * struct scif_dev - SCIF remote device specific fields
- *
- * @node: Node id
- * @p2p: List of P2P mapping information
- * @qpairs: The node queue pair for exchanging control messages
- * @intr_wq: Workqueue for handling Node QP messages
- * @intr_wqname: Name of node QP workqueue for handling interrupts
- * @intr_bh: Used for submitting work to intr_wq
- * @lock: Lock used for synchronizing access to the scif device
- * @sdev: SCIF hardware device on the SCIF hardware bus
- * @db: doorbell the peer will trigger to generate an interrupt on self
- * @rdb: Doorbell to trigger on the peer to generate an interrupt on the peer
- * @cookie: Cookie received while registering the interrupt handler
- * @peer_add_work: Work for handling device_add for peer devices
- * @p2p_dwork: Delayed work to enable polling for P2P state
- * @qp_dwork: Delayed work for enabling polling for remote QP information
- * @p2p_retry: Number of times to retry polling of P2P state
- * @base_addr: P2P aperture bar base address
- * @mmio: The peer MMIO information used for P2P
- * @spdev: SCIF peer device on the SCIF peer bus
- * @node_remove_ack_pending: True if a node_remove_ack is pending
- * @exit_ack_pending: true if an exit_ack is pending
- * @disconn_wq: Used while waiting for a node remove response
- * @disconn_rescnt: Keeps track of number of node remove requests sent
- * @exit: Status of exit message
- * @qp_dma_addr: Queue pair DMA address passed to the peer
- * @dma_ch_idx: Round robin index for DMA channels
- * @signal_pool: DMA pool used for scheduling scif_fence_signal DMA's
- */
-struct scif_dev {
- u8 node;
- struct list_head p2p;
- struct scif_qp *qpairs;
- struct workqueue_struct *intr_wq;
- char intr_wqname[16];
- struct work_struct intr_bh;
- struct mutex lock;
- struct scif_hw_dev *sdev;
- int db;
- int rdb;
- struct mic_irq *cookie;
- struct work_struct peer_add_work;
- struct delayed_work p2p_dwork;
- struct delayed_work qp_dwork;
- int p2p_retry;
- dma_addr_t base_addr;
- struct mic_mw mmio;
- struct scif_peer_dev __rcu *spdev;
- bool node_remove_ack_pending;
- bool exit_ack_pending;
- wait_queue_head_t disconn_wq;
- atomic_t disconn_rescnt;
- enum scif_msg_state exit;
- dma_addr_t qp_dma_addr;
- int dma_ch_idx;
- struct dma_pool *signal_pool;
-};
-
-extern bool scif_reg_cache_enable;
-extern bool scif_ulimit_check;
-extern struct scif_info scif_info;
-extern struct idr scif_ports;
-extern struct bus_type scif_peer_bus;
-extern struct scif_dev *scif_dev;
-extern const struct file_operations scif_fops;
-extern const struct file_operations scif_anon_fops;
-
-/* Size of the RB for the Node QP */
-#define SCIF_NODE_QP_SIZE 0x10000
-
-#include "scif_nodeqp.h"
-#include "scif_rma.h"
-#include "scif_rma_list.h"
-
-/*
- * scifdev_self:
- * @dev: The remote SCIF Device
- *
- * Returns true if the SCIF Device passed is the self aka Loopback SCIF device.
- */
-static inline int scifdev_self(struct scif_dev *dev)
-{
- return dev->node == scif_info.nodeid;
-}
-
-static inline bool scif_is_mgmt_node(void)
-{
- return !scif_info.nodeid;
-}
-
-/*
- * scifdev_is_p2p:
- * @dev: The remote SCIF Device
- *
- * Returns true if the SCIF Device is a MIC Peer to Peer SCIF device.
- */
-static inline bool scifdev_is_p2p(struct scif_dev *dev)
-{
- if (scif_is_mgmt_node())
- return false;
- else
- return dev != &scif_dev[SCIF_MGMT_NODE] &&
- !scifdev_self(dev);
-}
-
-/*
- * _scifdev_alive:
- * @scifdev: The remote SCIF Device
- *
- * Returns true if the remote SCIF Device is running or sleeping for
- * this endpoint.
- */
-static inline int _scifdev_alive(struct scif_dev *scifdev)
-{
- struct scif_peer_dev *spdev;
-
- rcu_read_lock();
- spdev = rcu_dereference(scifdev->spdev);
- rcu_read_unlock();
- return !!spdev;
-}
-
-#include "scif_epd.h"
-
-void __init scif_init_debugfs(void);
-void scif_exit_debugfs(void);
-int scif_setup_intr_wq(struct scif_dev *scifdev);
-void scif_destroy_intr_wq(struct scif_dev *scifdev);
-void scif_cleanup_scifdev(struct scif_dev *dev);
-void scif_handle_remove_node(int node);
-void scif_disconnect_node(u32 node_id, bool mgmt_initiated);
-void scif_free_qp(struct scif_dev *dev);
-void scif_misc_handler(struct work_struct *work);
-void scif_stop(struct scif_dev *scifdev);
-irqreturn_t scif_intr_handler(int irq, void *data);
-#endif /* SCIF_MAIN_H */
diff --git a/drivers/misc/mic/scif/scif_map.h b/drivers/misc/mic/scif/scif_map.h
deleted file mode 100644
index 96b760819bfc..000000000000
--- a/drivers/misc/mic/scif/scif_map.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Intel SCIF driver.
- */
-#ifndef SCIF_MAP_H
-#define SCIF_MAP_H
-
-#include "../bus/scif_bus.h"
-
-static __always_inline void *
-scif_alloc_coherent(dma_addr_t *dma_handle,
- struct scif_dev *scifdev, size_t size,
- gfp_t gfp)
-{
- void *va;
-
- if (scifdev_self(scifdev)) {
- va = kmalloc(size, gfp);
- if (va)
- *dma_handle = virt_to_phys(va);
- } else {
- va = dma_alloc_coherent(&scifdev->sdev->dev,
- size, dma_handle, gfp);
- if (va && scifdev_is_p2p(scifdev))
- *dma_handle = *dma_handle + scifdev->base_addr;
- }
- return va;
-}
-
-static __always_inline void
-scif_free_coherent(void *va, dma_addr_t local,
- struct scif_dev *scifdev, size_t size)
-{
- if (scifdev_self(scifdev)) {
- kfree(va);
- } else {
- if (scifdev_is_p2p(scifdev) && local > scifdev->base_addr)
- local = local - scifdev->base_addr;
- dma_free_coherent(&scifdev->sdev->dev,
- size, va, local);
- }
-}
-
-static __always_inline int
-scif_map_single(dma_addr_t *dma_handle,
- void *local, struct scif_dev *scifdev, size_t size)
-{
- int err = 0;
-
- if (scifdev_self(scifdev)) {
- *dma_handle = virt_to_phys(local);
- } else {
- *dma_handle = dma_map_single(&scifdev->sdev->dev,
- local, size, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(&scifdev->sdev->dev, *dma_handle))
- err = -ENOMEM;
- else if (scifdev_is_p2p(scifdev))
- *dma_handle = *dma_handle + scifdev->base_addr;
- }
- if (err)
- *dma_handle = 0;
- return err;
-}
-
-static __always_inline void
-scif_unmap_single(dma_addr_t local, struct scif_dev *scifdev,
- size_t size)
-{
- if (!scifdev_self(scifdev)) {
- if (scifdev_is_p2p(scifdev))
- local = local - scifdev->base_addr;
- dma_unmap_single(&scifdev->sdev->dev, local,
- size, DMA_BIDIRECTIONAL);
- }
-}
-
-static __always_inline void *
-scif_ioremap(dma_addr_t phys, size_t size, struct scif_dev *scifdev)
-{
- void *out_virt;
- struct scif_hw_dev *sdev = scifdev->sdev;
-
- if (scifdev_self(scifdev))
- out_virt = phys_to_virt(phys);
- else
- out_virt = (void __force *)
- sdev->hw_ops->remap(sdev, phys, size);
- return out_virt;
-}
-
-static __always_inline void
-scif_iounmap(void *virt, size_t len, struct scif_dev *scifdev)
-{
- if (!scifdev_self(scifdev)) {
- struct scif_hw_dev *sdev = scifdev->sdev;
-
- sdev->hw_ops->unmap(sdev, (void __force __iomem *)virt);
- }
-}
-
-static __always_inline int
-scif_map_page(dma_addr_t *dma_handle, struct page *page,
- struct scif_dev *scifdev)
-{
- int err = 0;
-
- if (scifdev_self(scifdev)) {
- *dma_handle = page_to_phys(page);
- } else {
- struct scif_hw_dev *sdev = scifdev->sdev;
- *dma_handle = dma_map_page(&sdev->dev,
- page, 0x0, PAGE_SIZE,
- DMA_BIDIRECTIONAL);
- if (dma_mapping_error(&sdev->dev, *dma_handle))
- err = -ENOMEM;
- else if (scifdev_is_p2p(scifdev))
- *dma_handle = *dma_handle + scifdev->base_addr;
- }
- if (err)
- *dma_handle = 0;
- return err;
-}
-#endif /* SCIF_MAP_H */
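The helpers above are meant to be called in matched pairs: the peer-to-peer aperture offset added on the map side is subtracted again on the unmap side. A minimal sketch with a hypothetical scifdev and a page-sized bounce buffer (error handling elided):

	void *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	dma_addr_t da;
	int err = buf ? scif_map_single(&da, buf, scifdev, PAGE_SIZE) : -ENOMEM;

	if (!err) {
		/* ... program da into a DMA descriptor or publish it to the peer ... */
		scif_unmap_single(da, scifdev, PAGE_SIZE);
	}
	kfree(buf);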
diff --git a/drivers/misc/mic/scif/scif_mmap.c b/drivers/misc/mic/scif/scif_mmap.c
deleted file mode 100644
index a151d416f39c..000000000000
--- a/drivers/misc/mic/scif/scif_mmap.c
+++ /dev/null
@@ -1,690 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2015 Intel Corporation.
- *
- * Intel SCIF driver.
- */
-#include "scif_main.h"
-
-/*
- * struct scif_vma_info - Information about a remote memory mapping
- * created via scif_mmap(..)
- * @vma: VM area struct
- * @list: link to list of active vmas
- */
-struct scif_vma_info {
- struct vm_area_struct *vma;
- struct list_head list;
-};
-
-void scif_recv_munmap(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_rma_req req;
- struct scif_window *window = NULL;
- struct scif_window *recv_window =
- (struct scif_window *)msg->payload[0];
- struct scif_endpt *ep;
-
- ep = (struct scif_endpt *)recv_window->ep;
- req.out_window = &window;
- req.offset = recv_window->offset;
- req.prot = recv_window->prot;
- req.nr_bytes = recv_window->nr_pages << PAGE_SHIFT;
- req.type = SCIF_WINDOW_FULL;
- req.head = &ep->rma_info.reg_list;
- msg->payload[0] = ep->remote_ep;
-
- mutex_lock(&ep->rma_info.rma_lock);
- /* Does a valid window exist? */
- if (scif_query_window(&req)) {
- dev_err(&scifdev->sdev->dev,
- "%s %d -ENXIO\n", __func__, __LINE__);
- msg->uop = SCIF_UNREGISTER_ACK;
- goto error;
- }
-
- scif_put_window(window, window->nr_pages);
-
- if (!window->ref_count) {
- atomic_inc(&ep->rma_info.tw_refcount);
- ep->rma_info.async_list_del = 1;
- list_del_init(&window->list);
- scif_free_window_offset(ep, window, window->offset);
- }
-error:
- mutex_unlock(&ep->rma_info.rma_lock);
- if (window && !window->ref_count)
- scif_queue_for_cleanup(window, &scif_info.rma);
-}
-
-/*
- * Remove valid remote memory mappings created via scif_mmap(..) from the
- * process address space since the remote node is lost
- */
-static void __scif_zap_mmaps(struct scif_endpt *ep)
-{
- struct list_head *item;
- struct scif_vma_info *info;
- struct vm_area_struct *vma;
- unsigned long size;
-
- spin_lock(&ep->lock);
- list_for_each(item, &ep->rma_info.vma_list) {
- info = list_entry(item, struct scif_vma_info, list);
- vma = info->vma;
- size = vma->vm_end - vma->vm_start;
- zap_vma_ptes(vma, vma->vm_start, size);
- dev_dbg(scif_info.mdev.this_device,
- "%s ep %p zap vma %p size 0x%lx\n",
- __func__, ep, info->vma, size);
- }
- spin_unlock(&ep->lock);
-}
-
-/*
- * Traverse the list of endpoints for a particular remote node and
- * zap valid remote memory mappings since the remote node is lost
- */
-static void _scif_zap_mmaps(int node, struct list_head *head)
-{
- struct scif_endpt *ep;
- struct list_head *item;
-
- mutex_lock(&scif_info.connlock);
- list_for_each(item, head) {
- ep = list_entry(item, struct scif_endpt, list);
- if (ep->remote_dev->node == node)
- __scif_zap_mmaps(ep);
- }
- mutex_unlock(&scif_info.connlock);
-}
-
-/*
- * Wrapper for removing remote memory mappings for a particular node. This API
- * is called by peer nodes as part of handling a lost node.
- */
-void scif_zap_mmaps(int node)
-{
- _scif_zap_mmaps(node, &scif_info.connected);
- _scif_zap_mmaps(node, &scif_info.disconnected);
-}
-
-/*
- * This API is only called while handling a lost node:
- * a) Remote node is dead.
- * b) Remote memory mappings have been zapped
- * So we can traverse the remote_reg_list without any locks. Since
- * the window has not yet been unregistered we can drop the ref count
- * and queue it to the cleanup thread.
- */
-static void __scif_cleanup_rma_for_zombies(struct scif_endpt *ep)
-{
- struct list_head *pos, *tmp;
- struct scif_window *window;
-
- list_for_each_safe(pos, tmp, &ep->rma_info.remote_reg_list) {
- window = list_entry(pos, struct scif_window, list);
- if (window->ref_count)
- scif_put_window(window, window->nr_pages);
- else
- dev_err(scif_info.mdev.this_device,
- "%s %d unexpected\n",
- __func__, __LINE__);
- if (!window->ref_count) {
- atomic_inc(&ep->rma_info.tw_refcount);
- list_del_init(&window->list);
- scif_queue_for_cleanup(window, &scif_info.rma);
- }
- }
-}
-
-/* Cleanup remote registration lists for zombie endpoints */
-void scif_cleanup_rma_for_zombies(int node)
-{
- struct scif_endpt *ep;
- struct list_head *item;
-
- mutex_lock(&scif_info.eplock);
- list_for_each(item, &scif_info.zombie) {
- ep = list_entry(item, struct scif_endpt, list);
- if (ep->remote_dev && ep->remote_dev->node == node)
- __scif_cleanup_rma_for_zombies(ep);
- }
- mutex_unlock(&scif_info.eplock);
- flush_work(&scif_info.misc_work);
-}
-
-/* Insert the VMA into the per endpoint VMA list */
-static int scif_insert_vma(struct scif_endpt *ep, struct vm_area_struct *vma)
-{
- struct scif_vma_info *info;
- int err = 0;
-
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info) {
- err = -ENOMEM;
- goto done;
- }
- info->vma = vma;
- spin_lock(&ep->lock);
- list_add_tail(&info->list, &ep->rma_info.vma_list);
- spin_unlock(&ep->lock);
-done:
- return err;
-}
-
-/* Delete the VMA from the per endpoint VMA list */
-static void scif_delete_vma(struct scif_endpt *ep, struct vm_area_struct *vma)
-{
- struct list_head *item;
- struct scif_vma_info *info;
-
- spin_lock(&ep->lock);
- list_for_each(item, &ep->rma_info.vma_list) {
- info = list_entry(item, struct scif_vma_info, list);
- if (info->vma == vma) {
- list_del(&info->list);
- kfree(info);
- break;
- }
- }
- spin_unlock(&ep->lock);
-}
-
-static phys_addr_t scif_get_phys(phys_addr_t phys, struct scif_endpt *ep)
-{
- struct scif_dev *scifdev = (struct scif_dev *)ep->remote_dev;
- struct scif_hw_dev *sdev = scifdev->sdev;
- phys_addr_t out_phys, apt_base = 0;
-
- /*
- * If the DMA address is card relative then we need to add the
- * aperture base for mmap to work correctly
- */
- if (!scifdev_self(scifdev) && sdev->aper && sdev->card_rel_da)
- apt_base = sdev->aper->pa;
- out_phys = apt_base + phys;
- return out_phys;
-}
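As a worked example of the adjustment above, with hypothetical numbers: when sdev->card_rel_da is set and the aperture sits at sdev->aper->pa == 0x3900000000, a card-relative DMA address of 0x2000 yields out_phys == 0x3900002000, the host physical address that remap_pfn_range() needs; in the loopback (scifdev_self) case apt_base stays 0 and the address passes through unchanged.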
-
-int scif_get_pages(scif_epd_t epd, off_t offset, size_t len,
- struct scif_range **pages)
-{
- struct scif_endpt *ep = (struct scif_endpt *)epd;
- struct scif_rma_req req;
- struct scif_window *window = NULL;
- int nr_pages, err, i;
-
- dev_dbg(scif_info.mdev.this_device,
- "SCIFAPI get_pinned_pages: ep %p offset 0x%lx len 0x%lx\n",
- ep, offset, len);
- err = scif_verify_epd(ep);
- if (err)
- return err;
-
- if (!len || (offset < 0) ||
- (offset + len < offset) ||
- (ALIGN(offset, PAGE_SIZE) != offset) ||
- (ALIGN(len, PAGE_SIZE) != len))
- return -EINVAL;
-
- nr_pages = len >> PAGE_SHIFT;
-
- req.out_window = &window;
- req.offset = offset;
- req.prot = 0;
- req.nr_bytes = len;
- req.type = SCIF_WINDOW_SINGLE;
- req.head = &ep->rma_info.remote_reg_list;
-
- mutex_lock(&ep->rma_info.rma_lock);
- /* Does a valid window exist? */
- err = scif_query_window(&req);
- if (err) {
- dev_err(&ep->remote_dev->sdev->dev,
- "%s %d err %d\n", __func__, __LINE__, err);
- goto error;
- }
-
- /* Allocate scif_range */
- *pages = kzalloc(sizeof(**pages), GFP_KERNEL);
- if (!*pages) {
- err = -ENOMEM;
- goto error;
- }
-
- /* Allocate phys addr array */
- (*pages)->phys_addr = scif_zalloc(nr_pages * sizeof(dma_addr_t));
- if (!((*pages)->phys_addr)) {
- err = -ENOMEM;
- goto error;
- }
-
- if (scif_is_mgmt_node() && !scifdev_self(ep->remote_dev)) {
- /* Allocate virtual address array */
- (*pages)->va = scif_zalloc(nr_pages * sizeof(void *));
- if (!(*pages)->va) {
- err = -ENOMEM;
- goto error;
- }
- }
- /* Populate the values */
- (*pages)->cookie = window;
- (*pages)->nr_pages = nr_pages;
- (*pages)->prot_flags = window->prot;
-
- for (i = 0; i < nr_pages; i++) {
- (*pages)->phys_addr[i] =
- __scif_off_to_dma_addr(window, offset +
- (i * PAGE_SIZE));
- (*pages)->phys_addr[i] = scif_get_phys((*pages)->phys_addr[i],
- ep);
- if (scif_is_mgmt_node() && !scifdev_self(ep->remote_dev))
- (*pages)->va[i] =
- ep->remote_dev->sdev->aper->va +
- (*pages)->phys_addr[i] -
- ep->remote_dev->sdev->aper->pa;
- }
-
- scif_get_window(window, nr_pages);
-error:
- mutex_unlock(&ep->rma_info.rma_lock);
- if (err) {
- if (*pages) {
- scif_free((*pages)->phys_addr,
- nr_pages * sizeof(dma_addr_t));
- scif_free((*pages)->va,
- nr_pages * sizeof(void *));
- kfree(*pages);
- *pages = NULL;
- }
- dev_err(&ep->remote_dev->sdev->dev,
- "%s %d err %d\n", __func__, __LINE__, err);
- }
- return err;
-}
-EXPORT_SYMBOL_GPL(scif_get_pages);
-
-int scif_put_pages(struct scif_range *pages)
-{
- struct scif_endpt *ep;
- struct scif_window *window;
- struct scifmsg msg;
-
- if (!pages || !pages->cookie)
- return -EINVAL;
-
- window = pages->cookie;
-
- if (!window || window->magic != SCIFEP_MAGIC)
- return -EINVAL;
-
- ep = (struct scif_endpt *)window->ep;
- /*
- * If the state is SCIFEP_CONNECTED or SCIFEP_DISCONNECTED then the
- * callee should be allowed to release references to the pages,
- * else the endpoint was not connected in the first place,
- * hence the ENOTCONN.
- */
- if (ep->state != SCIFEP_CONNECTED && ep->state != SCIFEP_DISCONNECTED)
- return -ENOTCONN;
-
- mutex_lock(&ep->rma_info.rma_lock);
-
- scif_put_window(window, pages->nr_pages);
-
- /* Initiate window destruction if ref count is zero */
- if (!window->ref_count) {
- list_del(&window->list);
- mutex_unlock(&ep->rma_info.rma_lock);
- scif_drain_dma_intr(ep->remote_dev->sdev,
- ep->rma_info.dma_chan);
- /* Inform the peer about this window being destroyed. */
- msg.uop = SCIF_MUNMAP;
- msg.src = ep->port;
- msg.payload[0] = window->peer_window;
- /* No error handling for notification messages */
- scif_nodeqp_send(ep->remote_dev, &msg);
- /* Destroy this window from the peer's registered AS */
- scif_destroy_remote_window(window);
- } else {
- mutex_unlock(&ep->rma_info.rma_lock);
- }
-
- scif_free(pages->phys_addr, pages->nr_pages * sizeof(dma_addr_t));
- scif_free(pages->va, pages->nr_pages * sizeof(void *));
- kfree(pages);
- return 0;
-}
-EXPORT_SYMBOL_GPL(scif_put_pages);
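A minimal kernel-side sketch of how a client pairs the two exports above, with a hypothetical endpoint epd and a page-aligned offset/len (error handling elided):

	struct scif_range *pages;
	int i, err;

	err = scif_get_pages(epd, offset, len, &pages);
	if (!err) {
		for (i = 0; i < pages->nr_pages; i++)
			pr_debug("page %d at %pad\n", i, &pages->phys_addr[i]);
		err = scif_put_pages(pages);	/* drop the window references taken above */
	}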
-
-/*
- * scif_rma_list_mmap:
- *
- * Traverse the remote registration list starting from start_window:
- * 1) Create VtoP mappings via remap_pfn_range(..)
- * 2) Once step 1) completes successfully for the whole range, traverse
- * the windows again and bump the reference count.
- * RMA lock must be held.
- */
-static int scif_rma_list_mmap(struct scif_window *start_window, s64 offset,
- int nr_pages, struct vm_area_struct *vma)
-{
- s64 end_offset, loop_offset = offset;
- struct scif_window *window = start_window;
- int loop_nr_pages, nr_pages_left = nr_pages;
- struct scif_endpt *ep = (struct scif_endpt *)start_window->ep;
- struct list_head *head = &ep->rma_info.remote_reg_list;
- int i, err = 0;
- dma_addr_t phys_addr;
- struct scif_window_iter src_win_iter;
- size_t contig_bytes = 0;
-
- might_sleep();
- list_for_each_entry_from(window, head, list) {
- end_offset = window->offset +
- (window->nr_pages << PAGE_SHIFT);
- loop_nr_pages = min_t(int,
- (end_offset - loop_offset) >> PAGE_SHIFT,
- nr_pages_left);
- scif_init_window_iter(window, &src_win_iter);
- for (i = 0; i < loop_nr_pages; i++) {
- phys_addr = scif_off_to_dma_addr(window, loop_offset,
- &contig_bytes,
- &src_win_iter);
- phys_addr = scif_get_phys(phys_addr, ep);
- err = remap_pfn_range(vma,
- vma->vm_start +
- loop_offset - offset,
- phys_addr >> PAGE_SHIFT,
- PAGE_SIZE,
- vma->vm_page_prot);
- if (err)
- goto error;
- loop_offset += PAGE_SIZE;
- }
- nr_pages_left -= loop_nr_pages;
- if (!nr_pages_left)
- break;
- }
- /*
- * No more failures expected. Bump up the ref count for all
- * the windows. Taking references only in this second traversal
- * from start_window keeps error handling simple when
- * remap_pfn_range(..) fails partway across windows.
- */
- loop_offset = offset;
- nr_pages_left = nr_pages;
- window = start_window;
- head = &ep->rma_info.remote_reg_list;
- list_for_each_entry_from(window, head, list) {
- end_offset = window->offset +
- (window->nr_pages << PAGE_SHIFT);
- loop_nr_pages = min_t(int,
- (end_offset - loop_offset) >> PAGE_SHIFT,
- nr_pages_left);
- scif_get_window(window, loop_nr_pages);
- nr_pages_left -= loop_nr_pages;
- loop_offset += (loop_nr_pages << PAGE_SHIFT);
- if (!nr_pages_left)
- break;
- }
-error:
- if (err)
- dev_err(scif_info.mdev.this_device,
- "%s %d err %d\n", __func__, __LINE__, err);
- return err;
-}
-
-/*
- * scif_rma_list_munmap:
- *
- * Traverse the remote registration list starting from window:
- * 1) Decrement ref count.
- * 2) If the ref count drops to zero then send a SCIF_MUNMAP message to peer.
- * RMA lock must be held.
- */
-static void scif_rma_list_munmap(struct scif_window *start_window,
- s64 offset, int nr_pages)
-{
- struct scifmsg msg;
- s64 loop_offset = offset, end_offset;
- int loop_nr_pages, nr_pages_left = nr_pages;
- struct scif_endpt *ep = (struct scif_endpt *)start_window->ep;
- struct list_head *head = &ep->rma_info.remote_reg_list;
- struct scif_window *window = start_window, *_window;
-
- msg.uop = SCIF_MUNMAP;
- msg.src = ep->port;
- loop_offset = offset;
- nr_pages_left = nr_pages;
- list_for_each_entry_safe_from(window, _window, head, list) {
- end_offset = window->offset +
- (window->nr_pages << PAGE_SHIFT);
- loop_nr_pages = min_t(int,
- (end_offset - loop_offset) >> PAGE_SHIFT,
- nr_pages_left);
- scif_put_window(window, loop_nr_pages);
- if (!window->ref_count) {
- struct scif_dev *rdev = ep->remote_dev;
-
- scif_drain_dma_intr(rdev->sdev,
- ep->rma_info.dma_chan);
- /* Inform the peer about this munmap */
- msg.payload[0] = window->peer_window;
- /* No error handling for Notification messages. */
- scif_nodeqp_send(ep->remote_dev, &msg);
- list_del(&window->list);
- /* Destroy this window from the peer's registered AS */
- scif_destroy_remote_window(window);
- }
- nr_pages_left -= loop_nr_pages;
- loop_offset += (loop_nr_pages << PAGE_SHIFT);
- if (!nr_pages_left)
- break;
- }
-}
-
-/*
- * The private data field of each VMA used to mmap a remote window
- * points to an instance of struct vma_pvt
- */
-struct vma_pvt {
- struct scif_endpt *ep; /* End point for remote window */
- s64 offset; /* offset within remote window */
- bool valid_offset; /* offset is valid only if the original
- * mmap request was for a single page
- * else the offset within the vma is
- * the correct offset
- */
- struct kref ref;
-};
-
-static void vma_pvt_release(struct kref *ref)
-{
- struct vma_pvt *vmapvt = container_of(ref, struct vma_pvt, ref);
-
- kfree(vmapvt);
-}
-
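The vma_pvt lifetime above follows the standard kref pattern: kref_init() in
scif_mmap(), kref_get() in scif_vma_open(), kref_put() in scif_munmap(). A
minimal userspace sketch of the same discipline, using C11 atomics in place
of struct kref (all names below are illustrative, not part of the driver):

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for struct kref */
struct ref {
	atomic_int count;
};

static void ref_init(struct ref *r) { atomic_init(&r->count, 1); }
static void ref_get(struct ref *r) { atomic_fetch_add(&r->count, 1); }

/* Drop a reference; call release() when the count hits zero */
static void ref_put(struct ref *r, void (*release)(struct ref *))
{
	if (atomic_fetch_sub(&r->count, 1) == 1)
		release(r);
}

struct obj {
	struct ref ref;
	int payload;
};

static void obj_release(struct ref *r)
{
	/* container_of() in the kernel; manual offset math here */
	struct obj *o = (struct obj *)((char *)r - offsetof(struct obj, ref));

	printf("releasing obj with payload %d\n", o->payload);
	free(o);
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	o->payload = 42;
	ref_init(&o->ref);		/* like kref_init() in scif_mmap() */
	ref_get(&o->ref);		/* like kref_get() in scif_vma_open() */
	ref_put(&o->ref, obj_release);	/* like kref_put() in scif_munmap() */
	ref_put(&o->ref, obj_release);	/* final put frees the object */
	return 0;
}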
-/**
- * scif_vma_open - VMA open driver callback
- * @vma: VMM memory area.
- * The open method is called by the kernel to allow the subsystem implementing
- * the VMA to initialize the area. This method is invoked any time a new
- * reference to the VMA is made (when a process forks, for example).
- * The one exception happens when the VMA is first created by mmap;
- * in this case, the driver's mmap method is called instead.
- * This function is also invoked when an existing VMA is split by the kernel
- * due to a call to munmap on a subset of the VMA resulting in two VMAs.
- * The kernel invokes this function only on one of the two VMAs.
- */
-static void scif_vma_open(struct vm_area_struct *vma)
-{
- struct vma_pvt *vmapvt = vma->vm_private_data;
-
- dev_dbg(scif_info.mdev.this_device,
- "SCIFAPI vma open: vma_start 0x%lx vma_end 0x%lx\n",
- vma->vm_start, vma->vm_end);
- scif_insert_vma(vmapvt->ep, vma);
- kref_get(&vmapvt->ref);
-}
-
-/**
- * scif_munmap - VMA close driver callback.
- * @vma: VMM memory area.
- * When an area is destroyed, the kernel calls its close operation.
- * Note that there's no usage count associated with VMAs; the area
- * is opened and closed exactly once by each process that uses it.
- */
-static void scif_munmap(struct vm_area_struct *vma)
-{
- struct scif_endpt *ep;
- struct vma_pvt *vmapvt = vma->vm_private_data;
- int nr_pages = vma_pages(vma);
- s64 offset;
- struct scif_rma_req req;
- struct scif_window *window = NULL;
- int err;
-
- might_sleep();
- dev_dbg(scif_info.mdev.this_device,
- "SCIFAPI munmap: vma_start 0x%lx vma_end 0x%lx\n",
- vma->vm_start, vma->vm_end);
- ep = vmapvt->ep;
- offset = vmapvt->valid_offset ? vmapvt->offset :
- (vma->vm_pgoff) << PAGE_SHIFT;
- dev_dbg(scif_info.mdev.this_device,
- "SCIFAPI munmap: ep %p nr_pages 0x%x offset 0x%llx\n",
- ep, nr_pages, offset);
- req.out_window = &window;
- req.offset = offset;
- req.nr_bytes = vma->vm_end - vma->vm_start;
- req.prot = vma->vm_flags & (VM_READ | VM_WRITE);
- req.type = SCIF_WINDOW_PARTIAL;
- req.head = &ep->rma_info.remote_reg_list;
-
- mutex_lock(&ep->rma_info.rma_lock);
-
- err = scif_query_window(&req);
- if (err)
- dev_err(scif_info.mdev.this_device,
- "%s %d err %d\n", __func__, __LINE__, err);
- else
- scif_rma_list_munmap(window, offset, nr_pages);
-
- mutex_unlock(&ep->rma_info.rma_lock);
- /*
- * The kernel probably zeroes these out but we still want
- * to clean up our own mess just in case.
- */
- vma->vm_ops = NULL;
- vma->vm_private_data = NULL;
- kref_put(&vmapvt->ref, vma_pvt_release);
- scif_delete_vma(ep, vma);
-}
-
-static const struct vm_operations_struct scif_vm_ops = {
- .open = scif_vma_open,
- .close = scif_munmap,
-};
-
-/**
- * scif_mmap - Map pages in virtual address space to a remote window.
- * @vma: VMM memory area.
- * @epd: endpoint descriptor
- *
- * Return: Upon successful completion, scif_mmap() returns zero;
- * otherwise an appropriate error is returned as documented in scif.h
- */
-int scif_mmap(struct vm_area_struct *vma, scif_epd_t epd)
-{
- struct scif_rma_req req;
- struct scif_window *window = NULL;
- struct scif_endpt *ep = (struct scif_endpt *)epd;
- s64 start_offset = vma->vm_pgoff << PAGE_SHIFT;
- int nr_pages = vma_pages(vma);
- int err;
- struct vma_pvt *vmapvt;
-
- dev_dbg(scif_info.mdev.this_device,
- "SCIFAPI mmap: ep %p start_offset 0x%llx nr_pages 0x%x\n",
- ep, start_offset, nr_pages);
- err = scif_verify_epd(ep);
- if (err)
- return err;
-
- might_sleep();
-
- err = scif_insert_vma(ep, vma);
- if (err)
- return err;
-
- vmapvt = kzalloc(sizeof(*vmapvt), GFP_KERNEL);
- if (!vmapvt) {
- scif_delete_vma(ep, vma);
- return -ENOMEM;
- }
-
- vmapvt->ep = ep;
- kref_init(&vmapvt->ref);
-
- req.out_window = &window;
- req.offset = start_offset;
- req.nr_bytes = vma->vm_end - vma->vm_start;
- req.prot = vma->vm_flags & (VM_READ | VM_WRITE);
- req.type = SCIF_WINDOW_PARTIAL;
- req.head = &ep->rma_info.remote_reg_list;
-
- mutex_lock(&ep->rma_info.rma_lock);
- /* Does a valid window exist? */
- err = scif_query_window(&req);
- if (err) {
- dev_err(&ep->remote_dev->sdev->dev,
- "%s %d err %d\n", __func__, __LINE__, err);
- goto error_unlock;
- }
-
-	/* Keep the default page protection for loopback; write-combine otherwise */
- if (!scifdev_self(ep->remote_dev))
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-
- /*
- * VM_DONTCOPY - Do not copy this vma on fork
- * VM_DONTEXPAND - Cannot expand with mremap()
- * VM_RESERVED - Count as reserved_vm like IO
- * VM_PFNMAP - Page-ranges managed without "struct page"
- * VM_IO - Memory mapped I/O or similar
- *
- * We do not want to copy this VMA automatically on a fork(),
- * expand this VMA due to mremap() or swap out these pages since
- * the VMA is actually backed by physical pages in the remote
- * node's physical memory and not via a struct page.
- */
- vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
-
- if (!scifdev_self(ep->remote_dev))
- vma->vm_flags |= VM_IO | VM_PFNMAP;
-
- /* Map this range of windows */
- err = scif_rma_list_mmap(window, start_offset, nr_pages, vma);
- if (err) {
- dev_err(&ep->remote_dev->sdev->dev,
- "%s %d err %d\n", __func__, __LINE__, err);
- goto error_unlock;
- }
- /* Set up the driver call back */
- vma->vm_ops = &scif_vm_ops;
- vma->vm_private_data = vmapvt;
-error_unlock:
- mutex_unlock(&ep->rma_info.rma_lock);
- if (err) {
- kfree(vmapvt);
- dev_err(&ep->remote_dev->sdev->dev,
- "%s %d err %d\n", __func__, __LINE__, err);
- scif_delete_vma(ep, vma);
- }
- return err;
-}
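scif_mmap() above was designed to be called from a character device's mmap
file operation. A hedged kernel-style sketch of such a call site (the
function name below is hypothetical; the real call site lived in scif_fd.c):

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/scif.h>

/* Hypothetical char-device mmap fop forwarding to scif_mmap() */
static int my_scif_fdmmap(struct file *f, struct vm_area_struct *vma)
{
	/* The endpoint descriptor is stashed in the file on open() */
	scif_epd_t epd = f->private_data;

	/*
	 * scif_mmap() validates the endpoint, looks up the remote window
	 * backing vma->vm_pgoff and remaps it into the caller's VMA.
	 */
	return scif_mmap(vma, epd);
}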
diff --git a/drivers/misc/mic/scif/scif_nm.c b/drivers/misc/mic/scif/scif_nm.c
deleted file mode 100644
index c4d9422082b7..000000000000
--- a/drivers/misc/mic/scif/scif_nm.c
+++ /dev/null
@@ -1,229 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Intel SCIF driver.
- */
-#include "scif_peer_bus.h"
-
-#include "scif_main.h"
-#include "scif_map.h"
-
-/**
- * scif_invalidate_ep() - Set state for all connected endpoints
- * to disconnected and wake up all send/recv waitqueues
- *
- * @node: Node to invalidate
- */
-static void scif_invalidate_ep(int node)
-{
- struct scif_endpt *ep;
- struct list_head *pos, *tmpq;
-
- flush_work(&scif_info.conn_work);
- mutex_lock(&scif_info.connlock);
- list_for_each_safe(pos, tmpq, &scif_info.disconnected) {
- ep = list_entry(pos, struct scif_endpt, list);
- if (ep->remote_dev->node == node) {
- scif_unmap_all_windows(ep);
- spin_lock(&ep->lock);
- scif_cleanup_ep_qp(ep);
- spin_unlock(&ep->lock);
- }
- }
- list_for_each_safe(pos, tmpq, &scif_info.connected) {
- ep = list_entry(pos, struct scif_endpt, list);
- if (ep->remote_dev->node == node) {
- list_del(pos);
- spin_lock(&ep->lock);
- ep->state = SCIFEP_DISCONNECTED;
- list_add_tail(&ep->list, &scif_info.disconnected);
- scif_cleanup_ep_qp(ep);
- wake_up_interruptible(&ep->sendwq);
- wake_up_interruptible(&ep->recvwq);
- spin_unlock(&ep->lock);
- scif_unmap_all_windows(ep);
- }
- }
- mutex_unlock(&scif_info.connlock);
-}
-
-void scif_free_qp(struct scif_dev *scifdev)
-{
- struct scif_qp *qp = scifdev->qpairs;
-
- if (!qp)
- return;
- scif_unmap_single(qp->local_buf, scifdev, qp->inbound_q.size);
- kfree(qp->inbound_q.rb_base);
- scif_unmap_single(qp->local_qp, scifdev, sizeof(struct scif_qp));
- kfree(scifdev->qpairs);
- scifdev->qpairs = NULL;
-}
-
-static void scif_cleanup_qp(struct scif_dev *dev)
-{
- struct scif_qp *qp = &dev->qpairs[0];
-
- if (!qp)
- return;
- scif_iounmap((void *)qp->remote_qp, sizeof(struct scif_qp), dev);
- scif_iounmap((void *)qp->outbound_q.rb_base,
- sizeof(struct scif_qp), dev);
- qp->remote_qp = NULL;
- qp->local_write = 0;
- qp->inbound_q.current_write_offset = 0;
- qp->inbound_q.current_read_offset = 0;
- if (scifdev_is_p2p(dev))
- scif_free_qp(dev);
-}
-
-void scif_send_acks(struct scif_dev *dev)
-{
- struct scifmsg msg;
-
- if (dev->node_remove_ack_pending) {
- msg.uop = SCIF_NODE_REMOVE_ACK;
- msg.src.node = scif_info.nodeid;
- msg.dst.node = SCIF_MGMT_NODE;
- msg.payload[0] = dev->node;
- scif_nodeqp_send(&scif_dev[SCIF_MGMT_NODE], &msg);
- dev->node_remove_ack_pending = false;
- }
- if (dev->exit_ack_pending) {
- msg.uop = SCIF_EXIT_ACK;
- msg.src.node = scif_info.nodeid;
- msg.dst.node = dev->node;
- scif_nodeqp_send(dev, &msg);
- dev->exit_ack_pending = false;
- }
-}
-
-/**
- * scif_cleanup_scifdev - Uninitialize SCIF data structures for remote
- * SCIF device.
- * @dev: Remote SCIF device.
- */
-void scif_cleanup_scifdev(struct scif_dev *dev)
-{
- struct scif_hw_dev *sdev = dev->sdev;
-
- if (!dev->sdev)
- return;
- if (scifdev_is_p2p(dev)) {
- if (dev->cookie) {
- sdev->hw_ops->free_irq(sdev, dev->cookie, dev);
- dev->cookie = NULL;
- }
- scif_destroy_intr_wq(dev);
- }
- flush_work(&scif_info.misc_work);
- scif_destroy_p2p(dev);
- scif_invalidate_ep(dev->node);
- scif_zap_mmaps(dev->node);
- scif_cleanup_rma_for_zombies(dev->node);
- flush_work(&scif_info.misc_work);
- scif_send_acks(dev);
- if (!dev->node && scif_info.card_initiated_exit) {
- /*
- * Send an SCIF_EXIT message which is the last message from MIC
- * to the Host and wait for a SCIF_EXIT_ACK
- */
- scif_send_exit(dev);
- scif_info.card_initiated_exit = false;
- }
- scif_cleanup_qp(dev);
-}
-
-/**
- * scif_handle_remove_node - Handle removal of a SCIF node
- *
- * @node: Node to remove
- */
-void scif_handle_remove_node(int node)
-{
- struct scif_dev *scifdev = &scif_dev[node];
-
- if (scif_peer_unregister_device(scifdev))
- scif_send_acks(scifdev);
-}
-
-static int scif_send_rmnode_msg(int node, int remove_node)
-{
- struct scifmsg notif_msg;
- struct scif_dev *dev = &scif_dev[node];
-
- notif_msg.uop = SCIF_NODE_REMOVE;
- notif_msg.src.node = scif_info.nodeid;
- notif_msg.dst.node = node;
- notif_msg.payload[0] = remove_node;
- return scif_nodeqp_send(dev, &notif_msg);
-}
-
-/**
- * scif_disconnect_node
- *
- * @node_id: source node id [in]
- * @mgmt_initiated: Disconnection initiated from the mgmt node
- *
- * Disconnect a node from the scif network.
- */
-void scif_disconnect_node(u32 node_id, bool mgmt_initiated)
-{
- int ret;
- int msg_cnt = 0;
- u32 i = 0;
- struct scif_dev *scifdev = &scif_dev[node_id];
-
- if (!node_id)
- return;
-
- atomic_set(&scifdev->disconn_rescnt, 0);
-
- /* Destroy p2p network */
- for (i = 1; i <= scif_info.maxid; i++) {
- if (i == node_id)
- continue;
- ret = scif_send_rmnode_msg(i, node_id);
- if (!ret)
- msg_cnt++;
- }
- /* Wait for the remote nodes to respond with SCIF_NODE_REMOVE_ACK */
- ret = wait_event_timeout(scifdev->disconn_wq,
- (atomic_read(&scifdev->disconn_rescnt)
- == msg_cnt), SCIF_NODE_ALIVE_TIMEOUT);
- /* Tell the card to clean up */
- if (mgmt_initiated && _scifdev_alive(scifdev))
- /*
- * Send an SCIF_EXIT message which is the last message from Host
- * to the MIC and wait for a SCIF_EXIT_ACK
- */
- scif_send_exit(scifdev);
- atomic_set(&scifdev->disconn_rescnt, 0);
- /* Tell the mgmt node to clean up */
- ret = scif_send_rmnode_msg(SCIF_MGMT_NODE, node_id);
- if (!ret)
- /* Wait for mgmt node to respond with SCIF_NODE_REMOVE_ACK */
- wait_event_timeout(scifdev->disconn_wq,
- (atomic_read(&scifdev->disconn_rescnt) == 1),
- SCIF_NODE_ALIVE_TIMEOUT);
-}
-
-void scif_get_node_info(void)
-{
- struct scifmsg msg;
- DECLARE_COMPLETION_ONSTACK(node_info);
-
- msg.uop = SCIF_GET_NODE_INFO;
- msg.src.node = scif_info.nodeid;
- msg.dst.node = SCIF_MGMT_NODE;
- msg.payload[3] = (u64)&node_info;
-
- if ((scif_nodeqp_send(&scif_dev[SCIF_MGMT_NODE], &msg)))
- return;
-
- /* Wait for a response with SCIF_GET_NODE_INFO */
- wait_for_completion(&node_info);
-}
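scif_get_node_info() above parks a pointer to an on-stack completion in the
message payload so the response handler can wake the exact waiter. A hedged
userspace analogue of that request/response rendezvous, with a condition
variable standing in for struct completion (all names below are illustrative):

#include <pthread.h>
#include <stdio.h>

/* Stand-in for DECLARE_COMPLETION_ONSTACK / wait_for_completion */
struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
};

struct msg {
	int payload;			/* result slot, e.g. maxid */
	struct completion *waiter;	/* like msg.payload[3] above */
};

static void *responder(void *arg)
{
	struct msg *m = arg;

	m->payload = 2;			/* pretend mgmt node answer */
	pthread_mutex_lock(&m->waiter->lock);
	m->waiter->done = 1;
	pthread_cond_broadcast(&m->waiter->cond);	/* complete_all() */
	pthread_mutex_unlock(&m->waiter->lock);
	return NULL;
}

int main(void)
{
	struct completion c = { PTHREAD_MUTEX_INITIALIZER,
				PTHREAD_COND_INITIALIZER, 0 };
	struct msg m = { .payload = 0, .waiter = &c };
	pthread_t t;

	pthread_create(&t, NULL, responder, &m);
	pthread_mutex_lock(&c.lock);		/* wait_for_completion() */
	while (!c.done)
		pthread_cond_wait(&c.cond, &c.lock);
	pthread_mutex_unlock(&c.lock);
	printf("maxid = %d\n", m.payload);
	pthread_join(t, NULL);
	return 0;
}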
diff --git a/drivers/misc/mic/scif/scif_nodeqp.c b/drivers/misc/mic/scif/scif_nodeqp.c
deleted file mode 100644
index 384ce08fa98a..000000000000
--- a/drivers/misc/mic/scif/scif_nodeqp.c
+++ /dev/null
@@ -1,1349 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Intel SCIF driver.
- */
-#include "../bus/scif_bus.h"
-#include "scif_peer_bus.h"
-#include "scif_main.h"
-#include "scif_nodeqp.h"
-#include "scif_map.h"
-
-/*
- ************************************************************************
- * SCIF node Queue Pair (QP) setup flow:
- *
- * 1) SCIF driver gets probed with a scif_hw_dev via the scif_hw_bus
- * 2) scif_setup_qp(..) allocates the local qp and calls
- * scif_setup_qp_connect(..) which allocates and maps the local
- * buffer for the inbound QP
- * 3) The local node updates the device page with the DMA address of the QP
- * 4) A delayed work is scheduled (qp_dwork) which periodically reads if
- * the peer node has updated its QP DMA address
- * 5) Once a valid non zero address is found in the QP DMA address field
- * in the device page, the local node maps the remote node's QP,
- * updates its outbound QP and sends a SCIF_INIT message to the peer
- * 6) The SCIF_INIT message is received by the peer node QP interrupt bottom
- * half handler by calling scif_init(..)
- * 7) scif_init(..) registers a new SCIF peer node by calling
- * scif_peer_register_device(..) which signifies the addition of a new
- * SCIF node
- * 8) On the mgmt node, P2P network setup/teardown is initiated if all the
- * remote nodes are online via scif_p2p_setup(..)
- * 9) For P2P setup, the host maps the remote nodes' aperture and memory
- * bars and sends a SCIF_NODE_ADD message to both nodes
- * 10) As part of scif_node_add(..), both nodes set up their local inbound
- * QPs and send a SCIF_NODE_ADD_ACK to the mgmt node
- * 11) As part of scif_node_add_ack(..) the mgmt node forwards the
- * SCIF_NODE_ADD_ACK to the remote nodes
- * 12) As part of scif_node_add_ack(..) the remote nodes update their
- * outbound QPs, make sure they can access memory on the remote node
- * and then add a new SCIF peer node by calling
- * scif_peer_register_device(..) which signifies the addition of a new
- * SCIF node.
- * 13) The SCIF network is now established across all nodes.
- *
- ************************************************************************
- * SCIF node QP teardown flow (initiated by non mgmt node):
- *
- * 1) SCIF driver gets a remove callback with a scif_hw_dev via the scif_hw_bus
- * 2) The device page QP DMA address field is updated with 0x0
- * 3) A non mgmt node now cleans up all local data structures and sends a
- * SCIF_EXIT message to the peer and waits for a SCIF_EXIT_ACK
- * 4) As part of scif_exit(..) handling scif_disconnect_node(..) is called
- * 5) scif_disconnect_node(..) sends a SCIF_NODE_REMOVE message to all the
- * peers and waits for a SCIF_NODE_REMOVE_ACK
- * 6) As part of scif_node_remove(..) a remote node unregisters the peer
- * node from the SCIF network and sends a SCIF_NODE_REMOVE_ACK
- * 7) When the mgmt node has received all the SCIF_NODE_REMOVE_ACKs
- * it sends itself a node remove message whose handling cleans up local
- * data structures and unregisters the peer node from the SCIF network
- * 8) The mgmt node sends a SCIF_EXIT_ACK
- * 9) Upon receipt of the SCIF_EXIT_ACK the node initiating the teardown
- * completes the SCIF remove routine
- * 10) The SCIF network is now torn down for the node initiating the
- * teardown sequence
- *
- ************************************************************************
- * SCIF node QP teardown flow (initiated by mgmt node):
- *
- * 1) SCIF driver gets a remove callback with a scif_hw_dev via the scif_hw_bus
- * 2) The device page QP DMA address field is updated with 0x0
- * 3) The mgmt node calls scif_disconnect_node(..)
- * 4) scif_disconnect_node(..) sends a SCIF_NODE_REMOVE message to all the peers
- * and waits for a SCIF_NODE_REMOVE_ACK
- * 5) As part of scif_node_remove(..) a remote node unregisters the peer
- * node from the SCIF network and sends a SCIF_NODE_REMOVE_ACK
- * 6) When the mgmt node has received all the SCIF_NODE_REMOVE_ACKs
- * it unregisters the peer node from the SCIF network
- * 7) The mgmt node sends a SCIF_EXIT message and waits for a SCIF_EXIT_ACK.
- * 8) A non mgmt node upon receipt of a SCIF_EXIT message calls scif_stop(..)
- * which would clean up local data structures for all SCIF nodes and
- * then send a SCIF_EXIT_ACK back to the mgmt node
- * 9) Upon receipt of the SCIF_EXIT_ACK the mgmt node sends itself a node
- * remove message whose handling cleans up local data structures and
- * destroys any P2P mappings.
- * 10) The SCIF hardware device for which a remove callback was received is now
- * disconnected from the SCIF network.
- */
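Steps 4) and 5) of the setup flow are driven by a delayed work item that
polls the device page for the peer's QP DMA address. A hedged sketch of that
polling shape (read_peer_qp_addr() is a hypothetical helper; the real work
item lived in scif_main.c):

/* Illustrative polling shape for steps 4) and 5) above */
static void my_qp_dwork_handler(struct work_struct *work)
{
	struct scif_dev *scifdev = container_of(work, struct scif_dev,
						qp_dwork.work);
	u64 peer_qp_dma = read_peer_qp_addr(scifdev);	/* hypothetical */

	if (!peer_qp_dma) {
		/* Peer not ready yet; poll again later */
		schedule_delayed_work(&scifdev->qp_dwork,
				      msecs_to_jiffies(1000));
		return;
	}
	/* Step 5: map the peer QP and send SCIF_INIT */
	scif_qp_response(peer_qp_dma, scifdev);
}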
-/*
- * Initializes "local" data structures for the QP. Allocates the QP
- * ring buffer (rb) and initializes the "inbound" queue.
- */
-int scif_setup_qp_connect(struct scif_qp *qp, dma_addr_t *qp_offset,
- int local_size, struct scif_dev *scifdev)
-{
- void *local_q = qp->inbound_q.rb_base;
- int err = 0;
- u32 tmp_rd = 0;
-
- spin_lock_init(&qp->send_lock);
- spin_lock_init(&qp->recv_lock);
-
- /* Allocate rb only if not already allocated */
- if (!local_q) {
- local_q = kzalloc(local_size, GFP_KERNEL);
- if (!local_q) {
- err = -ENOMEM;
- return err;
- }
- }
-
- err = scif_map_single(&qp->local_buf, local_q, scifdev, local_size);
- if (err)
- goto kfree;
- /*
- * To setup the inbound_q, the buffer lives locally, the read pointer
- * is remote and the write pointer is local.
- */
- scif_rb_init(&qp->inbound_q,
- &tmp_rd,
- &qp->local_write,
- local_q, get_count_order(local_size));
- /*
- * The read pointer is NULL initially and it is unsafe to use the ring
-	 * buffer until this changes!
- */
- qp->inbound_q.read_ptr = NULL;
- err = scif_map_single(qp_offset, qp,
- scifdev, sizeof(struct scif_qp));
- if (err)
- goto unmap;
- qp->local_qp = *qp_offset;
- return err;
-unmap:
- scif_unmap_single(qp->local_buf, scifdev, local_size);
- qp->local_buf = 0;
-kfree:
- kfree(local_q);
- return err;
-}
-
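The ownership rule encoded in the scif_rb_init() calls above and below (each
side only ever advances the index it owns: the write index at the producer,
the read index at the consumer) is the classic single-producer/single-consumer
ring discipline. A minimal self-contained sketch of that discipline, without
the cross-PCIe mirroring (power-of-two size assumed; names are illustrative):

#include <stdint.h>
#include <stdio.h>

#define RB_SIZE 8			/* must be a power of two */

struct ring {
	uint32_t read;			/* advanced only by the consumer */
	uint32_t write;			/* advanced only by the producer */
	int buf[RB_SIZE];
};

/* Producer: reads 'read', writes 'write' */
static int rb_put(struct ring *rb, int v)
{
	if (rb->write - rb->read == RB_SIZE)
		return -1;		/* full */
	rb->buf[rb->write & (RB_SIZE - 1)] = v;
	rb->write++;			/* like scif_rb_commit() */
	return 0;
}

/* Consumer: reads 'write', writes 'read' */
static int rb_get(struct ring *rb, int *v)
{
	if (rb->read == rb->write)
		return -1;		/* empty */
	*v = rb->buf[rb->read & (RB_SIZE - 1)];
	rb->read++;			/* like scif_rb_update_read_ptr() */
	return 0;
}

int main(void)
{
	struct ring rb = { 0 };
	int v;

	rb_put(&rb, 7);
	rb_put(&rb, 9);
	while (!rb_get(&rb, &v))
		printf("%d\n", v);
	return 0;
}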
-/* Called when the other side has already done its allocation */
-int scif_setup_qp_accept(struct scif_qp *qp, dma_addr_t *qp_offset,
- dma_addr_t phys, int local_size,
- struct scif_dev *scifdev)
-{
- void *local_q;
- void *remote_q;
- struct scif_qp *remote_qp;
- int remote_size;
- int err = 0;
-
- spin_lock_init(&qp->send_lock);
- spin_lock_init(&qp->recv_lock);
- /* Start by figuring out where we need to point */
- remote_qp = scif_ioremap(phys, sizeof(struct scif_qp), scifdev);
- if (!remote_qp)
- return -EIO;
- qp->remote_qp = remote_qp;
- if (qp->remote_qp->magic != SCIFEP_MAGIC) {
- err = -EIO;
- goto iounmap;
- }
- qp->remote_buf = remote_qp->local_buf;
- remote_size = qp->remote_qp->inbound_q.size;
- remote_q = scif_ioremap(qp->remote_buf, remote_size, scifdev);
- if (!remote_q) {
- err = -EIO;
- goto iounmap;
- }
- qp->remote_qp->local_write = 0;
- /*
- * To setup the outbound_q, the buffer lives in remote memory,
- * the read pointer is local, the write pointer is remote
- */
- scif_rb_init(&qp->outbound_q,
- &qp->local_read,
- &qp->remote_qp->local_write,
- remote_q,
- get_count_order(remote_size));
- local_q = kzalloc(local_size, GFP_KERNEL);
- if (!local_q) {
- err = -ENOMEM;
- goto iounmap_1;
- }
- err = scif_map_single(&qp->local_buf, local_q, scifdev, local_size);
- if (err)
- goto kfree;
- qp->remote_qp->local_read = 0;
- /*
- * To setup the inbound_q, the buffer lives locally, the read pointer
- * is remote and the write pointer is local
- */
- scif_rb_init(&qp->inbound_q,
- &qp->remote_qp->local_read,
- &qp->local_write,
- local_q, get_count_order(local_size));
- err = scif_map_single(qp_offset, qp, scifdev,
- sizeof(struct scif_qp));
- if (err)
- goto unmap;
- qp->local_qp = *qp_offset;
- return err;
-unmap:
- scif_unmap_single(qp->local_buf, scifdev, local_size);
- qp->local_buf = 0;
-kfree:
- kfree(local_q);
-iounmap_1:
- scif_iounmap(remote_q, remote_size, scifdev);
- qp->outbound_q.rb_base = NULL;
-iounmap:
- scif_iounmap(qp->remote_qp, sizeof(struct scif_qp), scifdev);
- qp->remote_qp = NULL;
- return err;
-}
-
-int scif_setup_qp_connect_response(struct scif_dev *scifdev,
- struct scif_qp *qp, u64 payload)
-{
- int err = 0;
- void *r_buf;
- int remote_size;
- phys_addr_t tmp_phys;
-
- qp->remote_qp = scif_ioremap(payload, sizeof(struct scif_qp), scifdev);
-
- if (!qp->remote_qp) {
- err = -ENOMEM;
- goto error;
- }
-
- if (qp->remote_qp->magic != SCIFEP_MAGIC) {
- dev_err(&scifdev->sdev->dev,
- "SCIFEP_MAGIC mismatch between self %d remote %d\n",
- scif_dev[scif_info.nodeid].node, scifdev->node);
- err = -ENODEV;
- goto error;
- }
-
- tmp_phys = qp->remote_qp->local_buf;
- remote_size = qp->remote_qp->inbound_q.size;
- r_buf = scif_ioremap(tmp_phys, remote_size, scifdev);
-
- if (!r_buf)
- return -EIO;
-
- qp->local_read = 0;
- scif_rb_init(&qp->outbound_q,
- &qp->local_read,
- &qp->remote_qp->local_write,
- r_buf,
- get_count_order(remote_size));
- /*
- * Because the node QP may already be processing an INIT message, set
- * the read pointer so the cached read offset isn't lost
- */
- qp->remote_qp->local_read = qp->inbound_q.current_read_offset;
- /*
-	 * Re-initialize the inbound_q now that we know where the
- * inbound_read really is.
- */
- scif_rb_init(&qp->inbound_q,
- &qp->remote_qp->local_read,
- &qp->local_write,
- qp->inbound_q.rb_base,
- get_count_order(qp->inbound_q.size));
-error:
- return err;
-}
-
-static __always_inline void
-scif_send_msg_intr(struct scif_dev *scifdev)
-{
- struct scif_hw_dev *sdev = scifdev->sdev;
-
- if (scifdev_is_p2p(scifdev))
- sdev->hw_ops->send_p2p_intr(sdev, scifdev->rdb, &scifdev->mmio);
- else
- sdev->hw_ops->send_intr(sdev, scifdev->rdb);
-}
-
-int scif_qp_response(phys_addr_t phys, struct scif_dev *scifdev)
-{
- int err = 0;
- struct scifmsg msg;
-
- err = scif_setup_qp_connect_response(scifdev, scifdev->qpairs, phys);
- if (!err) {
- /*
- * Now that everything is setup and mapped, we're ready
- * to tell the peer about our queue's location
- */
- msg.uop = SCIF_INIT;
- msg.dst.node = scifdev->node;
- err = scif_nodeqp_send(scifdev, &msg);
- }
- return err;
-}
-
-void scif_send_exit(struct scif_dev *scifdev)
-{
- struct scifmsg msg;
- int ret;
-
- scifdev->exit = OP_IN_PROGRESS;
- msg.uop = SCIF_EXIT;
- msg.src.node = scif_info.nodeid;
- msg.dst.node = scifdev->node;
- ret = scif_nodeqp_send(scifdev, &msg);
- if (ret)
- goto done;
- /* Wait for a SCIF_EXIT_ACK message */
- wait_event_timeout(scif_info.exitwq, scifdev->exit == OP_COMPLETED,
- SCIF_NODE_ALIVE_TIMEOUT);
-done:
- scifdev->exit = OP_IDLE;
-}
-
-int scif_setup_qp(struct scif_dev *scifdev)
-{
- int err = 0;
- int local_size;
- struct scif_qp *qp;
-
- local_size = SCIF_NODE_QP_SIZE;
-
- qp = kzalloc(sizeof(*qp), GFP_KERNEL);
- if (!qp) {
- err = -ENOMEM;
- return err;
- }
- qp->magic = SCIFEP_MAGIC;
- scifdev->qpairs = qp;
- err = scif_setup_qp_connect(qp, &scifdev->qp_dma_addr,
- local_size, scifdev);
- if (err)
- goto free_qp;
- /*
-	 * We're as set up as we can be: the inbound_q is ready, but there is
-	 * no usable outbound_q yet. When we get a message, the read_ptr will
-	 * be updated and we will pull the message.
- */
- return err;
-free_qp:
- kfree(scifdev->qpairs);
- scifdev->qpairs = NULL;
- return err;
-}
-
-static void scif_p2p_freesg(struct scatterlist *sg)
-{
- kfree(sg);
-}
-
-static struct scatterlist *
-scif_p2p_setsg(phys_addr_t pa, int page_size, int page_cnt)
-{
- struct scatterlist *sg;
- struct page *page;
- int i;
-
- sg = kmalloc_array(page_cnt, sizeof(struct scatterlist), GFP_KERNEL);
- if (!sg)
- return NULL;
- sg_init_table(sg, page_cnt);
- for (i = 0; i < page_cnt; i++) {
- page = pfn_to_page(pa >> PAGE_SHIFT);
- sg_set_page(&sg[i], page, page_size, 0);
- pa += page_size;
- }
- return sg;
-}
-
-/* Init p2p mappings required to access peerdev from scifdev */
-static struct scif_p2p_info *
-scif_init_p2p_info(struct scif_dev *scifdev, struct scif_dev *peerdev)
-{
- struct scif_p2p_info *p2p;
- int num_mmio_pages, num_aper_pages, sg_page_shift, err, num_aper_chunks;
- struct scif_hw_dev *psdev = peerdev->sdev;
- struct scif_hw_dev *sdev = scifdev->sdev;
-
- num_mmio_pages = psdev->mmio->len >> PAGE_SHIFT;
- num_aper_pages = psdev->aper->len >> PAGE_SHIFT;
-
- p2p = kzalloc(sizeof(*p2p), GFP_KERNEL);
- if (!p2p)
- return NULL;
- p2p->ppi_sg[SCIF_PPI_MMIO] = scif_p2p_setsg(psdev->mmio->pa,
- PAGE_SIZE, num_mmio_pages);
- if (!p2p->ppi_sg[SCIF_PPI_MMIO])
- goto free_p2p;
- p2p->sg_nentries[SCIF_PPI_MMIO] = num_mmio_pages;
- sg_page_shift = get_order(min(psdev->aper->len, (u64)(1 << 30)));
- num_aper_chunks = num_aper_pages >> (sg_page_shift - PAGE_SHIFT);
-	p2p->ppi_sg[SCIF_PPI_APER] = scif_p2p_setsg(psdev->aper->pa,
-						    1 << sg_page_shift,
-						    num_aper_chunks);
-	if (!p2p->ppi_sg[SCIF_PPI_APER])
-		goto scif_p2p_free;
-	p2p->sg_nentries[SCIF_PPI_APER] = num_aper_chunks;
- err = dma_map_sg(&sdev->dev, p2p->ppi_sg[SCIF_PPI_MMIO],
- num_mmio_pages, PCI_DMA_BIDIRECTIONAL);
- if (err != num_mmio_pages)
- goto scif_p2p_free;
- err = dma_map_sg(&sdev->dev, p2p->ppi_sg[SCIF_PPI_APER],
- num_aper_chunks, PCI_DMA_BIDIRECTIONAL);
- if (err != num_aper_chunks)
- goto dma_unmap;
- p2p->ppi_da[SCIF_PPI_MMIO] = sg_dma_address(p2p->ppi_sg[SCIF_PPI_MMIO]);
- p2p->ppi_da[SCIF_PPI_APER] = sg_dma_address(p2p->ppi_sg[SCIF_PPI_APER]);
- p2p->ppi_len[SCIF_PPI_MMIO] = num_mmio_pages;
- p2p->ppi_len[SCIF_PPI_APER] = num_aper_pages;
- p2p->ppi_peer_id = peerdev->node;
- return p2p;
-dma_unmap:
- dma_unmap_sg(&sdev->dev, p2p->ppi_sg[SCIF_PPI_MMIO],
- p2p->sg_nentries[SCIF_PPI_MMIO], DMA_BIDIRECTIONAL);
-scif_p2p_free:
- scif_p2p_freesg(p2p->ppi_sg[SCIF_PPI_MMIO]);
- scif_p2p_freesg(p2p->ppi_sg[SCIF_PPI_APER]);
-free_p2p:
- kfree(p2p);
- return NULL;
-}
-
-/* Uninitialize and release resources from a p2p mapping */
-static void scif_deinit_p2p_info(struct scif_dev *scifdev,
- struct scif_p2p_info *p2p)
-{
- struct scif_hw_dev *sdev = scifdev->sdev;
-
- dma_unmap_sg(&sdev->dev, p2p->ppi_sg[SCIF_PPI_MMIO],
- p2p->sg_nentries[SCIF_PPI_MMIO], DMA_BIDIRECTIONAL);
- dma_unmap_sg(&sdev->dev, p2p->ppi_sg[SCIF_PPI_APER],
- p2p->sg_nentries[SCIF_PPI_APER], DMA_BIDIRECTIONAL);
- scif_p2p_freesg(p2p->ppi_sg[SCIF_PPI_MMIO]);
- scif_p2p_freesg(p2p->ppi_sg[SCIF_PPI_APER]);
- kfree(p2p);
-}
-
-/**
- * scif_node_connect: Set up a P2P connection between two SCIF nodes
- * @scifdev: SCIF device
- * @dst: Destination node
- *
- * Connect the src and dst node by setting up the p2p connection
- * between them. Management node here acts like a proxy.
- */
-static void scif_node_connect(struct scif_dev *scifdev, int dst)
-{
- struct scif_dev *dev_j = scifdev;
- struct scif_dev *dev_i = NULL;
- struct scif_p2p_info *p2p_ij = NULL; /* bus addr for j from i */
- struct scif_p2p_info *p2p_ji = NULL; /* bus addr for i from j */
- struct scif_p2p_info *p2p;
- struct list_head *pos, *tmp;
- struct scifmsg msg;
- int err;
- u64 tmppayload;
-
- if (dst < 1 || dst > scif_info.maxid)
- return;
-
- dev_i = &scif_dev[dst];
-
- if (!_scifdev_alive(dev_i))
- return;
- /*
-	 * If the p2p connection is already set up or in the process of setting
- * up then just ignore this request. The requested node will get
- * informed by SCIF_NODE_ADD_ACK or SCIF_NODE_ADD_NACK
- */
- if (!list_empty(&dev_i->p2p)) {
- list_for_each_safe(pos, tmp, &dev_i->p2p) {
- p2p = list_entry(pos, struct scif_p2p_info, ppi_list);
- if (p2p->ppi_peer_id == dev_j->node)
- return;
- }
- }
- p2p_ij = scif_init_p2p_info(dev_i, dev_j);
- if (!p2p_ij)
- return;
- p2p_ji = scif_init_p2p_info(dev_j, dev_i);
- if (!p2p_ji) {
- scif_deinit_p2p_info(dev_i, p2p_ij);
- return;
- }
- list_add_tail(&p2p_ij->ppi_list, &dev_i->p2p);
- list_add_tail(&p2p_ji->ppi_list, &dev_j->p2p);
-
- /*
- * Send a SCIF_NODE_ADD to dev_i, pass it its bus address
- * as seen from dev_j
- */
- msg.uop = SCIF_NODE_ADD;
- msg.src.node = dev_j->node;
- msg.dst.node = dev_i->node;
-
- msg.payload[0] = p2p_ji->ppi_da[SCIF_PPI_APER];
- msg.payload[1] = p2p_ij->ppi_da[SCIF_PPI_MMIO];
- msg.payload[2] = p2p_ij->ppi_da[SCIF_PPI_APER];
- msg.payload[3] = p2p_ij->ppi_len[SCIF_PPI_APER] << PAGE_SHIFT;
-
- err = scif_nodeqp_send(dev_i, &msg);
- if (err) {
- dev_err(&scifdev->sdev->dev,
- "%s %d error %d\n", __func__, __LINE__, err);
- return;
- }
-
- /* Same as above but to dev_j */
- msg.uop = SCIF_NODE_ADD;
- msg.src.node = dev_i->node;
- msg.dst.node = dev_j->node;
-
- tmppayload = msg.payload[0];
- msg.payload[0] = msg.payload[2];
- msg.payload[2] = tmppayload;
- msg.payload[1] = p2p_ji->ppi_da[SCIF_PPI_MMIO];
- msg.payload[3] = p2p_ji->ppi_len[SCIF_PPI_APER] << PAGE_SHIFT;
-
- scif_nodeqp_send(dev_j, &msg);
-}
-
-static void scif_p2p_setup(void)
-{
- int i, j;
-
- if (!scif_info.p2p_enable)
- return;
-
- for (i = 1; i <= scif_info.maxid; i++)
- if (!_scifdev_alive(&scif_dev[i]))
- return;
-
- for (i = 1; i <= scif_info.maxid; i++) {
- for (j = 1; j <= scif_info.maxid; j++) {
- struct scif_dev *scifdev = &scif_dev[i];
-
- if (i == j)
- continue;
- scif_node_connect(scifdev, j);
- }
- }
-}
-
-static char *message_types[] = {"BAD",
- "INIT",
- "EXIT",
- "SCIF_EXIT_ACK",
- "SCIF_NODE_ADD",
- "SCIF_NODE_ADD_ACK",
- "SCIF_NODE_ADD_NACK",
- "REMOVE_NODE",
- "REMOVE_NODE_ACK",
- "CNCT_REQ",
- "CNCT_GNT",
- "CNCT_GNTACK",
- "CNCT_GNTNACK",
- "CNCT_REJ",
- "DISCNCT",
- "DISCNT_ACK",
- "CLIENT_SENT",
- "CLIENT_RCVD",
- "SCIF_GET_NODE_INFO",
- "REGISTER",
- "REGISTER_ACK",
- "REGISTER_NACK",
- "UNREGISTER",
- "UNREGISTER_ACK",
- "UNREGISTER_NACK",
- "ALLOC_REQ",
- "ALLOC_GNT",
- "ALLOC_REJ",
-				"FREE_VIRT",
- "MUNMAP",
- "MARK",
- "MARK_ACK",
- "MARK_NACK",
- "WAIT",
- "WAIT_ACK",
- "WAIT_NACK",
- "SIGNAL_LOCAL",
- "SIGNAL_REMOTE",
- "SIG_ACK",
- "SIG_NACK"};
-
-static void
-scif_display_message(struct scif_dev *scifdev, struct scifmsg *msg,
- const char *label)
-{
- if (!scif_info.en_msg_log)
- return;
- if (msg->uop > SCIF_MAX_MSG) {
- dev_err(&scifdev->sdev->dev,
- "%s: unknown msg type %d\n", label, msg->uop);
- return;
- }
- dev_info(&scifdev->sdev->dev,
- "%s: msg type %s, src %d:%d, dest %d:%d payload 0x%llx:0x%llx:0x%llx:0x%llx\n",
- label, message_types[msg->uop], msg->src.node, msg->src.port,
- msg->dst.node, msg->dst.port, msg->payload[0], msg->payload[1],
- msg->payload[2], msg->payload[3]);
-}
-
-int _scif_nodeqp_send(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_qp *qp = scifdev->qpairs;
- int err = -ENOMEM, loop_cnt = 0;
-
- scif_display_message(scifdev, msg, "Sent");
- if (!qp) {
- err = -EINVAL;
- goto error;
- }
- spin_lock(&qp->send_lock);
-
- while ((err = scif_rb_write(&qp->outbound_q,
- msg, sizeof(struct scifmsg)))) {
- mdelay(1);
-#define SCIF_NODEQP_SEND_TO_MSEC (3 * 1000)
- if (loop_cnt++ > (SCIF_NODEQP_SEND_TO_MSEC)) {
- err = -ENODEV;
- break;
- }
- }
- if (!err)
- scif_rb_commit(&qp->outbound_q);
- spin_unlock(&qp->send_lock);
- if (!err) {
- if (scifdev_self(scifdev))
- /*
- * For loopback we need to emulate an interrupt by
- * queuing work for the queue handling real node
- * Qp interrupts.
- */
- queue_work(scifdev->intr_wq, &scifdev->intr_bh);
- else
- scif_send_msg_intr(scifdev);
- }
-error:
- if (err)
- dev_dbg(&scifdev->sdev->dev,
- "%s %d error %d uop %d\n",
- __func__, __LINE__, err, msg->uop);
- return err;
-}
-
-/**
- * scif_nodeqp_send - Send a message on the node queue pair
- * @scifdev: Scif Device.
- * @msg: The message to be sent.
- */
-int scif_nodeqp_send(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- int err;
- struct device *spdev = NULL;
-
- if (msg->uop > SCIF_EXIT_ACK) {
- /* Don't send messages once the exit flow has begun */
- if (OP_IDLE != scifdev->exit)
- return -ENODEV;
- spdev = scif_get_peer_dev(scifdev);
- if (IS_ERR(spdev)) {
- err = PTR_ERR(spdev);
- return err;
- }
- }
- err = _scif_nodeqp_send(scifdev, msg);
- if (msg->uop > SCIF_EXIT_ACK)
- scif_put_peer_dev(spdev);
- return err;
-}
-
-/*
- * scif_misc_handler:
- *
- * Work queue handler for servicing miscellaneous SCIF tasks.
- * Examples include:
- * 1) Remote fence requests.
- * 2) Destruction of temporary registered windows
- * created during scif_vreadfrom()/scif_vwriteto().
- * 3) Cleanup of zombie endpoints.
- */
-void scif_misc_handler(struct work_struct *work)
-{
- scif_rma_handle_remote_fences();
- scif_rma_destroy_windows();
- scif_rma_destroy_tcw_invalid();
- scif_cleanup_zombie_epd();
-}
-
-/**
- * scif_init() - Respond to SCIF_INIT interrupt message
- * @scifdev: Remote SCIF device node
- * @msg: Interrupt message
- */
-static __always_inline void
-scif_init(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- /*
- * Allow the thread waiting for device page updates for the peer QP DMA
- * address to complete initializing the inbound_q.
- */
- flush_delayed_work(&scifdev->qp_dwork);
-
- scif_peer_register_device(scifdev);
-
- if (scif_is_mgmt_node()) {
- mutex_lock(&scif_info.conflock);
- scif_p2p_setup();
- mutex_unlock(&scif_info.conflock);
- }
-}
-
-/**
- * scif_exit() - Respond to SCIF_EXIT interrupt message
- * @scifdev: Remote SCIF device node
- * @unused: Interrupt message (unused)
- *
- * This function stops the SCIF interface for the node which sent
- * the SCIF_EXIT message and starts waiting for that node to
- * set up the queue pair again.
- */
-static __always_inline void
-scif_exit(struct scif_dev *scifdev, struct scifmsg *unused)
-{
- scifdev->exit_ack_pending = true;
- if (scif_is_mgmt_node())
- scif_disconnect_node(scifdev->node, false);
- else
- scif_stop(scifdev);
- schedule_delayed_work(&scifdev->qp_dwork,
- msecs_to_jiffies(1000));
-}
-
-/**
- * scif_exit_ack() - Respond to SCIF_EXIT_ACK interrupt message
- * @scifdev: Remote SCIF device node
- * @unused: Interrupt message (unused)
- *
- */
-static __always_inline void
-scif_exit_ack(struct scif_dev *scifdev, struct scifmsg *unused)
-{
- scifdev->exit = OP_COMPLETED;
- wake_up(&scif_info.exitwq);
-}
-
-/**
- * scif_node_add() - Respond to SCIF_NODE_ADD interrupt message
- * @scifdev: Remote SCIF device node
- * @msg: Interrupt message
- *
- * When the mgmt node driver has finished initializing a MIC node queue pair it
- * marks the node as online. It then looks for all currently online MIC cards
- * and sends a SCIF_NODE_ADD message to identify the ID of the new card for
- * peer-to-peer initialization
- *
- * The local node allocates its incoming queue and sends its address in the
- * SCIF_NODE_ADD_ACK message back to the mgmt node, the mgmt node "reflects"
- * this message to the new node
- */
-static __always_inline void
-scif_node_add(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_dev *newdev;
- dma_addr_t qp_offset;
- int qp_connect;
- struct scif_hw_dev *sdev;
-
- dev_dbg(&scifdev->sdev->dev,
- "Scifdev %d:%d received NODE_ADD msg for node %d\n",
- scifdev->node, msg->dst.node, msg->src.node);
- dev_dbg(&scifdev->sdev->dev,
- "Remote address for this node's aperture %llx\n",
- msg->payload[0]);
- newdev = &scif_dev[msg->src.node];
- newdev->node = msg->src.node;
- newdev->sdev = scif_dev[SCIF_MGMT_NODE].sdev;
- sdev = newdev->sdev;
-
- if (scif_setup_intr_wq(newdev)) {
- dev_err(&scifdev->sdev->dev,
- "failed to setup interrupts for %d\n", msg->src.node);
- goto interrupt_setup_error;
- }
- newdev->mmio.va = ioremap(msg->payload[1], sdev->mmio->len);
- if (!newdev->mmio.va) {
- dev_err(&scifdev->sdev->dev,
- "failed to map mmio for %d\n", msg->src.node);
- goto mmio_map_error;
- }
- newdev->qpairs = kzalloc(sizeof(*newdev->qpairs), GFP_KERNEL);
- if (!newdev->qpairs)
- goto qp_alloc_error;
- /*
- * Set the base address of the remote node's memory since it gets
- * added to qp_offset
- */
- newdev->base_addr = msg->payload[0];
-
- qp_connect = scif_setup_qp_connect(newdev->qpairs, &qp_offset,
- SCIF_NODE_QP_SIZE, newdev);
- if (qp_connect) {
- dev_err(&scifdev->sdev->dev,
- "failed to setup qp_connect %d\n", qp_connect);
- goto qp_connect_error;
- }
-
- newdev->db = sdev->hw_ops->next_db(sdev);
- newdev->cookie = sdev->hw_ops->request_irq(sdev, scif_intr_handler,
- "SCIF_INTR", newdev,
- newdev->db);
- if (IS_ERR(newdev->cookie))
- goto qp_connect_error;
- newdev->qpairs->magic = SCIFEP_MAGIC;
- newdev->qpairs->qp_state = SCIF_QP_OFFLINE;
-
- msg->uop = SCIF_NODE_ADD_ACK;
- msg->dst.node = msg->src.node;
- msg->src.node = scif_info.nodeid;
- msg->payload[0] = qp_offset;
- msg->payload[2] = newdev->db;
- scif_nodeqp_send(&scif_dev[SCIF_MGMT_NODE], msg);
- return;
-qp_connect_error:
- kfree(newdev->qpairs);
- newdev->qpairs = NULL;
-qp_alloc_error:
- iounmap(newdev->mmio.va);
- newdev->mmio.va = NULL;
-mmio_map_error:
-interrupt_setup_error:
- dev_err(&scifdev->sdev->dev,
- "node add failed for node %d\n", msg->src.node);
- msg->uop = SCIF_NODE_ADD_NACK;
- msg->dst.node = msg->src.node;
- msg->src.node = scif_info.nodeid;
- scif_nodeqp_send(&scif_dev[SCIF_MGMT_NODE], msg);
-}
-
-void scif_poll_qp_state(struct work_struct *work)
-{
-#define SCIF_NODE_QP_RETRY 100
-#define SCIF_NODE_QP_TIMEOUT 100
- struct scif_dev *peerdev = container_of(work, struct scif_dev,
- p2p_dwork.work);
- struct scif_qp *qp = &peerdev->qpairs[0];
-
- if (qp->qp_state != SCIF_QP_ONLINE ||
- qp->remote_qp->qp_state != SCIF_QP_ONLINE) {
- if (peerdev->p2p_retry++ == SCIF_NODE_QP_RETRY) {
- dev_err(&peerdev->sdev->dev,
- "Warning: QP check timeout with state %d\n",
- qp->qp_state);
- goto timeout;
- }
- schedule_delayed_work(&peerdev->p2p_dwork,
- msecs_to_jiffies(SCIF_NODE_QP_TIMEOUT));
- return;
- }
- return;
-timeout:
- dev_err(&peerdev->sdev->dev,
- "%s %d remote node %d offline, state = 0x%x\n",
- __func__, __LINE__, peerdev->node, qp->qp_state);
- qp->remote_qp->qp_state = SCIF_QP_OFFLINE;
- scif_peer_unregister_device(peerdev);
- scif_cleanup_scifdev(peerdev);
-}
-
-/**
- * scif_node_add_ack() - Respond to SCIF_NODE_ADD_ACK interrupt message
- * @scifdev: Remote SCIF device node
- * @msg: Interrupt message
- *
- * After a MIC node receives the SCIF_NODE_ADD_ACK message it sends this
- * message to the mgmt node to confirm the sequence is finished.
- *
- */
-static __always_inline void
-scif_node_add_ack(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_dev *peerdev;
- struct scif_qp *qp;
- struct scif_dev *dst_dev = &scif_dev[msg->dst.node];
-
- dev_dbg(&scifdev->sdev->dev,
- "Scifdev %d received SCIF_NODE_ADD_ACK msg src %d dst %d\n",
- scifdev->node, msg->src.node, msg->dst.node);
- dev_dbg(&scifdev->sdev->dev,
- "payload %llx %llx %llx %llx\n", msg->payload[0],
- msg->payload[1], msg->payload[2], msg->payload[3]);
- if (scif_is_mgmt_node()) {
- /*
-		 * The lock serializes with scif_qp_response_ack. The mgmt node
-		 * is forwarding the NODE_ADD_ACK message from src to dst; we
-		 * need to make sure that dst has already received a
-		 * NODE_ADD for src and set up its end of the qp to dst.
- */
- mutex_lock(&scif_info.conflock);
- msg->payload[1] = scif_info.maxid;
- scif_nodeqp_send(dst_dev, msg);
- mutex_unlock(&scif_info.conflock);
- return;
- }
- peerdev = &scif_dev[msg->src.node];
- peerdev->sdev = scif_dev[SCIF_MGMT_NODE].sdev;
- peerdev->node = msg->src.node;
-
- qp = &peerdev->qpairs[0];
-
- if ((scif_setup_qp_connect_response(peerdev, &peerdev->qpairs[0],
- msg->payload[0])))
- goto local_error;
- peerdev->rdb = msg->payload[2];
- qp->remote_qp->qp_state = SCIF_QP_ONLINE;
-
- scif_peer_register_device(peerdev);
-
- schedule_delayed_work(&peerdev->p2p_dwork, 0);
- return;
-local_error:
- scif_cleanup_scifdev(peerdev);
-}
-
-/**
- * scif_node_add_nack: Respond to SCIF_NODE_ADD_NACK interrupt message
- * @scifdev: Remote SCIF device node
- * @msg: Interrupt message
- *
- * SCIF_NODE_ADD failed, so inform the waiting wq.
- */
-static __always_inline void
-scif_node_add_nack(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- if (scif_is_mgmt_node()) {
- struct scif_dev *dst_dev = &scif_dev[msg->dst.node];
-
- dev_dbg(&scifdev->sdev->dev,
- "SCIF_NODE_ADD_NACK received from %d\n", scifdev->node);
- scif_nodeqp_send(dst_dev, msg);
- }
-}
-
-/**
- * scif_node_remove: Handle SCIF_NODE_REMOVE message
- * @scifdev: Remote SCIF device node
- * @msg: Interrupt message
- *
- * Handle node removal.
- */
-static __always_inline void
-scif_node_remove(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- int node = msg->payload[0];
- struct scif_dev *scdev = &scif_dev[node];
-
- scdev->node_remove_ack_pending = true;
- scif_handle_remove_node(node);
-}
-
-/**
- * scif_node_remove_ack: Handle SCIF_NODE_REMOVE_ACK message
- * @scifdev: Remote SCIF device node
- * @msg: Interrupt message
- *
- * The peer has acked a SCIF_NODE_REMOVE message.
- */
-static __always_inline void
-scif_node_remove_ack(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_dev *sdev = &scif_dev[msg->payload[0]];
-
- atomic_inc(&sdev->disconn_rescnt);
- wake_up(&sdev->disconn_wq);
-}
-
-/**
- * scif_get_node_info_resp: Respond to SCIF_GET_NODE_INFO interrupt message
- * @scifdev: Remote SCIF device node
- * @msg: Interrupt message
- *
- * Retrieve node info, i.e. maxid and total, from the mgmt node.
- */
-static __always_inline void
-scif_get_node_info_resp(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- if (scif_is_mgmt_node()) {
- swap(msg->dst.node, msg->src.node);
- mutex_lock(&scif_info.conflock);
- msg->payload[1] = scif_info.maxid;
- msg->payload[2] = scif_info.total;
- mutex_unlock(&scif_info.conflock);
- scif_nodeqp_send(scifdev, msg);
- } else {
- struct completion *node_info =
- (struct completion *)msg->payload[3];
-
- mutex_lock(&scif_info.conflock);
- scif_info.maxid = msg->payload[1];
- scif_info.total = msg->payload[2];
- complete_all(node_info);
- mutex_unlock(&scif_info.conflock);
- }
-}
-
-static void
-scif_msg_unknown(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- /* Bogus Node Qp Message? */
- dev_err(&scifdev->sdev->dev,
-		"Unknown message 0x%x scifdev->node 0x%x\n",
- msg->uop, scifdev->node);
-}
-
-static void (*scif_intr_func[SCIF_MAX_MSG + 1])
- (struct scif_dev *, struct scifmsg *msg) = {
- scif_msg_unknown, /* Error */
- scif_init, /* SCIF_INIT */
- scif_exit, /* SCIF_EXIT */
- scif_exit_ack, /* SCIF_EXIT_ACK */
- scif_node_add, /* SCIF_NODE_ADD */
- scif_node_add_ack, /* SCIF_NODE_ADD_ACK */
- scif_node_add_nack, /* SCIF_NODE_ADD_NACK */
- scif_node_remove, /* SCIF_NODE_REMOVE */
- scif_node_remove_ack, /* SCIF_NODE_REMOVE_ACK */
- scif_cnctreq, /* SCIF_CNCT_REQ */
- scif_cnctgnt, /* SCIF_CNCT_GNT */
- scif_cnctgnt_ack, /* SCIF_CNCT_GNTACK */
- scif_cnctgnt_nack, /* SCIF_CNCT_GNTNACK */
- scif_cnctrej, /* SCIF_CNCT_REJ */
- scif_discnct, /* SCIF_DISCNCT */
- scif_discnt_ack, /* SCIF_DISCNT_ACK */
- scif_clientsend, /* SCIF_CLIENT_SENT */
- scif_clientrcvd, /* SCIF_CLIENT_RCVD */
- scif_get_node_info_resp,/* SCIF_GET_NODE_INFO */
- scif_recv_reg, /* SCIF_REGISTER */
- scif_recv_reg_ack, /* SCIF_REGISTER_ACK */
- scif_recv_reg_nack, /* SCIF_REGISTER_NACK */
- scif_recv_unreg, /* SCIF_UNREGISTER */
- scif_recv_unreg_ack, /* SCIF_UNREGISTER_ACK */
- scif_recv_unreg_nack, /* SCIF_UNREGISTER_NACK */
- scif_alloc_req, /* SCIF_ALLOC_REQ */
- scif_alloc_gnt_rej, /* SCIF_ALLOC_GNT */
- scif_alloc_gnt_rej, /* SCIF_ALLOC_REJ */
- scif_free_virt, /* SCIF_FREE_VIRT */
- scif_recv_munmap, /* SCIF_MUNMAP */
- scif_recv_mark, /* SCIF_MARK */
- scif_recv_mark_resp, /* SCIF_MARK_ACK */
- scif_recv_mark_resp, /* SCIF_MARK_NACK */
- scif_recv_wait, /* SCIF_WAIT */
- scif_recv_wait_resp, /* SCIF_WAIT_ACK */
- scif_recv_wait_resp, /* SCIF_WAIT_NACK */
- scif_recv_sig_local, /* SCIF_SIG_LOCAL */
- scif_recv_sig_remote, /* SCIF_SIG_REMOTE */
- scif_recv_sig_resp, /* SCIF_SIG_ACK */
- scif_recv_sig_resp, /* SCIF_SIG_NACK */
-};
-
-static int scif_max_msg_id = SCIF_MAX_MSG;
-/**
- * scif_nodeqp_msg_handler() - Common handler for node messages
- * @scifdev: Remote device to respond to
- * @qp: Remote memory pointer
- * @msg: The message to be handled.
- *
- * This routine calls the appropriate routine to handle a Node Qp
- * message receipt
- */
-static void
-scif_nodeqp_msg_handler(struct scif_dev *scifdev,
- struct scif_qp *qp, struct scifmsg *msg)
-{
- scif_display_message(scifdev, msg, "Rcvd");
-
- if (msg->uop > (u32)scif_max_msg_id) {
- /* Bogus Node Qp Message? */
- dev_err(&scifdev->sdev->dev,
-			"Unknown message 0x%x scifdev->node 0x%x\n",
- msg->uop, scifdev->node);
- return;
- }
-
- scif_intr_func[msg->uop](scifdev, msg);
-}
-
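The uop bounds check above is the standard guard before indexing a
function-pointer table. A self-contained miniature of the same dispatch
shape (handler names are illustrative):

#include <stdio.h>

static void handle_init(int payload) { printf("init %d\n", payload); }
static void handle_exit(int payload) { printf("exit %d\n", payload); }
static void handle_unknown(int payload) { printf("bogus %d\n", payload); }

static void (*handlers[])(int) = {
	handle_unknown,		/* 0: error */
	handle_init,		/* 1 */
	handle_exit,		/* 2 */
};

#define MAX_UOP 2

static void dispatch(unsigned int uop, int payload)
{
	/* Reject out-of-range opcodes before indexing the table */
	if (uop > MAX_UOP) {
		handle_unknown(payload);
		return;
	}
	handlers[uop](payload);
}

int main(void)
{
	dispatch(1, 7);		/* routed to handle_init */
	dispatch(9, 7);		/* out of range, routed to handle_unknown */
	return 0;
}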
-/**
- * scif_nodeqp_intrhandler() - Interrupt handler for node messages
- * @scifdev: Remote device to respond to
- * @qp: Remote memory pointer
- *
- * This routine is triggered by the interrupt mechanism. It reads
- * messages from the node queue RB and calls the Node QP Message handling
- * routine.
- */
-void scif_nodeqp_intrhandler(struct scif_dev *scifdev, struct scif_qp *qp)
-{
- struct scifmsg msg;
- int read_size;
-
- do {
- read_size = scif_rb_get_next(&qp->inbound_q, &msg, sizeof(msg));
- if (!read_size)
- break;
- scif_nodeqp_msg_handler(scifdev, qp, &msg);
- /*
- * The node queue pair is unmapped so skip the read pointer
- * update after receipt of a SCIF_EXIT_ACK
- */
- if (SCIF_EXIT_ACK == msg.uop)
- break;
- scif_rb_update_read_ptr(&qp->inbound_q);
- } while (1);
-}
-
-/**
- * scif_loopb_wq_handler - Loopback Workqueue Handler.
- * @unused: loop back work (unused)
- *
- * This handler runs on the loopback workqueue (scif_info.loopb_wq).
- * It grabs the recv lock, dequeues any available messages from the head
- * of the loopback message list, calls the node QP message handler,
- * waits for it to return, then frees up this message and dequeues more
- * elements of the list if available.
- */
-static void scif_loopb_wq_handler(struct work_struct *unused)
-{
- struct scif_dev *scifdev = scif_info.loopb_dev;
- struct scif_qp *qp = scifdev->qpairs;
- struct scif_loopb_msg *msg;
-
- do {
- msg = NULL;
- spin_lock(&qp->recv_lock);
- if (!list_empty(&scif_info.loopb_recv_q)) {
- msg = list_first_entry(&scif_info.loopb_recv_q,
- struct scif_loopb_msg,
- list);
- list_del(&msg->list);
- }
- spin_unlock(&qp->recv_lock);
-
- if (msg) {
- scif_nodeqp_msg_handler(scifdev, qp, &msg->msg);
- kfree(msg);
- }
- } while (msg);
-}
-
-/**
- * scif_loopb_msg_handler() - Workqueue handler for loopback messages.
- * @scifdev: SCIF device
- * @qp: Queue pair.
- *
- * This work queue routine is triggered when a loopback message is received.
- *
- * We need special handling for receiving Node Qp messages on a loopback SCIF
- * device via two workqueues for receiving messages.
- *
- * The reason we need the extra workqueue which is not required with *normal*
- * non-loopback SCIF devices is the potential classic deadlock described below:
- *
- * Thread A tries to send a message on a loopback SCIF device and blocks since
- * there is no space in the RB while it has the send_lock held or another
- * lock called lock X for example.
- *
- * Thread B: The Loopback Node QP message receive workqueue receives the message
- * and tries to send a message (eg an ACK) to the loopback SCIF device. It tries
- * to grab the send lock again or lock X and deadlocks with Thread A. The RB
- * cannot be drained any further due to this classic deadlock.
- *
- * In order to avoid deadlocks as mentioned above we have an extra level of
- * indirection achieved by having two workqueues.
- * 1) The first workqueue whose handler is scif_loopb_msg_handler reads
- * messages from the Node QP RB, adds them to a list and queues work for the
- * second workqueue.
- *
- * 2) The second workqueue whose handler is scif_loopb_wq_handler dequeues
- * messages from the list, handles them, frees up the memory and dequeues
- * more elements from the list if possible.
- */
-int
-scif_loopb_msg_handler(struct scif_dev *scifdev, struct scif_qp *qp)
-{
- int read_size;
- struct scif_loopb_msg *msg;
-
- do {
- msg = kmalloc(sizeof(*msg), GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
- read_size = scif_rb_get_next(&qp->inbound_q, &msg->msg,
- sizeof(struct scifmsg));
- if (read_size != sizeof(struct scifmsg)) {
- kfree(msg);
- scif_rb_update_read_ptr(&qp->inbound_q);
- break;
- }
- spin_lock(&qp->recv_lock);
- list_add_tail(&msg->list, &scif_info.loopb_recv_q);
- spin_unlock(&qp->recv_lock);
- queue_work(scif_info.loopb_wq, &scif_info.loopb_work);
- scif_rb_update_read_ptr(&qp->inbound_q);
- } while (read_size == sizeof(struct scifmsg));
- return read_size;
-}
-
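The two-stage scheme above reduces to: drain the bounded ring into an
unbounded list in one context, process the list in another, so that
processing (which may itself send) never blocks the drainer. A minimal
single-threaded sketch of the data movement only; the deadlock avoidance
comes from running the two stages in separate contexts, which this sketch
merely simulates:

#include <stdio.h>
#include <stdlib.h>

struct node {
	int msg;
	struct node *next;
};

/* Stage 1: drain the bounded ring into an unbounded list, so the ring
 * keeps emptying even while messages are being processed elsewhere. */
static struct node *drain_ring(const int *ring, int n)
{
	struct node *head = NULL, **tail = &head;

	for (int i = 0; i < n; i++) {
		struct node *e = malloc(sizeof(*e));

		e->msg = ring[i];
		e->next = NULL;
		*tail = e;
		tail = &e->next;
	}
	return head;
}

/* Stage 2: process and free list entries; safe to "send" from here
 * because stage 1 has already freed up ring space. */
static void process_list(struct node *head)
{
	while (head) {
		struct node *next = head->next;

		printf("handling msg %d\n", head->msg);
		free(head);
		head = next;
	}
}

int main(void)
{
	int ring[3] = { 10, 11, 12 };

	process_list(drain_ring(ring, 3));
	return 0;
}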
-/**
- * scif_setup_loopback_qp - One time setup work for Loopback Node Qp.
- * @scifdev: SCIF device
- *
- * Sets up the required loopback workqueues, queue pairs and ring buffers
- */
-int scif_setup_loopback_qp(struct scif_dev *scifdev)
-{
- int err = 0;
- void *local_q;
- struct scif_qp *qp;
-
- err = scif_setup_intr_wq(scifdev);
- if (err)
- goto exit;
- INIT_LIST_HEAD(&scif_info.loopb_recv_q);
- snprintf(scif_info.loopb_wqname, sizeof(scif_info.loopb_wqname),
- "SCIF LOOPB %d", scifdev->node);
- scif_info.loopb_wq =
- alloc_ordered_workqueue(scif_info.loopb_wqname, 0);
- if (!scif_info.loopb_wq) {
- err = -ENOMEM;
- goto destroy_intr;
- }
- INIT_WORK(&scif_info.loopb_work, scif_loopb_wq_handler);
- /* Allocate Self Qpair */
- scifdev->qpairs = kzalloc(sizeof(*scifdev->qpairs), GFP_KERNEL);
- if (!scifdev->qpairs) {
- err = -ENOMEM;
- goto destroy_loopb_wq;
- }
-
- qp = scifdev->qpairs;
- qp->magic = SCIFEP_MAGIC;
- spin_lock_init(&qp->send_lock);
- spin_lock_init(&qp->recv_lock);
-
- local_q = kzalloc(SCIF_NODE_QP_SIZE, GFP_KERNEL);
- if (!local_q) {
- err = -ENOMEM;
- goto free_qpairs;
- }
- /*
- * For loopback the inbound_q and outbound_q are essentially the same
- * since the Node sends a message on the loopback interface to the
- * outbound_q which is then received on the inbound_q.
- */
- scif_rb_init(&qp->outbound_q,
- &qp->local_read,
- &qp->local_write,
- local_q, get_count_order(SCIF_NODE_QP_SIZE));
-
- scif_rb_init(&qp->inbound_q,
- &qp->local_read,
- &qp->local_write,
- local_q, get_count_order(SCIF_NODE_QP_SIZE));
- scif_info.nodeid = scifdev->node;
-
- scif_peer_register_device(scifdev);
-
- scif_info.loopb_dev = scifdev;
- return err;
-free_qpairs:
- kfree(scifdev->qpairs);
-destroy_loopb_wq:
- destroy_workqueue(scif_info.loopb_wq);
-destroy_intr:
- scif_destroy_intr_wq(scifdev);
-exit:
- return err;
-}
-
-/**
- * scif_destroy_loopback_qp - One time uninit work for Loopback Node Qp
- * @scifdev: SCIF device
- *
- * Destroys the workqueues and frees up the Ring Buffer and Queue Pair memory.
- */
-int scif_destroy_loopback_qp(struct scif_dev *scifdev)
-{
- scif_peer_unregister_device(scifdev);
- destroy_workqueue(scif_info.loopb_wq);
- scif_destroy_intr_wq(scifdev);
- kfree(scifdev->qpairs->outbound_q.rb_base);
- kfree(scifdev->qpairs);
- scifdev->sdev = NULL;
- scif_info.loopb_dev = NULL;
- return 0;
-}
-
-void scif_destroy_p2p(struct scif_dev *scifdev)
-{
- struct scif_dev *peer_dev;
- struct scif_p2p_info *p2p;
- struct list_head *pos, *tmp;
- int bd;
-
- mutex_lock(&scif_info.conflock);
- /* Free P2P mappings in the given node for all its peer nodes */
- list_for_each_safe(pos, tmp, &scifdev->p2p) {
- p2p = list_entry(pos, struct scif_p2p_info, ppi_list);
- dma_unmap_sg(&scifdev->sdev->dev, p2p->ppi_sg[SCIF_PPI_MMIO],
- p2p->sg_nentries[SCIF_PPI_MMIO],
- DMA_BIDIRECTIONAL);
- dma_unmap_sg(&scifdev->sdev->dev, p2p->ppi_sg[SCIF_PPI_APER],
- p2p->sg_nentries[SCIF_PPI_APER],
- DMA_BIDIRECTIONAL);
- scif_p2p_freesg(p2p->ppi_sg[SCIF_PPI_MMIO]);
- scif_p2p_freesg(p2p->ppi_sg[SCIF_PPI_APER]);
- list_del(pos);
- kfree(p2p);
- }
-
- /* Free P2P mapping created in the peer nodes for the given node */
- for (bd = SCIF_MGMT_NODE + 1; bd <= scif_info.maxid; bd++) {
- peer_dev = &scif_dev[bd];
- list_for_each_safe(pos, tmp, &peer_dev->p2p) {
- p2p = list_entry(pos, struct scif_p2p_info, ppi_list);
- if (p2p->ppi_peer_id == scifdev->node) {
- dma_unmap_sg(&peer_dev->sdev->dev,
- p2p->ppi_sg[SCIF_PPI_MMIO],
- p2p->sg_nentries[SCIF_PPI_MMIO],
- DMA_BIDIRECTIONAL);
- dma_unmap_sg(&peer_dev->sdev->dev,
- p2p->ppi_sg[SCIF_PPI_APER],
- p2p->sg_nentries[SCIF_PPI_APER],
- DMA_BIDIRECTIONAL);
- scif_p2p_freesg(p2p->ppi_sg[SCIF_PPI_MMIO]);
- scif_p2p_freesg(p2p->ppi_sg[SCIF_PPI_APER]);
- list_del(pos);
- kfree(p2p);
- }
- }
- }
- mutex_unlock(&scif_info.conflock);
-}
diff --git a/drivers/misc/mic/scif/scif_nodeqp.h b/drivers/misc/mic/scif/scif_nodeqp.h
deleted file mode 100644
index 95896273138e..000000000000
--- a/drivers/misc/mic/scif/scif_nodeqp.h
+++ /dev/null
@@ -1,221 +0,0 @@
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Intel SCIF driver.
- *
- */
-#ifndef SCIF_NODEQP
-#define SCIF_NODEQP
-
-#include "scif_rb.h"
-#include "scif_peer_bus.h"
-
-#define SCIF_INIT 1 /* First message sent to the peer node for discovery */
-#define SCIF_EXIT 2 /* Last message from the peer informing intent to exit */
-#define SCIF_EXIT_ACK 3 /* Response to SCIF_EXIT message */
-#define SCIF_NODE_ADD 4 /* Tell Online nodes a new node exists */
-#define SCIF_NODE_ADD_ACK 5 /* Confirm to mgmt node sequence is finished */
-#define SCIF_NODE_ADD_NACK 6 /* SCIF_NODE_ADD failed */
-#define SCIF_NODE_REMOVE 7 /* Request to deactivate a SCIF node */
-#define SCIF_NODE_REMOVE_ACK 8 /* Response to a SCIF_NODE_REMOVE message */
-#define SCIF_CNCT_REQ 9 /* Request a connection to a port (phys addr) */
-#define SCIF_CNCT_GNT 10 /* Grant a connection request (phys addr) */
-#define SCIF_CNCT_GNTACK 11 /* Acknowledge a connection grant */
-#define SCIF_CNCT_GNTNACK 12 /* Reject a connection grant */
-#define SCIF_CNCT_REJ 13 /* Reject a connection request */
-#define SCIF_DISCNCT 14 /* Notify peer that connection is being terminated */
-#define SCIF_DISCNT_ACK 15 /* Acknowledge connection termination */
-#define SCIF_CLIENT_SENT 16 /* Notify the peer that data has been written */
-#define SCIF_CLIENT_RCVD 17 /* Notify the peer that data has been read */
-#define SCIF_GET_NODE_INFO 18 /* Get current node mask from the mgmt node */
-#define SCIF_REGISTER 19 /* Tell peer about a new registered window */
-#define SCIF_REGISTER_ACK 20 /* Notify peer about registration success */
-#define SCIF_REGISTER_NACK 21 /* Notify peer about registration failure */
-#define SCIF_UNREGISTER 22 /* Tell peer about unregistering a window */
-#define SCIF_UNREGISTER_ACK 23 /* Notify peer about unregistration success */
-#define SCIF_UNREGISTER_NACK 24 /* Notify peer about unregistration failure */
-#define SCIF_ALLOC_REQ 25 /* Request a mapped buffer */
-#define SCIF_ALLOC_GNT 26 /* Notify peer about allocation success */
-#define SCIF_ALLOC_REJ 27 /* Notify peer about allocation failure */
-#define SCIF_FREE_VIRT 28 /* Free previously allocated virtual memory */
-#define SCIF_MUNMAP 29 /* Notify peer that a window mapping was unmapped */
-#define SCIF_MARK 30 /* SCIF Remote Fence Mark Request */
-#define SCIF_MARK_ACK 31 /* SCIF Remote Fence Mark Success */
-#define SCIF_MARK_NACK 32 /* SCIF Remote Fence Mark Failure */
-#define SCIF_WAIT 33 /* SCIF Remote Fence Wait Request */
-#define SCIF_WAIT_ACK 34 /* SCIF Remote Fence Wait Success */
-#define SCIF_WAIT_NACK 35 /* SCIF Remote Fence Wait Failure */
-#define SCIF_SIG_LOCAL 36 /* SCIF Remote Fence Local Signal Request */
-#define SCIF_SIG_REMOTE 37 /* SCIF Remote Fence Remote Signal Request */
-#define SCIF_SIG_ACK 38 /* SCIF Remote Fence Remote Signal Success */
-#define SCIF_SIG_NACK 39 /* SCIF Remote Fence Remote Signal Failure */
-#define SCIF_MAX_MSG SCIF_SIG_NACK
-
-/*
- * struct scifmsg - Node QP message format
- *
- * @src: Source information
- * @dst: Destination information
- * @uop: The message opcode
- * @payload: Unique payload format for each message
- */
-struct scifmsg {
- struct scif_port_id src;
- struct scif_port_id dst;
- u32 uop;
- u64 payload[4];
-} __packed;
-
-/*
- * struct scif_allocmsg - Used with SCIF_ALLOC_REQ to request
- * the remote node to allocate memory
- *
- * @phys_addr: Physical address of the buffer
- * @vaddr: Virtual address of the buffer
- * @size: Size of the buffer
- * @state: Current state
- * @allocwq: wait queue for status
- */
-struct scif_allocmsg {
- dma_addr_t phys_addr;
- unsigned long vaddr;
- size_t size;
- enum scif_msg_state state;
- wait_queue_head_t allocwq;
-};
-
-/*
- * struct scif_qp - Node Queue Pair
- *
- * A subtle structure: the design never reads across PCIe, only
- * writes, so any r/w pointer that must be read lives in local
- * memory and is kept up to date by the peer writing across the
- * bus. Locally we only read the write pointer of the inbound_q
- * and the read pointer of the outbound_q.
- *
- * @magic: Magic value to ensure the peer sees the QP correctly
- * @outbound_q: The outbound ring buffer for sending messages
- * @inbound_q: The inbound ring buffer for receiving messages
- * @local_write: Local write index
- * @local_read: Local read index
- * @remote_qp: The remote queue pair
- * @local_buf: DMA address of local ring buffer
- * @local_qp: DMA address of the local queue pair data structure
- * @remote_buf: DMA address of remote ring buffer
- * @qp_state: QP state i.e. online or offline used for P2P
- * @send_lock: synchronize access to outbound queue
- * @recv_lock: Synchronize access to inbound queue
- */
-struct scif_qp {
- u64 magic;
-#define SCIFEP_MAGIC 0x5c1f000000005c1fULL
- struct scif_rb outbound_q;
- struct scif_rb inbound_q;
-
- u32 local_write __aligned(64);
- u32 local_read __aligned(64);
- struct scif_qp *remote_qp;
- dma_addr_t local_buf;
- dma_addr_t local_qp;
- dma_addr_t remote_buf;
- u32 qp_state;
-#define SCIF_QP_OFFLINE 0xdead
-#define SCIF_QP_ONLINE 0xc0de
- spinlock_t send_lock;
- spinlock_t recv_lock;
-};
-
-/*
- * struct scif_loopb_msg - An element in the loopback Node QP message list.
- *
- * @msg: The SCIF node QP message
- * @list: link in the list of messages
- */
-struct scif_loopb_msg {
- struct scifmsg msg;
- struct list_head list;
-};
-
-int scif_nodeqp_send(struct scif_dev *scifdev, struct scifmsg *msg);
-int _scif_nodeqp_send(struct scif_dev *scifdev, struct scifmsg *msg);
-void scif_nodeqp_intrhandler(struct scif_dev *scifdev, struct scif_qp *qp);
-int scif_loopb_msg_handler(struct scif_dev *scifdev, struct scif_qp *qp);
-int scif_setup_qp(struct scif_dev *scifdev);
-int scif_qp_response(phys_addr_t phys, struct scif_dev *dev);
-int scif_setup_qp_connect(struct scif_qp *qp, dma_addr_t *qp_offset,
- int local_size, struct scif_dev *scifdev);
-int scif_setup_qp_accept(struct scif_qp *qp, dma_addr_t *qp_offset,
- dma_addr_t phys, int local_size,
- struct scif_dev *scifdev);
-int scif_setup_qp_connect_response(struct scif_dev *scifdev,
- struct scif_qp *qp, u64 payload);
-int scif_setup_loopback_qp(struct scif_dev *scifdev);
-int scif_destroy_loopback_qp(struct scif_dev *scifdev);
-void scif_poll_qp_state(struct work_struct *work);
-void scif_destroy_p2p(struct scif_dev *scifdev);
-void scif_send_exit(struct scif_dev *scifdev);
-static inline struct device *scif_get_peer_dev(struct scif_dev *scifdev)
-{
- struct scif_peer_dev *spdev;
- struct device *spdev_ret;
-
- rcu_read_lock();
- spdev = rcu_dereference(scifdev->spdev);
- if (spdev)
- spdev_ret = get_device(&spdev->dev);
- else
- spdev_ret = ERR_PTR(-ENODEV);
- rcu_read_unlock();
- return spdev_ret;
-}
-
-static inline void scif_put_peer_dev(struct device *dev)
-{
- put_device(dev);
-}
-#endif /* SCIF_NODEQP */
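
The scif_get_peer_dev()/scif_put_peer_dev() pair above is the classic RCU lookup-and-hold idiom: dereference an RCU-protected pointer, take a device reference while still inside the read-side critical section, and let the caller drop it later. Below is a minimal sketch of the same pattern using only driver-core and RCU primitives; the names my_peer, my_node and my_node_get_peer are invented for illustration and are not part of the driver.

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/rcupdate.h>

    struct my_peer {
            struct device dev;
    };

    struct my_node {
            struct my_peer __rcu *peer;
    };

    static struct device *my_node_get_peer(struct my_node *node)
    {
            struct my_peer *peer;
            struct device *ret;

            rcu_read_lock();
            peer = rcu_dereference(node->peer);     /* may be cleared concurrently */
            if (peer)
                    ret = get_device(&peer->dev);   /* pin it before leaving RCU */
            else
                    ret = ERR_PTR(-ENODEV);
            rcu_read_unlock();
            return ret;                             /* caller drops with put_device() */
    }

Taking the reference inside rcu_read_lock() is what makes the pointer safe to use after the critical section ends; the publisher only tears the object down after clearing the pointer and waiting in synchronize_rcu(), as scif_peer_unregister_device() does.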
diff --git a/drivers/misc/mic/scif/scif_peer_bus.c b/drivers/misc/mic/scif/scif_peer_bus.c
deleted file mode 100644
index 6d608308bb60..000000000000
--- a/drivers/misc/mic/scif/scif_peer_bus.c
+++ /dev/null
@@ -1,175 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Intel SCIF driver.
- */
-#include "scif_main.h"
-#include "../bus/scif_bus.h"
-#include "scif_peer_bus.h"
-
-static inline struct scif_peer_dev *
-dev_to_scif_peer(struct device *dev)
-{
- return container_of(dev, struct scif_peer_dev, dev);
-}
-
-struct bus_type scif_peer_bus = {
- .name = "scif_peer_bus",
-};
-
-static void scif_peer_release_dev(struct device *d)
-{
- struct scif_peer_dev *sdev = dev_to_scif_peer(d);
- struct scif_dev *scifdev = &scif_dev[sdev->dnode];
-
- scif_cleanup_scifdev(scifdev);
- kfree(sdev);
-}
-
-static int scif_peer_initialize_device(struct scif_dev *scifdev)
-{
- struct scif_peer_dev *spdev;
- int ret;
-
- spdev = kzalloc(sizeof(*spdev), GFP_KERNEL);
- if (!spdev) {
- ret = -ENOMEM;
- goto err;
- }
-
- spdev->dev.parent = scifdev->sdev->dev.parent;
- spdev->dev.release = scif_peer_release_dev;
- spdev->dnode = scifdev->node;
- spdev->dev.bus = &scif_peer_bus;
- dev_set_name(&spdev->dev, "scif_peer-dev%u", spdev->dnode);
-
- device_initialize(&spdev->dev);
- get_device(&spdev->dev);
- rcu_assign_pointer(scifdev->spdev, spdev);
-
- mutex_lock(&scif_info.conflock);
- scif_info.total++;
- scif_info.maxid = max_t(u32, spdev->dnode, scif_info.maxid);
- mutex_unlock(&scif_info.conflock);
- return 0;
-err:
- dev_err(&scifdev->sdev->dev,
- "dnode %d: initialize_device rc %d\n", scifdev->node, ret);
- return ret;
-}
-
-static int scif_peer_add_device(struct scif_dev *scifdev)
-{
- struct scif_peer_dev *spdev = rcu_dereference(scifdev->spdev);
- char pool_name[16];
- int ret;
-
- ret = device_add(&spdev->dev);
- put_device(&spdev->dev);
- if (ret) {
- dev_err(&scifdev->sdev->dev,
- "dnode %d: peer device_add failed\n", scifdev->node);
- goto put_spdev;
- }
-
- scnprintf(pool_name, sizeof(pool_name), "scif-%d", spdev->dnode);
- scifdev->signal_pool = dmam_pool_create(pool_name, &scifdev->sdev->dev,
- sizeof(struct scif_status), 1,
- 0);
- if (!scifdev->signal_pool) {
- dev_err(&scifdev->sdev->dev,
- "dnode %d: dmam_pool_create failed\n", scifdev->node);
- ret = -ENOMEM;
- goto del_spdev;
- }
- dev_dbg(&spdev->dev, "Added peer dnode %d\n", spdev->dnode);
- return 0;
-del_spdev:
- device_del(&spdev->dev);
-put_spdev:
- RCU_INIT_POINTER(scifdev->spdev, NULL);
- synchronize_rcu();
- put_device(&spdev->dev);
-
- mutex_lock(&scif_info.conflock);
- scif_info.total--;
- mutex_unlock(&scif_info.conflock);
- return ret;
-}
-
-void scif_add_peer_device(struct work_struct *work)
-{
- struct scif_dev *scifdev = container_of(work, struct scif_dev,
- peer_add_work);
-
- scif_peer_add_device(scifdev);
-}
-
-/*
- * Peer device registration is split into a device_initialize and a device_add.
- * The reason for doing this is as follows: First, peer device registration
- * itself cannot be done in the message processing thread and must be delegated
- * to another workqueue, otherwise if SCIF client probe, called during peer
- * device registration, calls scif_connect(..), it will block the message
- * processing thread causing a deadlock. Next, device_initialize is done in the
- * "top-half" message processing thread and device_add in the "bottom-half"
- * workqueue. If this is not done, SCIF_CNCT_REQ message processing executing
- * concurrently with SCIF_INIT message processing is unable to get a reference
- * on the peer device, thereby failing the connect request.
- */
-void scif_peer_register_device(struct scif_dev *scifdev)
-{
- int ret;
-
- mutex_lock(&scifdev->lock);
- ret = scif_peer_initialize_device(scifdev);
- if (ret)
- goto exit;
- schedule_work(&scifdev->peer_add_work);
-exit:
- mutex_unlock(&scifdev->lock);
-}
-
-int scif_peer_unregister_device(struct scif_dev *scifdev)
-{
- struct scif_peer_dev *spdev;
-
- mutex_lock(&scifdev->lock);
-	/* Flush work to ensure device registration is complete */
- flush_work(&scifdev->peer_add_work);
-
- /*
- * Continue holding scifdev->lock since theoretically unregister_device
- * can be called simultaneously from multiple threads
- */
- spdev = rcu_dereference(scifdev->spdev);
- if (!spdev) {
- mutex_unlock(&scifdev->lock);
- return -ENODEV;
- }
-
- RCU_INIT_POINTER(scifdev->spdev, NULL);
- synchronize_rcu();
- mutex_unlock(&scifdev->lock);
-
- dev_dbg(&spdev->dev, "Removing peer dnode %d\n", spdev->dnode);
- device_unregister(&spdev->dev);
-
- mutex_lock(&scif_info.conflock);
- scif_info.total--;
- mutex_unlock(&scif_info.conflock);
- return 0;
-}
-
-int scif_peer_bus_init(void)
-{
- return bus_register(&scif_peer_bus);
-}
-
-void scif_peer_bus_exit(void)
-{
- bus_unregister(&scif_peer_bus);
-}
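
The long comment above scif_peer_register_device() explains the two-phase registration: device_initialize() runs synchronously so the device can be referenced at once, while device_add(), which may invoke client probe callbacks that block, is pushed to a work item. A reduced sketch of that split under the usual driver-core rules; my_dev, my_add_work_fn and my_register are invented names, not driver code.

    #include <linux/device.h>
    #include <linux/workqueue.h>

    struct my_dev {
            struct device dev;
            struct work_struct add_work;
    };

    static void my_add_work_fn(struct work_struct *work)
    {
            struct my_dev *d = container_of(work, struct my_dev, add_work);

            /* bottom half: may sleep and may trigger driver probe */
            if (device_add(&d->dev))
                    put_device(&d->dev);    /* drop the initial ref on failure */
    }

    static void my_register(struct my_dev *d)
    {
            /* top half: cheap; the device is referenceable from here on */
            device_initialize(&d->dev);
            INIT_WORK(&d->add_work, my_add_work_fn);
            schedule_work(&d->add_work);
    }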
diff --git a/drivers/misc/mic/scif/scif_peer_bus.h b/drivers/misc/mic/scif/scif_peer_bus.h
deleted file mode 100644
index 2ea4c51c18c1..000000000000
--- a/drivers/misc/mic/scif/scif_peer_bus.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Intel SCIF driver.
- */
-#ifndef _SCIF_PEER_BUS_H_
-#define _SCIF_PEER_BUS_H_
-
-#include <linux/device.h>
-#include <linux/mic_common.h>
-#include <linux/scif.h>
-
-struct scif_dev;
-
-void scif_add_peer_device(struct work_struct *work);
-void scif_peer_register_device(struct scif_dev *sdev);
-int scif_peer_unregister_device(struct scif_dev *scifdev);
-int scif_peer_bus_init(void);
-void scif_peer_bus_exit(void);
-#endif /* _SCIF_PEER_BUS_H_ */
diff --git a/drivers/misc/mic/scif/scif_ports.c b/drivers/misc/mic/scif/scif_ports.c
deleted file mode 100644
index 4bdb5ef9a139..000000000000
--- a/drivers/misc/mic/scif/scif_ports.c
+++ /dev/null
@@ -1,116 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Intel SCIF driver.
- */
-#include <linux/idr.h>
-
-#include "scif_main.h"
-
-#define SCIF_PORT_COUNT 0x10000 /* Ports available */
-
-struct idr scif_ports;
-
-/**
- * struct scif_port - SCIF port information
- *
- * @ref_cnt: Reference count since there can be multiple endpoints
- * created via scif_accept(..) simultaneously using a port.
- */
-struct scif_port {
- int ref_cnt;
-};
-
-/**
- * __scif_get_port - Reserve a port # for SCIF in the range
- * [@start, @end) and add it to the global list.
- * @start: lowest port # to be reserved (inclusive).
- * @end: highest port # to be reserved (exclusive).
- *
- * @return : Allocated SCIF port #, or -ENOSPC if port unavailable.
- * On memory allocation failure, returns -ENOMEM.
- */
-static int __scif_get_port(int start, int end)
-{
- int id;
- struct scif_port *port = kzalloc(sizeof(*port), GFP_ATOMIC);
-
- if (!port)
- return -ENOMEM;
- spin_lock(&scif_info.port_lock);
- id = idr_alloc(&scif_ports, port, start, end, GFP_ATOMIC);
- if (id >= 0)
- port->ref_cnt++;
- spin_unlock(&scif_info.port_lock);
- return id;
-}
-
-/**
- * scif_rsrv_port - Reserve a specified port # for SCIF.
- * @port : port # to be reserved.
- *
- * @return : Allocated SCIF port #, or -ENOSPC if port unavailable.
- * On memory allocation failure, returns -ENOMEM.
- */
-int scif_rsrv_port(u16 port)
-{
- return __scif_get_port(port, port + 1);
-}
-
-/**
- * scif_get_new_port - Get and reserve any port # for SCIF in the range
- * SCIF_PORT_RSVD + 1 to SCIF_PORT_COUNT - 1.
- *
- * @return : Allocated SCIF port #, or -ENOSPC if no ports available.
- * On memory allocation failure, returns -ENOMEM.
- */
-int scif_get_new_port(void)
-{
- return __scif_get_port(SCIF_PORT_RSVD + 1, SCIF_PORT_COUNT);
-}
-
-/**
- * scif_get_port - Increment the reference count for a SCIF port
- * @id : SCIF port
- *
- * @return : None
- */
-void scif_get_port(u16 id)
-{
- struct scif_port *port;
-
- if (!id)
- return;
- spin_lock(&scif_info.port_lock);
- port = idr_find(&scif_ports, id);
- if (port)
- port->ref_cnt++;
- spin_unlock(&scif_info.port_lock);
-}
-
-/**
- * scif_put_port - Release a reserved SCIF port
- * @id : SCIF port to be released.
- *
- * @return : None
- */
-void scif_put_port(u16 id)
-{
- struct scif_port *port;
-
- if (!id)
- return;
- spin_lock(&scif_info.port_lock);
- port = idr_find(&scif_ports, id);
- if (port) {
- port->ref_cnt--;
- if (!port->ref_cnt) {
- idr_remove(&scif_ports, id);
- kfree(port);
- }
- }
- spin_unlock(&scif_info.port_lock);
-}
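
The port table above is an IDR whose entries carry a reference count, so several endpoints created through scif_accept() can share one port. A bare-bones sketch of the allocation step, with the spinlock, the u16 bounds and the reserved-range policy omitted; all names here are invented.

    #include <linux/idr.h>
    #include <linux/slab.h>

    static DEFINE_IDR(my_ports);

    /* Returns the reserved id in [start, end), -ENOSPC if taken, -ENOMEM on OOM. */
    static int my_port_reserve(int start, int end)
    {
            int *ref = kzalloc(sizeof(*ref), GFP_KERNEL);
            int id;

            if (!ref)
                    return -ENOMEM;
            id = idr_alloc(&my_ports, ref, start, end, GFP_KERNEL);
            if (id < 0)
                    kfree(ref);     /* range exhausted or id already in use */
            else
                    (*ref)++;       /* first reference on the port */
            return id;
    }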
diff --git a/drivers/misc/mic/scif/scif_rb.c b/drivers/misc/mic/scif/scif_rb.c
deleted file mode 100644
index e425882ae06d..000000000000
--- a/drivers/misc/mic/scif/scif_rb.c
+++ /dev/null
@@ -1,240 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Intel SCIF driver.
- */
-#include <linux/circ_buf.h>
-#include <linux/types.h>
-#include <linux/io.h>
-#include <linux/errno.h>
-
-#include "scif_rb.h"
-
-#define scif_rb_ring_cnt(head, tail, size) CIRC_CNT(head, tail, size)
-#define scif_rb_ring_space(head, tail, size) CIRC_SPACE(head, tail, size)
-
-/**
- * scif_rb_init - Initializes the ring buffer
- * @rb: ring buffer
- * @read_ptr: A pointer to the read offset
- * @write_ptr: A pointer to the write offset
- * @rb_base: A pointer to the base of the ring buffer
- * @size: log2 of the ring buffer size in bytes
- */
-void scif_rb_init(struct scif_rb *rb, u32 *read_ptr, u32 *write_ptr,
- void *rb_base, u8 size)
-{
- rb->rb_base = rb_base;
- rb->size = (1 << size);
- rb->read_ptr = read_ptr;
- rb->write_ptr = write_ptr;
- rb->current_read_offset = *read_ptr;
- rb->current_write_offset = *write_ptr;
-}
-
-/* Copies a message to the ring buffer -- handles the wrap around case */
-static void memcpy_torb(struct scif_rb *rb, void *header,
- void *msg, u32 size)
-{
- u32 size1, size2;
-
- if (header + size >= rb->rb_base + rb->size) {
- /* Need to call two copies if it wraps around */
- size1 = (u32)(rb->rb_base + rb->size - header);
- size2 = size - size1;
- memcpy_toio((void __iomem __force *)header, msg, size1);
- memcpy_toio((void __iomem __force *)rb->rb_base,
- msg + size1, size2);
- } else {
- memcpy_toio((void __iomem __force *)header, msg, size);
- }
-}
-
-/* Copies a message from the ring buffer -- handles the wrap around case */
-static void memcpy_fromrb(struct scif_rb *rb, void *header,
- void *msg, u32 size)
-{
- u32 size1, size2;
-
- if (header + size >= rb->rb_base + rb->size) {
- /* Need to call two copies if it wraps around */
- size1 = (u32)(rb->rb_base + rb->size - header);
- size2 = size - size1;
- memcpy_fromio(msg, (void __iomem __force *)header, size1);
- memcpy_fromio(msg + size1,
- (void __iomem __force *)rb->rb_base, size2);
- } else {
- memcpy_fromio(msg, (void __iomem __force *)header, size);
- }
-}
-
-/**
- * scif_rb_space - Query space available for writing to the RB
- * @rb: ring buffer
- *
- * Return: size available for writing to RB in bytes.
- */
-u32 scif_rb_space(struct scif_rb *rb)
-{
- rb->current_read_offset = *rb->read_ptr;
- /*
- * Update from the HW read pointer only once the peer has exposed the
- * new empty slot. This barrier is paired with the memory barrier
- * in scif_rb_update_read_ptr().
- */
- mb();
- return scif_rb_ring_space(rb->current_write_offset,
- rb->current_read_offset, rb->size);
-}
-
-/**
- * scif_rb_write - Write a message to the RB
- * @rb: ring buffer
- * @msg: buffer to send the message. Must be at least size bytes long
- * @size: the size (in bytes) to be copied to the RB
- *
- * This API does not block if there isn't enough space in the RB.
- * Returns: 0 on success or -ENOMEM on failure
- */
-int scif_rb_write(struct scif_rb *rb, void *msg, u32 size)
-{
- void *header;
-
- if (scif_rb_space(rb) < size)
- return -ENOMEM;
- header = rb->rb_base + rb->current_write_offset;
- memcpy_torb(rb, header, msg, size);
- /*
-	 * Only the cached local write offset is advanced here; the shared
-	 * write pointer is not exposed to the peer until scif_rb_commit().
- */
- rb->current_write_offset =
- (rb->current_write_offset + size) & (rb->size - 1);
- return 0;
-}
-
-/**
- * scif_rb_commit - Commit previously written messages so the peer can fetch them
- * @rb: ring buffer
- */
-void scif_rb_commit(struct scif_rb *rb)
-{
- /*
-	 * We must ensure ordering of all the data written previously
-	 * before we expose the new message to the peer by updating the
-	 * write_ptr. This write barrier is paired with the read barrier
-	 * in scif_rb_count(..).
- */
- wmb();
- WRITE_ONCE(*rb->write_ptr, rb->current_write_offset);
-#ifdef CONFIG_INTEL_MIC_CARD
- /*
- * X100 Si bug: For the case where a Core is performing an EXT_WR
- * followed by a Doorbell Write, the Core must perform two EXT_WR to the
- * same address with the same data before it does the Doorbell Write.
- * This way, if ordering is violated for the Interrupt Message, it will
- * fall just behind the first Posted associated with the first EXT_WR.
- */
- WRITE_ONCE(*rb->write_ptr, rb->current_write_offset);
-#endif
-}
-
-/**
- * scif_rb_get - Get the next message from the ring buffer
- * @rb: ring buffer
- * @size: Number of bytes to be read
- *
- * Return: NULL if fewer than @size bytes are available, otherwise a
- * pointer to the start of the next message
- */
-static void *scif_rb_get(struct scif_rb *rb, u32 size)
-{
- void *header = NULL;
-
- if (scif_rb_count(rb, size) >= size)
- header = rb->rb_base + rb->current_read_offset;
- return header;
-}
-
-/**
- * scif_rb_get_next - Read from ring buffer.
- * @rb: ring buffer
- * @msg: buffer to hold the message. Must be at least size bytes long
- * @size: Number of bytes to be read
- *
- * Return: number of bytes read if available bytes are >= size, otherwise
- * returns zero.
- */
-u32 scif_rb_get_next(struct scif_rb *rb, void *msg, u32 size)
-{
- void *header = NULL;
- int read_size = 0;
-
- header = scif_rb_get(rb, size);
- if (header) {
- u32 next_cmd_offset =
- (rb->current_read_offset + size) & (rb->size - 1);
-
- read_size = size;
- rb->current_read_offset = next_cmd_offset;
- memcpy_fromrb(rb, header, msg, size);
- }
- return read_size;
-}
-
-/**
- * scif_rb_update_read_ptr - Publish the read pointer to the peer
- * @rb: ring buffer
- *
- * Exposes the consumed space so that the writer can reuse it.
- */
-void scif_rb_update_read_ptr(struct scif_rb *rb)
-{
- u32 new_offset;
-
- new_offset = rb->current_read_offset;
- /*
-	 * We must ensure ordering of all the data committed or read
- * previously before we expose the empty slot to the peer by updating
- * the read_ptr. This barrier is paired with the memory barrier in
- * scif_rb_space(..)
- */
- mb();
- WRITE_ONCE(*rb->read_ptr, new_offset);
-#ifdef CONFIG_INTEL_MIC_CARD
- /*
- * X100 Si Bug: For the case where a Core is performing an EXT_WR
- * followed by a Doorbell Write, the Core must perform two EXT_WR to the
- * same address with the same data before it does the Doorbell Write.
- * This way, if ordering is violated for the Interrupt Message, it will
- * fall just behind the first Posted associated with the first EXT_WR.
- */
- WRITE_ONCE(*rb->read_ptr, new_offset);
-#endif
-}
-
-/**
- * scif_rb_count
- * @rb: ring buffer
- * @size: Number of bytes expected to be read
- *
- * Return: number of bytes that can be read from the RB
- */
-u32 scif_rb_count(struct scif_rb *rb, u32 size)
-{
- if (scif_rb_ring_cnt(rb->current_write_offset,
- rb->current_read_offset,
- rb->size) < size) {
- rb->current_write_offset = *rb->write_ptr;
- /*
-		 * Refresh from the HW write pointer only when the cached
-		 * count is insufficient, and only once the peer has exposed
-		 * the new message. This read barrier is paired with the
-		 * write barrier in scif_rb_commit(..).
- */
- smp_rmb();
- }
- return scif_rb_ring_cnt(rb->current_write_offset,
- rb->current_read_offset,
- rb->size);
-}
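
Taken together, the functions above implement a one-way, lockless byte ring; the driver wires two of them back to back to form a queue pair. A sketch of the producer and consumer call sequences, assuming a single writer and a single reader per ring; send_msg and recv_msg are illustrative glue, not driver code.

    #include <linux/errno.h>

    #include "scif_rb.h"

    /* Producer: copy the message in, then publish it to the peer. */
    static int send_msg(struct scif_rb *out, void *msg, u32 size)
    {
            int err = scif_rb_write(out, msg, size);        /* copy into the ring */

            if (!err)
                    scif_rb_commit(out);    /* expose the new write pointer */
            return err;                     /* -ENOMEM when the ring is full */
    }

    /* Consumer: drain one message, then hand the space back. */
    static int recv_msg(struct scif_rb *in, void *msg, u32 size)
    {
            if (scif_rb_get_next(in, msg, size) != size)
                    return -EAGAIN;                 /* not enough bytes yet */
            scif_rb_update_read_ptr(in);            /* writer may reuse the space */
            return 0;
    }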
diff --git a/drivers/misc/mic/scif/scif_rb.h b/drivers/misc/mic/scif/scif_rb.h
deleted file mode 100644
index 166dffe3093d..000000000000
--- a/drivers/misc/mic/scif/scif_rb.h
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Intel SCIF driver.
- */
-#ifndef SCIF_RB_H
-#define SCIF_RB_H
-/*
- * This file describes a general purpose, byte based ring buffer. Writers to the
- * ring buffer need to synchronize using a lock. The same is true for readers,
- * although in practice, the ring buffer has a single reader. It is lockless
- * between producer and consumer so it can handle being used across the PCIe
- * bus. The ring buffer ensures that there are no reads across the PCIe bus for
- * performance reasons. Two of these are used to form a single bidirectional
- * queue-pair across PCIe.
- */
-/*
- * struct scif_rb - SCIF Ring Buffer
- *
- * @rb_base: The base of the memory used for storing RB messages
- * @read_ptr: Pointer to the read offset
- * @write_ptr: Pointer to the write offset
- * @size: Size of the memory in rb_base
- * @current_read_offset: Cached read offset for performance
- * @current_write_offset: Cached write offset for performance
- */
-struct scif_rb {
- void *rb_base;
- u32 *read_ptr;
- u32 *write_ptr;
- u32 size;
- u32 current_read_offset;
- u32 current_write_offset;
-};
-
-/* methods used by both */
-void scif_rb_init(struct scif_rb *rb, u32 *read_ptr, u32 *write_ptr,
- void *rb_base, u8 size);
-/* writer only methods */
-/* write a new command, then scif_rb_commit() */
-int scif_rb_write(struct scif_rb *rb, void *msg, u32 size);
-/* after write(), then scif_rb_commit() */
-void scif_rb_commit(struct scif_rb *rb);
-/* query space available for writing to an RB */
-u32 scif_rb_space(struct scif_rb *rb);
-
-/* reader only methods */
-/* read a new message from the ring buffer of size bytes */
-u32 scif_rb_get_next(struct scif_rb *rb, void *msg, u32 size);
-/* update the read pointer so that the space can be reused */
-void scif_rb_update_read_ptr(struct scif_rb *rb);
-/* count the number of bytes that can be read */
-u32 scif_rb_count(struct scif_rb *rb, u32 size);
-#endif
diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c
deleted file mode 100644
index 18fb9d8b8a4b..000000000000
--- a/drivers/misc/mic/scif/scif_rma.c
+++ /dev/null
@@ -1,1760 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2015 Intel Corporation.
- *
- * Intel SCIF driver.
- */
-#include <linux/intel-iommu.h>
-#include <linux/pagemap.h>
-#include <linux/sched/mm.h>
-#include <linux/sched/signal.h>
-
-#include "scif_main.h"
-#include "scif_map.h"
-
-/* Used to skip ulimit checks for registrations with SCIF_MAP_KERNEL flag */
-#define SCIF_MAP_ULIMIT 0x40
-
-bool scif_ulimit_check = true;
-
-/**
- * scif_rma_ep_init:
- * @ep: end point
- *
- * Initialize RMA per EP data structures.
- */
-void scif_rma_ep_init(struct scif_endpt *ep)
-{
- struct scif_endpt_rma_info *rma = &ep->rma_info;
-
- mutex_init(&rma->rma_lock);
- init_iova_domain(&rma->iovad, PAGE_SIZE, SCIF_IOVA_START_PFN);
- spin_lock_init(&rma->tc_lock);
- mutex_init(&rma->mmn_lock);
- INIT_LIST_HEAD(&rma->reg_list);
- INIT_LIST_HEAD(&rma->remote_reg_list);
- atomic_set(&rma->tw_refcount, 0);
- atomic_set(&rma->tcw_refcount, 0);
- atomic_set(&rma->tcw_total_pages, 0);
- atomic_set(&rma->fence_refcount, 0);
-
- rma->async_list_del = 0;
- rma->dma_chan = NULL;
- INIT_LIST_HEAD(&rma->mmn_list);
- INIT_LIST_HEAD(&rma->vma_list);
- init_waitqueue_head(&rma->markwq);
-}
-
-/**
- * scif_rma_ep_can_uninit:
- * @ep: end point
- *
- * Returns 1 if an endpoint can be uninitialized and 0 otherwise.
- */
-int scif_rma_ep_can_uninit(struct scif_endpt *ep)
-{
- int ret = 0;
-
- mutex_lock(&ep->rma_info.rma_lock);
- /* Destroy RMA Info only if both lists are empty */
- if (list_empty(&ep->rma_info.reg_list) &&
- list_empty(&ep->rma_info.remote_reg_list) &&
- list_empty(&ep->rma_info.mmn_list) &&
- !atomic_read(&ep->rma_info.tw_refcount) &&
- !atomic_read(&ep->rma_info.tcw_refcount) &&
- !atomic_read(&ep->rma_info.fence_refcount))
- ret = 1;
- mutex_unlock(&ep->rma_info.rma_lock);
- return ret;
-}
-
-/**
- * scif_create_pinned_pages:
- * @nr_pages: number of pages in window
- * @prot: read/write protection
- *
- * Allocate and prepare a set of pinned pages.
- */
-static struct scif_pinned_pages *
-scif_create_pinned_pages(int nr_pages, int prot)
-{
- struct scif_pinned_pages *pin;
-
- might_sleep();
- pin = scif_zalloc(sizeof(*pin));
- if (!pin)
- goto error;
-
- pin->pages = scif_zalloc(nr_pages * sizeof(*pin->pages));
- if (!pin->pages)
- goto error_free_pinned_pages;
-
- pin->prot = prot;
- pin->magic = SCIFEP_MAGIC;
- return pin;
-
-error_free_pinned_pages:
- scif_free(pin, sizeof(*pin));
-error:
- return NULL;
-}
-
-/**
- * scif_destroy_pinned_pages:
- * @pin: A set of pinned pages.
- *
- * Deallocate resources for pinned pages.
- */
-static int scif_destroy_pinned_pages(struct scif_pinned_pages *pin)
-{
-	int writeable = pin->prot & SCIF_PROT_WRITE;
-	int kernel = SCIF_MAP_KERNEL & pin->map_flags;
-
-	/*
-	 * Kernel-mapped registrations were never pinned via
-	 * pin_user_pages(), so only user registrations are unpinned here.
-	 * (The old "if (kernel)" branch was dead code: its "!kernel" test
-	 * could never be true inside it.)
-	 */
-	if (!kernel)
-		unpin_user_pages_dirty_lock(pin->pages, pin->nr_pages,
-					    writeable);
- scif_free(pin->pages,
- pin->nr_pages * sizeof(*pin->pages));
- scif_free(pin, sizeof(*pin));
- return 0;
-}
-
-/*
- * scif_create_window:
- * @ep: end point
- * @nr_pages: number of pages
- * @offset: registration offset
- * @temp: true if a temporary window is being created
- *
- * Allocate and prepare a self registration window.
- */
-struct scif_window *scif_create_window(struct scif_endpt *ep, int nr_pages,
- s64 offset, bool temp)
-{
- struct scif_window *window;
-
- might_sleep();
- window = scif_zalloc(sizeof(*window));
- if (!window)
- goto error;
-
- window->dma_addr = scif_zalloc(nr_pages * sizeof(*window->dma_addr));
- if (!window->dma_addr)
- goto error_free_window;
-
- window->num_pages = scif_zalloc(nr_pages * sizeof(*window->num_pages));
- if (!window->num_pages)
- goto error_free_window;
-
- window->offset = offset;
- window->ep = (u64)ep;
- window->magic = SCIFEP_MAGIC;
- window->reg_state = OP_IDLE;
- init_waitqueue_head(&window->regwq);
- window->unreg_state = OP_IDLE;
- init_waitqueue_head(&window->unregwq);
- INIT_LIST_HEAD(&window->list);
- window->type = SCIF_WINDOW_SELF;
- window->temp = temp;
- return window;
-
-error_free_window:
- scif_free(window->dma_addr,
- nr_pages * sizeof(*window->dma_addr));
- scif_free(window, sizeof(*window));
-error:
- return NULL;
-}
-
-/**
- * scif_destroy_incomplete_window:
- * @ep: end point
- * @window: registration window
- *
- * Deallocate resources for self window.
- */
-static void scif_destroy_incomplete_window(struct scif_endpt *ep,
- struct scif_window *window)
-{
- int err;
- int nr_pages = window->nr_pages;
- struct scif_allocmsg *alloc = &window->alloc_handle;
- struct scifmsg msg;
-
-retry:
- /* Wait for a SCIF_ALLOC_GNT/REJ message */
- err = wait_event_timeout(alloc->allocwq,
- alloc->state != OP_IN_PROGRESS,
- SCIF_NODE_ALIVE_TIMEOUT);
- if (!err && scifdev_alive(ep))
- goto retry;
-
- mutex_lock(&ep->rma_info.rma_lock);
- if (alloc->state == OP_COMPLETED) {
- msg.uop = SCIF_FREE_VIRT;
- msg.src = ep->port;
- msg.payload[0] = ep->remote_ep;
- msg.payload[1] = window->alloc_handle.vaddr;
- msg.payload[2] = (u64)window;
- msg.payload[3] = SCIF_REGISTER;
- _scif_nodeqp_send(ep->remote_dev, &msg);
- }
- mutex_unlock(&ep->rma_info.rma_lock);
-
- scif_free_window_offset(ep, window, window->offset);
- scif_free(window->dma_addr, nr_pages * sizeof(*window->dma_addr));
- scif_free(window->num_pages, nr_pages * sizeof(*window->num_pages));
- scif_free(window, sizeof(*window));
-}
-
-/**
- * scif_unmap_window:
- * @remote_dev: SCIF remote device
- * @window: registration window
- *
- * Delete any DMA mappings created for a registered self window
- */
-void scif_unmap_window(struct scif_dev *remote_dev, struct scif_window *window)
-{
- int j;
-
- if (scif_is_iommu_enabled() && !scifdev_self(remote_dev)) {
- if (window->st) {
- dma_unmap_sg(&remote_dev->sdev->dev,
- window->st->sgl, window->st->nents,
- DMA_BIDIRECTIONAL);
- sg_free_table(window->st);
- kfree(window->st);
- window->st = NULL;
- }
- } else {
- for (j = 0; j < window->nr_contig_chunks; j++) {
- if (window->dma_addr[j]) {
- scif_unmap_single(window->dma_addr[j],
- remote_dev,
- window->num_pages[j] <<
- PAGE_SHIFT);
- window->dma_addr[j] = 0x0;
- }
- }
- }
-}
-
-static inline struct mm_struct *__scif_acquire_mm(void)
-{
- if (scif_ulimit_check)
- return get_task_mm(current);
- return NULL;
-}
-
-static inline void __scif_release_mm(struct mm_struct *mm)
-{
- if (mm)
- mmput(mm);
-}
-
-static inline int
-__scif_dec_pinned_vm_lock(struct mm_struct *mm,
- int nr_pages)
-{
- if (!mm || !nr_pages || !scif_ulimit_check)
- return 0;
-
- atomic64_sub(nr_pages, &mm->pinned_vm);
- return 0;
-}
-
-static inline int __scif_check_inc_pinned_vm(struct mm_struct *mm,
- int nr_pages)
-{
- unsigned long locked, lock_limit;
-
- if (!mm || !nr_pages || !scif_ulimit_check)
- return 0;
-
- lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
- locked = atomic64_add_return(nr_pages, &mm->pinned_vm);
-
- if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
- atomic64_sub(nr_pages, &mm->pinned_vm);
- dev_err(scif_info.mdev.this_device,
- "locked(%lu) > lock_limit(%lu)\n",
- locked, lock_limit);
- return -ENOMEM;
- }
- return 0;
-}
-
-/**
- * scif_destroy_window:
- * @ep: end point
- * @window: registration window
- *
- * Deallocate resources for self window.
- */
-int scif_destroy_window(struct scif_endpt *ep, struct scif_window *window)
-{
- int j;
- struct scif_pinned_pages *pinned_pages = window->pinned_pages;
- int nr_pages = window->nr_pages;
-
- might_sleep();
- if (!window->temp && window->mm) {
- __scif_dec_pinned_vm_lock(window->mm, window->nr_pages);
- __scif_release_mm(window->mm);
- window->mm = NULL;
- }
-
- scif_free_window_offset(ep, window, window->offset);
- scif_unmap_window(ep->remote_dev, window);
- /*
- * Decrement references for this set of pinned pages from
- * this window.
- */
- j = atomic_sub_return(1, &pinned_pages->ref_count);
- if (j < 0)
- dev_err(scif_info.mdev.this_device,
- "%s %d incorrect ref count %d\n",
- __func__, __LINE__, j);
- /*
- * If the ref count for pinned_pages is zero then someone
- * has already called scif_unpin_pages() for it and we should
- * destroy the page cache.
- */
- if (!j)
- scif_destroy_pinned_pages(window->pinned_pages);
- scif_free(window->dma_addr, nr_pages * sizeof(*window->dma_addr));
- scif_free(window->num_pages, nr_pages * sizeof(*window->num_pages));
- window->magic = 0;
- scif_free(window, sizeof(*window));
- return 0;
-}
-
-/**
- * scif_create_remote_lookup:
- * @remote_dev: SCIF remote device
- * @window: remote window
- *
- * Allocate and prepare lookup entries for the remote
- * end to copy over the physical addresses.
- * Returns 0 on success and appropriate errno on failure.
- */
-static int scif_create_remote_lookup(struct scif_dev *remote_dev,
- struct scif_window *window)
-{
- int i, j, err = 0;
- int nr_pages = window->nr_pages;
- bool vmalloc_dma_phys, vmalloc_num_pages;
-
- might_sleep();
- /* Map window */
- err = scif_map_single(&window->mapped_offset,
- window, remote_dev, sizeof(*window));
- if (err)
- goto error_window;
-
- /* Compute the number of lookup entries. 21 == 2MB Shift */
-	window->nr_lookup = ALIGN(nr_pages * PAGE_SIZE,
-				  (2 * 1024 * 1024)) >> 21;
-
- window->dma_addr_lookup.lookup =
- scif_alloc_coherent(&window->dma_addr_lookup.offset,
- remote_dev, window->nr_lookup *
- sizeof(*window->dma_addr_lookup.lookup),
- GFP_KERNEL | __GFP_ZERO);
- if (!window->dma_addr_lookup.lookup) {
- err = -ENOMEM;
- goto error_window;
- }
-
- window->num_pages_lookup.lookup =
- scif_alloc_coherent(&window->num_pages_lookup.offset,
- remote_dev, window->nr_lookup *
- sizeof(*window->num_pages_lookup.lookup),
- GFP_KERNEL | __GFP_ZERO);
- if (!window->num_pages_lookup.lookup) {
- err = -ENOMEM;
- goto error_window;
- }
-
- vmalloc_dma_phys = is_vmalloc_addr(&window->dma_addr[0]);
- vmalloc_num_pages = is_vmalloc_addr(&window->num_pages[0]);
-
- /* Now map each of the pages containing physical addresses */
- for (i = 0, j = 0; i < nr_pages; i += SCIF_NR_ADDR_IN_PAGE, j++) {
- err = scif_map_page(&window->dma_addr_lookup.lookup[j],
- vmalloc_dma_phys ?
- vmalloc_to_page(&window->dma_addr[i]) :
- virt_to_page(&window->dma_addr[i]),
- remote_dev);
- if (err)
- goto error_window;
- err = scif_map_page(&window->num_pages_lookup.lookup[j],
- vmalloc_num_pages ?
- vmalloc_to_page(&window->num_pages[i]) :
- virt_to_page(&window->num_pages[i]),
- remote_dev);
- if (err)
- goto error_window;
- }
- return 0;
-error_window:
- return err;
-}
-
-/**
- * scif_destroy_remote_lookup:
- * @remote_dev: SCIF remote device
- * @window: remote window
- *
- * Destroy lookup entries used for the remote
- * end to copy over the physical addresses.
- */
-static void scif_destroy_remote_lookup(struct scif_dev *remote_dev,
- struct scif_window *window)
-{
- int i, j;
-
- if (window->nr_lookup) {
- struct scif_rma_lookup *lup = &window->dma_addr_lookup;
- struct scif_rma_lookup *npup = &window->num_pages_lookup;
-
- for (i = 0, j = 0; i < window->nr_pages;
- i += SCIF_NR_ADDR_IN_PAGE, j++) {
- if (lup->lookup && lup->lookup[j])
- scif_unmap_single(lup->lookup[j],
- remote_dev,
- PAGE_SIZE);
- if (npup->lookup && npup->lookup[j])
- scif_unmap_single(npup->lookup[j],
- remote_dev,
- PAGE_SIZE);
- }
- if (lup->lookup)
- scif_free_coherent(lup->lookup, lup->offset,
- remote_dev, window->nr_lookup *
- sizeof(*lup->lookup));
- if (npup->lookup)
- scif_free_coherent(npup->lookup, npup->offset,
- remote_dev, window->nr_lookup *
- sizeof(*npup->lookup));
- if (window->mapped_offset)
- scif_unmap_single(window->mapped_offset,
- remote_dev, sizeof(*window));
- window->nr_lookup = 0;
- }
-}
-
-/**
- * scif_create_remote_window:
- * @scifdev: SCIF device
- * @nr_pages: number of pages in window
- *
- * Allocate and prepare a remote registration window.
- */
-static struct scif_window *
-scif_create_remote_window(struct scif_dev *scifdev, int nr_pages)
-{
- struct scif_window *window;
-
- might_sleep();
- window = scif_zalloc(sizeof(*window));
- if (!window)
- goto error_ret;
-
- window->magic = SCIFEP_MAGIC;
- window->nr_pages = nr_pages;
-
- window->dma_addr = scif_zalloc(nr_pages * sizeof(*window->dma_addr));
- if (!window->dma_addr)
- goto error_window;
-
- window->num_pages = scif_zalloc(nr_pages *
- sizeof(*window->num_pages));
- if (!window->num_pages)
- goto error_window;
-
- if (scif_create_remote_lookup(scifdev, window))
- goto error_window;
-
- window->type = SCIF_WINDOW_PEER;
- window->unreg_state = OP_IDLE;
- INIT_LIST_HEAD(&window->list);
- return window;
-error_window:
- scif_destroy_remote_window(window);
-error_ret:
- return NULL;
-}
-
-/**
- * scif_destroy_remote_window:
- * @window: remote registration window
- *
- * Deallocate resources for remote window.
- */
-void
-scif_destroy_remote_window(struct scif_window *window)
-{
- scif_free(window->dma_addr, window->nr_pages *
- sizeof(*window->dma_addr));
- scif_free(window->num_pages, window->nr_pages *
- sizeof(*window->num_pages));
- window->magic = 0;
- scif_free(window, sizeof(*window));
-}
-
-/**
- * scif_iommu_map: create DMA mappings if the IOMMU is enabled
- * @remote_dev: SCIF remote device
- * @window: remote registration window
- *
- * Map the physical pages using dma_map_sg(..) and then detect the number
- * of contiguous DMA mappings allocated
- */
-static int scif_iommu_map(struct scif_dev *remote_dev,
- struct scif_window *window)
-{
- struct scatterlist *sg;
- int i, err;
- scif_pinned_pages_t pin = window->pinned_pages;
-
- window->st = kzalloc(sizeof(*window->st), GFP_KERNEL);
- if (!window->st)
- return -ENOMEM;
-
- err = sg_alloc_table(window->st, window->nr_pages, GFP_KERNEL);
- if (err)
- return err;
-
- for_each_sg(window->st->sgl, sg, window->st->nents, i)
- sg_set_page(sg, pin->pages[i], PAGE_SIZE, 0x0);
-
- err = dma_map_sg(&remote_dev->sdev->dev, window->st->sgl,
- window->st->nents, DMA_BIDIRECTIONAL);
- if (!err)
- return -ENOMEM;
- /* Detect contiguous ranges of DMA mappings */
- sg = window->st->sgl;
- for (i = 0; sg; i++) {
- dma_addr_t last_da;
-
- window->dma_addr[i] = sg_dma_address(sg);
- window->num_pages[i] = sg_dma_len(sg) >> PAGE_SHIFT;
- last_da = sg_dma_address(sg) + sg_dma_len(sg);
- while ((sg = sg_next(sg)) && sg_dma_address(sg) == last_da) {
- window->num_pages[i] +=
- (sg_dma_len(sg) >> PAGE_SHIFT);
-			/* advance the running end past the entry just merged */
-			last_da = sg_dma_address(sg) + sg_dma_len(sg);
- }
- window->nr_contig_chunks++;
- }
- return 0;
-}
-
-/**
- * scif_map_window:
- * @remote_dev: SCIF remote device
- * @window: self registration window
- *
- * Map pages of a window into the aperture/PCI.
- * Also determine addresses required for DMA.
- */
-int
-scif_map_window(struct scif_dev *remote_dev, struct scif_window *window)
-{
- int i, j, k, err = 0, nr_contig_pages;
- scif_pinned_pages_t pin;
- phys_addr_t phys_prev, phys_curr;
-
- might_sleep();
-
- pin = window->pinned_pages;
-
- if (intel_iommu_enabled && !scifdev_self(remote_dev))
- return scif_iommu_map(remote_dev, window);
-
- for (i = 0, j = 0; i < window->nr_pages; i += nr_contig_pages, j++) {
- phys_prev = page_to_phys(pin->pages[i]);
- nr_contig_pages = 1;
-
- /* Detect physically contiguous chunks */
- for (k = i + 1; k < window->nr_pages; k++) {
- phys_curr = page_to_phys(pin->pages[k]);
- if (phys_curr != (phys_prev + PAGE_SIZE))
- break;
- phys_prev = phys_curr;
- nr_contig_pages++;
- }
- window->num_pages[j] = nr_contig_pages;
- window->nr_contig_chunks++;
- if (scif_is_mgmt_node()) {
- /*
- * Management node has to deal with SMPT on X100 and
- * hence the DMA mapping is required
- */
- err = scif_map_single(&window->dma_addr[j],
- phys_to_virt(page_to_phys(
- pin->pages[i])),
- remote_dev,
- nr_contig_pages << PAGE_SHIFT);
- if (err)
- return err;
- } else {
- window->dma_addr[j] = page_to_phys(pin->pages[i]);
- }
- }
- return err;
-}
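
scif_map_window() above coalesces physically contiguous pinned pages so that one DMA mapping can cover a whole run instead of a single page. The helper below isolates that scan as a standalone function; it is a hypothetical rewrite of the inner loop, not code from the driver.

    #include <linux/mm.h>

    /* Count how many pages beginning at pages[start] are physically contiguous. */
    static int count_contig_run(struct page **pages, int nr_pages, int start)
    {
            phys_addr_t prev = page_to_phys(pages[start]);
            int n = 1;
            int k;

            for (k = start + 1; k < nr_pages; k++) {
                    phys_addr_t cur = page_to_phys(pages[k]);

                    if (cur != prev + PAGE_SIZE)
                            break;          /* the run ends at the first hole */
                    prev = cur;
                    n++;
            }
            return n;
    }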
-
-/**
- * scif_send_scif_unregister:
- * @ep: end point
- * @window: self registration window
- *
- * Send a SCIF_UNREGISTER message.
- */
-static int scif_send_scif_unregister(struct scif_endpt *ep,
- struct scif_window *window)
-{
- struct scifmsg msg;
-
- msg.uop = SCIF_UNREGISTER;
- msg.src = ep->port;
- msg.payload[0] = window->alloc_handle.vaddr;
- msg.payload[1] = (u64)window;
- return scif_nodeqp_send(ep->remote_dev, &msg);
-}
-
-/**
- * scif_unregister_window:
- * @window: self registration window
- *
- * Send an unregistration request and wait for a response.
- */
-int scif_unregister_window(struct scif_window *window)
-{
- int err = 0;
- struct scif_endpt *ep = (struct scif_endpt *)window->ep;
- bool send_msg = false;
-
- might_sleep();
- switch (window->unreg_state) {
- case OP_IDLE:
- {
- window->unreg_state = OP_IN_PROGRESS;
- send_msg = true;
- }
- fallthrough;
- case OP_IN_PROGRESS:
- {
- scif_get_window(window, 1);
- mutex_unlock(&ep->rma_info.rma_lock);
- if (send_msg) {
- err = scif_send_scif_unregister(ep, window);
- if (err) {
- window->unreg_state = OP_COMPLETED;
- goto done;
- }
- } else {
- /* Return ENXIO since unregistration is in progress */
- mutex_lock(&ep->rma_info.rma_lock);
- return -ENXIO;
- }
-retry:
- /* Wait for a SCIF_UNREGISTER_(N)ACK message */
- err = wait_event_timeout(window->unregwq,
- window->unreg_state != OP_IN_PROGRESS,
- SCIF_NODE_ALIVE_TIMEOUT);
- if (!err && scifdev_alive(ep))
- goto retry;
- if (!err) {
- err = -ENODEV;
- window->unreg_state = OP_COMPLETED;
- dev_err(scif_info.mdev.this_device,
- "%s %d err %d\n", __func__, __LINE__, err);
- }
- if (err > 0)
- err = 0;
-done:
- mutex_lock(&ep->rma_info.rma_lock);
- scif_put_window(window, 1);
- break;
- }
- case OP_FAILED:
- {
- if (!scifdev_alive(ep)) {
- err = -ENODEV;
- window->unreg_state = OP_COMPLETED;
- }
- break;
- }
- case OP_COMPLETED:
- break;
- default:
- err = -ENODEV;
- }
-
- if (window->unreg_state == OP_COMPLETED && window->ref_count)
- scif_put_window(window, window->nr_pages);
-
- if (!window->ref_count) {
- atomic_inc(&ep->rma_info.tw_refcount);
- list_del_init(&window->list);
- scif_free_window_offset(ep, window, window->offset);
- mutex_unlock(&ep->rma_info.rma_lock);
- if ((!!(window->pinned_pages->map_flags & SCIF_MAP_KERNEL)) &&
- scifdev_alive(ep)) {
- scif_drain_dma_intr(ep->remote_dev->sdev,
- ep->rma_info.dma_chan);
- } else {
- if (!__scif_dec_pinned_vm_lock(window->mm,
- window->nr_pages)) {
- __scif_release_mm(window->mm);
- window->mm = NULL;
- }
- }
- scif_queue_for_cleanup(window, &scif_info.rma);
- mutex_lock(&ep->rma_info.rma_lock);
- }
- return err;
-}
-
-/**
- * scif_send_alloc_request:
- * @ep: end point
- * @window: self registration window
- *
- * Send a remote window allocation request
- */
-static int scif_send_alloc_request(struct scif_endpt *ep,
- struct scif_window *window)
-{
- struct scifmsg msg;
- struct scif_allocmsg *alloc = &window->alloc_handle;
-
- /* Set up the Alloc Handle */
- alloc->state = OP_IN_PROGRESS;
- init_waitqueue_head(&alloc->allocwq);
-
- /* Send out an allocation request */
-	msg.uop = SCIF_ALLOC_REQ;
-	msg.src = ep->port; /* don't send an uninitialized src field */
-	msg.payload[1] = window->nr_pages;
- msg.payload[2] = (u64)&window->alloc_handle;
- return _scif_nodeqp_send(ep->remote_dev, &msg);
-}
-
-/**
- * scif_prep_remote_window:
- * @ep: end point
- * @window: self registration window
- *
- * Send a remote window allocation request, wait for an allocation response,
- * and prepare the remote window by copying over the page lists.
- */
-static int scif_prep_remote_window(struct scif_endpt *ep,
- struct scif_window *window)
-{
- struct scifmsg msg;
- struct scif_window *remote_window;
- struct scif_allocmsg *alloc = &window->alloc_handle;
- dma_addr_t *dma_phys_lookup, *tmp, *num_pages_lookup, *tmp1;
- int i = 0, j = 0;
- int nr_contig_chunks, loop_nr_contig_chunks;
- int remaining_nr_contig_chunks, nr_lookup;
- int err, map_err;
-
- map_err = scif_map_window(ep->remote_dev, window);
- if (map_err)
- dev_err(&ep->remote_dev->sdev->dev,
- "%s %d map_err %d\n", __func__, __LINE__, map_err);
- remaining_nr_contig_chunks = window->nr_contig_chunks;
- nr_contig_chunks = window->nr_contig_chunks;
-retry:
- /* Wait for a SCIF_ALLOC_GNT/REJ message */
- err = wait_event_timeout(alloc->allocwq,
- alloc->state != OP_IN_PROGRESS,
- SCIF_NODE_ALIVE_TIMEOUT);
- mutex_lock(&ep->rma_info.rma_lock);
- /* Synchronize with the thread waking up allocwq */
- mutex_unlock(&ep->rma_info.rma_lock);
- if (!err && scifdev_alive(ep))
- goto retry;
-
- if (!err)
- err = -ENODEV;
-
- if (err > 0)
- err = 0;
- else
- return err;
-
- /* Bail out. The remote end rejected this request */
- if (alloc->state == OP_FAILED)
- return -ENOMEM;
-
- if (map_err) {
- dev_err(&ep->remote_dev->sdev->dev,
- "%s %d err %d\n", __func__, __LINE__, map_err);
- msg.uop = SCIF_FREE_VIRT;
- msg.src = ep->port;
- msg.payload[0] = ep->remote_ep;
- msg.payload[1] = window->alloc_handle.vaddr;
- msg.payload[2] = (u64)window;
- msg.payload[3] = SCIF_REGISTER;
- spin_lock(&ep->lock);
- if (ep->state == SCIFEP_CONNECTED)
- err = _scif_nodeqp_send(ep->remote_dev, &msg);
- else
- err = -ENOTCONN;
- spin_unlock(&ep->lock);
- return err;
- }
-
- remote_window = scif_ioremap(alloc->phys_addr, sizeof(*window),
- ep->remote_dev);
-
- /* Compute the number of lookup entries. 21 == 2MB Shift */
- nr_lookup = ALIGN(nr_contig_chunks, SCIF_NR_ADDR_IN_PAGE)
- >> ilog2(SCIF_NR_ADDR_IN_PAGE);
-
- dma_phys_lookup =
- scif_ioremap(remote_window->dma_addr_lookup.offset,
- nr_lookup *
- sizeof(*remote_window->dma_addr_lookup.lookup),
- ep->remote_dev);
- num_pages_lookup =
- scif_ioremap(remote_window->num_pages_lookup.offset,
- nr_lookup *
- sizeof(*remote_window->num_pages_lookup.lookup),
- ep->remote_dev);
-
- while (remaining_nr_contig_chunks) {
- loop_nr_contig_chunks = min_t(int, remaining_nr_contig_chunks,
- (int)SCIF_NR_ADDR_IN_PAGE);
- /* #1/2 - Copy physical addresses over to the remote side */
-
-		/* #2/2 - Copy DMA addresses (the addresses fed into the
-		 * DMA engine). We transfer bus addresses, which are then
-		 * converted into MIC physical addresses on the remote side
-		 * if it is a MIC; if the remote node is a mgmt node we
-		 * transfer the MIC physical address as-is.
-		 */
- tmp = scif_ioremap(dma_phys_lookup[j],
- loop_nr_contig_chunks *
- sizeof(*window->dma_addr),
- ep->remote_dev);
- tmp1 = scif_ioremap(num_pages_lookup[j],
- loop_nr_contig_chunks *
- sizeof(*window->num_pages),
- ep->remote_dev);
- if (scif_is_mgmt_node()) {
- memcpy_toio((void __force __iomem *)tmp,
- &window->dma_addr[i], loop_nr_contig_chunks
- * sizeof(*window->dma_addr));
- memcpy_toio((void __force __iomem *)tmp1,
- &window->num_pages[i], loop_nr_contig_chunks
- * sizeof(*window->num_pages));
- } else {
- if (scifdev_is_p2p(ep->remote_dev)) {
- /*
- * add remote node's base address for this node
- * to convert it into a MIC address
- */
- int m;
- dma_addr_t dma_addr;
-
- for (m = 0; m < loop_nr_contig_chunks; m++) {
- dma_addr = window->dma_addr[i + m] +
- ep->remote_dev->base_addr;
- writeq(dma_addr,
- (void __force __iomem *)&tmp[m]);
- }
- memcpy_toio((void __force __iomem *)tmp1,
- &window->num_pages[i],
- loop_nr_contig_chunks
- * sizeof(*window->num_pages));
- } else {
- /* Mgmt node or loopback - transfer DMA
- * addresses as is, this is the same as a
- * MIC physical address (we use the dma_addr
- * and not the phys_addr array since the
- * phys_addr is only setup if there is a mmap()
- * request from the mgmt node)
- */
- memcpy_toio((void __force __iomem *)tmp,
- &window->dma_addr[i],
- loop_nr_contig_chunks *
- sizeof(*window->dma_addr));
- memcpy_toio((void __force __iomem *)tmp1,
- &window->num_pages[i],
- loop_nr_contig_chunks *
- sizeof(*window->num_pages));
- }
- }
- remaining_nr_contig_chunks -= loop_nr_contig_chunks;
- i += loop_nr_contig_chunks;
- j++;
- scif_iounmap(tmp, loop_nr_contig_chunks *
- sizeof(*window->dma_addr), ep->remote_dev);
- scif_iounmap(tmp1, loop_nr_contig_chunks *
- sizeof(*window->num_pages), ep->remote_dev);
- }
-
- /* Prepare the remote window for the peer */
- remote_window->peer_window = (u64)window;
- remote_window->offset = window->offset;
- remote_window->prot = window->prot;
- remote_window->nr_contig_chunks = nr_contig_chunks;
- remote_window->ep = ep->remote_ep;
- scif_iounmap(num_pages_lookup,
- nr_lookup *
- sizeof(*remote_window->num_pages_lookup.lookup),
- ep->remote_dev);
- scif_iounmap(dma_phys_lookup,
- nr_lookup *
- sizeof(*remote_window->dma_addr_lookup.lookup),
- ep->remote_dev);
- scif_iounmap(remote_window, sizeof(*remote_window), ep->remote_dev);
- window->peer_window = alloc->vaddr;
- return err;
-}
-
-/**
- * scif_send_scif_register:
- * @ep: end point
- * @window: self registration window
- *
- * Send a SCIF_REGISTER message if EP is connected and wait for a
- * SCIF_REGISTER_(N)ACK message else send a SCIF_FREE_VIRT
- * message so that the peer can free its remote window allocated earlier.
- */
-static int scif_send_scif_register(struct scif_endpt *ep,
- struct scif_window *window)
-{
- int err = 0;
- struct scifmsg msg;
-
- msg.src = ep->port;
- msg.payload[0] = ep->remote_ep;
- msg.payload[1] = window->alloc_handle.vaddr;
- msg.payload[2] = (u64)window;
- spin_lock(&ep->lock);
- if (ep->state == SCIFEP_CONNECTED) {
- msg.uop = SCIF_REGISTER;
- window->reg_state = OP_IN_PROGRESS;
- err = _scif_nodeqp_send(ep->remote_dev, &msg);
- spin_unlock(&ep->lock);
- if (!err) {
-retry:
- /* Wait for a SCIF_REGISTER_(N)ACK message */
- err = wait_event_timeout(window->regwq,
- window->reg_state !=
- OP_IN_PROGRESS,
- SCIF_NODE_ALIVE_TIMEOUT);
- if (!err && scifdev_alive(ep))
- goto retry;
- err = !err ? -ENODEV : 0;
- if (window->reg_state == OP_FAILED)
- err = -ENOTCONN;
- }
- } else {
- msg.uop = SCIF_FREE_VIRT;
- msg.payload[3] = SCIF_REGISTER;
- err = _scif_nodeqp_send(ep->remote_dev, &msg);
- spin_unlock(&ep->lock);
- if (!err)
- err = -ENOTCONN;
- }
- return err;
-}
-
-/**
- * scif_get_window_offset:
- * @ep: end point descriptor
- * @flags: flags
- * @offset: offset hint
- * @num_pages: number of pages
- * @out_offset: computed offset returned by reference.
- *
- * Compute/Claim a new offset for this EP.
- */
-int scif_get_window_offset(struct scif_endpt *ep, int flags, s64 offset,
- int num_pages, s64 *out_offset)
-{
- s64 page_index;
- struct iova *iova_ptr;
- int err = 0;
-
- if (flags & SCIF_MAP_FIXED) {
- page_index = SCIF_IOVA_PFN(offset);
- iova_ptr = reserve_iova(&ep->rma_info.iovad, page_index,
- page_index + num_pages - 1);
- if (!iova_ptr)
- err = -EADDRINUSE;
- } else {
- iova_ptr = alloc_iova(&ep->rma_info.iovad, num_pages,
- SCIF_DMA_63BIT_PFN - 1, 0);
- if (!iova_ptr)
- err = -ENOMEM;
- }
- if (!err)
- *out_offset = (iova_ptr->pfn_lo) << PAGE_SHIFT;
- return err;
-}
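
scif_get_window_offset() above is a thin wrapper over the kernel IOVA allocator: SCIF_MAP_FIXED reserves an exact PFN range with reserve_iova(), anything else takes whatever alloc_iova() hands back below the PFN limit. A condensed sketch with an invented name and the error handling reduced to null checks:

    #include <linux/errno.h>
    #include <linux/iova.h>
    #include <linux/mm.h>

    static s64 my_claim_offset(struct iova_domain *iovad, bool fixed, s64 hint,
                               int num_pages, unsigned long limit_pfn)
    {
            struct iova *iova;

            if (fixed) {
                    unsigned long pfn = hint >> PAGE_SHIFT;

                    iova = reserve_iova(iovad, pfn, pfn + num_pages - 1);
                    if (!iova)
                            return -EADDRINUSE;     /* range already claimed */
            } else {
                    iova = alloc_iova(iovad, num_pages, limit_pfn, false);
                    if (!iova)
                            return -ENOMEM;         /* domain exhausted */
            }
            return (s64)iova->pfn_lo << PAGE_SHIFT; /* byte offset of the window */
    }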
-
-/**
- * scif_free_window_offset:
- * @ep: end point descriptor
- * @window: registration window
- * @offset: Offset to be freed
- *
- * Free offset for this EP. The caller is expected to hold
- * the RMA mutex before calling this API.
- */
-void scif_free_window_offset(struct scif_endpt *ep,
- struct scif_window *window, s64 offset)
-{
- if ((window && !window->offset_freed) || !window) {
- free_iova(&ep->rma_info.iovad, offset >> PAGE_SHIFT);
- if (window)
- window->offset_freed = true;
- }
-}
-
-/**
- * scif_alloc_req: Respond to SCIF_ALLOC_REQ interrupt message
- * @scifdev: SCIF device
- * @msg: Interrupt message
- *
- * Remote side is requesting a memory allocation.
- */
-void scif_alloc_req(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- int err;
- struct scif_window *window = NULL;
- int nr_pages = msg->payload[1];
-
- window = scif_create_remote_window(scifdev, nr_pages);
- if (!window) {
- err = -ENOMEM;
- goto error;
- }
-
- /* The peer's allocation request is granted */
- msg->uop = SCIF_ALLOC_GNT;
- msg->payload[0] = (u64)window;
- msg->payload[1] = window->mapped_offset;
- err = scif_nodeqp_send(scifdev, msg);
- if (err)
- scif_destroy_remote_window(window);
- return;
-error:
- /* The peer's allocation request is rejected */
- dev_err(&scifdev->sdev->dev,
- "%s %d error %d alloc_ptr %p nr_pages 0x%x\n",
- __func__, __LINE__, err, window, nr_pages);
- msg->uop = SCIF_ALLOC_REJ;
- scif_nodeqp_send(scifdev, msg);
-}
-
-/**
- * scif_alloc_gnt_rej: Respond to SCIF_ALLOC_GNT/REJ interrupt message
- * @scifdev: SCIF device
- * @msg: Interrupt message
- *
- * Remote side responded to a memory allocation.
- */
-void scif_alloc_gnt_rej(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_allocmsg *handle = (struct scif_allocmsg *)msg->payload[2];
- struct scif_window *window = container_of(handle, struct scif_window,
- alloc_handle);
- struct scif_endpt *ep = (struct scif_endpt *)window->ep;
-
- mutex_lock(&ep->rma_info.rma_lock);
- handle->vaddr = msg->payload[0];
- handle->phys_addr = msg->payload[1];
- if (msg->uop == SCIF_ALLOC_GNT)
- handle->state = OP_COMPLETED;
- else
- handle->state = OP_FAILED;
- wake_up(&handle->allocwq);
- mutex_unlock(&ep->rma_info.rma_lock);
-}
-
-/**
- * scif_free_virt: Respond to SCIF_FREE_VIRT interrupt message
- * @scifdev: SCIF device
- * @msg: Interrupt message
- *
- * Free up memory kmalloc'd earlier.
- */
-void scif_free_virt(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_window *window = (struct scif_window *)msg->payload[1];
-
- scif_destroy_remote_window(window);
-}
-
-static void
-scif_fixup_aper_base(struct scif_dev *dev, struct scif_window *window)
-{
- int j;
- struct scif_hw_dev *sdev = dev->sdev;
- phys_addr_t apt_base = 0;
-
- /*
- * Add the aperture base if the DMA address is not card relative
-	 * since the DMA addresses need to be an offset into the BAR
- */
- if (!scifdev_self(dev) && window->type == SCIF_WINDOW_PEER &&
- sdev->aper && !sdev->card_rel_da)
- apt_base = sdev->aper->pa;
- else
- return;
-
- for (j = 0; j < window->nr_contig_chunks; j++) {
- if (window->num_pages[j])
- window->dma_addr[j] += apt_base;
- else
- break;
- }
-}
-
-/**
- * scif_recv_reg: Respond to SCIF_REGISTER interrupt message
- * @scifdev: SCIF device
- * @msg: Interrupt message
- *
- * Update remote window list with a new registered window.
- */
-void scif_recv_reg(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
- struct scif_window *window =
- (struct scif_window *)msg->payload[1];
-
- mutex_lock(&ep->rma_info.rma_lock);
- spin_lock(&ep->lock);
- if (ep->state == SCIFEP_CONNECTED) {
- msg->uop = SCIF_REGISTER_ACK;
- scif_nodeqp_send(ep->remote_dev, msg);
- scif_fixup_aper_base(ep->remote_dev, window);
- /* No further failures expected. Insert new window */
- scif_insert_window(window, &ep->rma_info.remote_reg_list);
- } else {
- msg->uop = SCIF_REGISTER_NACK;
- scif_nodeqp_send(ep->remote_dev, msg);
- }
- spin_unlock(&ep->lock);
- mutex_unlock(&ep->rma_info.rma_lock);
- /* free up any lookup resources now that page lists are transferred */
- scif_destroy_remote_lookup(ep->remote_dev, window);
- /*
- * The window was not inserted above, but it still needs
- * to be destroyed.
- */
- if (msg->uop == SCIF_REGISTER_NACK)
- scif_destroy_remote_window(window);
-}
-
-/**
- * scif_recv_unreg: Respond to SCIF_UNREGISTER interrupt message
- * @scifdev: SCIF device
- * @msg: Interrupt message
- *
- * Remove window from the remote registration list.
- */
-void scif_recv_unreg(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_rma_req req;
- struct scif_window *window = NULL;
- struct scif_window *recv_window =
- (struct scif_window *)msg->payload[0];
- struct scif_endpt *ep;
- int del_window = 0;
-
- ep = (struct scif_endpt *)recv_window->ep;
- req.out_window = &window;
- req.offset = recv_window->offset;
- req.prot = 0;
- req.nr_bytes = recv_window->nr_pages << PAGE_SHIFT;
- req.type = SCIF_WINDOW_FULL;
- req.head = &ep->rma_info.remote_reg_list;
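- /* The (N)ACK reply carries the peer's endpoint in payload[0] */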
- msg->payload[0] = ep->remote_ep;
-
- mutex_lock(&ep->rma_info.rma_lock);
- /* Does a valid window exist? */
- if (scif_query_window(&req)) {
- dev_err(&scifdev->sdev->dev,
- "%s %d -ENXIO\n", __func__, __LINE__);
- msg->uop = SCIF_UNREGISTER_ACK;
- goto error;
- }
- if (window) {
- if (window->ref_count)
- scif_put_window(window, window->nr_pages);
- else
- dev_err(&scifdev->sdev->dev,
- "%s %d ref count should be +ve\n",
- __func__, __LINE__);
- window->unreg_state = OP_COMPLETED;
- if (!window->ref_count) {
- msg->uop = SCIF_UNREGISTER_ACK;
- atomic_inc(&ep->rma_info.tw_refcount);
- ep->rma_info.async_list_del = 1;
- list_del_init(&window->list);
- del_window = 1;
- } else {
- /* NACK! There are valid references to this window */
- msg->uop = SCIF_UNREGISTER_NACK;
- }
- } else {
- /* The window did not make its way to the list at all. ACK */
- msg->uop = SCIF_UNREGISTER_ACK;
- scif_destroy_remote_window(recv_window);
- }
-error:
- mutex_unlock(&ep->rma_info.rma_lock);
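- /* Drain outstanding DMAs to the window before ACKing the peer */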
- if (del_window)
- scif_drain_dma_intr(ep->remote_dev->sdev,
- ep->rma_info.dma_chan);
- scif_nodeqp_send(ep->remote_dev, msg);
- if (del_window)
- scif_queue_for_cleanup(window, &scif_info.rma);
-}
-
-/**
- * scif_recv_reg_ack: Respond to SCIF_REGISTER_ACK interrupt message
- * @scifdev: SCIF device
- * @msg: Interrupt message
- *
- * Wake up the window waiting to complete registration.
- */
-void scif_recv_reg_ack(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_window *window =
- (struct scif_window *)msg->payload[2];
- struct scif_endpt *ep = (struct scif_endpt *)window->ep;
-
- mutex_lock(&ep->rma_info.rma_lock);
- window->reg_state = OP_COMPLETED;
- wake_up(&window->regwq);
- mutex_unlock(&ep->rma_info.rma_lock);
-}
-
-/**
- * scif_recv_reg_nack: Respond to SCIF_REGISTER_NACK interrupt message
- * @scifdev: SCIF device
- * @msg: Interrupt message
- *
- * Wake up the waiting window to inform it that registration
- * cannot be completed.
- */
-void scif_recv_reg_nack(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_window *window =
- (struct scif_window *)msg->payload[2];
- struct scif_endpt *ep = (struct scif_endpt *)window->ep;
-
- mutex_lock(&ep->rma_info.rma_lock);
- window->reg_state = OP_FAILED;
- wake_up(&window->regwq);
- mutex_unlock(&ep->rma_info.rma_lock);
-}
-
-/**
- * scif_recv_unreg_ack: Respond to SCIF_UNREGISTER_ACK interrupt message
- * @scifdev: SCIF device
- * @msg: Interrupt message
- *
- * Wake up the window waiting to complete unregistration.
- */
-void scif_recv_unreg_ack(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_window *window =
- (struct scif_window *)msg->payload[1];
- struct scif_endpt *ep = (struct scif_endpt *)window->ep;
-
- mutex_lock(&ep->rma_info.rma_lock);
- window->unreg_state = OP_COMPLETED;
- wake_up(&window->unregwq);
- mutex_unlock(&ep->rma_info.rma_lock);
-}
-
-/**
- * scif_recv_unreg_nack: Respond to SCIF_UNREGISTER_NACK interrupt message
- * @scifdev: SCIF device
- * @msg: Interrupt message
- *
- * Wake up the waiting window to inform it that unregistration
- * cannot be completed immediately.
- */
-void scif_recv_unreg_nack(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_window *window =
- (struct scif_window *)msg->payload[1];
- struct scif_endpt *ep = (struct scif_endpt *)window->ep;
-
- mutex_lock(&ep->rma_info.rma_lock);
- window->unreg_state = OP_FAILED;
- wake_up(&window->unregwq);
- mutex_unlock(&ep->rma_info.rma_lock);
-}
-
-int __scif_pin_pages(void *addr, size_t len, int *out_prot,
- int map_flags, scif_pinned_pages_t *pages)
-{
- struct scif_pinned_pages *pinned_pages;
- int nr_pages, err = 0, i;
- bool vmalloc_addr = false;
- bool try_upgrade = false;
- int prot = *out_prot;
- int ulimit = 0;
- struct mm_struct *mm = NULL;
-
- /* Unsupported flags */
- if (map_flags & ~(SCIF_MAP_KERNEL | SCIF_MAP_ULIMIT))
- return -EINVAL;
- ulimit = !!(map_flags & SCIF_MAP_ULIMIT);
-
- /* Unsupported protection requested */
- if (prot & ~(SCIF_PROT_READ | SCIF_PROT_WRITE))
- return -EINVAL;
-
- /* addr and len must be page aligned and len must be non-zero */
- if (!len ||
- (ALIGN((u64)addr, PAGE_SIZE) != (u64)addr) ||
- (ALIGN((u64)len, PAGE_SIZE) != (u64)len))
- return -EINVAL;
-
- might_sleep();
-
- nr_pages = len >> PAGE_SHIFT;
-
- /* Allocate a set of pinned pages */
- pinned_pages = scif_create_pinned_pages(nr_pages, prot);
- if (!pinned_pages)
- return -ENOMEM;
-
- if (map_flags & SCIF_MAP_KERNEL) {
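- /* Kernel buffers are already resident; translate VAs directly to struct page */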
- if (is_vmalloc_addr(addr))
- vmalloc_addr = true;
-
- for (i = 0; i < nr_pages; i++) {
- if (vmalloc_addr)
- pinned_pages->pages[i] =
- vmalloc_to_page(addr + (i * PAGE_SIZE));
- else
- pinned_pages->pages[i] =
- virt_to_page(addr + (i * PAGE_SIZE));
- }
- pinned_pages->nr_pages = nr_pages;
- pinned_pages->map_flags = SCIF_MAP_KERNEL;
- } else {
- /*
- * SCIF supports registration caching. If a registration has
- * been requested with read-only permissions, then we try
- * to pin the pages with RW permissions so that a subsequent
- * transfer with RW permission can hit the cache instead of
- * invalidating it. If the RW upgrade fails, we revert to
- * read-only permission and retry.
- */
- if (prot == SCIF_PROT_READ)
- try_upgrade = true;
- prot |= SCIF_PROT_WRITE;
-retry:
- mm = current->mm;
- if (ulimit) {
- err = __scif_check_inc_pinned_vm(mm, nr_pages);
- if (err) {
- pinned_pages->nr_pages = 0;
- goto error_unmap;
- }
- }
-
- pinned_pages->nr_pages = pin_user_pages_fast(
- (u64)addr,
- nr_pages,
- (prot & SCIF_PROT_WRITE) ? FOLL_WRITE : 0,
- pinned_pages->pages);
- if (nr_pages != pinned_pages->nr_pages) {
- if (pinned_pages->nr_pages < 0)
- pinned_pages->nr_pages = 0;
- if (try_upgrade) {
- if (ulimit)
- __scif_dec_pinned_vm_lock(mm, nr_pages);
- /* Roll back any pinned pages */
- unpin_user_pages(pinned_pages->pages,
- pinned_pages->nr_pages);
- prot &= ~SCIF_PROT_WRITE;
- try_upgrade = false;
- goto retry;
- }
- }
- pinned_pages->map_flags = 0;
- }
-
- if (pinned_pages->nr_pages < nr_pages) {
- err = -EFAULT;
- goto dec_pinned;
- }
-
- *out_prot = prot;
- atomic_set(&pinned_pages->ref_count, 1);
- *pages = pinned_pages;
- return err;
-dec_pinned:
- if (ulimit)
- __scif_dec_pinned_vm_lock(mm, nr_pages);
- /* Something went wrong! Rollback */
-error_unmap:
- scif_destroy_pinned_pages(pinned_pages);
- *pages = NULL;
- dev_dbg(scif_info.mdev.this_device,
- "%s %d err %d len 0x%lx\n", __func__, __LINE__, err, len);
- return err;
-}
-
-int scif_pin_pages(void *addr, size_t len, int prot,
- int map_flags, scif_pinned_pages_t *pages)
-{
- return __scif_pin_pages(addr, len, &prot, map_flags, pages);
-}
-EXPORT_SYMBOL_GPL(scif_pin_pages);
-
-int scif_unpin_pages(scif_pinned_pages_t pinned_pages)
-{
- int err = 0, ret;
-
- if (!pinned_pages || SCIFEP_MAGIC != pinned_pages->magic)
- return -EINVAL;
-
- ret = atomic_sub_return(1, &pinned_pages->ref_count);
- if (ret < 0) {
- dev_err(scif_info.mdev.this_device,
- "%s %d scif_unpin_pages called without pinning? rc %d\n",
- __func__, __LINE__, ret);
- return -EINVAL;
- }
- /*
- * Destroy the window if the ref count for this set of pinned
- * pages has dropped to zero. If it is positive then there is
- * a valid registered window which is backed by these pages and
- * it will be destroyed once all such windows are unregistered.
- */
- if (!ret)
- err = scif_destroy_pinned_pages(pinned_pages);
-
- return err;
-}
-EXPORT_SYMBOL_GPL(scif_unpin_pages);
-
-static inline void
-scif_insert_local_window(struct scif_window *window, struct scif_endpt *ep)
-{
- mutex_lock(&ep->rma_info.rma_lock);
- scif_insert_window(window, &ep->rma_info.reg_list);
- mutex_unlock(&ep->rma_info.rma_lock);
-}
-
-off_t scif_register_pinned_pages(scif_epd_t epd,
- scif_pinned_pages_t pinned_pages,
- off_t offset, int map_flags)
-{
- struct scif_endpt *ep = (struct scif_endpt *)epd;
- s64 computed_offset;
- struct scif_window *window;
- int err;
- size_t len;
- struct device *spdev;
-
- /* Unsupported flags */
- if (map_flags & ~SCIF_MAP_FIXED)
- return -EINVAL;
-
- len = pinned_pages->nr_pages << PAGE_SHIFT;
-
- /*
- * With SCIF_MAP_FIXED, the offset must be page aligned and
- * non-negative, and offset + len must not wrap around.
- */
- if ((map_flags & SCIF_MAP_FIXED) &&
- ((ALIGN(offset, PAGE_SIZE) != offset) ||
- (offset < 0) ||
- (len > LONG_MAX - offset)))
- return -EINVAL;
-
- might_sleep();
-
- err = scif_verify_epd(ep);
- if (err)
- return err;
- /*
- * It is an error to pass pinned_pages to scif_register_pinned_pages()
- * after calling scif_unpin_pages().
- */
- if (!atomic_add_unless(&pinned_pages->ref_count, 1, 0))
- return -EINVAL;
-
- /* Compute the offset for this registration */
- err = scif_get_window_offset(ep, map_flags, offset,
- len, &computed_offset);
- if (err) {
- atomic_sub(1, &pinned_pages->ref_count);
- return err;
- }
-
- /* Allocate and prepare self registration window */
- window = scif_create_window(ep, pinned_pages->nr_pages,
- computed_offset, false);
- if (!window) {
- atomic_sub(1, &pinned_pages->ref_count);
- scif_free_window_offset(ep, NULL, computed_offset);
- return -ENOMEM;
- }
-
- window->pinned_pages = pinned_pages;
- window->nr_pages = pinned_pages->nr_pages;
- window->prot = pinned_pages->prot;
-
- spdev = scif_get_peer_dev(ep->remote_dev);
- if (IS_ERR(spdev)) {
- err = PTR_ERR(spdev);
- scif_destroy_window(ep, window);
- return err;
- }
- err = scif_send_alloc_request(ep, window);
- if (err) {
- dev_err(&ep->remote_dev->sdev->dev,
- "%s %d err %d\n", __func__, __LINE__, err);
- goto error_unmap;
- }
-
- /* Prepare the remote registration window */
- err = scif_prep_remote_window(ep, window);
- if (err) {
- dev_err(&ep->remote_dev->sdev->dev,
- "%s %d err %d\n", __func__, __LINE__, err);
- goto error_unmap;
- }
-
- /* Tell the peer about the new window */
- err = scif_send_scif_register(ep, window);
- if (err) {
- dev_err(&ep->remote_dev->sdev->dev,
- "%s %d err %d\n", __func__, __LINE__, err);
- goto error_unmap;
- }
-
- scif_put_peer_dev(spdev);
- /* No further failures expected. Insert new window */
- scif_insert_local_window(window, ep);
- return computed_offset;
-error_unmap:
- scif_destroy_window(ep, window);
- scif_put_peer_dev(spdev);
- dev_err(&ep->remote_dev->sdev->dev,
- "%s %d err %d\n", __func__, __LINE__, err);
- return err;
-}
-EXPORT_SYMBOL_GPL(scif_register_pinned_pages);
-
-off_t scif_register(scif_epd_t epd, void *addr, size_t len, off_t offset,
- int prot, int map_flags)
-{
- scif_pinned_pages_t pinned_pages;
- off_t err;
- struct scif_endpt *ep = (struct scif_endpt *)epd;
- s64 computed_offset;
- struct scif_window *window;
- struct mm_struct *mm = NULL;
- struct device *spdev;
-
- dev_dbg(scif_info.mdev.this_device,
- "SCIFAPI register: ep %p addr %p len 0x%lx offset 0x%lx prot 0x%x map_flags 0x%x\n",
- epd, addr, len, offset, prot, map_flags);
- /* Unsupported flags */
- if (map_flags & ~(SCIF_MAP_FIXED | SCIF_MAP_KERNEL))
- return -EINVAL;
-
- /*
- * With SCIF_MAP_FIXED, the offset must be page aligned and
- * non-negative, and offset + len must not wrap around.
- */
- if ((map_flags & SCIF_MAP_FIXED) &&
- ((ALIGN(offset, PAGE_SIZE) != offset) ||
- (offset < 0) ||
- (len > LONG_MAX - offset)))
- return -EINVAL;
-
- /* Unsupported protection requested */
- if (prot & ~(SCIF_PROT_READ | SCIF_PROT_WRITE))
- return -EINVAL;
-
- /* addr and len must be page aligned and len must be non-zero */
- if (!len || (ALIGN((u64)addr, PAGE_SIZE) != (u64)addr) ||
- (ALIGN(len, PAGE_SIZE) != len))
- return -EINVAL;
-
- might_sleep();
-
- err = scif_verify_epd(ep);
- if (err)
- return err;
-
- /* Compute the offset for this registration */
- err = scif_get_window_offset(ep, map_flags, offset,
- len >> PAGE_SHIFT, &computed_offset);
- if (err)
- return err;
-
- spdev = scif_get_peer_dev(ep->remote_dev);
- if (IS_ERR(spdev)) {
- err = PTR_ERR(spdev);
- scif_free_window_offset(ep, NULL, computed_offset);
- return err;
- }
- /* Allocate and prepare self registration window */
- window = scif_create_window(ep, len >> PAGE_SHIFT,
- computed_offset, false);
- if (!window) {
- scif_free_window_offset(ep, NULL, computed_offset);
- scif_put_peer_dev(spdev);
- return -ENOMEM;
- }
-
- window->nr_pages = len >> PAGE_SHIFT;
-
- err = scif_send_alloc_request(ep, window);
- if (err) {
- scif_destroy_incomplete_window(ep, window);
- scif_put_peer_dev(spdev);
- return err;
- }
-
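- /* User space buffers are accounted against the pinned page limit */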
- if (!(map_flags & SCIF_MAP_KERNEL)) {
- mm = __scif_acquire_mm();
- map_flags |= SCIF_MAP_ULIMIT;
- }
- /* Pin down the pages */
- err = __scif_pin_pages(addr, len, &prot,
- map_flags & (SCIF_MAP_KERNEL | SCIF_MAP_ULIMIT),
- &pinned_pages);
- if (err) {
- scif_destroy_incomplete_window(ep, window);
- __scif_release_mm(mm);
- goto error;
- }
-
- window->pinned_pages = pinned_pages;
- window->prot = pinned_pages->prot;
- window->mm = mm;
-
- /* Prepare the remote registration window */
- err = scif_prep_remote_window(ep, window);
- if (err) {
- dev_err(&ep->remote_dev->sdev->dev,
- "%s %d err %ld\n", __func__, __LINE__, err);
- goto error_unmap;
- }
-
- /* Tell the peer about the new window */
- err = scif_send_scif_register(ep, window);
- if (err) {
- dev_err(&ep->remote_dev->sdev->dev,
- "%s %d err %ld\n", __func__, __LINE__, err);
- goto error_unmap;
- }
-
- scif_put_peer_dev(spdev);
- /* No further failures expected. Insert new window */
- scif_insert_local_window(window, ep);
- dev_dbg(&ep->remote_dev->sdev->dev,
- "SCIFAPI register: ep %p addr %p len 0x%lx computed_offset 0x%llx\n",
- epd, addr, len, computed_offset);
- return computed_offset;
-error_unmap:
- scif_destroy_window(ep, window);
-error:
- scif_put_peer_dev(spdev);
- dev_err(&ep->remote_dev->sdev->dev,
- "%s %d err %ld\n", __func__, __LINE__, err);
- return err;
-}
-EXPORT_SYMBOL_GPL(scif_register);
-
-int
-scif_unregister(scif_epd_t epd, off_t offset, size_t len)
-{
- struct scif_endpt *ep = (struct scif_endpt *)epd;
- struct scif_window *window = NULL;
- struct scif_rma_req req;
- int nr_pages, err;
- struct device *spdev;
-
- dev_dbg(scif_info.mdev.this_device,
- "SCIFAPI unregister: ep %p offset 0x%lx len 0x%lx\n",
- ep, offset, len);
- /* len must be page aligned and non-zero */
- if (!len ||
- (ALIGN((u64)len, PAGE_SIZE) != (u64)len))
- return -EINVAL;
-
- /* Offset must be page aligned and non-negative, and offset + len must not wrap */
- if ((ALIGN(offset, PAGE_SIZE) != offset) ||
- (offset < 0) ||
- (len > LONG_MAX - offset))
- return -EINVAL;
-
- err = scif_verify_epd(ep);
- if (err)
- return err;
-
- might_sleep();
- nr_pages = len >> PAGE_SHIFT;
-
- req.out_window = &window;
- req.offset = offset;
- req.prot = 0;
- req.nr_bytes = len;
- req.type = SCIF_WINDOW_FULL;
- req.head = &ep->rma_info.reg_list;
-
- spdev = scif_get_peer_dev(ep->remote_dev);
- if (IS_ERR(spdev)) {
- err = PTR_ERR(spdev);
- return err;
- }
- mutex_lock(&ep->rma_info.rma_lock);
- /* Does a valid window exist? */
- err = scif_query_window(&req);
- if (err) {
- dev_err(&ep->remote_dev->sdev->dev,
- "%s %d err %d\n", __func__, __LINE__, err);
- goto error;
- }
- /* Unregister all the windows in this range */
- err = scif_rma_list_unregister(window, offset, nr_pages);
- if (err)
- dev_err(&ep->remote_dev->sdev->dev,
- "%s %d err %d\n", __func__, __LINE__, err);
-error:
- mutex_unlock(&ep->rma_info.rma_lock);
- scif_put_peer_dev(spdev);
- return err;
-}
-EXPORT_SYMBOL_GPL(scif_unregister);
diff --git a/drivers/misc/mic/scif/scif_rma.h b/drivers/misc/mic/scif/scif_rma.h
deleted file mode 100644
index 964dd0fc3657..000000000000
--- a/drivers/misc/mic/scif/scif_rma.h
+++ /dev/null
@@ -1,477 +0,0 @@
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Copyright(c) 2015 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Intel SCIF driver.
- *
- */
-#ifndef SCIF_RMA_H
-#define SCIF_RMA_H
-
-#include <linux/intel-iommu.h>
-#include <linux/mmu_notifier.h>
-
-#include "../bus/scif_bus.h"
-
-/* If this bit is set then the mark is a remote fence mark */
-#define SCIF_REMOTE_FENCE_BIT 31
-/* Magic value used to indicate a remote fence request */
-#define SCIF_REMOTE_FENCE BIT_ULL(SCIF_REMOTE_FENCE_BIT)
-
-#define SCIF_MAX_UNALIGNED_BUF_SIZE (1024 * 1024ULL)
-#define SCIF_KMEM_UNALIGNED_BUF_SIZE (SCIF_MAX_UNALIGNED_BUF_SIZE + \
- (L1_CACHE_BYTES << 1))
-
-#define SCIF_IOVA_START_PFN (1)
-#define SCIF_IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
-#define SCIF_DMA_64BIT_PFN SCIF_IOVA_PFN(DMA_BIT_MASK(64))
-#define SCIF_DMA_63BIT_PFN SCIF_IOVA_PFN(DMA_BIT_MASK(63))
-
-/*
- * struct scif_endpt_rma_info - Per Endpoint Remote Memory Access Information
- *
- * @reg_list: List of registration windows for self
- * @remote_reg_list: List of registration windows for peer
- * @iovad: Offset generator
- * @rma_lock: Synchronizes access to self/remote list and also protects the
- * window from being destroyed while RMAs are in progress.
- * @tc_lock: Synchronizes access to temporary cached windows list
- * for SCIF Registration Caching.
- * @mmn_lock: Synchronizes access to the list of MMU notifiers registered
- * @tw_refcount: Keeps track of number of outstanding temporary registered
- * windows created by scif_vreadfrom/scif_vwriteto which have
- * not been destroyed.
- * @tcw_refcount: Same as tw_refcount but for temporary cached windows
- * @tcw_total_pages: Same as tcw_refcount but in terms of pages pinned
- * @mmn_list: MMU notifier so that we can destroy the windows when required
- * @fence_refcount: Keeps track of number of outstanding remote fence
- * requests which have been received by the peer.
- * @dma_chan: DMA channel used for all DMA transfers for this endpoint.
- * @async_list_del: Detect asynchronous list entry deletion
- * @vma_list: List of vmas with remote memory mappings
- * @markwq: Wait queue used for scif_fence_mark/scif_fence_wait
- */
-struct scif_endpt_rma_info {
- struct list_head reg_list;
- struct list_head remote_reg_list;
- struct iova_domain iovad;
- struct mutex rma_lock;
- spinlock_t tc_lock;
- struct mutex mmn_lock;
- atomic_t tw_refcount;
- atomic_t tcw_refcount;
- atomic_t tcw_total_pages;
- struct list_head mmn_list;
- atomic_t fence_refcount;
- struct dma_chan *dma_chan;
- int async_list_del;
- struct list_head vma_list;
- wait_queue_head_t markwq;
-};
-
-/*
- * struct scif_fence_info - used for tracking fence requests
- *
- * @state: State of this transfer
- * @comp: Completion on which fences wait
- * @dma_mark: Used for storing the DMA mark
- */
-struct scif_fence_info {
- enum scif_msg_state state;
- struct completion comp;
- int dma_mark;
-};
-
-/*
- * struct scif_remote_fence_info - used for tracking remote fence requests
- *
- * @msg: List of SCIF node QP fence messages
- * @list: Link to list of remote fence requests
- */
-struct scif_remote_fence_info {
- struct scifmsg msg;
- struct list_head list;
-};
-
-/*
- * Specifies whether an RMA operation can span across partial windows, a single
- * window or multiple contiguous windows. Mmaps can span across partial windows.
- * Unregistration can span across complete windows. scif_get_pages() can span a
- * single window. A window can also be of type self or peer.
- */
-enum scif_window_type {
- SCIF_WINDOW_PARTIAL,
- SCIF_WINDOW_SINGLE,
- SCIF_WINDOW_FULL,
- SCIF_WINDOW_SELF,
- SCIF_WINDOW_PEER
-};
-
-/* The number of physical addresses that can be stored in a PAGE. */
-#define SCIF_NR_ADDR_IN_PAGE (0x1000 >> 3)
-
-/*
- * struct scif_rma_lookup - RMA lookup data structure for page list transfers
- *
- * Store an array of lookup offsets. Each offset in this array maps
- * one 4K page containing 512 physical addresses i.e. 2MB. 512 such
- * offsets in a 4K page will correspond to 1GB of registered address space.
- *
- * @lookup: Array of offsets
- * @offset: DMA offset of lookup array
- */
-struct scif_rma_lookup {
- dma_addr_t *lookup;
- dma_addr_t offset;
-};
-
-/*
- * struct scif_pinned_pages - A set of pinned pages obtained with
- * scif_pin_pages() which could be part of multiple registered
- * windows across different end points.
- *
- * @nr_pages: Number of pages which is defined as an s64 instead of an int
- * to avoid sign extension with buffers >= 2GB
- * @prot: read/write protections
- * @map_flags: Flags specified during the pin operation
- * @ref_count: Reference count bumped in terms of number of pages
- * @magic: A magic value
- * @pages: Array of pointers to struct page populated via pin_user_pages_fast()
- */
-struct scif_pinned_pages {
- s64 nr_pages;
- int prot;
- int map_flags;
- atomic_t ref_count;
- u64 magic;
- struct page **pages;
-};
-
-/*
- * struct scif_status - Stores DMA status update information
- *
- * @src_dma_addr: Source buffer DMA address
- * @val: src location for value to be written to the destination
- * @ep: SCIF endpoint
- */
-struct scif_status {
- dma_addr_t src_dma_addr;
- u64 val;
- struct scif_endpt *ep;
-};
-
-/*
- * struct scif_cb_arg - Stores the argument of the callback func
- *
- * @src_dma_addr: Source buffer DMA address
- * @status: DMA status
- * @ep: SCIF endpoint
- */
-struct scif_cb_arg {
- dma_addr_t src_dma_addr;
- struct scif_status *status;
- struct scif_endpt *ep;
-};
-
-/*
- * struct scif_window - Registration Window for Self and Remote
- *
- * @nr_pages: Number of pages which is defined as an s64 instead of an int
- * to avoid sign extension with buffers >= 2GB
- * @nr_contig_chunks: Number of contiguous physical chunks
- * @prot: read/write protections
- * @ref_count: reference count in terms of number of pages
- * @magic: Cookie to detect corruption
- * @offset: registered offset
- * @va_for_temp: va address that this window represents
- * @dma_mark: Used to determine if all DMAs against the window are done
- * @ep: Pointer to EP. Useful for passing EP around with messages to
- * avoid expensive list traversals.
- * @list: link to list of windows for the endpoint
- * @type: self or peer window
- * @peer_window: Pointer to peer window. Useful for sending messages to peer
- * without requiring an extra list traversal
- * @unreg_state: unregistration state
- * @offset_freed: True if the offset has been freed
- * @temp: True for temporary windows created via scif_vreadfrom/scif_vwriteto
- * @mm: memory descriptor for the task_struct which initiated the RMA
- * @st: scatter gather table for DMA mappings with IOMMU enabled
- * @pinned_pages: The set of pinned_pages backing this window
- * @alloc_handle: Handle for sending ALLOC_REQ
- * @regwq: Wait Queue for a registration (N)ACK
- * @reg_state: Registration state
- * @unregwq: Wait Queue for an unregistration (N)ACK
- * @dma_addr_lookup: Lookup for physical addresses used for DMA
- * @nr_lookup: Number of entries in lookup
- * @mapped_offset: Offset used to map the window by the peer
- * @dma_addr: Array of physical addresses used for Mgmt node & MIC initiated DMA
- * @num_pages: Array specifying number of pages for each physical address
- */
-struct scif_window {
- s64 nr_pages;
- int nr_contig_chunks;
- int prot;
- int ref_count;
- u64 magic;
- s64 offset;
- unsigned long va_for_temp;
- int dma_mark;
- u64 ep;
- struct list_head list;
- enum scif_window_type type;
- u64 peer_window;
- enum scif_msg_state unreg_state;
- bool offset_freed;
- bool temp;
- struct mm_struct *mm;
- struct sg_table *st;
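- /* Self windows use the first variant below; peer windows use the second */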
- union {
- struct {
- struct scif_pinned_pages *pinned_pages;
- struct scif_allocmsg alloc_handle;
- wait_queue_head_t regwq;
- enum scif_msg_state reg_state;
- wait_queue_head_t unregwq;
- };
- struct {
- struct scif_rma_lookup dma_addr_lookup;
- struct scif_rma_lookup num_pages_lookup;
- int nr_lookup;
- dma_addr_t mapped_offset;
- };
- };
- dma_addr_t *dma_addr;
- u64 *num_pages;
-} __packed;
-
-/*
- * scif_mmu_notif - SCIF mmu notifier information
- *
- * @ep_mmu_notifier: MMU notifier operations
- * @tc_reg_list: List of temp registration windows for self
- * @mm: memory descriptor for the task_struct which initiated the RMA
- * @ep: SCIF endpoint
- * @list: link to list of MMU notifier information
- */
-struct scif_mmu_notif {
-#ifdef CONFIG_MMU_NOTIFIER
- struct mmu_notifier ep_mmu_notifier;
-#endif
- struct list_head tc_reg_list;
- struct mm_struct *mm;
- struct scif_endpt *ep;
- struct list_head list;
-};
-
-enum scif_rma_dir {
- SCIF_LOCAL_TO_REMOTE,
- SCIF_REMOTE_TO_LOCAL
-};
-
-extern struct kmem_cache *unaligned_cache;
-/* Initialize RMA for this EP */
-void scif_rma_ep_init(struct scif_endpt *ep);
-/* Check if epd can be uninitialized */
-int scif_rma_ep_can_uninit(struct scif_endpt *ep);
-/* Obtain a new offset. Callee must grab RMA lock */
-int scif_get_window_offset(struct scif_endpt *ep, int flags,
- s64 offset, int nr_pages, s64 *out_offset);
-/* Free offset. Callee must grab RMA lock */
-void scif_free_window_offset(struct scif_endpt *ep,
- struct scif_window *window, s64 offset);
-/* Create self registration window */
-struct scif_window *scif_create_window(struct scif_endpt *ep, int nr_pages,
- s64 offset, bool temp);
- /* Destroy self registration window. */
-int scif_destroy_window(struct scif_endpt *ep, struct scif_window *window);
-void scif_unmap_window(struct scif_dev *remote_dev, struct scif_window *window);
-/* Map pages of self window to Aperture/PCI */
-int scif_map_window(struct scif_dev *remote_dev,
- struct scif_window *window);
-/* Unregister a self window */
-int scif_unregister_window(struct scif_window *window);
-/* Destroy remote registration window */
-void
-scif_destroy_remote_window(struct scif_window *window);
-/* remove valid remote memory mappings from process address space */
-void scif_zap_mmaps(int node);
-/* Query if any applications have remote memory mappings */
-bool scif_rma_do_apps_have_mmaps(int node);
-/* Cleanup remote registration lists for zombie endpoints */
-void scif_cleanup_rma_for_zombies(int node);
-/* Reserve a DMA channel for a particular endpoint */
-int scif_reserve_dma_chan(struct scif_endpt *ep);
-/* Setup a DMA mark for an endpoint */
-int _scif_fence_mark(scif_epd_t epd, int *mark);
-int scif_prog_signal(scif_epd_t epd, off_t offset, u64 val,
- enum scif_window_type type);
-void scif_alloc_req(struct scif_dev *scifdev, struct scifmsg *msg);
-void scif_alloc_gnt_rej(struct scif_dev *scifdev, struct scifmsg *msg);
-void scif_free_virt(struct scif_dev *scifdev, struct scifmsg *msg);
-void scif_recv_reg(struct scif_dev *scifdev, struct scifmsg *msg);
-void scif_recv_unreg(struct scif_dev *scifdev, struct scifmsg *msg);
-void scif_recv_reg_ack(struct scif_dev *scifdev, struct scifmsg *msg);
-void scif_recv_reg_nack(struct scif_dev *scifdev, struct scifmsg *msg);
-void scif_recv_unreg_ack(struct scif_dev *scifdev, struct scifmsg *msg);
-void scif_recv_unreg_nack(struct scif_dev *scifdev, struct scifmsg *msg);
-void scif_recv_munmap(struct scif_dev *scifdev, struct scifmsg *msg);
-void scif_recv_mark(struct scif_dev *scifdev, struct scifmsg *msg);
-void scif_recv_mark_resp(struct scif_dev *scifdev, struct scifmsg *msg);
-void scif_recv_wait(struct scif_dev *scifdev, struct scifmsg *msg);
-void scif_recv_wait_resp(struct scif_dev *scifdev, struct scifmsg *msg);
-void scif_recv_sig_local(struct scif_dev *scifdev, struct scifmsg *msg);
-void scif_recv_sig_remote(struct scif_dev *scifdev, struct scifmsg *msg);
-void scif_recv_sig_resp(struct scif_dev *scifdev, struct scifmsg *msg);
-void scif_mmu_notif_handler(struct work_struct *work);
-void scif_rma_handle_remote_fences(void);
-void scif_rma_destroy_windows(void);
-void scif_rma_destroy_tcw_invalid(void);
-int scif_drain_dma_intr(struct scif_hw_dev *sdev, struct dma_chan *chan);
-
-struct scif_window_iter {
- s64 offset;
- int index;
-};
-
-static inline void
-scif_init_window_iter(struct scif_window *window, struct scif_window_iter *iter)
-{
- iter->offset = window->offset;
- iter->index = 0;
-}
-
-dma_addr_t scif_off_to_dma_addr(struct scif_window *window, s64 off,
- size_t *nr_bytes,
- struct scif_window_iter *iter);
-static inline
-dma_addr_t __scif_off_to_dma_addr(struct scif_window *window, s64 off)
-{
- return scif_off_to_dma_addr(window, off, NULL, NULL);
-}
-
-static inline bool scif_unaligned(off_t src_offset, off_t dst_offset)
-{
- src_offset = src_offset & (L1_CACHE_BYTES - 1);
- dst_offset = dst_offset & (L1_CACHE_BYTES - 1);
- return src_offset != dst_offset;
-}
-
-/*
- * scif_zalloc:
- * @size: Size of the allocation request.
- *
- * Helper API which attempts to allocate zeroed pages via
- * __get_free_pages(..) first and then falls back on
- * vzalloc(..) if that fails.
- */
-static inline void *scif_zalloc(size_t size)
-{
- void *ret = NULL;
- size_t align = ALIGN(size, PAGE_SIZE);
-
- if (align && get_order(align) < MAX_ORDER)
- ret = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
- get_order(align));
- return ret ? ret : vzalloc(align);
-}
-
-/*
- * scif_free:
- * @addr: Address to be freed.
- * @size: Size of the allocation.
- * Helper API which frees memory allocated via scif_zalloc().
- */
-static inline void scif_free(void *addr, size_t size)
-{
- size_t align = ALIGN(size, PAGE_SIZE);
-
- if (is_vmalloc_addr(addr))
- vfree(addr);
- else
- free_pages((unsigned long)addr, get_order(align));
-}
-
-static inline void scif_get_window(struct scif_window *window, int nr_pages)
-{
- window->ref_count += nr_pages;
-}
-
-static inline void scif_put_window(struct scif_window *window, int nr_pages)
-{
- window->ref_count -= nr_pages;
-}
-
-static inline void scif_set_window_ref(struct scif_window *window, int nr_pages)
-{
- window->ref_count = nr_pages;
-}
-
-static inline void
-scif_queue_for_cleanup(struct scif_window *window, struct list_head *list)
-{
- spin_lock(&scif_info.rmalock);
- list_add_tail(&window->list, list);
- spin_unlock(&scif_info.rmalock);
- schedule_work(&scif_info.misc_work);
-}
-
-static inline void __scif_rma_destroy_tcw_helper(struct scif_window *window)
-{
- list_del_init(&window->list);
- scif_queue_for_cleanup(window, &scif_info.rma_tc);
-}
-
-static inline bool scif_is_iommu_enabled(void)
-{
-#ifdef CONFIG_INTEL_IOMMU
- return intel_iommu_enabled;
-#else
- return false;
-#endif
-}
-#endif /* SCIF_RMA_H */
diff --git a/drivers/misc/mic/scif/scif_rma_list.c b/drivers/misc/mic/scif/scif_rma_list.c
deleted file mode 100644
index ef923ba134c8..000000000000
--- a/drivers/misc/mic/scif/scif_rma_list.c
+++ /dev/null
@@ -1,282 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2015 Intel Corporation.
- *
- * Intel SCIF driver.
- */
-#include "scif_main.h"
-#include <linux/mmu_notifier.h>
-#include <linux/highmem.h>
-
-/*
- * scif_insert_tcw:
- *
- * Insert a temp window to the temp registration list sorted by va_for_temp.
- * RMA lock must be held.
- */
-void scif_insert_tcw(struct scif_window *window, struct list_head *head)
-{
- struct scif_window *curr = NULL;
- struct scif_window *prev = list_entry(head, struct scif_window, list);
- struct list_head *item;
-
- INIT_LIST_HEAD(&window->list);
- /* Compare with the tail; if the entry belongs after it, add it at the end */
- if (!list_empty(head)) {
- curr = list_entry(head->prev, struct scif_window, list);
- if (curr->va_for_temp < window->va_for_temp) {
- list_add_tail(&window->list, head);
- return;
- }
- }
- list_for_each(item, head) {
- curr = list_entry(item, struct scif_window, list);
- if (curr->va_for_temp > window->va_for_temp)
- break;
- prev = curr;
- }
- list_add(&window->list, &prev->list);
-}
-
-/*
- * scif_insert_window:
- *
- * Insert a window to the self registration list sorted by offset.
- * RMA lock must be held.
- */
-void scif_insert_window(struct scif_window *window, struct list_head *head)
-{
- struct scif_window *curr = NULL, *prev = NULL;
- struct list_head *item;
-
- INIT_LIST_HEAD(&window->list);
- list_for_each(item, head) {
- curr = list_entry(item, struct scif_window, list);
- if (curr->offset > window->offset)
- break;
- prev = curr;
- }
- if (!prev)
- list_add(&window->list, head);
- else
- list_add(&window->list, &prev->list);
- scif_set_window_ref(window, window->nr_pages);
-}
-
-/*
- * scif_query_tcw:
- *
- * Query the temp cached registration list of the ep for an overlapping
- * window. On a permission mismatch, destroy the previous window. If the
- * permissions match and the overlap is partial, destroy the window but
- * return the new range.
- * RMA lock must be held.
- */
-int scif_query_tcw(struct scif_endpt *ep, struct scif_rma_req *req)
-{
- struct list_head *item, *temp, *head = req->head;
- struct scif_window *window;
- u64 start_va_window, start_va_req = req->va_for_temp;
- u64 end_va_window, end_va_req = start_va_req + req->nr_bytes;
-
- if (!req->nr_bytes)
- return -EINVAL;
- /*
- * Avoid traversing the entire list to find out that there
- * is no entry that matches
- */
- if (!list_empty(head)) {
- window = list_last_entry(head, struct scif_window, list);
- end_va_window = window->va_for_temp +
- (window->nr_pages << PAGE_SHIFT);
- if (start_va_req > end_va_window)
- return -ENXIO;
- }
- list_for_each_safe(item, temp, head) {
- window = list_entry(item, struct scif_window, list);
- start_va_window = window->va_for_temp;
- end_va_window = window->va_for_temp +
- (window->nr_pages << PAGE_SHIFT);
- if (start_va_req < start_va_window &&
- end_va_req < start_va_window)
- break;
- if (start_va_req >= end_va_window)
- continue;
- if ((window->prot & req->prot) == req->prot) {
- if (start_va_req >= start_va_window &&
- end_va_req <= end_va_window) {
- *req->out_window = window;
- return 0;
- }
- /* expand window */
- if (start_va_req < start_va_window) {
- req->nr_bytes +=
- start_va_window - start_va_req;
- req->va_for_temp = start_va_window;
- }
- if (end_va_req >= end_va_window)
- req->nr_bytes += end_va_window - end_va_req;
- }
- /* Destroy the old window to create a new one */
- __scif_rma_destroy_tcw_helper(window);
- break;
- }
- return -ENXIO;
-}
-
-/*
- * scif_query_window:
- *
- * Query the registration list and check if a valid contiguous
- * range of windows exist.
- * RMA lock must be held.
- */
-int scif_query_window(struct scif_rma_req *req)
-{
- struct list_head *item;
- struct scif_window *window;
- s64 end_offset, offset = req->offset;
- u64 tmp_min, nr_bytes_left = req->nr_bytes;
-
- if (!req->nr_bytes)
- return -EINVAL;
-
- list_for_each(item, req->head) {
- window = list_entry(item, struct scif_window, list);
- end_offset = window->offset +
- (window->nr_pages << PAGE_SHIFT);
- if (offset < window->offset)
- /* Offset not found! */
- return -ENXIO;
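- /* Window ends at or before the requested offset; keep scanning */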
- if (offset >= end_offset)
- continue;
- /* Check read/write protections. */
- if ((window->prot & req->prot) != req->prot)
- return -EPERM;
- if (nr_bytes_left == req->nr_bytes)
- /* Store the first window */
- *req->out_window = window;
- tmp_min = min((u64)end_offset - offset, nr_bytes_left);
- nr_bytes_left -= tmp_min;
- offset += tmp_min;
- /*
- * Range requested encompasses
- * multiple windows contiguously.
- */
- if (!nr_bytes_left) {
- /* Done for partial window */
- if (req->type == SCIF_WINDOW_PARTIAL ||
- req->type == SCIF_WINDOW_SINGLE)
- return 0;
- /* Extra logic for full windows */
- if (offset == end_offset)
- /* Spanning multiple whole windows */
- return 0;
- /* Not spanning multiple whole windows */
- return -ENXIO;
- }
- if (req->type == SCIF_WINDOW_SINGLE)
- break;
- }
- dev_err(scif_info.mdev.this_device,
- "%s %d ENXIO\n", __func__, __LINE__);
- return -ENXIO;
-}
-
-/*
- * scif_rma_list_unregister:
- *
- * Traverse the self registration list starting from window:
- * 1) Call scif_unregister_window(..)
- * RMA lock must be held.
- */
-int scif_rma_list_unregister(struct scif_window *window,
- s64 offset, int nr_pages)
-{
- struct scif_endpt *ep = (struct scif_endpt *)window->ep;
- struct list_head *head = &ep->rma_info.reg_list;
- s64 end_offset;
- int err = 0;
- int loop_nr_pages;
- struct scif_window *_window;
-
- list_for_each_entry_safe_from(window, _window, head, list) {
- end_offset = window->offset + (window->nr_pages << PAGE_SHIFT);
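- /* Count only the pages of this window that fall within the request */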
- loop_nr_pages = min((int)((end_offset - offset) >> PAGE_SHIFT),
- nr_pages);
- err = scif_unregister_window(window);
- if (err)
- return err;
- nr_pages -= loop_nr_pages;
- offset += (loop_nr_pages << PAGE_SHIFT);
- if (!nr_pages)
- break;
- }
- return 0;
-}
-
-/*
- * scif_unmap_all_windows:
- *
- * Traverse all the windows in the self registration list and:
- * 1) Delete any DMA mappings created
- */
-void scif_unmap_all_windows(scif_epd_t epd)
-{
- struct list_head *item, *tmp;
- struct scif_window *window;
- struct scif_endpt *ep = (struct scif_endpt *)epd;
- struct list_head *head = &ep->rma_info.reg_list;
-
- mutex_lock(&ep->rma_info.rma_lock);
- list_for_each_safe(item, tmp, head) {
- window = list_entry(item, struct scif_window, list);
- scif_unmap_window(ep->remote_dev, window);
- }
- mutex_unlock(&ep->rma_info.rma_lock);
-}
-
-/*
- * scif_unregister_all_windows:
- *
- * Traverse all the windows in the self registration list and:
- * 1) Call scif_unregister_window(..)
- * RMA lock must be held.
- */
-int scif_unregister_all_windows(scif_epd_t epd)
-{
- struct list_head *item, *tmp;
- struct scif_window *window;
- struct scif_endpt *ep = (struct scif_endpt *)epd;
- struct list_head *head = &ep->rma_info.reg_list;
- int err = 0;
-
- mutex_lock(&ep->rma_info.rma_lock);
-retry:
- item = NULL;
- tmp = NULL;
- list_for_each_safe(item, tmp, head) {
- window = list_entry(item, struct scif_window, list);
- ep->rma_info.async_list_del = 0;
- err = scif_unregister_window(window);
- if (err)
- dev_err(scif_info.mdev.this_device,
- "%s %d err %d\n",
- __func__, __LINE__, err);
- /*
- * Need to restart list traversal if there has been
- * an asynchronous list entry deletion.
- */
- if (READ_ONCE(ep->rma_info.async_list_del))
- goto retry;
- }
- mutex_unlock(&ep->rma_info.rma_lock);
- if (!list_empty(&ep->rma_info.mmn_list)) {
- spin_lock(&scif_info.rmalock);
- list_add_tail(&ep->mmu_list, &scif_info.mmu_notif_cleanup);
- spin_unlock(&scif_info.rmalock);
- schedule_work(&scif_info.mmu_notif_work);
- }
- return err;
-}
diff --git a/drivers/misc/mic/scif/scif_rma_list.h b/drivers/misc/mic/scif/scif_rma_list.h
deleted file mode 100644
index 0f8e0ed65614..000000000000
--- a/drivers/misc/mic/scif/scif_rma_list.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2015 Intel Corporation.
- *
- * Intel SCIF driver.
- */
-#ifndef SCIF_RMA_LIST_H
-#define SCIF_RMA_LIST_H
-
-/*
- * struct scif_rma_req - Self Registration list RMA Request query
- *
- * @out_window: Returns the window if found
- * @offset: Starting offset
- * @nr_bytes: number of bytes
- * @prot: protection requested i.e. read or write or both
- * @type: Specify single, partial or multiple windows
- * @head: Head of list on which to search
- * @va_for_temp: VA for searching temporary cached windows
- */
-struct scif_rma_req {
- struct scif_window **out_window;
- union {
- s64 offset;
- unsigned long va_for_temp;
- };
- size_t nr_bytes;
- int prot;
- enum scif_window_type type;
- struct list_head *head;
-};
-
-/* Insert */
-void scif_insert_window(struct scif_window *window, struct list_head *head);
-void scif_insert_tcw(struct scif_window *window,
- struct list_head *head);
-/* Query */
-int scif_query_window(struct scif_rma_req *request);
-int scif_query_tcw(struct scif_endpt *ep, struct scif_rma_req *request);
-/* Called from close to unregister all self windows */
-int scif_unregister_all_windows(scif_epd_t epd);
-void scif_unmap_all_windows(scif_epd_t epd);
-/* Traverse list and unregister */
-int scif_rma_list_unregister(struct scif_window *window, s64 offset,
- int nr_pages);
-#endif /* SCIF_RMA_LIST_H */
diff --git a/drivers/misc/mic/vop/Makefile b/drivers/misc/mic/vop/Makefile
deleted file mode 100644
index 51b9b0022786..000000000000
--- a/drivers/misc/mic/vop/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile - Intel MIC Linux driver.
-# Copyright(c) 2016, Intel Corporation.
-#
-obj-$(CONFIG_VOP) := vop.o
-
-vop-objs += vop_main.o
-vop-objs += vop_debugfs.o
-vop-objs += vop_vringh.o
diff --git a/drivers/misc/mic/vop/vop_debugfs.c b/drivers/misc/mic/vop/vop_debugfs.c
deleted file mode 100644
index 9d4f175f4dd1..000000000000
--- a/drivers/misc/mic/vop/vop_debugfs.c
+++ /dev/null
@@ -1,184 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Intel Virtio Over PCIe (VOP) driver.
- */
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-
-#include "vop_main.h"
-
-static int vop_dp_show(struct seq_file *s, void *pos)
-{
- struct mic_device_desc *d;
- struct mic_device_ctrl *dc;
- struct mic_vqconfig *vqconfig;
- __u32 *features;
- __u8 *config;
- struct vop_info *vi = s->private;
- struct vop_device *vpdev = vi->vpdev;
- struct mic_bootparam *bootparam = vpdev->hw_ops->get_dp(vpdev);
- int j, k;
-
- seq_printf(s, "Bootparam: magic 0x%x\n",
- bootparam->magic);
- seq_printf(s, "Bootparam: h2c_config_db %d\n",
- bootparam->h2c_config_db);
- seq_printf(s, "Bootparam: node_id %d\n",
- bootparam->node_id);
- seq_printf(s, "Bootparam: c2h_scif_db %d\n",
- bootparam->c2h_scif_db);
- seq_printf(s, "Bootparam: h2c_scif_db %d\n",
- bootparam->h2c_scif_db);
- seq_printf(s, "Bootparam: scif_host_dma_addr 0x%llx\n",
- bootparam->scif_host_dma_addr);
- seq_printf(s, "Bootparam: scif_card_dma_addr 0x%llx\n",
- bootparam->scif_card_dma_addr);
-
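- /* Walk the device page: each device descriptor is followed by its control struct */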
- for (j = sizeof(*bootparam);
- j < MIC_DP_SIZE; j += mic_total_desc_size(d)) {
- d = (void *)bootparam + j;
- dc = (void *)d + mic_aligned_desc_size(d);
-
- /* end of list */
- if (d->type == 0)
- break;
-
- if (d->type == -1)
- continue;
-
- seq_printf(s, "Type %d ", d->type);
- seq_printf(s, "Num VQ %d ", d->num_vq);
- seq_printf(s, "Feature Len %d\n", d->feature_len);
- seq_printf(s, "Config Len %d ", d->config_len);
- seq_printf(s, "Shutdown Status %d\n", d->status);
-
- for (k = 0; k < d->num_vq; k++) {
- vqconfig = mic_vq_config(d) + k;
- seq_printf(s, "vqconfig[%d]: ", k);
- seq_printf(s, "address 0x%llx ",
- vqconfig->address);
- seq_printf(s, "num %d ", vqconfig->num);
- seq_printf(s, "used address 0x%llx\n",
- vqconfig->used_address);
- }
-
- features = (__u32 *)mic_vq_features(d);
- seq_printf(s, "Features: Host 0x%x ", features[0]);
- seq_printf(s, "Guest 0x%x\n", features[1]);
-
- config = mic_vq_configspace(d);
- for (k = 0; k < d->config_len; k++)
- seq_printf(s, "config[%d]=%d\n", k, config[k]);
-
- seq_puts(s, "Device control:\n");
- seq_printf(s, "Config Change %d ", dc->config_change);
- seq_printf(s, "Vdev reset %d\n", dc->vdev_reset);
- seq_printf(s, "Guest Ack %d ", dc->guest_ack);
- seq_printf(s, "Host ack %d\n", dc->host_ack);
- seq_printf(s, "Used address updated %d ",
- dc->used_address_updated);
- seq_printf(s, "Vdev 0x%llx\n", dc->vdev);
- seq_printf(s, "c2h doorbell %d ", dc->c2h_vdev_db);
- seq_printf(s, "h2c doorbell %d\n", dc->h2c_vdev_db);
- }
- schedule_work(&vi->hotplug_work);
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(vop_dp);
-
-static int vop_vdev_info_show(struct seq_file *s, void *unused)
-{
- struct vop_info *vi = s->private;
- struct list_head *pos, *tmp;
- struct vop_vdev *vdev;
- int i, j;
-
- mutex_lock(&vi->vop_mutex);
- list_for_each_safe(pos, tmp, &vi->vdev_list) {
- vdev = list_entry(pos, struct vop_vdev, list);
- seq_printf(s, "VDEV type %d state %s in %ld out %ld in_dma %ld out_dma %ld\n",
- vdev->virtio_id,
- vop_vdevup(vdev) ? "UP" : "DOWN",
- vdev->in_bytes,
- vdev->out_bytes,
- vdev->in_bytes_dma,
- vdev->out_bytes_dma);
- for (i = 0; i < MIC_MAX_VRINGS; i++) {
- struct vring_desc *desc;
- struct vring_avail *avail;
- struct vring_used *used;
- struct vop_vringh *vvr = &vdev->vvr[i];
- struct vringh *vrh = &vvr->vrh;
- int num = vrh->vring.num;
-
- if (!num)
- continue;
- desc = vrh->vring.desc;
- seq_printf(s, "vring i %d avail_idx %d",
- i, vvr->vring.info->avail_idx & (num - 1));
- seq_printf(s, " vring i %d avail_idx %d\n",
- i, vvr->vring.info->avail_idx);
- seq_printf(s, "vrh i %d weak_barriers %d",
- i, vrh->weak_barriers);
- seq_printf(s, " last_avail_idx %d last_used_idx %d",
- vrh->last_avail_idx, vrh->last_used_idx);
- seq_printf(s, " completed %d\n", vrh->completed);
- for (j = 0; j < num; j++) {
- seq_printf(s, "desc[%d] addr 0x%llx len %d",
- j, desc->addr, desc->len);
- seq_printf(s, " flags 0x%x next %d\n",
- desc->flags, desc->next);
- desc++;
- }
- avail = vrh->vring.avail;
- seq_printf(s, "avail flags 0x%x idx %d\n",
- vringh16_to_cpu(vrh, avail->flags),
- vringh16_to_cpu(vrh,
- avail->idx) & (num - 1));
- seq_printf(s, "avail flags 0x%x idx %d\n",
- vringh16_to_cpu(vrh, avail->flags),
- vringh16_to_cpu(vrh, avail->idx));
- for (j = 0; j < num; j++)
- seq_printf(s, "avail ring[%d] %d\n",
- j, avail->ring[j]);
- used = vrh->vring.used;
- seq_printf(s, "used flags 0x%x idx %d\n",
- vringh16_to_cpu(vrh, used->flags),
- vringh16_to_cpu(vrh, used->idx) & (num - 1));
- seq_printf(s, "used flags 0x%x idx %d\n",
- vringh16_to_cpu(vrh, used->flags),
- vringh16_to_cpu(vrh, used->idx));
- for (j = 0; j < num; j++)
- seq_printf(s, "used ring[%d] id %d len %d\n",
- j, vringh32_to_cpu(vrh,
- used->ring[j].id),
- vringh32_to_cpu(vrh,
- used->ring[j].len));
- }
- }
- mutex_unlock(&vi->vop_mutex);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(vop_vdev_info);
-
-void vop_init_debugfs(struct vop_info *vi)
-{
- char name[16];
-
- snprintf(name, sizeof(name), "%s%d", KBUILD_MODNAME, vi->vpdev->dnode);
- vi->dbg = debugfs_create_dir(name, NULL);
- debugfs_create_file("dp", 0444, vi->dbg, vi, &vop_dp_fops);
- debugfs_create_file("vdev_info", 0444, vi->dbg, vi, &vop_vdev_info_fops);
-}
-
-void vop_exit_debugfs(struct vop_info *vi)
-{
- debugfs_remove_recursive(vi->dbg);
-}
diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c
deleted file mode 100644
index 714b94f42d38..000000000000
--- a/drivers/misc/mic/vop/vop_main.c
+++ /dev/null
@@ -1,784 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Adapted from:
- *
- * virtio for kvm on s390
- *
- * Copyright IBM Corp. 2008
- *
- * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
- *
- * Intel Virtio Over PCIe (VOP) driver.
- */
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/dma-mapping.h>
-#include <linux/io-64-nonatomic-lo-hi.h>
-
-#include "vop_main.h"
-
-#define VOP_MAX_VRINGS 4
-
-/*
- * _vop_vdev - Allocated per virtio device instance injected by the peer.
- *
- * @vdev: Virtio device
- * @desc: Virtio device page descriptor
- * @dc: Virtio device control
- * @vpdev: VOP device which is the parent for this virtio device
- * @vr: Buffer for accessing the VRING
- * @used_virt: Virtual address of used ring
- * @used: DMA address of used ring
- * @used_size: Size of the used buffer
- * @reset_done: Track whether VOP reset is complete
- * @virtio_cookie: Cookie returned upon requesting an interrupt
- * @c2h_vdev_db: The doorbell used by the guest to interrupt the host
- * @h2c_vdev_db: The doorbell used by the host to interrupt the guest
- * @dnode: The destination node
- */
-struct _vop_vdev {
- struct virtio_device vdev;
- struct mic_device_desc __iomem *desc;
- struct mic_device_ctrl __iomem *dc;
- struct vop_device *vpdev;
- void __iomem *vr[VOP_MAX_VRINGS];
- void *used_virt[VOP_MAX_VRINGS];
- dma_addr_t used[VOP_MAX_VRINGS];
- int used_size[VOP_MAX_VRINGS];
- struct completion reset_done;
- struct mic_irq *virtio_cookie;
- int c2h_vdev_db;
- int h2c_vdev_db;
- int dnode;
-};
-
-#define to_vopvdev(vd) container_of(vd, struct _vop_vdev, vdev)
-
-#define _vop_aligned_desc_size(d) __mic_align(_vop_desc_size(d), 8)
-
-/* Helper API to obtain the parent of the virtio device */
-static inline struct device *_vop_dev(struct _vop_vdev *vdev)
-{
- return vdev->vdev.dev.parent;
-}
-
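-/* The feature bitmap appears twice: device-offered bits, then driver-accepted bits */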
-static inline unsigned _vop_desc_size(struct mic_device_desc __iomem *desc)
-{
- return sizeof(*desc)
- + ioread8(&desc->num_vq) * sizeof(struct mic_vqconfig)
- + ioread8(&desc->feature_len) * 2
- + ioread8(&desc->config_len);
-}
-
-static inline struct mic_vqconfig __iomem *
-_vop_vq_config(struct mic_device_desc __iomem *desc)
-{
- return (struct mic_vqconfig __iomem *)(desc + 1);
-}
-
-static inline u8 __iomem *
-_vop_vq_features(struct mic_device_desc __iomem *desc)
-{
- return (u8 __iomem *)(_vop_vq_config(desc) + ioread8(&desc->num_vq));
-}
-
-static inline u8 __iomem *
-_vop_vq_configspace(struct mic_device_desc __iomem *desc)
-{
- return _vop_vq_features(desc) + ioread8(&desc->feature_len) * 2;
-}
-
-static inline unsigned
-_vop_total_desc_size(struct mic_device_desc __iomem *desc)
-{
- return _vop_aligned_desc_size(desc) + sizeof(struct mic_device_ctrl);
-}
-
-/* This gets the device's feature bits. */
-static u64 vop_get_features(struct virtio_device *vdev)
-{
- unsigned int i, bits;
- u64 features = 0;
- struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;
- u8 __iomem *in_features = _vop_vq_features(desc);
- int feature_len = ioread8(&desc->feature_len);
-
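- /* The first half of the bitmap holds the device-offered feature bits */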
- bits = min_t(unsigned, feature_len, sizeof(vdev->features)) * 8;
- for (i = 0; i < bits; i++)
- if (ioread8(&in_features[i / 8]) & (BIT(i % 8)))
- features |= BIT_ULL(i);
-
- return features;
-}
-
-static void vop_transport_features(struct virtio_device *vdev)
-{
- /*
- * Packed ring isn't enabled on virtio_vop for now,
- * because virtio_vop uses vring_new_virtqueue() which
- * creates virtio rings on preallocated memory.
- */
- __virtio_clear_bit(vdev, VIRTIO_F_RING_PACKED);
- __virtio_set_bit(vdev, VIRTIO_F_ACCESS_PLATFORM);
-}
-
-static int vop_finalize_features(struct virtio_device *vdev)
-{
- unsigned int i, bits;
- struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;
- u8 feature_len = ioread8(&desc->feature_len);
- /* Second half of bitmap is features we accept. */
- u8 __iomem *out_features =
- _vop_vq_features(desc) + feature_len;
-
- /* Give virtio_ring a chance to accept features. */
- vring_transport_features(vdev);
-
- /* Give virtio_vop a chance to accept features. */
- vop_transport_features(vdev);
-
- memset_io(out_features, 0, feature_len);
- bits = min_t(unsigned, feature_len,
- sizeof(vdev->features)) * 8;
- for (i = 0; i < bits; i++) {
- if (__virtio_test_bit(vdev, i))
- iowrite8(ioread8(&out_features[i / 8]) | (1 << (i % 8)),
- &out_features[i / 8]);
- }
- return 0;
-}
-
-/*
- * Reading and writing elements in config space
- */
-static void vop_get(struct virtio_device *vdev, unsigned int offset,
- void *buf, unsigned len)
-{
- struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;
-
- if (offset + len > ioread8(&desc->config_len))
- return;
- memcpy_fromio(buf, _vop_vq_configspace(desc) + offset, len);
-}
-
-static void vop_set(struct virtio_device *vdev, unsigned int offset,
- const void *buf, unsigned len)
-{
- struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;
-
- if (offset + len > ioread8(&desc->config_len))
- return;
- memcpy_toio(_vop_vq_configspace(desc) + offset, buf, len);
-}
-
-/*
- * The operations to get and set the status word just access the status
- * field of the device descriptor. set_status also interrupts the host
- * to notify it of status changes.
- */
-static u8 vop_get_status(struct virtio_device *vdev)
-{
- return ioread8(&to_vopvdev(vdev)->desc->status);
-}
-
-static void vop_set_status(struct virtio_device *dev, u8 status)
-{
- struct _vop_vdev *vdev = to_vopvdev(dev);
- struct vop_device *vpdev = vdev->vpdev;
-
- if (!status)
- return;
- iowrite8(status, &vdev->desc->status);
- vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);
-}
-
-/* Inform host on a virtio device reset and wait for ack from host */
-static void vop_reset_inform_host(struct virtio_device *dev)
-{
- struct _vop_vdev *vdev = to_vopvdev(dev);
- struct mic_device_ctrl __iomem *dc = vdev->dc;
- struct vop_device *vpdev = vdev->vpdev;
- int retry;
-
- iowrite8(0, &dc->host_ack);
- iowrite8(1, &dc->vdev_reset);
- vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);
-
- /* Wait up to 10s (100 x 100ms) until the host completes all card accesses and acks the reset */
- for (retry = 100; retry--;) {
- if (ioread8(&dc->host_ack))
- break;
- msleep(100);
- }
-
- dev_dbg(_vop_dev(vdev), "%s: retry: %d\n", __func__, retry);
-
- /* Reset status to 0 in case we timed out */
- iowrite8(0, &vdev->desc->status);
-}
-
-static void vop_reset(struct virtio_device *dev)
-{
- struct _vop_vdev *vdev = to_vopvdev(dev);
-
- dev_dbg(_vop_dev(vdev), "%s: virtio id %d\n",
- __func__, dev->id.device);
-
- vop_reset_inform_host(dev);
- complete_all(&vdev->reset_done);
-}
-
-/*
- * The virtio_ring code calls this API when it wants to notify the Host.
- */
-static bool vop_notify(struct virtqueue *vq)
-{
- struct _vop_vdev *vdev = vq->priv;
- struct vop_device *vpdev = vdev->vpdev;
-
- vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);
- return true;
-}
-
-static void vop_del_vq(struct virtqueue *vq, int n)
-{
- struct _vop_vdev *vdev = to_vopvdev(vq->vdev);
- struct vop_device *vpdev = vdev->vpdev;
-
- dma_unmap_single(&vpdev->dev, vdev->used[n],
- vdev->used_size[n], DMA_BIDIRECTIONAL);
- free_pages((unsigned long)vdev->used_virt[n],
- get_order(vdev->used_size[n]));
- vring_del_virtqueue(vq);
- vpdev->hw_ops->unmap(vpdev, vdev->vr[n]);
- vdev->vr[n] = NULL;
-}
-
-static void vop_del_vqs(struct virtio_device *dev)
-{
- struct _vop_vdev *vdev = to_vopvdev(dev);
- struct virtqueue *vq, *n;
- int idx = 0;
-
- dev_dbg(_vop_dev(vdev), "%s\n", __func__);
-
- list_for_each_entry_safe(vq, n, &dev->vqs, list)
- vop_del_vq(vq, idx++);
-}
-
-static struct virtqueue *vop_new_virtqueue(unsigned int index,
- unsigned int num,
- struct virtio_device *vdev,
- bool context,
- void *pages,
- bool (*notify)(struct virtqueue *vq),
- void (*callback)(struct virtqueue *vq),
- const char *name,
- void *used)
-{
- bool weak_barriers = false;
- struct vring vring;
-
- vring_init(&vring, num, pages, MIC_VIRTIO_RING_ALIGN);
- vring.used = used;
-
- return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
- notify, callback, name);
-}
-
-/*
- * This routine assigns vrings allocated in host/IO memory. Code in
- * virtio_ring.c however continues to access this IO memory as if it were
- * local memory, without IO accessors.
- */
-static struct virtqueue *vop_find_vq(struct virtio_device *dev,
- unsigned index,
- void (*callback)(struct virtqueue *vq),
- const char *name, bool ctx)
-{
- struct _vop_vdev *vdev = to_vopvdev(dev);
- struct vop_device *vpdev = vdev->vpdev;
- struct mic_vqconfig __iomem *vqconfig;
- struct mic_vqconfig config;
- struct virtqueue *vq;
- void __iomem *va;
- struct _mic_vring_info __iomem *info;
- void *used;
- int vr_size, _vr_size, err, magic;
- u8 type = ioread8(&vdev->desc->type);
-
- if (index >= ioread8(&vdev->desc->num_vq))
- return ERR_PTR(-ENOENT);
-
- if (!name)
- return ERR_PTR(-ENOENT);
-
-	/* First assign the vrings allocated in host memory */
- vqconfig = _vop_vq_config(vdev->desc) + index;
- memcpy_fromio(&config, vqconfig, sizeof(config));
- _vr_size = round_up(vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN), 4);
- vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info));
- va = vpdev->hw_ops->remap(vpdev, le64_to_cpu(config.address), vr_size);
- if (!va)
- return ERR_PTR(-ENOMEM);
- vdev->vr[index] = va;
- memset_io(va, 0x0, _vr_size);
-
- info = va + _vr_size;
- magic = ioread32(&info->magic);
-
- if (WARN(magic != MIC_MAGIC + type + index, "magic mismatch")) {
- err = -EIO;
- goto unmap;
- }
-
- vdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 +
- sizeof(struct vring_used_elem) *
- le16_to_cpu(config.num));
- used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
- get_order(vdev->used_size[index]));
- vdev->used_virt[index] = used;
- if (!used) {
- err = -ENOMEM;
- dev_err(_vop_dev(vdev), "%s %d err %d\n",
- __func__, __LINE__, err);
- goto unmap;
- }
-
- vq = vop_new_virtqueue(index, le16_to_cpu(config.num), dev, ctx,
- (void __force *)va, vop_notify, callback,
- name, used);
- if (!vq) {
- err = -ENOMEM;
- goto free_used;
- }
-
- vdev->used[index] = dma_map_single(&vpdev->dev, used,
- vdev->used_size[index],
- DMA_BIDIRECTIONAL);
- if (dma_mapping_error(&vpdev->dev, vdev->used[index])) {
- err = -ENOMEM;
- dev_err(_vop_dev(vdev), "%s %d err %d\n",
- __func__, __LINE__, err);
- goto del_vq;
- }
- writeq(vdev->used[index], &vqconfig->used_address);
-
- vq->priv = vdev;
- return vq;
-del_vq:
- vring_del_virtqueue(vq);
-free_used:
- free_pages((unsigned long)used,
- get_order(vdev->used_size[index]));
-unmap:
- vpdev->hw_ops->unmap(vpdev, vdev->vr[index]);
- return ERR_PTR(err);
-}
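
To make the split ownership above concrete, here is a sketch of the ring
memory layout that vop_find_vq() ends up with (names as used in this file):

    host-allocated, remapped by the guest at 'va':
        [ vring: desc + avail (+ the host's unused used ring) ]  _vr_size bytes
        [ struct _mic_vring_info (magic, avail_idx) ]

    guest-allocated 'used' buffer, DMA-mapped and published to the host
    through vqconfig->used_address:
        [ used ring: 3 * __u16 + vring_used_elem[num] ]          used_size bytes

vop_new_virtqueue() then points vring.used at the guest-side copy, so the
used ring is updated in local memory rather than across PCIe.
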
-
-static int vop_find_vqs(struct virtio_device *dev, unsigned nvqs,
- struct virtqueue *vqs[],
- vq_callback_t *callbacks[],
- const char * const names[], const bool *ctx,
- struct irq_affinity *desc)
-{
- struct _vop_vdev *vdev = to_vopvdev(dev);
- struct vop_device *vpdev = vdev->vpdev;
- struct mic_device_ctrl __iomem *dc = vdev->dc;
- int i, err, retry, queue_idx = 0;
-
- /* We must have this many virtqueues. */
- if (nvqs > ioread8(&vdev->desc->num_vq))
- return -ENOENT;
-
- for (i = 0; i < nvqs; ++i) {
- if (!names[i]) {
- vqs[i] = NULL;
- continue;
- }
-
- dev_dbg(_vop_dev(vdev), "%s: %d: %s\n",
- __func__, i, names[i]);
- vqs[i] = vop_find_vq(dev, queue_idx++, callbacks[i], names[i],
- ctx ? ctx[i] : false);
- if (IS_ERR(vqs[i])) {
- err = PTR_ERR(vqs[i]);
- goto error;
- }
- }
-
- iowrite8(1, &dc->used_address_updated);
- /*
- * Send an interrupt to the host to inform it that used
- * rings have been re-assigned.
- */
- vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);
- for (retry = 100; --retry;) {
- if (!ioread8(&dc->used_address_updated))
- break;
- msleep(100);
- }
-
- dev_dbg(_vop_dev(vdev), "%s: retry: %d\n", __func__, retry);
- if (!retry) {
- err = -ENODEV;
- goto error;
- }
-
- return 0;
-error:
- vop_del_vqs(dev);
- return err;
-}
-
-/*
- * The config ops structure as defined by virtio config
- */
-static const struct virtio_config_ops vop_vq_config_ops = {
- .get_features = vop_get_features,
- .finalize_features = vop_finalize_features,
- .get = vop_get,
- .set = vop_set,
- .get_status = vop_get_status,
- .set_status = vop_set_status,
- .reset = vop_reset,
- .find_vqs = vop_find_vqs,
- .del_vqs = vop_del_vqs,
-};
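
For orientation, a virtio driver bound to this device never calls these ops
directly; the virtio core dispatches through vdev->config. A minimal
hypothetical driver fragment (struct example_config and its field are made
up for illustration) would reach them like this:

    /* Illustrative only: how the virtio core lands in the vop_* ops above. */
    static int example_probe(struct virtio_device *vdev)
    {
            u8 max_units;

            /* virtio_cread() -> vop_vq_config_ops.get() == vop_get() */
            virtio_cread(vdev, struct example_config, max_units, &max_units);

            /* virtio_device_ready() -> vop_set_status() with DRIVER_OK set */
            virtio_device_ready(vdev);
            return 0;
    }
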
-
-static irqreturn_t vop_virtio_intr_handler(int irq, void *data)
-{
- struct _vop_vdev *vdev = data;
- struct vop_device *vpdev = vdev->vpdev;
- struct virtqueue *vq;
-
- vpdev->hw_ops->ack_interrupt(vpdev, vdev->h2c_vdev_db);
- list_for_each_entry(vq, &vdev->vdev.vqs, list)
- vring_interrupt(0, vq);
-
- return IRQ_HANDLED;
-}
-
-static void vop_virtio_release_dev(struct device *_d)
-{
- struct virtio_device *vdev =
- container_of(_d, struct virtio_device, dev);
- struct _vop_vdev *vop_vdev =
- container_of(vdev, struct _vop_vdev, vdev);
-
- kfree(vop_vdev);
-}
-
-/*
- * adds a new device and registers it with virtio;
- * appropriate drivers are loaded by the device model
- */
-static int _vop_add_device(struct mic_device_desc __iomem *d,
- unsigned int offset, struct vop_device *vpdev,
- int dnode)
-{
- struct _vop_vdev *vdev, *reg_dev = NULL;
- int ret;
- u8 type = ioread8(&d->type);
-
- vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
- if (!vdev)
- return -ENOMEM;
-
- vdev->vpdev = vpdev;
- vdev->vdev.dev.parent = &vpdev->dev;
- vdev->vdev.dev.release = vop_virtio_release_dev;
- vdev->vdev.id.device = type;
- vdev->vdev.config = &vop_vq_config_ops;
- vdev->desc = d;
- vdev->dc = (void __iomem *)d + _vop_aligned_desc_size(d);
- vdev->dnode = dnode;
- vdev->vdev.priv = (void *)(unsigned long)dnode;
- init_completion(&vdev->reset_done);
-
- vdev->h2c_vdev_db = vpdev->hw_ops->next_db(vpdev);
- vdev->virtio_cookie = vpdev->hw_ops->request_irq(vpdev,
- vop_virtio_intr_handler, "virtio intr",
- vdev, vdev->h2c_vdev_db);
- if (IS_ERR(vdev->virtio_cookie)) {
- ret = PTR_ERR(vdev->virtio_cookie);
- goto kfree;
- }
- iowrite8((u8)vdev->h2c_vdev_db, &vdev->dc->h2c_vdev_db);
- vdev->c2h_vdev_db = ioread8(&vdev->dc->c2h_vdev_db);
-
- ret = register_virtio_device(&vdev->vdev);
- reg_dev = vdev;
- if (ret) {
- dev_err(_vop_dev(vdev),
- "Failed to register vop device %u type %u\n",
- offset, type);
- goto free_irq;
- }
- writeq((unsigned long)vdev, &vdev->dc->vdev);
- dev_dbg(_vop_dev(vdev), "%s: registered vop device %u type %u vdev %p\n",
- __func__, offset, type, vdev);
-
- return 0;
-
-free_irq:
- vpdev->hw_ops->free_irq(vpdev, vdev->virtio_cookie, vdev);
-kfree:
- if (reg_dev)
- put_device(&vdev->vdev.dev);
- else
- kfree(vdev);
- return ret;
-}
-
-/*
- * match for a vop device with a specific desc pointer
- */
-static int vop_match_desc(struct device *dev, void *data)
-{
- struct virtio_device *_dev = dev_to_virtio(dev);
- struct _vop_vdev *vdev = to_vopvdev(_dev);
-
- return vdev->desc == (void __iomem *)data;
-}
-
-static struct _vop_vdev *vop_dc_to_vdev(struct mic_device_ctrl __iomem *dc)
-{
- return (struct _vop_vdev *)(unsigned long)readq(&dc->vdev);
-}
-
-static void _vop_handle_config_change(struct mic_device_desc __iomem *d,
- unsigned int offset,
- struct vop_device *vpdev)
-{
- struct mic_device_ctrl __iomem *dc
- = (void __iomem *)d + _vop_aligned_desc_size(d);
- struct _vop_vdev *vdev = vop_dc_to_vdev(dc);
-
- if (ioread8(&dc->config_change) != MIC_VIRTIO_PARAM_CONFIG_CHANGED)
- return;
-
- dev_dbg(&vpdev->dev, "%s %d\n", __func__, __LINE__);
- virtio_config_changed(&vdev->vdev);
- iowrite8(1, &dc->guest_ack);
-}
-
-/*
- * removes a virtio device if a hot remove event has been
- * requested by the host.
- */
-static int _vop_remove_device(struct mic_device_desc __iomem *d,
- unsigned int offset, struct vop_device *vpdev)
-{
- struct mic_device_ctrl __iomem *dc
- = (void __iomem *)d + _vop_aligned_desc_size(d);
- struct _vop_vdev *vdev = vop_dc_to_vdev(dc);
- u8 status;
- int ret = -1;
-
- if (ioread8(&dc->config_change) == MIC_VIRTIO_PARAM_DEV_REMOVE) {
- struct device *dev = get_device(&vdev->vdev.dev);
-
- dev_dbg(&vpdev->dev,
- "%s %d config_change %d type %d vdev %p\n",
- __func__, __LINE__,
- ioread8(&dc->config_change), ioread8(&d->type), vdev);
- status = ioread8(&d->status);
- reinit_completion(&vdev->reset_done);
- unregister_virtio_device(&vdev->vdev);
- vpdev->hw_ops->free_irq(vpdev, vdev->virtio_cookie, vdev);
- iowrite8(-1, &dc->h2c_vdev_db);
- if (status & VIRTIO_CONFIG_S_DRIVER_OK)
- wait_for_completion(&vdev->reset_done);
- put_device(dev);
- iowrite8(1, &dc->guest_ack);
- dev_dbg(&vpdev->dev, "%s %d guest_ack %d\n",
- __func__, __LINE__, ioread8(&dc->guest_ack));
- iowrite8(-1, &d->type);
- ret = 0;
- }
- return ret;
-}
-
-#define REMOVE_DEVICES true
-
-static void _vop_scan_devices(void __iomem *dp, struct vop_device *vpdev,
- bool remove, int dnode)
-{
- s8 type;
- unsigned int i;
- struct mic_device_desc __iomem *d;
- struct mic_device_ctrl __iomem *dc;
- struct device *dev;
-
- for (i = sizeof(struct mic_bootparam);
- i < MIC_DP_SIZE; i += _vop_total_desc_size(d)) {
- d = dp + i;
- dc = (void __iomem *)d + _vop_aligned_desc_size(d);
- /*
- * This read barrier is paired with the corresponding write
- * barrier on the host which is inserted before adding or
- * removing a virtio device descriptor, by updating the type.
- */
- rmb();
- type = ioread8(&d->type);
-
- /* end of list */
- if (type == 0)
- break;
-
- if (type == -1)
- continue;
-
- /* device already exists */
- dev = device_find_child(&vpdev->dev, (void __force *)d,
- vop_match_desc);
- if (dev) {
- if (remove)
- iowrite8(MIC_VIRTIO_PARAM_DEV_REMOVE,
- &dc->config_change);
- put_device(dev);
- _vop_handle_config_change(d, i, vpdev);
- _vop_remove_device(d, i, vpdev);
- if (remove) {
- iowrite8(0, &dc->config_change);
- iowrite8(0, &dc->guest_ack);
- }
- continue;
- }
-
- /* new device */
- dev_dbg(&vpdev->dev, "%s %d Adding new virtio device %p\n",
- __func__, __LINE__, d);
- if (!remove)
- _vop_add_device(d, i, vpdev, dnode);
- }
-}
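
The scan loop above walks the shared device page, whose layout (as computed
by the mic_common.h size helpers) is roughly:

    offset 0:                          struct mic_bootparam
    i = sizeof(struct mic_bootparam):  desc 0 (mic_device_desc + vqconfigs +
                                       feature bits + config space) followed
                                       by its mic_device_ctrl
    i += _vop_total_desc_size(d):      desc 1, and so on

A type of 0 terminates the list; a type of -1 marks a slot whose device was
removed and which may be reused.
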
-
-static void vop_scan_devices(struct vop_info *vi,
- struct vop_device *vpdev, bool remove)
-{
- void __iomem *dp = vpdev->hw_ops->get_remote_dp(vpdev);
-
- if (!dp)
- return;
- mutex_lock(&vi->vop_mutex);
- _vop_scan_devices(dp, vpdev, remove, vpdev->dnode);
- mutex_unlock(&vi->vop_mutex);
-}
-
-/*
- * vop_hotplug_devices scans the device page for added, changed or
- * removed virtio devices.
- */
-static void vop_hotplug_devices(struct work_struct *work)
-{
- struct vop_info *vi = container_of(work, struct vop_info,
- hotplug_work);
-
- vop_scan_devices(vi, vi->vpdev, !REMOVE_DEVICES);
-}
-
-/*
- * Interrupt handler for hot plug/config changes etc.
- */
-static irqreturn_t vop_extint_handler(int irq, void *data)
-{
- struct vop_info *vi = data;
- struct mic_bootparam __iomem *bp;
- struct vop_device *vpdev = vi->vpdev;
-
- bp = vpdev->hw_ops->get_remote_dp(vpdev);
- dev_dbg(&vpdev->dev, "%s %d hotplug work\n",
- __func__, __LINE__);
- vpdev->hw_ops->ack_interrupt(vpdev, ioread8(&bp->h2c_config_db));
- schedule_work(&vi->hotplug_work);
- return IRQ_HANDLED;
-}
-
-static int vop_driver_probe(struct vop_device *vpdev)
-{
- struct vop_info *vi;
- int rc;
-
- vi = kzalloc(sizeof(*vi), GFP_KERNEL);
- if (!vi) {
- rc = -ENOMEM;
- goto exit;
- }
- dev_set_drvdata(&vpdev->dev, vi);
- vi->vpdev = vpdev;
-
- mutex_init(&vi->vop_mutex);
- INIT_WORK(&vi->hotplug_work, vop_hotplug_devices);
- if (vpdev->dnode) {
- rc = vop_host_init(vi);
- if (rc < 0)
- goto free;
- } else {
- struct mic_bootparam __iomem *bootparam;
-
- vop_scan_devices(vi, vpdev, !REMOVE_DEVICES);
-
- vi->h2c_config_db = vpdev->hw_ops->next_db(vpdev);
- vi->cookie = vpdev->hw_ops->request_irq(vpdev,
- vop_extint_handler,
- "virtio_config_intr",
- vi, vi->h2c_config_db);
- if (IS_ERR(vi->cookie)) {
- rc = PTR_ERR(vi->cookie);
- goto free;
- }
- bootparam = vpdev->hw_ops->get_remote_dp(vpdev);
- iowrite8(vi->h2c_config_db, &bootparam->h2c_config_db);
- }
- vop_init_debugfs(vi);
- return 0;
-free:
- kfree(vi);
-exit:
- return rc;
-}
-
-static void vop_driver_remove(struct vop_device *vpdev)
-{
- struct vop_info *vi = dev_get_drvdata(&vpdev->dev);
-
- if (vpdev->dnode) {
- vop_host_uninit(vi);
- } else {
- struct mic_bootparam __iomem *bootparam =
- vpdev->hw_ops->get_remote_dp(vpdev);
- if (bootparam)
- iowrite8(-1, &bootparam->h2c_config_db);
- vpdev->hw_ops->free_irq(vpdev, vi->cookie, vi);
- flush_work(&vi->hotplug_work);
- vop_scan_devices(vi, vpdev, REMOVE_DEVICES);
- }
- vop_exit_debugfs(vi);
- kfree(vi);
-}
-
-static const struct vop_device_id id_table[] = {
- { VOP_DEV_TRNSP, VOP_DEV_ANY_ID },
- { 0 },
-};
-
-static struct vop_driver vop_driver = {
- .driver.name = KBUILD_MODNAME,
- .driver.owner = THIS_MODULE,
- .id_table = id_table,
- .probe = vop_driver_probe,
- .remove = vop_driver_remove,
-};
-
-module_vop_driver(vop_driver);
-
-MODULE_DEVICE_TABLE(mbus, id_table);
-MODULE_AUTHOR("Intel Corporation");
-MODULE_DESCRIPTION("Intel(R) Virtio Over PCIe (VOP) driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/mic/vop/vop_main.h b/drivers/misc/mic/vop/vop_main.h
deleted file mode 100644
index 2451d9218137..000000000000
--- a/drivers/misc/mic/vop/vop_main.h
+++ /dev/null
@@ -1,158 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Intel Virtio Over PCIe (VOP) driver.
- */
-#ifndef _VOP_MAIN_H_
-#define _VOP_MAIN_H_
-
-#include <linux/vringh.h>
-#include <linux/virtio_config.h>
-#include <linux/virtio.h>
-#include <linux/miscdevice.h>
-
-#include <linux/mic_common.h>
-#include "../common/mic_dev.h"
-
-#include "../bus/vop_bus.h"
-
-/*
- * Note on endianness.
- * 1. Host can be both BE or LE
- * 2. Guest/card is LE. Host uses le_to_cpu to access desc/avail
- * rings and ioreadXX/iowriteXX to access used ring.
- * 3. Device page exposed by host to guest contains LE values. Guest
- * accesses these using ioreadXX/iowriteXX etc. This way in general we
- * obey the virtio spec according to which guest works with native
- * endianness and host is aware of guest endianness and does all
- * required endianness conversion.
- * 4. Data provided from user space to guest (in ADD_DEVICE and
- * CONFIG_CHANGE ioctl's) is not interpreted by the driver and should be
- * in guest endianness.
- */
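
A minimal sketch of that convention, mirroring how vop_find_vq() reads a
virtqueue configuration: the fields are LE in the device page, so the bulk
copy out of I/O memory is followed by an explicit conversion (vqconfig_iomem
stands for a struct mic_vqconfig __iomem pointer into the device page):

    struct mic_vqconfig cfg;
    u16 num_entries;

    /* bulk-copy the LE descriptor out of I/O memory, then convert */
    memcpy_fromio(&cfg, vqconfig_iomem, sizeof(cfg));
    num_entries = le16_to_cpu(cfg.num);
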
-
-/*
- * vop_info - Allocated per invocation of VOP probe
- *
- * @vpdev: VOP device
- * @hotplug_work: Handle virtio device creation, deletion and configuration
- * @cookie: Cookie received upon requesting a virtio configuration interrupt
- * @h2c_config_db: The doorbell used by the peer to indicate a config change
- * @vdev_list: List of "active" virtio devices injected in the peer node
- * @vop_mutex: Synchronize access to the device page as well as serialize
- * creation/deletion of virtio devices on the peer node
- * @dp: Peer device page information
- * @dbg: Debugfs entry
- * @dma_ch: The DMA channel used by this transport for data transfers.
- * @name: Name for this transport used in misc device creation.
- * @miscdev: The misc device registered.
- */
-struct vop_info {
- struct vop_device *vpdev;
- struct work_struct hotplug_work;
- struct mic_irq *cookie;
- int h2c_config_db;
- struct list_head vdev_list;
- struct mutex vop_mutex;
- void __iomem *dp;
- struct dentry *dbg;
- struct dma_chan *dma_ch;
- char name[16];
- struct miscdevice miscdev;
-};
-
-/**
- * struct vop_vringh - Virtio ring host information.
- *
- * @vring: The VOP vring used for setting up user space mappings.
- * @vrh: The host VRINGH used for accessing the card vrings.
- * @riov: The VRINGH read kernel IOV.
- * @wiov: The VRINGH write kernel IOV.
- * @head: The VRINGH head index address passed to vringh_getdesc_kern(..).
- * @vr_mutex: Mutex for synchronizing access to the VRING.
- * @buf: Temporary kernel buffer used to copy in/out data
- * from/to the card via DMA.
- * @buf_da: dma address of buf.
- * @vdev: Back pointer to VOP virtio device for vringh_notify(..).
- */
-struct vop_vringh {
- struct mic_vring vring;
- struct vringh vrh;
- struct vringh_kiov riov;
- struct vringh_kiov wiov;
- u16 head;
- struct mutex vr_mutex;
- void *buf;
- dma_addr_t buf_da;
- struct vop_vdev *vdev;
-};
-
-/**
- * struct vop_vdev - Host information for a card Virtio device.
- *
- * @virtio_id: Virtio device id.
- * @waitq: Waitqueue to allow ring3 apps to poll.
- * @vpdev: Pointer to the VOP bus device.
- * @poll_wake: Used for waking up threads blocked in poll.
- * @out_bytes: Debug stats for number of bytes copied from host to card.
- * @in_bytes: Debug stats for number of bytes copied from card to host.
- * @out_bytes_dma: Debug stats for number of bytes copied from host to card
- * using DMA.
- * @in_bytes_dma: Debug stats for number of bytes copied from card to host
- * using DMA.
- * @tx_len_unaligned: Debug stats for number of bytes copied to the card where
- * the transfer length did not have the required DMA alignment.
- * @tx_dst_unaligned: Debug stats for number of bytes copied where the
- * destination address on the card did not have the required DMA alignment.
- * @rx_dst_unaligned: Debug stats for number of bytes copied where the
- * destination address on the host did not have the required DMA alignment.
- * @vvr: Store per VRING data structures.
- * @virtio_bh_work: Work struct used to schedule virtio bottom half handling.
- * @dd: Virtio device descriptor.
- * @dc: Virtio device control fields.
- * @list: List of Virtio devices.
- * @virtio_db: The doorbell used by the card to interrupt the host.
- * @virtio_cookie: The cookie returned while requesting interrupts.
- * @vi: Transport information.
- * @vdev_mutex: Mutex synchronizing virtio device injection,
- * removal and data transfers.
- * @destroy: Track if a virtio device is being destroyed.
- * @deleted: The virtio device has been deleted.
- */
-struct vop_vdev {
- int virtio_id;
- wait_queue_head_t waitq;
- struct vop_device *vpdev;
- int poll_wake;
- unsigned long out_bytes;
- unsigned long in_bytes;
- unsigned long out_bytes_dma;
- unsigned long in_bytes_dma;
- unsigned long tx_len_unaligned;
- unsigned long tx_dst_unaligned;
- unsigned long rx_dst_unaligned;
- struct vop_vringh vvr[MIC_MAX_VRINGS];
- struct work_struct virtio_bh_work;
- struct mic_device_desc *dd;
- struct mic_device_ctrl *dc;
- struct list_head list;
- int virtio_db;
- struct mic_irq *virtio_cookie;
- struct vop_info *vi;
- struct mutex vdev_mutex;
- struct completion destroy;
- bool deleted;
-};
-
-/* Helper API to check if a virtio device is running */
-static inline bool vop_vdevup(struct vop_vdev *vdev)
-{
- return !!vdev->dd->status;
-}
-
-void vop_init_debugfs(struct vop_info *vi);
-void vop_exit_debugfs(struct vop_info *vi);
-int vop_host_init(struct vop_info *vi);
-void vop_host_uninit(struct vop_info *vi);
-#endif
diff --git a/drivers/misc/mic/vop/vop_vringh.c b/drivers/misc/mic/vop/vop_vringh.c
deleted file mode 100644
index 7014ffe88632..000000000000
--- a/drivers/misc/mic/vop/vop_vringh.c
+++ /dev/null
@@ -1,1166 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Intel Virtio Over PCIe (VOP) driver.
- */
-#include <linux/sched.h>
-#include <linux/poll.h>
-#include <linux/dma-mapping.h>
-
-#include <linux/mic_common.h>
-#include "../common/mic_dev.h"
-
-#include <linux/mic_ioctl.h>
-#include "vop_main.h"
-
-/* Helper API to obtain the VOP PCIe device */
-static inline struct device *vop_dev(struct vop_vdev *vdev)
-{
- return vdev->vpdev->dev.parent;
-}
-
-/* Helper API to check if a virtio device is initialized */
-static inline int vop_vdev_inited(struct vop_vdev *vdev)
-{
- if (!vdev)
- return -EINVAL;
- /* Device has not been created yet */
- if (!vdev->dd || !vdev->dd->type) {
- dev_err(vop_dev(vdev), "%s %d err %d\n",
- __func__, __LINE__, -EINVAL);
- return -EINVAL;
- }
- /* Device has been removed/deleted */
- if (vdev->dd->type == -1) {
- dev_dbg(vop_dev(vdev), "%s %d err %d\n",
- __func__, __LINE__, -ENODEV);
- return -ENODEV;
- }
- return 0;
-}
-
-static void _vop_notify(struct vringh *vrh)
-{
- struct vop_vringh *vvrh = container_of(vrh, struct vop_vringh, vrh);
- struct vop_vdev *vdev = vvrh->vdev;
- struct vop_device *vpdev = vdev->vpdev;
- s8 db = vdev->dc->h2c_vdev_db;
-
- if (db != -1)
- vpdev->hw_ops->send_intr(vpdev, db);
-}
-
-static void vop_virtio_init_post(struct vop_vdev *vdev)
-{
- struct mic_vqconfig *vqconfig = mic_vq_config(vdev->dd);
- struct vop_device *vpdev = vdev->vpdev;
- int i, used_size;
-
- for (i = 0; i < vdev->dd->num_vq; i++) {
-		used_size = PAGE_ALIGN(sizeof(u16) * 3 +
-				sizeof(struct vring_used_elem) *
-				le16_to_cpu(vqconfig[i].num));
- if (!le64_to_cpu(vqconfig[i].used_address)) {
- dev_warn(vop_dev(vdev), "used_address zero??\n");
- continue;
- }
- vdev->vvr[i].vrh.vring.used =
- (void __force *)vpdev->hw_ops->remap(
- vpdev,
- le64_to_cpu(vqconfig[i].used_address),
- used_size);
- }
-
- vdev->dc->used_address_updated = 0;
-
- dev_info(vop_dev(vdev), "%s: device type %d LINKUP\n",
- __func__, vdev->virtio_id);
-}
-
-static inline void vop_virtio_device_reset(struct vop_vdev *vdev)
-{
- int i;
-
- dev_dbg(vop_dev(vdev), "%s: status %d device type %d RESET\n",
- __func__, vdev->dd->status, vdev->virtio_id);
-
- for (i = 0; i < vdev->dd->num_vq; i++)
- /*
- * Avoid lockdep false positive. The + 1 is for the vop
- * mutex which is held in the reset devices code path.
- */
- mutex_lock_nested(&vdev->vvr[i].vr_mutex, i + 1);
-
- /* 0 status means "reset" */
- vdev->dd->status = 0;
- vdev->dc->vdev_reset = 0;
- vdev->dc->host_ack = 1;
-
- for (i = 0; i < vdev->dd->num_vq; i++) {
- struct vringh *vrh = &vdev->vvr[i].vrh;
-
- vdev->vvr[i].vring.info->avail_idx = 0;
- vrh->completed = 0;
- vrh->last_avail_idx = 0;
- vrh->last_used_idx = 0;
- }
-
- for (i = 0; i < vdev->dd->num_vq; i++)
- mutex_unlock(&vdev->vvr[i].vr_mutex);
-}
-
-static void vop_virtio_reset_devices(struct vop_info *vi)
-{
- struct list_head *pos, *tmp;
- struct vop_vdev *vdev;
-
- list_for_each_safe(pos, tmp, &vi->vdev_list) {
- vdev = list_entry(pos, struct vop_vdev, list);
- vop_virtio_device_reset(vdev);
- vdev->poll_wake = 1;
- wake_up(&vdev->waitq);
- }
-}
-
-static void vop_bh_handler(struct work_struct *work)
-{
- struct vop_vdev *vdev = container_of(work, struct vop_vdev,
- virtio_bh_work);
-
- if (vdev->dc->used_address_updated)
- vop_virtio_init_post(vdev);
-
- if (vdev->dc->vdev_reset)
- vop_virtio_device_reset(vdev);
-
- vdev->poll_wake = 1;
- wake_up(&vdev->waitq);
-}
-
-static irqreturn_t _vop_virtio_intr_handler(int irq, void *data)
-{
- struct vop_vdev *vdev = data;
- struct vop_device *vpdev = vdev->vpdev;
-
- vpdev->hw_ops->ack_interrupt(vpdev, vdev->virtio_db);
- schedule_work(&vdev->virtio_bh_work);
- return IRQ_HANDLED;
-}
-
-static int vop_virtio_config_change(struct vop_vdev *vdev, void *argp)
-{
- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake);
- int ret = 0, retry, i;
- struct vop_device *vpdev = vdev->vpdev;
- struct vop_info *vi = dev_get_drvdata(&vpdev->dev);
- struct mic_bootparam *bootparam = vpdev->hw_ops->get_dp(vpdev);
- s8 db = bootparam->h2c_config_db;
-
- mutex_lock(&vi->vop_mutex);
- for (i = 0; i < vdev->dd->num_vq; i++)
- mutex_lock_nested(&vdev->vvr[i].vr_mutex, i + 1);
-
- if (db == -1 || vdev->dd->type == -1) {
- ret = -EIO;
- goto exit;
- }
-
- memcpy(mic_vq_configspace(vdev->dd), argp, vdev->dd->config_len);
- vdev->dc->config_change = MIC_VIRTIO_PARAM_CONFIG_CHANGED;
- vpdev->hw_ops->send_intr(vpdev, db);
-
- for (retry = 100; retry--;) {
- ret = wait_event_timeout(wake, vdev->dc->guest_ack,
- msecs_to_jiffies(100));
- if (ret)
- break;
- }
-
- dev_dbg(vop_dev(vdev),
- "%s %d retry: %d\n", __func__, __LINE__, retry);
- vdev->dc->config_change = 0;
- vdev->dc->guest_ack = 0;
-exit:
- for (i = 0; i < vdev->dd->num_vq; i++)
- mutex_unlock(&vdev->vvr[i].vr_mutex);
- mutex_unlock(&vi->vop_mutex);
- return ret;
-}
-
-static int vop_copy_dp_entry(struct vop_vdev *vdev,
- struct mic_device_desc *argp, __u8 *type,
- struct mic_device_desc **devpage)
-{
- struct vop_device *vpdev = vdev->vpdev;
- struct mic_device_desc *devp;
- struct mic_vqconfig *vqconfig;
- int ret = 0, i;
- bool slot_found = false;
-
- vqconfig = mic_vq_config(argp);
- for (i = 0; i < argp->num_vq; i++) {
- if (le16_to_cpu(vqconfig[i].num) > MIC_MAX_VRING_ENTRIES) {
- ret = -EINVAL;
- dev_err(vop_dev(vdev), "%s %d err %d\n",
- __func__, __LINE__, ret);
- goto exit;
- }
- }
-
- /* Find the first free device page entry */
- for (i = sizeof(struct mic_bootparam);
- i < MIC_DP_SIZE - mic_total_desc_size(argp);
- i += mic_total_desc_size(devp)) {
- devp = vpdev->hw_ops->get_dp(vpdev) + i;
- if (devp->type == 0 || devp->type == -1) {
- slot_found = true;
- break;
- }
- }
- if (!slot_found) {
- ret = -EINVAL;
- dev_err(vop_dev(vdev), "%s %d err %d\n",
- __func__, __LINE__, ret);
- goto exit;
- }
-	/*
-	 * Save off the type before doing the memcpy. The type is set only at
-	 * the end, once all initialization for the new device has completed.
-	 */
- *type = argp->type;
- argp->type = 0;
- memcpy(devp, argp, mic_desc_size(argp));
-
- *devpage = devp;
-exit:
- return ret;
-}
-
-static void vop_init_device_ctrl(struct vop_vdev *vdev,
- struct mic_device_desc *devpage)
-{
- struct mic_device_ctrl *dc;
-
- dc = (void *)devpage + mic_aligned_desc_size(devpage);
-
- dc->config_change = 0;
- dc->guest_ack = 0;
- dc->vdev_reset = 0;
- dc->host_ack = 0;
- dc->used_address_updated = 0;
- dc->c2h_vdev_db = -1;
- dc->h2c_vdev_db = -1;
- vdev->dc = dc;
-}
-
-static int vop_virtio_add_device(struct vop_vdev *vdev,
- struct mic_device_desc *argp)
-{
- struct vop_info *vi = vdev->vi;
- struct vop_device *vpdev = vi->vpdev;
- struct mic_device_desc *dd = NULL;
- struct mic_vqconfig *vqconfig;
- int vr_size, i, j, ret;
- u8 type = 0;
- s8 db = -1;
- char irqname[16];
- struct mic_bootparam *bootparam;
- u16 num;
- dma_addr_t vr_addr;
-
- bootparam = vpdev->hw_ops->get_dp(vpdev);
- init_waitqueue_head(&vdev->waitq);
- INIT_LIST_HEAD(&vdev->list);
- vdev->vpdev = vpdev;
-
- ret = vop_copy_dp_entry(vdev, argp, &type, &dd);
- if (ret) {
- dev_err(vop_dev(vdev), "%s %d err %d\n",
- __func__, __LINE__, ret);
- return ret;
- }
-
- vop_init_device_ctrl(vdev, dd);
-
- vdev->dd = dd;
- vdev->virtio_id = type;
- vqconfig = mic_vq_config(dd);
- INIT_WORK(&vdev->virtio_bh_work, vop_bh_handler);
-
- for (i = 0; i < dd->num_vq; i++) {
- struct vop_vringh *vvr = &vdev->vvr[i];
- struct mic_vring *vr = &vdev->vvr[i].vring;
-
- num = le16_to_cpu(vqconfig[i].num);
- mutex_init(&vvr->vr_mutex);
- vr_size = PAGE_ALIGN(round_up(vring_size(num, MIC_VIRTIO_RING_ALIGN), 4) +
- sizeof(struct _mic_vring_info));
- vr->va = (void *)
- __get_free_pages(GFP_KERNEL | __GFP_ZERO,
- get_order(vr_size));
- if (!vr->va) {
- ret = -ENOMEM;
- dev_err(vop_dev(vdev), "%s %d err %d\n",
- __func__, __LINE__, ret);
- goto err;
- }
- vr->len = vr_size;
- vr->info = vr->va + round_up(vring_size(num, MIC_VIRTIO_RING_ALIGN), 4);
- vr->info->magic = cpu_to_le32(MIC_MAGIC + vdev->virtio_id + i);
- vr_addr = dma_map_single(&vpdev->dev, vr->va, vr_size,
- DMA_BIDIRECTIONAL);
- if (dma_mapping_error(&vpdev->dev, vr_addr)) {
- free_pages((unsigned long)vr->va, get_order(vr_size));
- ret = -ENOMEM;
- dev_err(vop_dev(vdev), "%s %d err %d\n",
- __func__, __LINE__, ret);
- goto err;
- }
- vqconfig[i].address = cpu_to_le64(vr_addr);
-
- vring_init(&vr->vr, num, vr->va, MIC_VIRTIO_RING_ALIGN);
- ret = vringh_init_kern(&vvr->vrh,
- *(u32 *)mic_vq_features(vdev->dd),
- num, false, vr->vr.desc, vr->vr.avail,
- vr->vr.used);
- if (ret) {
- dev_err(vop_dev(vdev), "%s %d err %d\n",
- __func__, __LINE__, ret);
- goto err;
- }
- vringh_kiov_init(&vvr->riov, NULL, 0);
- vringh_kiov_init(&vvr->wiov, NULL, 0);
- vvr->head = USHRT_MAX;
- vvr->vdev = vdev;
- vvr->vrh.notify = _vop_notify;
- dev_dbg(&vpdev->dev,
- "%s %d index %d va %p info %p vr_size 0x%x\n",
- __func__, __LINE__, i, vr->va, vr->info, vr_size);
- vvr->buf = (void *)__get_free_pages(GFP_KERNEL,
- get_order(VOP_INT_DMA_BUF_SIZE));
- vvr->buf_da = dma_map_single(&vpdev->dev,
- vvr->buf, VOP_INT_DMA_BUF_SIZE,
- DMA_BIDIRECTIONAL);
- }
-
- snprintf(irqname, sizeof(irqname), "vop%dvirtio%d", vpdev->index,
- vdev->virtio_id);
- vdev->virtio_db = vpdev->hw_ops->next_db(vpdev);
- vdev->virtio_cookie = vpdev->hw_ops->request_irq(vpdev,
- _vop_virtio_intr_handler, irqname, vdev,
- vdev->virtio_db);
- if (IS_ERR(vdev->virtio_cookie)) {
- ret = PTR_ERR(vdev->virtio_cookie);
- dev_dbg(&vpdev->dev, "request irq failed\n");
- goto err;
- }
-
- vdev->dc->c2h_vdev_db = vdev->virtio_db;
-
- /*
- * Order the type update with previous stores. This write barrier
- * is paired with the corresponding read barrier before the uncached
- * system memory read of the type, on the card while scanning the
- * device page.
- */
- smp_wmb();
- dd->type = type;
- argp->type = type;
-
- if (bootparam) {
- db = bootparam->h2c_config_db;
- if (db != -1)
- vpdev->hw_ops->send_intr(vpdev, db);
- }
- dev_dbg(&vpdev->dev, "Added virtio id %d db %d\n", dd->type, db);
- return 0;
-err:
- vqconfig = mic_vq_config(dd);
- for (j = 0; j < i; j++) {
- struct vop_vringh *vvr = &vdev->vvr[j];
-
- dma_unmap_single(&vpdev->dev, le64_to_cpu(vqconfig[j].address),
- vvr->vring.len, DMA_BIDIRECTIONAL);
- free_pages((unsigned long)vvr->vring.va,
- get_order(vvr->vring.len));
- }
- return ret;
-}
-
-static void vop_dev_remove(struct vop_info *pvi, struct mic_device_ctrl *devp,
- struct vop_device *vpdev)
-{
- struct mic_bootparam *bootparam = vpdev->hw_ops->get_dp(vpdev);
- s8 db;
- int ret, retry;
- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake);
-
- devp->config_change = MIC_VIRTIO_PARAM_DEV_REMOVE;
- db = bootparam->h2c_config_db;
- if (db != -1)
- vpdev->hw_ops->send_intr(vpdev, db);
- else
- goto done;
- for (retry = 15; retry--;) {
- ret = wait_event_timeout(wake, devp->guest_ack,
- msecs_to_jiffies(1000));
- if (ret)
- break;
- }
-done:
- devp->config_change = 0;
- devp->guest_ack = 0;
-}
-
-static void vop_virtio_del_device(struct vop_vdev *vdev)
-{
- struct vop_info *vi = vdev->vi;
- struct vop_device *vpdev = vdev->vpdev;
- int i;
- struct mic_vqconfig *vqconfig;
- struct mic_bootparam *bootparam = vpdev->hw_ops->get_dp(vpdev);
-
- if (!bootparam)
- goto skip_hot_remove;
- vop_dev_remove(vi, vdev->dc, vpdev);
-skip_hot_remove:
- vpdev->hw_ops->free_irq(vpdev, vdev->virtio_cookie, vdev);
- flush_work(&vdev->virtio_bh_work);
- vqconfig = mic_vq_config(vdev->dd);
- for (i = 0; i < vdev->dd->num_vq; i++) {
- struct vop_vringh *vvr = &vdev->vvr[i];
-
- dma_unmap_single(&vpdev->dev,
- vvr->buf_da, VOP_INT_DMA_BUF_SIZE,
- DMA_BIDIRECTIONAL);
- free_pages((unsigned long)vvr->buf,
- get_order(VOP_INT_DMA_BUF_SIZE));
- vringh_kiov_cleanup(&vvr->riov);
- vringh_kiov_cleanup(&vvr->wiov);
- dma_unmap_single(&vpdev->dev, le64_to_cpu(vqconfig[i].address),
- vvr->vring.len, DMA_BIDIRECTIONAL);
- free_pages((unsigned long)vvr->vring.va,
- get_order(vvr->vring.len));
- }
- /*
- * Order the type update with previous stores. This write barrier
- * is paired with the corresponding read barrier before the uncached
- * system memory read of the type, on the card while scanning the
- * device page.
- */
- smp_wmb();
- vdev->dd->type = -1;
-}
-
-/*
- * vop_sync_dma - Wrapper for synchronous DMAs.
- *
- * @vdev: The VOP virtio device whose transport owns the DMA channel.
- * @dst: destination DMA address.
- * @src: source DMA address.
- * @len: size of the transfer.
- *
- * Return: 0 on success, a negative error code on failure.
- */
-static int vop_sync_dma(struct vop_vdev *vdev, dma_addr_t dst, dma_addr_t src,
- size_t len)
-{
- int err = 0;
- struct dma_device *ddev;
- struct dma_async_tx_descriptor *tx;
- struct vop_info *vi = dev_get_drvdata(&vdev->vpdev->dev);
- struct dma_chan *vop_ch = vi->dma_ch;
-
- if (!vop_ch) {
- err = -EBUSY;
- goto error;
- }
- ddev = vop_ch->device;
- tx = ddev->device_prep_dma_memcpy(vop_ch, dst, src, len,
- DMA_PREP_FENCE);
- if (!tx) {
- err = -ENOMEM;
- goto error;
- } else {
- dma_cookie_t cookie;
-
- cookie = tx->tx_submit(tx);
- if (dma_submit_error(cookie)) {
- err = -ENOMEM;
- goto error;
- }
- dma_async_issue_pending(vop_ch);
- err = dma_sync_wait(vop_ch, cookie);
- }
-error:
- if (err)
- dev_err(&vi->vpdev->dev, "%s %d err %d\n",
- __func__, __LINE__, err);
- return err;
-}
-
-#define VOP_USE_DMA true
-
-/*
- * Initiates the copies across the PCIe bus from card memory to a user
- * space buffer. When transfers are done using DMA, source/destination
- * addresses and transfer length must follow the alignment requirements of
- * the MIC DMA engine.
- */
-static int vop_virtio_copy_to_user(struct vop_vdev *vdev, void __user *ubuf,
- size_t len, u64 daddr, size_t dlen,
- int vr_idx)
-{
- struct vop_device *vpdev = vdev->vpdev;
- void __iomem *dbuf = vpdev->hw_ops->remap(vpdev, daddr, len);
- struct vop_vringh *vvr = &vdev->vvr[vr_idx];
- struct vop_info *vi = dev_get_drvdata(&vpdev->dev);
- size_t dma_alignment;
- bool x200;
- size_t dma_offset, partlen;
- int err;
-
- if (!VOP_USE_DMA || !vi->dma_ch) {
- if (copy_to_user(ubuf, (void __force *)dbuf, len)) {
- err = -EFAULT;
- dev_err(vop_dev(vdev), "%s %d err %d\n",
- __func__, __LINE__, err);
- goto err;
- }
- vdev->in_bytes += len;
- err = 0;
- goto err;
- }
-
- dma_alignment = 1 << vi->dma_ch->device->copy_align;
- x200 = is_dma_copy_aligned(vi->dma_ch->device, 1, 1, 1);
-
- dma_offset = daddr - round_down(daddr, dma_alignment);
- daddr -= dma_offset;
- len += dma_offset;
- /*
- * X100 uses DMA addresses as seen by the card so adding
- * the aperture base is not required for DMA. However x200
- * requires DMA addresses to be an offset into the bar so
- * add the aperture base for x200.
- */
- if (x200)
- daddr += vpdev->aper->pa;
- while (len) {
- partlen = min_t(size_t, len, VOP_INT_DMA_BUF_SIZE);
- err = vop_sync_dma(vdev, vvr->buf_da, daddr,
- ALIGN(partlen, dma_alignment));
- if (err) {
- dev_err(vop_dev(vdev), "%s %d err %d\n",
- __func__, __LINE__, err);
- goto err;
- }
- if (copy_to_user(ubuf, vvr->buf + dma_offset,
- partlen - dma_offset)) {
- err = -EFAULT;
- dev_err(vop_dev(vdev), "%s %d err %d\n",
- __func__, __LINE__, err);
- goto err;
- }
- daddr += partlen;
- ubuf += partlen;
- dbuf += partlen;
- vdev->in_bytes_dma += partlen;
- vdev->in_bytes += partlen;
- len -= partlen;
- dma_offset = 0;
- }
- err = 0;
-err:
- vpdev->hw_ops->unmap(vpdev, dbuf);
- dev_dbg(vop_dev(vdev),
- "%s: ubuf %p dbuf %p len 0x%zx vr_idx 0x%x\n",
- __func__, ubuf, dbuf, len, vr_idx);
- return err;
-}
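
As a worked example of the alignment handling above: with a 64-byte
copy_align and daddr = 0x10034, the code rounds daddr down to 0x10000,
records dma_offset = 0x34, DMAs ALIGN(partlen, 64) bytes from the aligned
address into vvr->buf, and then copies to user space starting at
vvr->buf + 0x34, so only the bytes actually requested reach the user buffer
despite the padded DMA.
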
-
-/*
- * Initiates copies across the PCIe bus from a user space buffer to card
- * memory. When transfers are done using DMA, source/destination addresses
- * and transfer length must follow the alignment requirements of the MIC
- * DMA engine.
- */
-static int vop_virtio_copy_from_user(struct vop_vdev *vdev, void __user *ubuf,
- size_t len, u64 daddr, size_t dlen,
- int vr_idx)
-{
- struct vop_device *vpdev = vdev->vpdev;
- void __iomem *dbuf = vpdev->hw_ops->remap(vpdev, daddr, len);
- struct vop_vringh *vvr = &vdev->vvr[vr_idx];
- struct vop_info *vi = dev_get_drvdata(&vdev->vpdev->dev);
- size_t dma_alignment;
- bool x200;
- size_t partlen;
- bool dma = VOP_USE_DMA && vi->dma_ch;
- int err = 0;
- size_t offset = 0;
-
- if (dma) {
- dma_alignment = 1 << vi->dma_ch->device->copy_align;
- x200 = is_dma_copy_aligned(vi->dma_ch->device, 1, 1, 1);
-
- if (daddr & (dma_alignment - 1)) {
- vdev->tx_dst_unaligned += len;
- dma = false;
- } else if (ALIGN(len, dma_alignment) > dlen) {
- vdev->tx_len_unaligned += len;
- dma = false;
- }
- }
-
- if (!dma)
- goto memcpy;
-
- /*
- * X100 uses DMA addresses as seen by the card so adding
- * the aperture base is not required for DMA. However x200
- * requires DMA addresses to be an offset into the bar so
- * add the aperture base for x200.
- */
- if (x200)
- daddr += vpdev->aper->pa;
- while (len) {
- partlen = min_t(size_t, len, VOP_INT_DMA_BUF_SIZE);
-
- if (copy_from_user(vvr->buf, ubuf, partlen)) {
- err = -EFAULT;
- dev_err(vop_dev(vdev), "%s %d err %d\n",
- __func__, __LINE__, err);
- goto err;
- }
- err = vop_sync_dma(vdev, daddr, vvr->buf_da,
- ALIGN(partlen, dma_alignment));
- if (err) {
- dev_err(vop_dev(vdev), "%s %d err %d\n",
- __func__, __LINE__, err);
- goto err;
- }
- daddr += partlen;
- ubuf += partlen;
- dbuf += partlen;
- vdev->out_bytes_dma += partlen;
- vdev->out_bytes += partlen;
- len -= partlen;
- }
-memcpy:
- /*
- * We are copying to IO below and should ideally use something
- * like copy_from_user_toio(..) if it existed.
- */
- while (len) {
- partlen = min_t(size_t, len, VOP_INT_DMA_BUF_SIZE);
-
- if (copy_from_user(vvr->buf, ubuf + offset, partlen)) {
- err = -EFAULT;
- dev_err(vop_dev(vdev), "%s %d err %d\n",
- __func__, __LINE__, err);
- goto err;
- }
- memcpy_toio(dbuf + offset, vvr->buf, partlen);
- offset += partlen;
- vdev->out_bytes += partlen;
- len -= partlen;
- }
- err = 0;
-err:
- vpdev->hw_ops->unmap(vpdev, dbuf);
- dev_dbg(vop_dev(vdev),
- "%s: ubuf %p dbuf %p len 0x%zx vr_idx 0x%x\n",
- __func__, ubuf, dbuf, len, vr_idx);
- return err;
-}
-
-#define MIC_VRINGH_READ true
-
-/* Determine the total number of bytes consumed in a VRINGH KIOV */
-static inline u32 vop_vringh_iov_consumed(struct vringh_kiov *iov)
-{
- int i;
- u32 total = iov->consumed;
-
- for (i = 0; i < iov->i; i++)
- total += iov->iov[i].iov_len;
- return total;
-}
-
-/*
- * Traverse the VRINGH KIOV and issue the APIs to trigger the copies.
- * This API is heavily based on the vringh_iov_xfer(..) implementation
- * in vringh.c. The reason we cannot reuse vringh_iov_pull_kern(..)
- * and vringh_iov_push_kern(..) directly is because there is no
- * way to override the VRINGH xfer(..) routines as of v3.10.
- */
-static int vop_vringh_copy(struct vop_vdev *vdev, struct vringh_kiov *iov,
- void __user *ubuf, size_t len, bool read, int vr_idx,
- size_t *out_len)
-{
- int ret = 0;
- size_t partlen, tot_len = 0;
-
- while (len && iov->i < iov->used) {
- struct kvec *kiov = &iov->iov[iov->i];
- unsigned long daddr = (unsigned long)kiov->iov_base;
-
- partlen = min(kiov->iov_len, len);
- if (read)
- ret = vop_virtio_copy_to_user(vdev, ubuf, partlen,
- daddr,
- kiov->iov_len,
- vr_idx);
- else
- ret = vop_virtio_copy_from_user(vdev, ubuf, partlen,
- daddr,
- kiov->iov_len,
- vr_idx);
- if (ret) {
- dev_err(vop_dev(vdev), "%s %d err %d\n",
- __func__, __LINE__, ret);
- break;
- }
- len -= partlen;
- ubuf += partlen;
- tot_len += partlen;
- iov->consumed += partlen;
- kiov->iov_len -= partlen;
- kiov->iov_base += partlen;
- if (!kiov->iov_len) {
- /* Fix up old iov element then increment. */
- kiov->iov_len = iov->consumed;
- kiov->iov_base -= iov->consumed;
-
- iov->consumed = 0;
- iov->i++;
- }
- }
- *out_len = tot_len;
- return ret;
-}
-
-/*
- * Use the standard VRINGH infrastructure in the kernel to fetch new
- * descriptors, initiate the copies and update the used ring.
- */
-static int _vop_virtio_copy(struct vop_vdev *vdev, struct mic_copy_desc *copy)
-{
- int ret = 0;
- u32 iovcnt = copy->iovcnt;
- struct iovec iov;
- struct iovec __user *u_iov = copy->iov;
- void __user *ubuf = NULL;
- struct vop_vringh *vvr = &vdev->vvr[copy->vr_idx];
- struct vringh_kiov *riov = &vvr->riov;
- struct vringh_kiov *wiov = &vvr->wiov;
- struct vringh *vrh = &vvr->vrh;
- u16 *head = &vvr->head;
- struct mic_vring *vr = &vvr->vring;
- size_t len = 0, out_len;
-
- copy->out_len = 0;
- /* Fetch a new IOVEC if all previous elements have been processed */
- if (riov->i == riov->used && wiov->i == wiov->used) {
- ret = vringh_getdesc_kern(vrh, riov, wiov,
- head, GFP_KERNEL);
- /* Check if there are available descriptors */
- if (ret <= 0)
- return ret;
- }
- while (iovcnt) {
- if (!len) {
- /* Copy over a new iovec from user space. */
- ret = copy_from_user(&iov, u_iov, sizeof(*u_iov));
- if (ret) {
- ret = -EINVAL;
- dev_err(vop_dev(vdev), "%s %d err %d\n",
- __func__, __LINE__, ret);
- break;
- }
- len = iov.iov_len;
- ubuf = iov.iov_base;
- }
- /* Issue all the read descriptors first */
- ret = vop_vringh_copy(vdev, riov, ubuf, len,
- MIC_VRINGH_READ, copy->vr_idx, &out_len);
- if (ret) {
- dev_err(vop_dev(vdev), "%s %d err %d\n",
- __func__, __LINE__, ret);
- break;
- }
- len -= out_len;
- ubuf += out_len;
- copy->out_len += out_len;
- /* Issue the write descriptors next */
- ret = vop_vringh_copy(vdev, wiov, ubuf, len,
- !MIC_VRINGH_READ, copy->vr_idx, &out_len);
- if (ret) {
- dev_err(vop_dev(vdev), "%s %d err %d\n",
- __func__, __LINE__, ret);
- break;
- }
- len -= out_len;
- ubuf += out_len;
- copy->out_len += out_len;
- if (!len) {
- /* One user space iovec is now completed */
- iovcnt--;
- u_iov++;
- }
- /* Exit loop if all elements in KIOVs have been processed. */
- if (riov->i == riov->used && wiov->i == wiov->used)
- break;
- }
- /*
- * Update the used ring if a descriptor was available and some data was
- * copied in/out and the user asked for a used ring update.
- */
- if (*head != USHRT_MAX && copy->out_len && copy->update_used) {
- u32 total = 0;
-
- /* Determine the total data consumed */
- total += vop_vringh_iov_consumed(riov);
- total += vop_vringh_iov_consumed(wiov);
- vringh_complete_kern(vrh, *head, total);
- *head = USHRT_MAX;
- if (vringh_need_notify_kern(vrh) > 0)
- vringh_notify(vrh);
- vringh_kiov_cleanup(riov);
- vringh_kiov_cleanup(wiov);
- /* Update avail idx for user space */
- vr->info->avail_idx = vrh->last_avail_idx;
- }
- return ret;
-}
-
-static inline int vop_verify_copy_args(struct vop_vdev *vdev,
- struct mic_copy_desc *copy)
-{
- if (!vdev || copy->vr_idx >= vdev->dd->num_vq)
- return -EINVAL;
- return 0;
-}
-
-/* Copy a specified number of virtio descriptors in a chain */
-static int vop_virtio_copy_desc(struct vop_vdev *vdev,
- struct mic_copy_desc *copy)
-{
- int err;
- struct vop_vringh *vvr;
-
- err = vop_verify_copy_args(vdev, copy);
- if (err)
- return err;
-
- vvr = &vdev->vvr[copy->vr_idx];
- mutex_lock(&vvr->vr_mutex);
- if (!vop_vdevup(vdev)) {
- err = -ENODEV;
- dev_err(vop_dev(vdev), "%s %d err %d\n",
- __func__, __LINE__, err);
- goto err;
- }
- err = _vop_virtio_copy(vdev, copy);
- if (err) {
- dev_err(vop_dev(vdev), "%s %d err %d\n",
- __func__, __LINE__, err);
- }
-err:
- mutex_unlock(&vvr->vr_mutex);
- return err;
-}
-
-static int vop_open(struct inode *inode, struct file *f)
-{
- struct vop_vdev *vdev;
- struct vop_info *vi = container_of(f->private_data,
- struct vop_info, miscdev);
-
- vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
- if (!vdev)
- return -ENOMEM;
- vdev->vi = vi;
- mutex_init(&vdev->vdev_mutex);
- f->private_data = vdev;
- init_completion(&vdev->destroy);
- complete(&vdev->destroy);
- return 0;
-}
-
-static int vop_release(struct inode *inode, struct file *f)
-{
- struct vop_vdev *vdev = f->private_data, *vdev_tmp;
- struct vop_info *vi = vdev->vi;
- struct list_head *pos, *tmp;
- bool found = false;
-
- mutex_lock(&vdev->vdev_mutex);
- if (vdev->deleted)
- goto unlock;
- mutex_lock(&vi->vop_mutex);
- list_for_each_safe(pos, tmp, &vi->vdev_list) {
- vdev_tmp = list_entry(pos, struct vop_vdev, list);
- if (vdev == vdev_tmp) {
- vop_virtio_del_device(vdev);
- list_del(pos);
- found = true;
- break;
- }
- }
- mutex_unlock(&vi->vop_mutex);
-unlock:
- mutex_unlock(&vdev->vdev_mutex);
- if (!found)
- wait_for_completion(&vdev->destroy);
- f->private_data = NULL;
- kfree(vdev);
- return 0;
-}
-
-static long vop_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
-{
- struct vop_vdev *vdev = f->private_data;
- struct vop_info *vi = vdev->vi;
- void __user *argp = (void __user *)arg;
- int ret;
-
- switch (cmd) {
- case MIC_VIRTIO_ADD_DEVICE:
- {
- struct mic_device_desc dd, *dd_config;
-
- if (copy_from_user(&dd, argp, sizeof(dd)))
- return -EFAULT;
-
- if (mic_aligned_desc_size(&dd) > MIC_MAX_DESC_BLK_SIZE ||
- dd.num_vq > MIC_MAX_VRINGS)
- return -EINVAL;
-
- dd_config = memdup_user(argp, mic_desc_size(&dd));
- if (IS_ERR(dd_config))
- return PTR_ERR(dd_config);
-
- /* Ensure desc has not changed between the two reads */
- if (memcmp(&dd, dd_config, sizeof(dd))) {
- ret = -EINVAL;
- goto free_ret;
- }
- mutex_lock(&vdev->vdev_mutex);
- mutex_lock(&vi->vop_mutex);
- ret = vop_virtio_add_device(vdev, dd_config);
- if (ret)
- goto unlock_ret;
- list_add_tail(&vdev->list, &vi->vdev_list);
-unlock_ret:
- mutex_unlock(&vi->vop_mutex);
- mutex_unlock(&vdev->vdev_mutex);
-free_ret:
- kfree(dd_config);
- return ret;
- }
- case MIC_VIRTIO_COPY_DESC:
- {
- struct mic_copy_desc copy;
-
- mutex_lock(&vdev->vdev_mutex);
- ret = vop_vdev_inited(vdev);
- if (ret)
- goto _unlock_ret;
-
- if (copy_from_user(&copy, argp, sizeof(copy))) {
- ret = -EFAULT;
- goto _unlock_ret;
- }
-
- ret = vop_virtio_copy_desc(vdev, &copy);
- if (ret < 0)
- goto _unlock_ret;
- if (copy_to_user(
- &((struct mic_copy_desc __user *)argp)->out_len,
- &copy.out_len, sizeof(copy.out_len)))
- ret = -EFAULT;
-_unlock_ret:
- mutex_unlock(&vdev->vdev_mutex);
- return ret;
- }
- case MIC_VIRTIO_CONFIG_CHANGE:
- {
- void *buf;
-
- mutex_lock(&vdev->vdev_mutex);
- ret = vop_vdev_inited(vdev);
- if (ret)
- goto __unlock_ret;
- buf = memdup_user(argp, vdev->dd->config_len);
- if (IS_ERR(buf)) {
- ret = PTR_ERR(buf);
- goto __unlock_ret;
- }
- ret = vop_virtio_config_change(vdev, buf);
- kfree(buf);
-__unlock_ret:
- mutex_unlock(&vdev->vdev_mutex);
- return ret;
- }
- default:
- return -ENOIOCTLCMD;
-	}
- return 0;
-}
-
-/*
- * We return EPOLLIN | EPOLLOUT from poll when new buffers are enqueued, and
- * not when previously enqueued buffers may be available. This means that
- * in the card->host (TX) path, when userspace is unblocked by poll it
- * must drain all available descriptors or it can stall.
- */
-static __poll_t vop_poll(struct file *f, poll_table *wait)
-{
- struct vop_vdev *vdev = f->private_data;
- __poll_t mask = 0;
-
- mutex_lock(&vdev->vdev_mutex);
- if (vop_vdev_inited(vdev)) {
- mask = EPOLLERR;
- goto done;
- }
- poll_wait(f, &vdev->waitq, wait);
- if (vop_vdev_inited(vdev)) {
- mask = EPOLLERR;
- } else if (vdev->poll_wake) {
- vdev->poll_wake = 0;
- mask = EPOLLIN | EPOLLOUT;
- }
-done:
- mutex_unlock(&vdev->vdev_mutex);
- return mask;
-}
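
In practice a host-side user space daemon drives this interface with a loop
along the following lines (a hedged sketch, not the actual MPSS daemon code;
setup and error handling omitted):

    struct pollfd pfd = { .fd = vop_fd, .events = POLLIN | POLLOUT };
    struct mic_copy_desc copy;  /* iov, iovcnt, vr_idx, update_used filled in */

    while (poll(&pfd, 1, -1) > 0) {
            if (!(pfd.revents & (POLLIN | POLLOUT)))
                    continue;
            /*
             * Drain until no descriptor is available: per the comment above
             * vop_poll(), a wakeup only arrives for newly enqueued buffers.
             */
            do {
                    copy.out_len = 0;
                    if (ioctl(vop_fd, MIC_VIRTIO_COPY_DESC, &copy) < 0)
                            break;
            } while (copy.out_len);
    }
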
-
-static inline int
-vop_query_offset(struct vop_vdev *vdev, unsigned long offset,
- unsigned long *size, unsigned long *pa)
-{
- struct vop_device *vpdev = vdev->vpdev;
- unsigned long start = MIC_DP_SIZE;
- int i;
-
- /*
- * MMAP interface is as follows:
- * offset region
- * 0x0 virtio device_page
- * 0x1000 first vring
- * 0x1000 + size of 1st vring second vring
- * ....
- */
- if (!offset) {
- *pa = virt_to_phys(vpdev->hw_ops->get_dp(vpdev));
- *size = MIC_DP_SIZE;
- return 0;
- }
-
- for (i = 0; i < vdev->dd->num_vq; i++) {
- struct vop_vringh *vvr = &vdev->vvr[i];
-
- if (offset == start) {
- *pa = virt_to_phys(vvr->vring.va);
- *size = vvr->vring.len;
- return 0;
- }
- start += vvr->vring.len;
- }
- return -1;
-}
-
-/*
- * Maps the device page and virtio rings to user space for read-only access.
- */
-static int vop_mmap(struct file *f, struct vm_area_struct *vma)
-{
- struct vop_vdev *vdev = f->private_data;
- unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
- unsigned long pa, size = vma->vm_end - vma->vm_start, size_rem = size;
- int i, err;
-
- err = vop_vdev_inited(vdev);
- if (err)
- goto ret;
- if (vma->vm_flags & VM_WRITE) {
- err = -EACCES;
- goto ret;
- }
- while (size_rem) {
- i = vop_query_offset(vdev, offset, &size, &pa);
- if (i < 0) {
- err = -EINVAL;
- goto ret;
- }
- err = remap_pfn_range(vma, vma->vm_start + offset,
- pa >> PAGE_SHIFT, size,
- vma->vm_page_prot);
- if (err)
- goto ret;
- size_rem -= size;
- offset += size;
- }
-ret:
- return err;
-}
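
A user space sketch of the corresponding mapping, following the offset table
documented above vop_query_offset(); vr0_len stands for the first vring's
length, which user space must learn from the device descriptor, and
PROT_WRITE mappings are rejected by the VM_WRITE check above:

    void *dp  = mmap(NULL, MIC_DP_SIZE, PROT_READ, MAP_SHARED, vop_fd, 0);
    void *vr0 = mmap(NULL, vr0_len, PROT_READ, MAP_SHARED, vop_fd, 0x1000);
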
-
-static const struct file_operations vop_fops = {
- .open = vop_open,
- .release = vop_release,
- .unlocked_ioctl = vop_ioctl,
- .poll = vop_poll,
- .mmap = vop_mmap,
- .owner = THIS_MODULE,
-};
-
-int vop_host_init(struct vop_info *vi)
-{
- int rc;
- struct miscdevice *mdev;
- struct vop_device *vpdev = vi->vpdev;
-
- INIT_LIST_HEAD(&vi->vdev_list);
- vi->dma_ch = vpdev->dma_ch;
- mdev = &vi->miscdev;
- mdev->minor = MISC_DYNAMIC_MINOR;
- snprintf(vi->name, sizeof(vi->name), "vop_virtio%d", vpdev->index);
- mdev->name = vi->name;
- mdev->fops = &vop_fops;
- mdev->parent = &vpdev->dev;
-
- rc = misc_register(mdev);
- if (rc)
- dev_err(&vpdev->dev, "%s failed rc %d\n", __func__, rc);
- return rc;
-}
-
-void vop_host_uninit(struct vop_info *vi)
-{
- struct list_head *pos, *tmp;
- struct vop_vdev *vdev;
-
- mutex_lock(&vi->vop_mutex);
- vop_virtio_reset_devices(vi);
- list_for_each_safe(pos, tmp, &vi->vdev_list) {
- vdev = list_entry(pos, struct vop_vdev, list);
- list_del(pos);
- reinit_completion(&vdev->destroy);
- mutex_unlock(&vi->vop_mutex);
- mutex_lock(&vdev->vdev_mutex);
- vop_virtio_del_device(vdev);
- vdev->deleted = true;
- mutex_unlock(&vdev->vdev_mutex);
- complete(&vdev->destroy);
- mutex_lock(&vi->vop_mutex);
- }
- mutex_unlock(&vi->vop_mutex);
- misc_deregister(&vi->miscdev);
-}
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index 414314151d0a..acb9c81a4e45 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -572,17 +572,6 @@ static void renesas_sdhi_reset(struct tmio_mmc_host *host)
TMIO_MASK_INIT_RCAR2);
}
-/*
- * This is a temporary workaround! This driver used 'hw_reset' wrongly and the
- * fix for that showed a regression. So, we mimic the old behaviour until the
- * proper solution is found.
- */
-static void renesas_sdhi_hw_reset(struct mmc_host *mmc)
-{
- struct tmio_mmc_host *host = mmc_priv(mmc);
- renesas_sdhi_reset(host);
-}
-
#define SH_MOBILE_SDHI_MIN_TAP_ROW 3
static int renesas_sdhi_select_tuning(struct tmio_mmc_host *host)
@@ -1020,8 +1009,6 @@ int renesas_sdhi_probe(struct platform_device *pdev,
if (of_data && of_data->scc_offset) {
priv->scc_ctl = host->ctl + of_data->scc_offset;
host->reset = renesas_sdhi_reset;
- host->ops.hw_reset = renesas_sdhi_hw_reset;
- host->mmc->caps |= MMC_CAP_HW_RESET;
}
}
@@ -1160,6 +1147,7 @@ int renesas_sdhi_remove(struct platform_device *pdev)
tmio_mmc_host_remove(host);
renesas_sdhi_clk_disable(host);
+ tmio_mmc_host_free(host);
return 0;
}
diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
index a30796e79b1c..6de02f09c322 100644
--- a/drivers/mmc/host/sdhci-esdhc.h
+++ b/drivers/mmc/host/sdhci-esdhc.h
@@ -5,6 +5,7 @@
* Copyright (c) 2007 Freescale Semiconductor, Inc.
* Copyright (c) 2009 MontaVista Software, Inc.
* Copyright (c) 2010 Pengutronix e.K.
+ * Copyright 2020 NXP
* Author: Wolfram Sang <kernel@pengutronix.de>
*/
@@ -88,6 +89,7 @@
/* DLL Config 0 Register */
#define ESDHC_DLLCFG0 0x160
#define ESDHC_DLL_ENABLE 0x80000000
+#define ESDHC_DLL_RESET 0x40000000
#define ESDHC_DLL_FREQ_SEL 0x08000000
/* DLL Config 1 Register */
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 0b45eff6fed4..ab5ab969f711 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -4,6 +4,7 @@
*
* Copyright (c) 2007, 2010, 2012 Freescale Semiconductor, Inc.
* Copyright (c) 2009 MontaVista Software, Inc.
+ * Copyright 2020 NXP
*
* Authors: Xiaobo Xie <X.Xie@freescale.com>
* Anton Vorontsov <avorontsov@ru.mvista.com>
@@ -19,6 +20,7 @@
#include <linux/clk.h>
#include <linux/ktime.h>
#include <linux/dma-mapping.h>
+#include <linux/iopoll.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include "sdhci-pltfm.h"
@@ -743,6 +745,21 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
if (host->mmc->actual_clock == MMC_HS200_MAX_DTR)
temp |= ESDHC_DLL_FREQ_SEL;
sdhci_writel(host, temp, ESDHC_DLLCFG0);
+
+ temp |= ESDHC_DLL_RESET;
+ sdhci_writel(host, temp, ESDHC_DLLCFG0);
+ udelay(1);
+ temp &= ~ESDHC_DLL_RESET;
+ sdhci_writel(host, temp, ESDHC_DLLCFG0);
+
+ /* Wait max 20 ms */
+ if (read_poll_timeout(sdhci_readl, temp,
+ temp & ESDHC_DLL_STS_SLV_LOCK,
+ 10, 20000, false,
+ host, ESDHC_DLLSTAT0))
+ pr_err("%s: timeout for delay chain lock.\n",
+ mmc_hostname(host->mmc));
+
temp = sdhci_readl(host, ESDHC_TBCTL);
sdhci_writel(host, temp | ESDHC_HS400_WNDW_ADJUST, ESDHC_TBCTL);
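
For reference, read_poll_timeout() from <linux/iopoll.h> takes (read op,
result variable, exit condition, sleep in µs, timeout in µs,
sleep-before-read flag, arguments for the op), so the hunk above polls
ESDHC_DLLSTAT0 every 10 µs for at most 20 ms. Open-coded, the wait is
roughly equivalent to this sketch:

    u32 stat;
    unsigned long deadline = jiffies + msecs_to_jiffies(20);

    while (!((stat = sdhci_readl(host, ESDHC_DLLSTAT0)) &
             ESDHC_DLL_STS_SLV_LOCK)) {
            if (time_after(jiffies, deadline)) {
                    pr_err("%s: timeout for delay chain lock.\n",
                           mmc_hostname(host->mmc));
                    break;
            }
            usleep_range(10, 20);
    }
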
@@ -1052,6 +1069,17 @@ static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
esdhc_tuning_block_enable(host, true);
+ /*
+ * The eSDHC controller takes the data timeout value into account
+ * during tuning. If the SD card is too slow sending the response, the
+ * timer will expire and a "Buffer Read Ready" interrupt without data
+ * is triggered. This leads to tuning errors.
+ *
+ * Just set the timeout to the maximum value because the core will
+ * already take care of it in sdhci_send_tuning().
+ */
+ sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);
+
hs400_tuning = host->flags & SDHCI_HS400_TUNING;
do {
@@ -1296,6 +1324,8 @@ static struct soc_device_attribute soc_fixup_sdhc_clkdivs[] = {
static struct soc_device_attribute soc_unreliable_pulse_detection[] = {
{ .family = "QorIQ LX2160A", .revision = "1.0", },
+ { .family = "QorIQ LX2160A", .revision = "2.0", },
+ { .family = "QorIQ LS1028A", .revision = "1.0", },
{ },
};
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 592a55a34b58..3561ae8a481a 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1384,9 +1384,11 @@ static inline void sdhci_auto_cmd_select(struct sdhci_host *host,
/*
* In case of Version 4.10 or later, use of 'Auto CMD Auto
* Select' is recommended rather than use of 'Auto CMD12
- * Enable' or 'Auto CMD23 Enable'.
+ * Enable' or 'Auto CMD23 Enable'. We require Version 4 Mode
+	 * here because some controllers (e.g. sdhci-of-dwcmshc) expect it.
*/
- if (host->version >= SDHCI_SPEC_410 && (use_cmd12 || use_cmd23)) {
+ if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
+ (use_cmd12 || use_cmd23)) {
*mode |= SDHCI_TRNS_AUTO_SEL;
ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index 2fce0518632d..cb4149fd12e0 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -175,6 +175,8 @@ static void tmio_mmc_reset(struct tmio_mmc_host *host)
if (host->reset)
host->reset(host);
+ tmio_mmc_abort_dma(host);
+
if (host->pdata->flags & TMIO_MMC_SDIO_IRQ) {
sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
@@ -223,8 +225,6 @@ static void tmio_mmc_reset_work(struct work_struct *work)
/* Ready for new calls */
host->mrq = NULL;
-
- tmio_mmc_abort_dma(host);
mmc_request_done(host->mmc, mrq);
}
@@ -927,6 +927,9 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
switch (ios->power_mode) {
case MMC_POWER_OFF:
tmio_mmc_power_off(host);
+ /* Downgrade ensures a sane state for tuning HW (e.g. SCC) */
+ if (host->mmc->ops->hs400_downgrade)
+ host->mmc->ops->hs400_downgrade(host->mmc);
host->set_clock(host, 0);
break;
case MMC_POWER_UP:
diff --git a/drivers/mtd/nand/raw/fsl_ifc_nand.c b/drivers/mtd/nand/raw/fsl_ifc_nand.c
index 0e7a9b64301e..e345f9d9f8e8 100644
--- a/drivers/mtd/nand/raw/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/raw/fsl_ifc_nand.c
@@ -707,6 +707,30 @@ static int fsl_ifc_attach_chip(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_global __iomem *ifc_global = ctrl->gregs;
+ u32 csor;
+
+ csor = ifc_in32(&ifc_global->csor_cs[priv->bank].csor);
+
+ /* Must also set CSOR_NAND_ECC_ENC_EN if DEC_EN set */
+ if (csor & CSOR_NAND_ECC_DEC_EN) {
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+ mtd_set_ooblayout(mtd, &fsl_ifc_ooblayout_ops);
+
+ /* Hardware generates ECC per 512 Bytes */
+ chip->ecc.size = 512;
+ if ((csor & CSOR_NAND_ECC_MODE_MASK) == CSOR_NAND_ECC_MODE_4) {
+ chip->ecc.bytes = 8;
+ chip->ecc.strength = 4;
+ } else {
+ chip->ecc.bytes = 16;
+ chip->ecc.strength = 8;
+ }
+ } else {
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+ }
dev_dbg(priv->dev, "%s: nand->numchips = %d\n", __func__,
nanddev_ntargets(&chip->base));
@@ -910,25 +934,6 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
return -ENODEV;
}
- /* Must also set CSOR_NAND_ECC_ENC_EN if DEC_EN set */
- if (csor & CSOR_NAND_ECC_DEC_EN) {
- chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
- mtd_set_ooblayout(mtd, &fsl_ifc_ooblayout_ops);
-
- /* Hardware generates ECC per 512 Bytes */
- chip->ecc.size = 512;
- if ((csor & CSOR_NAND_ECC_MODE_MASK) == CSOR_NAND_ECC_MODE_4) {
- chip->ecc.bytes = 8;
- chip->ecc.strength = 4;
- } else {
- chip->ecc.bytes = 16;
- chip->ecc.strength = 8;
- }
- } else {
- chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
- chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
- }
-
ret = fsl_ifc_sram_init(priv);
if (ret)
return ret;
diff --git a/drivers/mtd/nand/raw/mxc_nand.c b/drivers/mtd/nand/raw/mxc_nand.c
index d4200eb2ad32..684c51e5e60d 100644
--- a/drivers/mtd/nand/raw/mxc_nand.c
+++ b/drivers/mtd/nand/raw/mxc_nand.c
@@ -1681,6 +1681,11 @@ static int mxcnd_attach_chip(struct nand_chip *chip)
struct mxc_nand_host *host = nand_get_controller_data(chip);
struct device *dev = mtd->dev.parent;
+ chip->ecc.bytes = host->devtype_data->eccbytes;
+ host->eccsize = host->devtype_data->eccsize;
+ chip->ecc.size = 512;
+ mtd_set_ooblayout(mtd, host->devtype_data->ooblayout);
+
switch (chip->ecc.engine_type) {
case NAND_ECC_ENGINE_TYPE_ON_HOST:
chip->ecc.read_page = mxc_nand_read_page;
@@ -1836,19 +1841,7 @@ static int mxcnd_probe(struct platform_device *pdev)
if (host->devtype_data->axi_offset)
host->regs_axi = host->base + host->devtype_data->axi_offset;
- this->ecc.bytes = host->devtype_data->eccbytes;
- host->eccsize = host->devtype_data->eccsize;
-
this->legacy.select_chip = host->devtype_data->select_chip;
- this->ecc.size = 512;
- mtd_set_ooblayout(mtd, host->devtype_data->ooblayout);
-
- if (host->pdata.hw_ecc) {
- this->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
- } else {
- this->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
- this->ecc.algo = NAND_ECC_ALGO_HAMMING;
- }
/* NAND bus width determines access functions used by upper layer */
if (host->pdata.width == 2)
diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
index b31a5818234d..550bda4d1415 100644
--- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c
+++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
@@ -1708,6 +1708,13 @@ static int stm32_fmc2_nfc_attach_chip(struct nand_chip *chip)
return -EINVAL;
}
+ /* Default ECC settings in case they are not set in the device tree */
+ if (!chip->ecc.size)
+ chip->ecc.size = FMC2_ECC_STEP_SIZE;
+
+ if (!chip->ecc.strength)
+ chip->ecc.strength = FMC2_ECC_BCH8;
+
ret = nand_ecc_choose_conf(chip, &stm32_fmc2_nfc_ecc_caps,
mtd->oobsize - FMC2_BBM_LEN);
if (ret) {
@@ -1727,8 +1734,7 @@ static int stm32_fmc2_nfc_attach_chip(struct nand_chip *chip)
mtd_set_ooblayout(mtd, &stm32_fmc2_nfc_ooblayout_ops);
- if (chip->options & NAND_BUSWIDTH_16)
- stm32_fmc2_nfc_set_buswidth_16(nfc, true);
+ stm32_fmc2_nfc_setup(chip);
return 0;
}
@@ -1952,11 +1958,6 @@ static int stm32_fmc2_nfc_probe(struct platform_device *pdev)
chip->options |= NAND_BUSWIDTH_AUTO | NAND_NO_SUBPAGE_WRITE |
NAND_USES_DMA;
- /* Default ECC settings */
- chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
- chip->ecc.size = FMC2_ECC_STEP_SIZE;
- chip->ecc.strength = FMC2_ECC_BCH8;
-
/* Scan to find existence of the device */
ret = nand_scan(chip, nand->ncs);
if (ret)
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
index 0369d98b2d12..f0ae7a01703a 100644
--- a/drivers/mtd/spi-nor/core.c
+++ b/drivers/mtd/spi-nor/core.c
@@ -2701,11 +2701,10 @@ static void spi_nor_sfdp_init_params(struct spi_nor *nor)
memcpy(&sfdp_params, nor->params, sizeof(sfdp_params));
- if (spi_nor_parse_sfdp(nor, &sfdp_params)) {
+ if (spi_nor_parse_sfdp(nor, nor->params)) {
+ memcpy(nor->params, &sfdp_params, sizeof(*nor->params));
nor->addr_width = 0;
nor->flags &= ~SNOR_F_4B_OPCODES;
- } else {
- memcpy(nor->params, &sfdp_params, sizeof(*nor->params));
}
}
@@ -3009,13 +3008,15 @@ static int spi_nor_set_addr_width(struct spi_nor *nor)
/* already configured from SFDP */
} else if (nor->info->addr_width) {
nor->addr_width = nor->info->addr_width;
- } else if (nor->mtd.size > 0x1000000) {
- /* enable 4-byte addressing if the device exceeds 16MiB */
- nor->addr_width = 4;
} else {
nor->addr_width = 3;
}
+ if (nor->addr_width == 3 && nor->mtd.size > 0x1000000) {
+ /* enable 4-byte addressing if the device exceeds 16MiB */
+ nor->addr_width = 4;
+ }
+
if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
dev_dbg(nor->dev, "address width is too large: %u\n",
nor->addr_width);
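
A standalone worked example of the reordered logic above: with the promotion now applied after the width is chosen, even a part whose info table declares a 3-byte width gets bumped to 4-byte addressing once it exceeds 16 MiB:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t size = 32 * 1024 * 1024;	/* 32 MiB device */
	unsigned int addr_width = 3;		/* from info table or default */

	/* mirror of the hunk above: 3-byte widths are promoted */
	if (addr_width == 3 && size > 0x1000000)
		addr_width = 4;

	printf("addr_width = %u\n", addr_width);	/* prints 4 */
	return 0;
}
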
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index b70ded3760f2..6dee4f8f2024 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -512,9 +512,13 @@ __can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr)
*/
struct sk_buff *skb = priv->echo_skb[idx];
struct canfd_frame *cf = (struct canfd_frame *)skb->data;
- u8 len = cf->len;
- *len_ptr = len;
+ /* get the real payload length for netdev statistics */
+ if (cf->can_id & CAN_RTR_FLAG)
+ *len_ptr = 0;
+ else
+ *len_ptr = cf->len;
+
priv->echo_skb[idx] = NULL;
return skb;
@@ -538,7 +542,11 @@ unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx)
if (!skb)
return 0;
- netif_rx(skb);
+ skb_get(skb);
+ if (netif_rx(skb) == NET_RX_SUCCESS)
+ dev_consume_skb_any(skb);
+ else
+ dev_kfree_skb_any(skb);
return len;
}
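
The extra reference taken above is what lets the driver pick the right free variant after netif_rx() has consumed its own reference; condensed, the pattern is:

/* Condensed form of the pattern above (fragment, kernel context).
 * netif_rx() always consumes one reference; holding a second one lets
 * the caller report the outcome with the matching variant, and the
 * *_any() helpers are safe from any context, including hard IRQ.
 */
skb_get(skb);				/* refcount 1 -> 2 */
if (netif_rx(skb) == NET_RX_SUCCESS)
	dev_consume_skb_any(skb);	/* delivered: not a drop */
else
	dev_kfree_skb_any(skb);		/* backlog full: a real drop */
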
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 4d594e977497..881799bd9c5e 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -217,7 +217,7 @@
* MX8MP FlexCAN3 03.00.17.01 yes yes no yes yes yes
* VF610 FlexCAN3 ? no yes no yes yes? no
* LS1021A FlexCAN2 03.00.04.00 no yes no no yes no
- * LX2160A FlexCAN3 03.00.23.00 no yes no no yes yes
+ * LX2160A FlexCAN3 03.00.23.00 no yes no yes yes yes
*
* Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.
*/
@@ -400,19 +400,19 @@ static struct flexcan_devtype_data fsl_imx8mp_devtype_data = {
static const struct flexcan_devtype_data fsl_vf610_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP |
- FLEXCAN_QUIRK_BROKEN_PERR_STATE,
+ FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SUPPORT_ECC,
};
static const struct flexcan_devtype_data fsl_ls1021a_r2_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
- FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
- FLEXCAN_QUIRK_USE_OFF_TIMESTAMP,
+ FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP,
};
static const struct flexcan_devtype_data fsl_lx2160a_r1_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
- FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | FLEXCAN_QUIRK_SUPPORT_FD,
+ FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | FLEXCAN_QUIRK_SUPPORT_FD |
+ FLEXCAN_QUIRK_SUPPORT_ECC,
};
static const struct can_bittiming_const flexcan_bittiming_const = {
@@ -2062,6 +2062,8 @@ static int flexcan_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
+ device_set_wakeup_enable(&pdev->dev, false);
+ device_set_wakeup_capable(&pdev->dev, false);
unregister_flexcandev(dev);
pm_runtime_disable(&pdev->dev);
free_candev(dev);
diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c
index 10aa3e457c33..40c33b8a5fda 100644
--- a/drivers/net/can/peak_canfd/peak_canfd.c
+++ b/drivers/net/can/peak_canfd/peak_canfd.c
@@ -262,8 +262,7 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
cf_len = get_can_dlc(pucan_msg_get_dlc(msg));
/* if this frame is an echo, */
- if ((rx_msg_flags & PUCAN_MSG_LOOPED_BACK) &&
- !(rx_msg_flags & PUCAN_MSG_SELF_RECEIVE)) {
+ if (rx_msg_flags & PUCAN_MSG_LOOPED_BACK) {
unsigned long flags;
spin_lock_irqsave(&priv->echo_lock, flags);
@@ -277,7 +276,13 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
netif_wake_queue(priv->ndev);
spin_unlock_irqrestore(&priv->echo_lock, flags);
- return 0;
+
+ /* if this frame is only an echo, stop here. Otherwise,
+ * continue to push this application self-received frame into
+ * its own rx queue.
+ */
+ if (!(rx_msg_flags & PUCAN_MSG_SELF_RECEIVE))
+ return 0;
}
/* otherwise, it should be pushed into rx fifo */
diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/rx-offload.c
index 3b180269a92d..6e95193b215b 100644
--- a/drivers/net/can/rx-offload.c
+++ b/drivers/net/can/rx-offload.c
@@ -245,7 +245,7 @@ int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
if (skb_queue_len(&offload->skb_queue) >
offload->skb_queue_len_max) {
- kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return -ENOBUFS;
}
@@ -290,7 +290,7 @@ int can_rx_offload_queue_tail(struct can_rx_offload *offload,
{
if (skb_queue_len(&offload->skb_queue) >
offload->skb_queue_len_max) {
- kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return -ENOBUFS;
}
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
index c3f49543ff26..9c215f7c5f81 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
@@ -75,11 +75,11 @@ static const char *__mcp251xfd_get_model_str(enum mcp251xfd_model model)
{
switch (model) {
case MCP251XFD_MODEL_MCP2517FD:
- return "MCP2517FD"; break;
+ return "MCP2517FD";
case MCP251XFD_MODEL_MCP2518FD:
- return "MCP2518FD"; break;
+ return "MCP2518FD";
case MCP251XFD_MODEL_MCP251XFD:
- return "MCP251xFD"; break;
+ return "MCP251xFD";
}
return "<unknown>";
@@ -95,21 +95,21 @@ static const char *mcp251xfd_get_mode_str(const u8 mode)
{
switch (mode) {
case MCP251XFD_REG_CON_MODE_MIXED:
- return "Mixed (CAN FD/CAN 2.0)"; break;
+ return "Mixed (CAN FD/CAN 2.0)";
case MCP251XFD_REG_CON_MODE_SLEEP:
- return "Sleep"; break;
+ return "Sleep";
case MCP251XFD_REG_CON_MODE_INT_LOOPBACK:
- return "Internal Loopback"; break;
+ return "Internal Loopback";
case MCP251XFD_REG_CON_MODE_LISTENONLY:
- return "Listen Only"; break;
+ return "Listen Only";
case MCP251XFD_REG_CON_MODE_CONFIG:
- return "Configuration"; break;
+ return "Configuration";
case MCP251XFD_REG_CON_MODE_EXT_LOOPBACK:
- return "External Loopback"; break;
+ return "External Loopback";
case MCP251XFD_REG_CON_MODE_CAN2_0:
- return "CAN 2.0"; break;
+ return "CAN 2.0";
case MCP251XFD_REG_CON_MODE_RESTRICTED:
- return "Restricted Operation"; break;
+ return "Restricted Operation";
}
return "<unknown>";
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
index ba25902dd78c..314f868b3465 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
@@ -173,7 +173,7 @@ mcp251xfd_regmap_nocrc_read(void *context,
memcpy(&buf_tx->cmd, reg, sizeof(buf_tx->cmd));
if (MCP251XFD_SANITIZE_SPI)
memset(buf_tx->data, 0x0, val_len);
- };
+ }
err = spi_sync(spi, &msg);
if (err)
@@ -330,17 +330,17 @@ mcp251xfd_regmap_crc_read(void *context,
goto out;
}
- netdev_dbg(priv->ndev,
- "CRC read error at address 0x%04x (length=%zd, data=%*ph, CRC=0x%04x) retrying.\n",
- reg, val_len, (int)val_len, buf_rx->data,
- get_unaligned_be16(buf_rx->data + val_len));
- }
-
- if (err) {
netdev_info(priv->ndev,
- "CRC read error at address 0x%04x (length=%zd, data=%*ph, CRC=0x%04x).\n",
+ "CRC read error at address 0x%04x (length=%zd, data=%*ph, CRC=0x%04x) retrying.\n",
reg, val_len, (int)val_len, buf_rx->data,
get_unaligned_be16(buf_rx->data + val_len));
+ }
+
+ if (err) {
+ netdev_err(priv->ndev,
+ "CRC read error at address 0x%04x (length=%zd, data=%*ph, CRC=0x%04x).\n",
+ reg, val_len, (int)val_len, buf_rx->data,
+ get_unaligned_be16(buf_rx->data + val_len));
return err;
}
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 1d63006c97bc..9913f5458279 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -933,7 +933,7 @@ static int ti_hecc_probe(struct platform_device *pdev)
err = clk_prepare_enable(priv->clk);
if (err) {
dev_err(&pdev->dev, "clk_prepare_enable() failed\n");
- goto probe_exit_clk;
+ goto probe_exit_release_clk;
}
priv->offload.mailbox_read = ti_hecc_mailbox_read;
@@ -942,7 +942,7 @@ static int ti_hecc_probe(struct platform_device *pdev)
err = can_rx_offload_add_timestamp(ndev, &priv->offload);
if (err) {
dev_err(&pdev->dev, "can_rx_offload_add_timestamp() failed\n");
- goto probe_exit_clk;
+ goto probe_exit_disable_clk;
}
err = register_candev(ndev);
@@ -960,7 +960,9 @@ static int ti_hecc_probe(struct platform_device *pdev)
probe_exit_offload:
can_rx_offload_del(&priv->offload);
-probe_exit_clk:
+probe_exit_disable_clk:
+ clk_disable_unprepare(priv->clk);
+probe_exit_release_clk:
clk_put(priv->clk);
probe_exit_candev:
free_candev(ndev);
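
The relabelled error path follows the usual reverse-order unwind; a generic standalone sketch, with hypothetical step/unstep helpers standing in for clk_get(), clk_prepare_enable(), can_rx_offload_add_timestamp() and their undo calls:

/* Every failure jumps to the label that undoes exactly what already
 * succeeded, in reverse order.  All names here are hypothetical.
 */
extern int step_get(void), step_enable(void), step_offload(void);
extern void unstep_enable(void), unstep_get(void);

int probe_sketch(void)
{
	int err;

	err = step_get();
	if (err)
		return err;
	err = step_enable();
	if (err)
		goto release;
	err = step_offload();
	if (err)
		goto disable;
	return 0;

disable:
	unstep_enable();	/* clk_disable_unprepare() */
release:
	unstep_get();		/* clk_put() */
	return err;
}
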
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index d91df34e7fa8..c2764799f9ef 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -130,14 +130,55 @@ void peak_usb_get_ts_time(struct peak_time_ref *time_ref, u32 ts, ktime_t *time)
/* protect from getting time before setting now */
if (ktime_to_ns(time_ref->tv_host)) {
u64 delta_us;
+ s64 delta_ts = 0;
+
+ /* General case: ts_dev_1 < ts_dev_2 < ts, with:
+ *
+ * - ts_dev_1 = previous sync timestamp
+ * - ts_dev_2 = last sync timestamp
+ * - ts = event timestamp
+ * - ts_period = known sync period (theoretical)
+ * ~ ts_dev_2 - ts_dev_1
+ * *but*:
+ *
+ * - time counters wrap (see adapter->ts_used_bits)
+ * - sometimes, ts_dev_1 < ts < ts_dev_2
+ *
+ * "normal" case (sync time counters increase):
+ * must take into account case when ts wraps (tsw)
+ *
+ * < ts_period > < >
+ * | | |
+ * ---+--------+----+-------0-+--+-->
+ * ts_dev_1 | ts_dev_2 |
+ * ts tsw
+ */
+ if (time_ref->ts_dev_1 < time_ref->ts_dev_2) {
+ /* case when event time (tsw) wraps */
+ if (ts < time_ref->ts_dev_1)
+ delta_ts = 1 << time_ref->adapter->ts_used_bits;
+
+ /* Otherwise, sync time counter (ts_dev_2) has wrapped:
+ * handle case when event time (tsn) hasn't.
+ *
+ * < ts_period > < >
+ * | | |
+ * ---+--------+--0-+---------+--+-->
+ * ts_dev_1 | ts_dev_2 |
+ * tsn ts
+ */
+ } else if (time_ref->ts_dev_1 < ts) {
+ delta_ts = -(1 << time_ref->adapter->ts_used_bits);
+ }
- delta_us = ts - time_ref->ts_dev_2;
- if (ts < time_ref->ts_dev_2)
- delta_us &= (1 << time_ref->adapter->ts_used_bits) - 1;
+ /* add delay between last sync and event timestamps */
+ delta_ts += (signed int)(ts - time_ref->ts_dev_2);
- delta_us += time_ref->ts_total;
+ /* add time from beginning to last sync */
+ delta_ts += time_ref->ts_total;
- delta_us *= time_ref->adapter->us_per_ts_scale;
+ /* convert the tick count into microseconds */
+ delta_us = delta_ts * time_ref->adapter->us_per_ts_scale;
delta_us >>= time_ref->adapter->us_per_ts_shift;
*time = ktime_add_us(time_ref->tv_host_0, delta_us);
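
A standalone worked example of the wrap handling above, with a hypothetical 16-bit device counter (ts_used_bits = 16): an event stamped 0x0010 shortly after a sync at 0xff80 yields 144 elapsed ticks rather than a huge negative jump:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned int ts_used_bits = 16;	/* hypothetical width */
	uint32_t ts_dev_1 = 0xff00, ts_dev_2 = 0xff80;	/* rising counters */
	uint32_t ts = 0x0010;		/* event stamped after the wrap */
	int64_t delta_ts = 0;

	if (ts_dev_1 < ts_dev_2 && ts < ts_dev_1)
		delta_ts = 1 << ts_used_bits;	/* event wrapped: +65536 */
	else if (ts_dev_2 < ts_dev_1 && ts_dev_1 < ts)
		delta_ts = -(1 << ts_used_bits);

	delta_ts += (int32_t)(ts - ts_dev_2);	/* signed delta: -65392 */
	printf("elapsed since last sync: %lld ticks\n",
	       (long long)delta_ts);		/* prints 144 */
	return 0;
}
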
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index ab63fd9eb982..d29d20525588 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -468,12 +468,18 @@ static int pcan_usb_fd_decode_canmsg(struct pcan_usb_fd_if *usb_if,
struct pucan_msg *rx_msg)
{
struct pucan_rx_msg *rm = (struct pucan_rx_msg *)rx_msg;
- struct peak_usb_device *dev = usb_if->dev[pucan_msg_get_channel(rm)];
- struct net_device *netdev = dev->netdev;
+ struct peak_usb_device *dev;
+ struct net_device *netdev;
struct canfd_frame *cfd;
struct sk_buff *skb;
const u16 rx_msg_flags = le16_to_cpu(rm->flags);
+ if (pucan_msg_get_channel(rm) >= ARRAY_SIZE(usb_if->dev))
+ return -ENOMEM;
+
+ dev = usb_if->dev[pucan_msg_get_channel(rm)];
+ netdev = dev->netdev;
+
if (rx_msg_flags & PUCAN_MSG_EXT_DATA_LEN) {
/* CANFD frame case */
skb = alloc_canfd_skb(netdev, &cfd);
@@ -519,15 +525,21 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,
struct pucan_msg *rx_msg)
{
struct pucan_status_msg *sm = (struct pucan_status_msg *)rx_msg;
- struct peak_usb_device *dev = usb_if->dev[pucan_stmsg_get_channel(sm)];
- struct pcan_usb_fd_device *pdev =
- container_of(dev, struct pcan_usb_fd_device, dev);
+ struct pcan_usb_fd_device *pdev;
enum can_state new_state = CAN_STATE_ERROR_ACTIVE;
enum can_state rx_state, tx_state;
- struct net_device *netdev = dev->netdev;
+ struct peak_usb_device *dev;
+ struct net_device *netdev;
struct can_frame *cf;
struct sk_buff *skb;
+ if (pucan_stmsg_get_channel(sm) >= ARRAY_SIZE(usb_if->dev))
+ return -ENOMEM;
+
+ dev = usb_if->dev[pucan_stmsg_get_channel(sm)];
+ pdev = container_of(dev, struct pcan_usb_fd_device, dev);
+ netdev = dev->netdev;
+
/* nothing should be sent while in BUS_OFF state */
if (dev->can.state == CAN_STATE_BUS_OFF)
return 0;
@@ -579,9 +591,14 @@ static int pcan_usb_fd_decode_error(struct pcan_usb_fd_if *usb_if,
struct pucan_msg *rx_msg)
{
struct pucan_error_msg *er = (struct pucan_error_msg *)rx_msg;
- struct peak_usb_device *dev = usb_if->dev[pucan_ermsg_get_channel(er)];
- struct pcan_usb_fd_device *pdev =
- container_of(dev, struct pcan_usb_fd_device, dev);
+ struct pcan_usb_fd_device *pdev;
+ struct peak_usb_device *dev;
+
+ if (pucan_ermsg_get_channel(er) >= ARRAY_SIZE(usb_if->dev))
+ return -EINVAL;
+
+ dev = usb_if->dev[pucan_ermsg_get_channel(er)];
+ pdev = container_of(dev, struct pcan_usb_fd_device, dev);
/* keep a trace of tx and rx error counters for later use */
pdev->bec.txerr = er->tx_err_cnt;
@@ -595,11 +612,17 @@ static int pcan_usb_fd_decode_overrun(struct pcan_usb_fd_if *usb_if,
struct pucan_msg *rx_msg)
{
struct pcan_ufd_ovr_msg *ov = (struct pcan_ufd_ovr_msg *)rx_msg;
- struct peak_usb_device *dev = usb_if->dev[pufd_omsg_get_channel(ov)];
- struct net_device *netdev = dev->netdev;
+ struct peak_usb_device *dev;
+ struct net_device *netdev;
struct can_frame *cf;
struct sk_buff *skb;
+ if (pufd_omsg_get_channel(ov) >= ARRAY_SIZE(usb_if->dev))
+ return -EINVAL;
+
+ dev = usb_if->dev[pufd_omsg_get_channel(ov)];
+ netdev = dev->netdev;
+
/* allocate an skb to store the error frame */
skb = alloc_can_err_skb(netdev, &cf);
if (!skb)
@@ -716,6 +739,9 @@ static int pcan_usb_fd_encode_msg(struct peak_usb_device *dev,
u16 tx_msg_size, tx_msg_flags;
u8 can_dlc;
+ if (cfd->len > CANFD_MAX_DLEN)
+ return -EINVAL;
+
tx_msg_size = ALIGN(sizeof(struct pucan_tx_msg) + cfd->len, 4);
tx_msg->size = cpu_to_le16(tx_msg_size);
tx_msg->type = cpu_to_le16(PUCAN_MSG_CAN_TX);
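
All four decode paths above add the same guard: validate the device-supplied channel index before indexing the per-channel array. A standalone sketch of the shape (names hypothetical):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char *devs[2] = { "can0", "can1" };

static int decode_sketch(unsigned int channel)
{
	if (channel >= ARRAY_SIZE(devs))	/* reject before dereference */
		return -1;
	printf("msg for %s\n", devs[channel]);
	return 0;
}

int main(void)
{
	decode_sketch(1);	/* ok */
	decode_sketch(7);	/* rejected: no out-of-bounds read */
	return 0;
}
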
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 6c4d00d2dbdc..48d746e18f30 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -1395,7 +1395,7 @@ static int xcan_open(struct net_device *ndev)
if (ret < 0) {
netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
__func__, ret);
- return ret;
+ goto err;
}
ret = request_irq(ndev->irq, xcan_interrupt, priv->irq_flags,
@@ -1479,6 +1479,7 @@ static int xcan_get_berr_counter(const struct net_device *ndev,
if (ret < 0) {
netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
__func__, ret);
+ pm_runtime_put(priv->dev);
return ret;
}
@@ -1793,7 +1794,7 @@ static int xcan_probe(struct platform_device *pdev)
if (ret < 0) {
netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
__func__, ret);
- goto err_pmdisable;
+ goto err_disableclks;
}
if (priv->read_reg(priv, XCAN_SR_OFFSET) != XCAN_SR_CONFIG_MASK) {
@@ -1828,7 +1829,6 @@ static int xcan_probe(struct platform_device *pdev)
err_disableclks:
pm_runtime_put(priv->dev);
-err_pmdisable:
pm_runtime_disable(&pdev->dev);
err_free:
free_candev(ndev);
diff --git a/drivers/net/dsa/mv88e6xxx/devlink.c b/drivers/net/dsa/mv88e6xxx/devlink.c
index 10cd1bfd81a0..ade04c036fd9 100644
--- a/drivers/net/dsa/mv88e6xxx/devlink.c
+++ b/drivers/net/dsa/mv88e6xxx/devlink.c
@@ -393,8 +393,10 @@ static int mv88e6xxx_region_atu_snapshot(struct devlink *dl,
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_fid_map(chip, fid_bitmap);
- if (err)
+ if (err) {
+ kfree(table);
goto out;
+ }
while (1) {
fid = find_next_bit(fid_bitmap, MV88E6XXX_N_FID, fid + 1);
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index 53064e0e1618..5bdac669a339 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -1219,8 +1219,8 @@ qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
priv->port_mtu[port] = new_mtu;
for (i = 0; i < QCA8K_NUM_PORTS; i++)
- if (priv->port_mtu[port] > mtu)
- mtu = priv->port_mtu[port];
+ if (priv->port_mtu[i] > mtu)
+ mtu = priv->port_mtu[i];
/* Include L2 header / FCS length */
qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, mtu + ETH_HLEN + ETH_FCS_LEN);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index fa147865e33f..7975f59735d6 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1160,16 +1160,6 @@ static void bnxt_queue_sp_work(struct bnxt *bp)
schedule_work(&bp->sp_task);
}
-static void bnxt_cancel_sp_work(struct bnxt *bp)
-{
- if (BNXT_PF(bp)) {
- flush_workqueue(bnxt_pf_wq);
- } else {
- cancel_work_sync(&bp->sp_task);
- cancel_delayed_work_sync(&bp->fw_reset_task);
- }
-}
-
static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
if (!rxr->bnapi->in_reset) {
@@ -4362,7 +4352,8 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
u16 dst = BNXT_HWRM_CHNL_CHIMP;
- if (BNXT_NO_FW_ACCESS(bp))
+ if (BNXT_NO_FW_ACCESS(bp) &&
+ le16_to_cpu(req->req_type) != HWRM_FUNC_RESET)
return -EBUSY;
if (msg_len > BNXT_HWRM_MAX_REQ_LEN) {
@@ -9789,7 +9780,10 @@ int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
int rc = 0;
- rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
+ if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
+ rc = -EIO;
+ if (!rc)
+ rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
if (rc) {
netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
dev_close(bp->dev);
@@ -12108,15 +12102,17 @@ static void bnxt_remove_one(struct pci_dev *pdev)
if (BNXT_PF(bp))
bnxt_sriov_disable(bp);
- clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
- bnxt_cancel_sp_work(bp);
- bp->sp_event = 0;
-
- bnxt_dl_fw_reporters_destroy(bp, true);
if (BNXT_PF(bp))
devlink_port_type_clear(&bp->dl_port);
pci_disable_pcie_error_reporting(pdev);
unregister_netdev(dev);
+ clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ /* Flush any pending tasks */
+ cancel_work_sync(&bp->sp_task);
+ cancel_delayed_work_sync(&bp->fw_reset_task);
+ bp->sp_event = 0;
+
+ bnxt_dl_fw_reporters_destroy(bp, true);
bnxt_dl_unregister(bp);
bnxt_shutdown_tc(bp);
@@ -12860,6 +12856,9 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
return PCI_ERS_RESULT_DISCONNECT;
}
+ if (state == pci_channel_io_frozen)
+ set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
+
if (netif_running(netdev))
bnxt_close(netdev);
@@ -12886,7 +12885,7 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct bnxt *bp = netdev_priv(netdev);
- int err = 0;
+ int err = 0, off;
pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
netdev_info(bp->dev, "PCI Slot Reset\n");
@@ -12898,6 +12897,20 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
"Cannot re-enable PCI device after reset.\n");
} else {
pci_set_master(pdev);
+ /* Upon fatal error, the device's internal logic that latches
+ * the BAR values gets reset and is restored only by
+ * rewriting the BARs.
+ *
+ * As pci_restore_state() does not re-write the BARs if the
+ * value is the same as the one saved earlier, the driver
+ * needs to write the BARs as 0 to force a restore after a
+ * fatal error.
+ */
+ if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
+ &bp->state)) {
+ for (off = PCI_BASE_ADDRESS_0;
+ off <= PCI_BASE_ADDRESS_5; off += 4)
+ pci_write_config_dword(bp->pdev, off, 0);
+ }
pci_restore_state(pdev);
pci_save_state(pdev);
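
The zero-write trick above relies on the restore path comparing the live config value with the saved one before writing; a simplified model of that assumed behaviour, not the PCI core's actual code:

/* Sketch of the assumed compare-before-write semantics: a restore
 * helper that skips the write when the live value already equals the
 * saved one.  Pre-writing 0 to each BAR therefore guarantees a
 * mismatch and forces a real write.
 */
static void restore_dword_sketch(struct pci_dev *pdev, int off, u32 saved)
{
	u32 cur;

	pci_read_config_dword(pdev, off, &cur);
	if (cur != saved)
		pci_write_config_dword(pdev, off, saved);
}
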
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 21ef1c21f602..47b3c3127879 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1781,6 +1781,7 @@ struct bnxt {
#define BNXT_STATE_ABORT_ERR 5
#define BNXT_STATE_FW_FATAL_COND 6
#define BNXT_STATE_DRV_REGISTERED 7
+#define BNXT_STATE_PCI_CHANNEL_IO_FROZEN 8
#define BNXT_NO_FW_ACCESS(bp) \
(test_bit(BNXT_STATE_FW_FATAL_COND, &(bp)->state) || \
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 883e47c5b1a7..286f0341bdf8 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -1929,7 +1929,8 @@ static inline int macb_clear_csum(struct sk_buff *skb)
static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
{
- bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb);
+ bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb) ||
+ skb_is_nonlinear(*skb);
int padlen = ETH_ZLEN - (*skb)->len;
int headroom = skb_headroom(*skb);
int tailroom = skb_tailroom(*skb);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 3352dad6ca99..27308600da15 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -2124,6 +2124,9 @@ void cxgb4_inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
struct ulptx_sgl *sgl, u64 *end, unsigned int start,
const dma_addr_t *addr);
+void cxgb4_write_partial_sgl(const struct sk_buff *skb, struct sge_txq *q,
+ struct ulptx_sgl *sgl, u64 *end,
+ const dma_addr_t *addr, u32 start, u32 send_len);
void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n);
int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
u16 vlan);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 0273f40b85f7..17410fe86626 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -3573,6 +3573,8 @@ static int chcr_stats_show(struct seq_file *seq, void *v)
atomic64_read(&adap->ch_ktls_stats.ktls_tx_complete_pkts));
seq_printf(seq, "TX trim pkts : %20llu\n",
atomic64_read(&adap->ch_ktls_stats.ktls_tx_trimmed_pkts));
+ seq_printf(seq, "TX sw fallback : %20llu\n",
+ atomic64_read(&adap->ch_ktls_stats.ktls_tx_fallback));
while (i < MAX_NPORTS) {
ktls_port = &adap->ch_ktls_stats.ktls_port[i];
seq_printf(seq, "Port %d\n", i);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index 6ec5f2f26f05..4e55f7081644 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -145,13 +145,13 @@ static int configure_filter_smac(struct adapter *adap, struct filter_entry *f)
int err;
/* do a set-tcb for smac-sel and CWR bit.. */
- err = set_tcb_tflag(adap, f, f->tid, TF_CCTRL_CWR_S, 1, 1);
- if (err)
- goto smac_err;
-
err = set_tcb_field(adap, f, f->tid, TCB_SMAC_SEL_W,
TCB_SMAC_SEL_V(TCB_SMAC_SEL_M),
TCB_SMAC_SEL_V(f->smt->idx), 1);
+ if (err)
+ goto smac_err;
+
+ err = set_tcb_tflag(adap, f, f->tid, TF_CCTRL_CWR_S, 1, 1);
if (!err)
return 0;
@@ -862,6 +862,7 @@ int set_filter_wr(struct adapter *adapter, int fidx)
FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
+ FW_FILTER_WR_SMAC_V(f->fs.newsmac) |
FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
f->fs.newvlan == VLAN_REWRITE) |
FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
@@ -879,7 +880,7 @@ int set_filter_wr(struct adapter *adapter, int fidx)
FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
- fwr->smac_sel = 0;
+ fwr->smac_sel = f->smt->idx;
fwr->rx_chan_rx_rpl_iq =
htons(FW_FILTER_WR_RX_CHAN_V(0) |
FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
@@ -1323,11 +1324,8 @@ static void mk_act_open_req6(struct filter_entry *f, struct sk_buff *skb,
TX_QUEUE_V(f->fs.nat_mode) |
T5_OPT_2_VALID_F |
RX_CHANNEL_V(cxgb4_port_e2cchan(f->dev)) |
- CONG_CNTRL_V((f->fs.action == FILTER_DROP) |
- (f->fs.dirsteer << 1)) |
PACE_V((f->fs.maskhash) |
- ((f->fs.dirsteerhash) << 1)) |
- CCTRL_ECN_V(f->fs.action == FILTER_SWITCH));
+ ((f->fs.dirsteerhash) << 1)));
}
static void mk_act_open_req(struct filter_entry *f, struct sk_buff *skb,
@@ -1363,11 +1361,8 @@ static void mk_act_open_req(struct filter_entry *f, struct sk_buff *skb,
TX_QUEUE_V(f->fs.nat_mode) |
T5_OPT_2_VALID_F |
RX_CHANNEL_V(cxgb4_port_e2cchan(f->dev)) |
- CONG_CNTRL_V((f->fs.action == FILTER_DROP) |
- (f->fs.dirsteer << 1)) |
PACE_V((f->fs.maskhash) |
- ((f->fs.dirsteerhash) << 1)) |
- CCTRL_ECN_V(f->fs.action == FILTER_SWITCH));
+ ((f->fs.dirsteerhash) << 1)));
}
static int cxgb4_set_hash_filter(struct net_device *dev,
@@ -2039,6 +2034,20 @@ void hash_filter_rpl(struct adapter *adap, const struct cpl_act_open_rpl *rpl)
}
return;
}
+ switch (f->fs.action) {
+ case FILTER_PASS:
+ if (f->fs.dirsteer)
+ set_tcb_tflag(adap, f, tid,
+ TF_DIRECT_STEER_S, 1, 1);
+ break;
+ case FILTER_DROP:
+ set_tcb_tflag(adap, f, tid, TF_DROP_S, 1, 1);
+ break;
+ case FILTER_SWITCH:
+ set_tcb_tflag(adap, f, tid, TF_LPBK_S, 1, 1);
+ break;
+ }
+
break;
default:
@@ -2106,22 +2115,11 @@ void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
if (ctx)
ctx->result = 0;
} else if (ret == FW_FILTER_WR_FLT_ADDED) {
- int err = 0;
-
- if (f->fs.newsmac)
- err = configure_filter_smac(adap, f);
-
- if (!err) {
- f->pending = 0; /* async setup completed */
- f->valid = 1;
- if (ctx) {
- ctx->result = 0;
- ctx->tid = idx;
- }
- } else {
- clear_filter(adap, f);
- if (ctx)
- ctx->result = err;
+ f->pending = 0; /* async setup completed */
+ f->valid = 1;
+ if (ctx) {
+ ctx->result = 0;
+ ctx->tid = idx;
}
} else {
/* Something went wrong. Issue a warning about the
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index a952fe198eb9..7fd264a6d085 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -1176,6 +1176,7 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
txq = netdev_pick_tx(dev, skb, sb_dev);
if (xfrm_offload(skb) || is_ptp_enabled(skb, dev) ||
skb->encapsulation ||
+ cxgb4_is_ktls_skb(skb) ||
(proto != IPPROTO_TCP && proto != IPPROTO_UDP))
txq = txq % pi->nqsets;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index b169776ab484..1b49f2fa9b18 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -388,6 +388,7 @@ struct ch_ktls_stats_debug {
atomic64_t ktls_tx_retransmit_pkts;
atomic64_t ktls_tx_complete_pkts;
atomic64_t ktls_tx_trimmed_pkts;
+ atomic64_t ktls_tx_fallback;
};
#endif
@@ -493,6 +494,11 @@ struct cxgb4_uld_info {
#endif
};
+static inline bool cxgb4_is_ktls_skb(struct sk_buff *skb)
+{
+ return skb->sk && tls_is_sk_tx_device_offloaded(skb->sk);
+}
+
void cxgb4_uld_enable(struct adapter *adap);
void cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
int cxgb4_unregister_uld(enum cxgb4_uld type);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index a9e9c7ae565d..196652a114c5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -890,6 +890,114 @@ void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
}
EXPORT_SYMBOL(cxgb4_write_sgl);
+/* cxgb4_write_partial_sgl - populate SGL for partial packet
+ * @skb: the packet
+ * @q: the Tx queue we are writing into
+ * @sgl: starting location for writing the SGL
+ * @end: points right after the end of the SGL
+ * @addr: the list of bus addresses for the SGL elements
+ * @start: start offset in the SKB where partial data starts
+ * @len: length of data from @start to send out
+ *
+ * This API will handle sending out partial data of a skb if required.
+ * Unlike cxgb4_write_sgl, @start can be any offset into the skb data,
+ * and @len will decide how much data after @start offset to send out.
+ */
+void cxgb4_write_partial_sgl(const struct sk_buff *skb, struct sge_txq *q,
+ struct ulptx_sgl *sgl, u64 *end,
+ const dma_addr_t *addr, u32 start, u32 len)
+{
+ struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1] = {0}, *to;
+ u32 frag_size, skb_linear_data_len = skb_headlen(skb);
+ struct skb_shared_info *si = skb_shinfo(skb);
+ u8 i = 0, frag_idx = 0, nfrags = 0;
+ skb_frag_t *frag;
+
+ /* Fill the first SGL either from linear data or from partial
+ * frag based on @start.
+ */
+ if (unlikely(start < skb_linear_data_len)) {
+ frag_size = min(len, skb_linear_data_len - start);
+ sgl->len0 = htonl(frag_size);
+ sgl->addr0 = cpu_to_be64(addr[0] + start);
+ len -= frag_size;
+ nfrags++;
+ } else {
+ start -= skb_linear_data_len;
+ frag = &si->frags[frag_idx];
+ frag_size = skb_frag_size(frag);
+ /* find the first frag */
+ while (start >= frag_size) {
+ start -= frag_size;
+ frag_idx++;
+ frag = &si->frags[frag_idx];
+ frag_size = skb_frag_size(frag);
+ }
+
+ frag_size = min(len, skb_frag_size(frag) - start);
+ sgl->len0 = cpu_to_be32(frag_size);
+ sgl->addr0 = cpu_to_be64(addr[frag_idx + 1] + start);
+ len -= frag_size;
+ nfrags++;
+ frag_idx++;
+ }
+
+ /* If the entire partial data fits in one SGL, then send it out
+ * now.
+ */
+ if (!len)
+ goto done;
+
+ /* Most of the complexity below deals with the possibility we hit the
+ * end of the queue in the middle of writing the SGL. For this case
+ * only we create the SGL in a temporary buffer and then copy it.
+ */
+ to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
+
+ /* If the skb couldn't fit completely in the first SGL, fill the
+ * rest of the frags in subsequent SGLs. Note that each SGL
+ * pair can store 2 frags.
+ */
+ while (len) {
+ frag_size = min(len, skb_frag_size(&si->frags[frag_idx]));
+ to->len[i & 1] = cpu_to_be32(frag_size);
+ to->addr[i & 1] = cpu_to_be64(addr[frag_idx + 1]);
+ if (i && (i & 1))
+ to++;
+ nfrags++;
+ frag_idx++;
+ i++;
+ len -= frag_size;
+ }
+
+ /* If we ended on an odd boundary, then set the second SGL's
+ * length in the pair to 0.
+ */
+ if (i & 1)
+ to->len[1] = cpu_to_be32(0);
+
+ /* Copy from temporary buffer to Tx ring, in case we hit the
+ * end of the queue in the middle of writing the SGL.
+ */
+ if (unlikely((u8 *)end > (u8 *)q->stat)) {
+ u32 part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;
+
+ if (likely(part0))
+ memcpy(sgl->sge, buf, part0);
+ part1 = (u8 *)end - (u8 *)q->stat;
+ memcpy(q->desc, (u8 *)buf + part0, part1);
+ end = (void *)q->desc + part1;
+ }
+
+ /* 0-pad to multiple of 16 */
+ if ((uintptr_t)end & 8)
+ *end = 0;
+done:
+ sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
+ ULPTX_NSGE_V(nfrags));
+}
+EXPORT_SYMBOL(cxgb4_write_partial_sgl);
+
/* This function copies 64 byte coalesced work request to
* memory mapped BAR2 space. For coalesced WR SGE fetches
* data from the FIFO instead of from Host.
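
A hedged usage sketch for the new export, mirroring how the ktls hunks further below call it to send only the TLS payload of an already-mapped skb (fragment; all variables are as prepared by the caller):

/* Send bytes [hdr_len, hdr_len + payload) of an skb that
 * cxgb4_map_skb() already mapped into sgl_sdesc->addr.  Names follow
 * the ktls call sites below.
 */
u32 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
u32 payload = skb->len - hdr_len;

cxgb4_write_partial_sgl(skb, &q->q, pos, end, sgl_sdesc->addr,
			hdr_len, payload);
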
@@ -1422,7 +1530,8 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
#endif /* CHELSIO_IPSEC_INLINE */
#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
- if (skb->decrypted)
+ if (cxgb4_is_ktls_skb(skb) &&
+ (skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb))))
return adap->uld[CXGB4_ULD_KTLS].tx_handler(skb, dev);
#endif /* CHELSIO_TLS_DEVICE */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h b/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
index 50232e063f49..92473dda55d9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
@@ -50,6 +50,10 @@
#define TCB_T_FLAGS_M 0xffffffffffffffffULL
#define TCB_T_FLAGS_V(x) ((__u64)(x) << TCB_T_FLAGS_S)
+#define TF_DROP_S 22
+#define TF_DIRECT_STEER_S 23
+#define TF_LPBK_S 59
+
#define TF_CCTRL_ECE_S 60
#define TF_CCTRL_CWR_S 61
#define TF_CCTRL_RFR_S 62
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
index 5195f692f14d..c24485c0d512 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
@@ -14,6 +14,50 @@
static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);
+/* chcr_get_nfrags_to_send: get the remaining nfrags after start offset
+ * @skb: skb
+ * @start: start offset.
+ * @len: how much data to send after @start
+ */
+static int chcr_get_nfrags_to_send(struct sk_buff *skb, u32 start, u32 len)
+{
+ struct skb_shared_info *si = skb_shinfo(skb);
+ u32 frag_size, skb_linear_data_len = skb_headlen(skb);
+ u8 nfrags = 0, frag_idx = 0;
+ skb_frag_t *frag;
+
+ /* if it's a linear skb then return 1 */
+ if (!skb_is_nonlinear(skb))
+ return 1;
+
+ if (unlikely(start < skb_linear_data_len)) {
+ frag_size = min(len, skb_linear_data_len - start);
+ start = 0;
+ } else {
+ start -= skb_linear_data_len;
+
+ frag = &si->frags[frag_idx];
+ frag_size = skb_frag_size(frag);
+ while (start >= frag_size) {
+ start -= frag_size;
+ frag_idx++;
+ frag = &si->frags[frag_idx];
+ frag_size = skb_frag_size(frag);
+ }
+ frag_size = min(len, skb_frag_size(frag) - start);
+ }
+ len -= frag_size;
+ nfrags++;
+
+ while (len) {
+ frag_size = min(len, skb_frag_size(&si->frags[frag_idx]));
+ len -= frag_size;
+ nfrags++;
+ frag_idx++;
+ }
+ return nfrags;
+}
+
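
The frag count exists to size the work request; a hedged sketch of the pairing used by the WR builders below (fragment, names as in the call sites):

/* The count feeds the flit/descriptor budget before any credits are
 * consumed.
 */
nfrags = chcr_get_nfrags_to_send(skb, skb_offset, data_len);
flits = chcr_ktls_get_tx_flits(nfrags, tx_info->key_ctx_len);
ndesc = chcr_flits_to_desc(flits);	/* checked against txq credits */
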
static int chcr_init_tcb_fields(struct chcr_ktls_info *tx_info);
/*
* chcr_ktls_save_keys: calculate and save crypto keys.
@@ -689,7 +733,8 @@ static int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input)
}
static void *__chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
- u32 tid, void *pos, u16 word, u64 mask,
+ u32 tid, void *pos, u16 word,
+ struct sge_eth_txq *q, u64 mask,
u64 val, u32 reply)
{
struct cpl_set_tcb_field_core *cpl;
@@ -698,7 +743,10 @@ static void *__chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
/* ULP_TXPKT */
txpkt = pos;
- txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0));
+ txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) |
+ ULP_TXPKT_CHANNELID_V(tx_info->port_id) |
+ ULP_TXPKT_FID_V(q->q.cntxt_id) |
+ ULP_TXPKT_RO_F);
txpkt->len = htonl(DIV_ROUND_UP(CHCR_SET_TCB_FIELD_LEN, 16));
/* ULPTX_IDATA sub-command */
@@ -753,7 +801,7 @@ static void *chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
} else {
u8 buf[48] = {0};
- __chcr_write_cpl_set_tcb_ulp(tx_info, tid, buf, word,
+ __chcr_write_cpl_set_tcb_ulp(tx_info, tid, buf, word, q,
mask, val, reply);
return chcr_copy_to_txd(buf, &q->q, pos,
@@ -761,7 +809,7 @@ static void *chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
}
}
- pos = __chcr_write_cpl_set_tcb_ulp(tx_info, tid, pos, word,
+ pos = __chcr_write_cpl_set_tcb_ulp(tx_info, tid, pos, word, q,
mask, val, reply);
/* check again if we are at the end of the queue */
@@ -783,11 +831,11 @@ static void *chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
*/
static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info,
struct sge_eth_txq *q, u64 tcp_seq,
- u64 tcp_ack, u64 tcp_win)
+ u64 tcp_ack, u64 tcp_win, bool offset)
{
bool first_wr = ((tx_info->prev_ack == 0) && (tx_info->prev_win == 0));
struct ch_ktls_port_stats_debug *port_stats;
- u32 len, cpl = 0, ndesc, wr_len;
+ u32 len, cpl = 0, ndesc, wr_len, wr_mid = 0;
struct fw_ulptx_wr *wr;
int credits;
void *pos;
@@ -803,6 +851,11 @@ static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info,
return NETDEV_TX_BUSY;
}
+ if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+ chcr_eth_txq_stop(q);
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ }
+
pos = &q->q.desc[q->q.pidx];
/* make space for WR, we'll fill it later when we know all the cpls
* being sent out and have complete length.
@@ -818,7 +871,7 @@ static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info,
cpl++;
}
/* reset snd una if it's a re-transmit pkt */
- if (tcp_seq != tx_info->prev_seq) {
+ if (tcp_seq != tx_info->prev_seq || offset) {
/* reset snd_una */
port_stats =
&tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id];
@@ -827,7 +880,8 @@ static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info,
TCB_SND_UNA_RAW_V
(TCB_SND_UNA_RAW_M),
TCB_SND_UNA_RAW_V(0), 0);
- atomic64_inc(&port_stats->ktls_tx_ooo);
+ if (tcp_seq != tx_info->prev_seq)
+ atomic64_inc(&port_stats->ktls_tx_ooo);
cpl++;
}
/* update ack */
@@ -856,7 +910,8 @@ static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info,
wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
wr->cookie = 0;
/* fill len in wr field */
- wr->flowid_len16 = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(len, 16)));
+ wr->flowid_len16 = htonl(wr_mid |
+ FW_WR_LEN16_V(DIV_ROUND_UP(len, 16)));
ndesc = DIV_ROUND_UP(len, 64);
chcr_txq_advance(&q->q, ndesc);
@@ -866,34 +921,14 @@ static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info,
}
/*
- * chcr_ktls_skb_copy
- * @nskb - new skb where the frags to be added.
- * @skb - old skb from which frags will be copied.
- */
-static void chcr_ktls_skb_copy(struct sk_buff *skb, struct sk_buff *nskb)
-{
- int i;
-
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- skb_shinfo(nskb)->frags[i] = skb_shinfo(skb)->frags[i];
- __skb_frag_ref(&skb_shinfo(nskb)->frags[i]);
- }
-
- skb_shinfo(nskb)->nr_frags = skb_shinfo(skb)->nr_frags;
- nskb->len += skb->data_len;
- nskb->data_len = skb->data_len;
- nskb->truesize += skb->data_len;
-}
-
-/*
* chcr_ktls_get_tx_flits
* returns number of flits to be sent out, it includes key context length, WR
* size and skb fragments.
*/
static unsigned int
-chcr_ktls_get_tx_flits(const struct sk_buff *skb, unsigned int key_ctx_len)
+chcr_ktls_get_tx_flits(u32 nr_frags, unsigned int key_ctx_len)
{
- return chcr_sgl_len(skb_shinfo(skb)->nr_frags) +
+ return chcr_sgl_len(nr_frags) +
DIV_ROUND_UP(key_ctx_len + CHCR_KTLS_WR_SIZE, 8);
}
@@ -957,8 +992,10 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
struct tcphdr *tcp;
int len16, pktlen;
struct iphdr *ip;
+ u32 wr_mid = 0;
int credits;
u8 buf[150];
+ u64 cntrl1;
void *pos;
iplen = skb_network_header_len(skb);
@@ -967,7 +1004,7 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
/* packet length = eth hdr len + ip hdr len + tcp hdr len
* (including options).
*/
- pktlen = skb->len - skb->data_len;
+ pktlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
ctrl = sizeof(*cpl) + pktlen;
len16 = DIV_ROUND_UP(sizeof(*wr) + ctrl, 16);
@@ -980,6 +1017,11 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
+ if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+ chcr_eth_txq_stop(q);
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ }
+
pos = &q->q.desc[q->q.pidx];
wr = pos;
@@ -987,7 +1029,7 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
FW_WR_IMMDLEN_V(ctrl));
- wr->equiq_to_len16 = htonl(FW_WR_LEN16_V(len16));
+ wr->equiq_to_len16 = htonl(wr_mid | FW_WR_LEN16_V(len16));
wr->r3 = 0;
cpl = (void *)(wr + 1);
@@ -997,22 +1039,28 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
TXPKT_PF_V(tx_info->adap->pf));
cpl->pack = 0;
cpl->len = htons(pktlen);
- /* checksum offload */
- cpl->ctrl1 = 0;
-
- pos = cpl + 1;
memcpy(buf, skb->data, pktlen);
if (tx_info->ip_family == AF_INET) {
/* we need to correct ip header len */
ip = (struct iphdr *)(buf + maclen);
ip->tot_len = htons(pktlen - maclen);
+ cntrl1 = TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP);
#if IS_ENABLED(CONFIG_IPV6)
} else {
ip6 = (struct ipv6hdr *)(buf + maclen);
ip6->payload_len = htons(pktlen - maclen - iplen);
+ cntrl1 = TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP6);
#endif
}
+
+ cntrl1 |= T6_TXPKT_ETHHDR_LEN_V(maclen - ETH_HLEN) |
+ TXPKT_IPHDR_LEN_V(iplen);
+ /* checksum offload */
+ cpl->ctrl1 = cpu_to_be64(cntrl1);
+
+ pos = cpl + 1;
+
/* now take care of the tcp header, if fin is not set then clear push
* bit as well, and if fin is set, it will be sent at the last so we
* need to update the tcp sequence number as per the last packet.
@@ -1031,71 +1079,6 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
return 0;
}
-/* chcr_ktls_skb_shift - Shifts request length paged data from skb to another.
- * @tgt- buffer into which tail data gets added
- * @skb- buffer from which the paged data comes from
- * @shiftlen- shift up to this many bytes
- */
-static int chcr_ktls_skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
- int shiftlen)
-{
- skb_frag_t *fragfrom, *fragto;
- int from, to, todo;
-
- WARN_ON(shiftlen > skb->data_len);
-
- todo = shiftlen;
- from = 0;
- to = 0;
- fragfrom = &skb_shinfo(skb)->frags[from];
-
- while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
- fragfrom = &skb_shinfo(skb)->frags[from];
- fragto = &skb_shinfo(tgt)->frags[to];
-
- if (todo >= skb_frag_size(fragfrom)) {
- *fragto = *fragfrom;
- todo -= skb_frag_size(fragfrom);
- from++;
- to++;
-
- } else {
- __skb_frag_ref(fragfrom);
- skb_frag_page_copy(fragto, fragfrom);
- skb_frag_off_copy(fragto, fragfrom);
- skb_frag_size_set(fragto, todo);
-
- skb_frag_off_add(fragfrom, todo);
- skb_frag_size_sub(fragfrom, todo);
- todo = 0;
-
- to++;
- break;
- }
- }
-
- /* Ready to "commit" this state change to tgt */
- skb_shinfo(tgt)->nr_frags = to;
-
- /* Reposition in the original skb */
- to = 0;
- while (from < skb_shinfo(skb)->nr_frags)
- skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
-
- skb_shinfo(skb)->nr_frags = to;
-
- WARN_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
-
- skb->len -= shiftlen;
- skb->data_len -= shiftlen;
- skb->truesize -= shiftlen;
- tgt->len += shiftlen;
- tgt->data_len += shiftlen;
- tgt->truesize += shiftlen;
-
- return shiftlen;
-}
-
/*
* chcr_ktls_xmit_wr_complete: This sends out the complete record. If an skb
* received has partial end part of the record, send out the complete record, so
@@ -1111,6 +1094,8 @@ static int chcr_ktls_skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
static int chcr_ktls_xmit_wr_complete(struct sk_buff *skb,
struct chcr_ktls_info *tx_info,
struct sge_eth_txq *q, u32 tcp_seq,
+ bool is_last_wr, u32 data_len,
+ u32 skb_offset, u32 nfrags,
bool tcp_push, u32 mss)
{
u32 len16, wr_mid = 0, flits = 0, ndesc, cipher_start;
@@ -1126,7 +1111,7 @@ static int chcr_ktls_xmit_wr_complete(struct sk_buff *skb,
u64 *end;
/* get the number of flits required */
- flits = chcr_ktls_get_tx_flits(skb, tx_info->key_ctx_len);
+ flits = chcr_ktls_get_tx_flits(nfrags, tx_info->key_ctx_len);
/* number of descriptors */
ndesc = chcr_flits_to_desc(flits);
/* check if enough credits available */
@@ -1155,6 +1140,9 @@ static int chcr_ktls_xmit_wr_complete(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
+ if (!is_last_wr)
+ skb_get(skb);
+
pos = &q->q.desc[q->q.pidx];
end = (u64 *)pos + flits;
/* FW_ULPTX_WR */
@@ -1187,7 +1175,7 @@ static int chcr_ktls_xmit_wr_complete(struct sk_buff *skb,
CPL_TX_SEC_PDU_CPLLEN_V(CHCR_CPL_TX_SEC_PDU_LEN_64BIT) |
CPL_TX_SEC_PDU_PLACEHOLDER_V(1) |
CPL_TX_SEC_PDU_IVINSRTOFST_V(TLS_HEADER_SIZE + 1));
- cpl->pldlen = htonl(skb->data_len);
+ cpl->pldlen = htonl(data_len);
/* encryption should start after tls header size + iv size */
cipher_start = TLS_HEADER_SIZE + tx_info->iv_size + 1;
@@ -1229,7 +1217,7 @@ static int chcr_ktls_xmit_wr_complete(struct sk_buff *skb,
/* CPL_TX_DATA */
tx_data = (void *)pos;
OPCODE_TID(tx_data) = htonl(MK_OPCODE_TID(CPL_TX_DATA, tx_info->tid));
- tx_data->len = htonl(TX_DATA_MSS_V(mss) | TX_LENGTH_V(skb->data_len));
+ tx_data->len = htonl(TX_DATA_MSS_V(mss) | TX_LENGTH_V(data_len));
tx_data->rsvd = htonl(tcp_seq);
@@ -1249,8 +1237,8 @@ static int chcr_ktls_xmit_wr_complete(struct sk_buff *skb,
}
/* send the complete packet except the header */
- cxgb4_write_sgl(skb, &q->q, pos, end, skb->len - skb->data_len,
- sgl_sdesc->addr);
+ cxgb4_write_partial_sgl(skb, &q->q, pos, end, sgl_sdesc->addr,
+ skb_offset, data_len);
sgl_sdesc->skb = skb;
chcr_txq_advance(&q->q, ndesc);
@@ -1282,10 +1270,11 @@ static int chcr_ktls_xmit_wr_short(struct sk_buff *skb,
struct sge_eth_txq *q,
u32 tcp_seq, bool tcp_push, u32 mss,
u32 tls_rec_offset, u8 *prior_data,
- u32 prior_data_len)
+ u32 prior_data_len, u32 data_len,
+ u32 skb_offset)
{
+ u32 len16, wr_mid = 0, cipher_start, nfrags;
struct adapter *adap = tx_info->adap;
- u32 len16, wr_mid = 0, cipher_start;
unsigned int flits = 0, ndesc;
int credits, left, last_desc;
struct tx_sw_desc *sgl_sdesc;
@@ -1298,10 +1287,11 @@ static int chcr_ktls_xmit_wr_short(struct sk_buff *skb,
void *pos;
u64 *end;
+ nfrags = chcr_get_nfrags_to_send(skb, skb_offset, data_len);
/* get the number of flits required, it's a partial record so 2 flits
* (AES_BLOCK_SIZE) will be added.
*/
- flits = chcr_ktls_get_tx_flits(skb, tx_info->key_ctx_len) + 2;
+ flits = chcr_ktls_get_tx_flits(nfrags, tx_info->key_ctx_len) + 2;
/* get the correct 8 byte IV of this record */
iv_record = cpu_to_be64(tx_info->iv + tx_info->record_no);
/* If it's a middle record and not 16 byte aligned to run AES CTR, need
@@ -1373,7 +1363,7 @@ static int chcr_ktls_xmit_wr_short(struct sk_buff *skb,
htonl(CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
CPL_TX_SEC_PDU_CPLLEN_V(CHCR_CPL_TX_SEC_PDU_LEN_64BIT) |
CPL_TX_SEC_PDU_IVINSRTOFST_V(1));
- cpl->pldlen = htonl(skb->data_len + AES_BLOCK_LEN + prior_data_len);
+ cpl->pldlen = htonl(data_len + AES_BLOCK_LEN + prior_data_len);
cpl->aadstart_cipherstop_hi =
htonl(CPL_TX_SEC_PDU_CIPHERSTART_V(cipher_start));
cpl->cipherstop_lo_authinsert = 0;
@@ -1404,7 +1394,7 @@ static int chcr_ktls_xmit_wr_short(struct sk_buff *skb,
tx_data = (void *)pos;
OPCODE_TID(tx_data) = htonl(MK_OPCODE_TID(CPL_TX_DATA, tx_info->tid));
tx_data->len = htonl(TX_DATA_MSS_V(mss) |
- TX_LENGTH_V(skb->data_len + prior_data_len));
+ TX_LENGTH_V(data_len + prior_data_len));
tx_data->rsvd = htonl(tcp_seq);
tx_data->flags = htonl(TX_BYPASS_F);
if (tcp_push)
@@ -1437,8 +1427,8 @@ static int chcr_ktls_xmit_wr_short(struct sk_buff *skb,
if (prior_data_len)
pos = chcr_copy_to_txd(prior_data, &q->q, pos, 16);
/* send the complete packet except the header */
- cxgb4_write_sgl(skb, &q->q, pos, end, skb->len - skb->data_len,
- sgl_sdesc->addr);
+ cxgb4_write_partial_sgl(skb, &q->q, pos, end, sgl_sdesc->addr,
+ skb_offset, data_len);
sgl_sdesc->skb = skb;
chcr_txq_advance(&q->q, ndesc);
@@ -1466,6 +1456,7 @@ static int chcr_ktls_tx_plaintxt(struct chcr_ktls_info *tx_info,
struct sk_buff *skb, u32 tcp_seq, u32 mss,
bool tcp_push, struct sge_eth_txq *q,
u32 port_id, u8 *prior_data,
+ u32 data_len, u32 skb_offset,
u32 prior_data_len)
{
int credits, left, len16, last_desc;
@@ -1475,14 +1466,16 @@ static int chcr_ktls_tx_plaintxt(struct chcr_ktls_info *tx_info,
struct ulptx_idata *idata;
struct ulp_txpkt *ulptx;
struct fw_ulptx_wr *wr;
- u32 wr_mid = 0;
+ u32 wr_mid = 0, nfrags;
void *pos;
u64 *end;
flits = DIV_ROUND_UP(CHCR_PLAIN_TX_DATA_LEN, 8);
- flits += chcr_sgl_len(skb_shinfo(skb)->nr_frags);
+ nfrags = chcr_get_nfrags_to_send(skb, skb_offset, data_len);
+ flits += chcr_sgl_len(nfrags);
if (prior_data_len)
flits += 2;
+
/* WR will need len16 */
len16 = DIV_ROUND_UP(flits, 2);
/* check how many descriptors needed */
@@ -1535,7 +1528,7 @@ static int chcr_ktls_tx_plaintxt(struct chcr_ktls_info *tx_info,
tx_data = (struct cpl_tx_data *)(idata + 1);
OPCODE_TID(tx_data) = htonl(MK_OPCODE_TID(CPL_TX_DATA, tx_info->tid));
tx_data->len = htonl(TX_DATA_MSS_V(mss) |
- TX_LENGTH_V(skb->data_len + prior_data_len));
+ TX_LENGTH_V(data_len + prior_data_len));
/* set tcp seq number */
tx_data->rsvd = htonl(tcp_seq);
tx_data->flags = htonl(TX_BYPASS_F);
@@ -1559,8 +1552,8 @@ static int chcr_ktls_tx_plaintxt(struct chcr_ktls_info *tx_info,
end = pos + left;
}
/* send the complete packet including the header */
- cxgb4_write_sgl(skb, &q->q, pos, end, skb->len - skb->data_len,
- sgl_sdesc->addr);
+ cxgb4_write_partial_sgl(skb, &q->q, pos, end, sgl_sdesc->addr,
+ skb_offset, data_len);
sgl_sdesc->skb = skb;
chcr_txq_advance(&q->q, ndesc);
@@ -1568,12 +1561,96 @@ static int chcr_ktls_tx_plaintxt(struct chcr_ktls_info *tx_info,
return 0;
}
+static int chcr_ktls_tunnel_pkt(struct chcr_ktls_info *tx_info,
+ struct sk_buff *skb,
+ struct sge_eth_txq *q)
+{
+ u32 ctrl, iplen, maclen, wr_mid = 0, len16;
+ struct tx_sw_desc *sgl_sdesc;
+ struct fw_eth_tx_pkt_wr *wr;
+ struct cpl_tx_pkt_core *cpl;
+ unsigned int flits, ndesc;
+ int credits, last_desc;
+ u64 cntrl1, *end;
+ void *pos;
+
+ ctrl = sizeof(*cpl);
+ flits = DIV_ROUND_UP(sizeof(*wr) + ctrl, 8);
+
+ flits += chcr_sgl_len(skb_shinfo(skb)->nr_frags + 1);
+ len16 = DIV_ROUND_UP(flits, 2);
+ /* check how many descriptors needed */
+ ndesc = DIV_ROUND_UP(flits, 8);
+
+ credits = chcr_txq_avail(&q->q) - ndesc;
+ if (unlikely(credits < 0)) {
+ chcr_eth_txq_stop(q);
+ return -ENOMEM;
+ }
+
+ if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+ chcr_eth_txq_stop(q);
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ }
+
+ last_desc = q->q.pidx + ndesc - 1;
+ if (last_desc >= q->q.size)
+ last_desc -= q->q.size;
+ sgl_sdesc = &q->q.sdesc[last_desc];
+
+ if (unlikely(cxgb4_map_skb(tx_info->adap->pdev_dev, skb,
+ sgl_sdesc->addr) < 0)) {
+ memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
+ q->mapping_err++;
+ return -ENOMEM;
+ }
+
+ iplen = skb_network_header_len(skb);
+ maclen = skb_mac_header_len(skb);
+
+ pos = &q->q.desc[q->q.pidx];
+ end = (u64 *)pos + flits;
+ wr = pos;
+
+ /* Firmware work request header */
+ wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
+ FW_WR_IMMDLEN_V(ctrl));
+
+ wr->equiq_to_len16 = htonl(wr_mid | FW_WR_LEN16_V(len16));
+ wr->r3 = 0;
+
+ cpl = (void *)(wr + 1);
+
+ /* CPL header */
+ cpl->ctrl0 = htonl(TXPKT_OPCODE_V(CPL_TX_PKT) |
+ TXPKT_INTF_V(tx_info->tx_chan) |
+ TXPKT_PF_V(tx_info->adap->pf));
+ cpl->pack = 0;
+ cntrl1 = TXPKT_CSUM_TYPE_V(tx_info->ip_family == AF_INET ?
+ TX_CSUM_TCPIP : TX_CSUM_TCPIP6);
+ cntrl1 |= T6_TXPKT_ETHHDR_LEN_V(maclen - ETH_HLEN) |
+ TXPKT_IPHDR_LEN_V(iplen);
+ /* checksum offload */
+ cpl->ctrl1 = cpu_to_be64(cntrl1);
+ cpl->len = htons(skb->len);
+
+ pos = cpl + 1;
+
+ cxgb4_write_sgl(skb, &q->q, pos, end, 0, sgl_sdesc->addr);
+ sgl_sdesc->skb = skb;
+ chcr_txq_advance(&q->q, ndesc);
+ cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
+ return 0;
+}
+
/*
* chcr_ktls_copy_record_in_skb
* @nskb - new skb where the frags to be added.
+ * @skb - old skb from which socket and destructor details are copied.
* @record - specific record which has complete 16k record in frags.
*/
static void chcr_ktls_copy_record_in_skb(struct sk_buff *nskb,
+ struct sk_buff *skb,
struct tls_record_info *record)
{
int i = 0;
@@ -1588,6 +1665,9 @@ static void chcr_ktls_copy_record_in_skb(struct sk_buff *nskb,
nskb->data_len = record->len;
nskb->len += record->len;
nskb->truesize += record->len;
+ nskb->sk = skb->sk;
+ nskb->destructor = skb->destructor;
+ refcount_add(nskb->truesize, &nskb->sk->sk_wmem_alloc);
}
/*
@@ -1659,7 +1739,7 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
struct sk_buff *skb,
struct tls_record_info *record,
u32 tcp_seq, int mss, bool tcp_push_no_fin,
- struct sge_eth_txq *q,
+ struct sge_eth_txq *q, u32 skb_offset,
u32 tls_end_offset, bool last_wr)
{
struct sk_buff *nskb = NULL;
@@ -1668,30 +1748,37 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
nskb = skb;
atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_complete_pkts);
} else {
- dev_kfree_skb_any(skb);
-
- nskb = alloc_skb(0, GFP_KERNEL);
- if (!nskb)
+ nskb = alloc_skb(0, GFP_ATOMIC);
+ if (!nskb) {
+ dev_kfree_skb_any(skb);
return NETDEV_TX_BUSY;
+ }
+
/* copy complete record in skb */
- chcr_ktls_copy_record_in_skb(nskb, record);
+ chcr_ktls_copy_record_in_skb(nskb, skb, record);
/* packet is being sent from the beginning, update the tcp_seq
* accordingly.
*/
tcp_seq = tls_record_start_seq(record);
- /* reset snd una, so the middle record won't send the already
- * sent part.
- */
- if (chcr_ktls_update_snd_una(tx_info, q))
- goto out;
+ /* reset skb offset */
+ skb_offset = 0;
+
+ if (last_wr)
+ dev_kfree_skb_any(skb);
+
+ last_wr = true;
+
atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_end_pkts);
}
if (chcr_ktls_xmit_wr_complete(nskb, tx_info, q, tcp_seq,
+ last_wr, record->len, skb_offset,
+ record->num_frags,
(last_wr && tcp_push_no_fin),
mss)) {
goto out;
}
+ tx_info->prev_seq = record->end_seq;
return 0;
out:
dev_kfree_skb_any(nskb);
@@ -1723,41 +1810,47 @@ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
struct sk_buff *skb,
struct tls_record_info *record,
u32 tcp_seq, int mss, bool tcp_push_no_fin,
+ u32 data_len, u32 skb_offset,
struct sge_eth_txq *q, u32 tls_end_offset)
{
u32 tls_rec_offset = tcp_seq - tls_record_start_seq(record);
u8 prior_data[16] = {0};
u32 prior_data_len = 0;
- u32 data_len;
/* check if the skb is ending in middle of tag/HASH, its a big
* trouble, send the packet before the HASH.
*/
- int remaining_record = tls_end_offset - skb->data_len;
+ int remaining_record = tls_end_offset - data_len;
if (remaining_record > 0 &&
remaining_record < TLS_CIPHER_AES_GCM_128_TAG_SIZE) {
- int trimmed_len = skb->data_len -
- (TLS_CIPHER_AES_GCM_128_TAG_SIZE - remaining_record);
- struct sk_buff *tmp_skb = NULL;
- /* don't process the pkt if it is only a partial tag */
- if (skb->data_len < TLS_CIPHER_AES_GCM_128_TAG_SIZE)
- goto out;
+ int trimmed_len = 0;
- WARN_ON(trimmed_len > skb->data_len);
+ if (tls_end_offset > TLS_CIPHER_AES_GCM_128_TAG_SIZE)
+ trimmed_len = data_len -
+ (TLS_CIPHER_AES_GCM_128_TAG_SIZE -
+ remaining_record);
+ if (!trimmed_len)
+ return FALLBACK;
- /* shift to those many bytes */
- tmp_skb = alloc_skb(0, GFP_KERNEL);
- if (unlikely(!tmp_skb))
- goto out;
+ WARN_ON(trimmed_len > data_len);
- chcr_ktls_skb_shift(tmp_skb, skb, trimmed_len);
- /* free the last trimmed portion */
- dev_kfree_skb_any(skb);
- skb = tmp_skb;
+ data_len = trimmed_len;
atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_trimmed_pkts);
}
- data_len = skb->data_len;
+
+ /* check if it is only the header part. */
+ if (tls_rec_offset + data_len <= (TLS_HEADER_SIZE + tx_info->iv_size)) {
+ if (chcr_ktls_tx_plaintxt(tx_info, skb, tcp_seq, mss,
+ tcp_push_no_fin, q,
+ tx_info->port_id, prior_data,
+ data_len, skb_offset, prior_data_len))
+ goto out;
+
+ tx_info->prev_seq = tcp_seq + data_len;
+ return 0;
+ }
+
/* check if the middle record's start point is 16 byte aligned. CTR
* needs 16 byte aligned start point to start encryption.
*/
@@ -1818,9 +1911,6 @@ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
}
/* reset tcp_seq as per the prior_data_required len */
tcp_seq -= prior_data_len;
- /* include prio_data_len for further calculation.
- */
- data_len += prior_data_len;
}
/* reset snd una, so the middle record won't send the already
* sent part.
@@ -1829,37 +1919,54 @@ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
goto out;
atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_middle_pkts);
} else {
- /* Else means, its a partial first part of the record. Check if
- * its only the header, don't need to send for encryption then.
- */
- if (data_len <= TLS_HEADER_SIZE + tx_info->iv_size) {
- if (chcr_ktls_tx_plaintxt(tx_info, skb, tcp_seq, mss,
- tcp_push_no_fin, q,
- tx_info->port_id,
- prior_data,
- prior_data_len)) {
- goto out;
- }
- return 0;
- }
atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_start_pkts);
}
if (chcr_ktls_xmit_wr_short(skb, tx_info, q, tcp_seq, tcp_push_no_fin,
mss, tls_rec_offset, prior_data,
- prior_data_len)) {
+ prior_data_len, data_len, skb_offset)) {
goto out;
}
+ tx_info->prev_seq = tcp_seq + data_len + prior_data_len;
return 0;
out:
dev_kfree_skb_any(skb);
return NETDEV_TX_BUSY;
}
+static int chcr_ktls_sw_fallback(struct sk_buff *skb,
+ struct chcr_ktls_info *tx_info,
+ struct sge_eth_txq *q)
+{
+ u32 data_len, skb_offset;
+ struct sk_buff *nskb;
+ struct tcphdr *th;
+
+ nskb = tls_encrypt_skb(skb);
+
+ if (!nskb)
+ return 0;
+
+ th = tcp_hdr(nskb);
+ skb_offset = skb_transport_offset(nskb) + tcp_hdrlen(nskb);
+ data_len = nskb->len - skb_offset;
+ skb_tx_timestamp(nskb);
+
+ if (chcr_ktls_tunnel_pkt(tx_info, nskb, q))
+ goto out;
+
+ tx_info->prev_seq = ntohl(th->seq) + data_len;
+ atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_fallback);
+ return 0;
+out:
+ dev_kfree_skb_any(nskb);
+ return 0;
+}
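
The fallback leans on the core net/tls helper tls_encrypt_skb(), which performs the record encryption in software so the ciphertext can be sent as an ordinary packet. A hedged sketch of the generic shape (send_raw() is a hypothetical stand-in for a driver transmit helper):

#include <net/tls.h>

static int sw_fallback_xmit(struct sk_buff *skb)
{
	struct sk_buff *nskb;

	nskb = tls_encrypt_skb(skb);	/* software-encrypt the record */
	if (!nskb)
		return 0;		/* nothing usable to send */
	return send_raw(nskb);		/* hypothetical raw TX helper */
}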
/* nic tls TX handler */
static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
{
+ u32 tls_end_offset, tcp_seq, skb_data_len, skb_offset;
struct ch_ktls_port_stats_debug *port_stats;
struct chcr_ktls_ofld_ctx_tx *tx_ctx;
struct ch_ktls_stats_debug *stats;
@@ -1867,20 +1974,17 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
int data_len, qidx, ret = 0, mss;
struct tls_record_info *record;
struct chcr_ktls_info *tx_info;
- u32 tls_end_offset, tcp_seq;
struct tls_context *tls_ctx;
- struct sk_buff *local_skb;
struct sge_eth_txq *q;
struct adapter *adap;
unsigned long flags;
tcp_seq = ntohl(th->seq);
+ skb_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ skb_data_len = skb->len - skb_offset;
+ data_len = skb_data_len;
- mss = skb_is_gso(skb) ? skb_shinfo(skb)->gso_size : skb->data_len;
-
- /* check if we haven't set it for ktls offload */
- if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
- goto out;
+ mss = skb_is_gso(skb) ? skb_shinfo(skb)->gso_size : data_len;
tls_ctx = tls_get_ctx(skb->sk);
if (unlikely(tls_ctx->netdev != dev))
@@ -1892,14 +1996,6 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(!tx_info))
goto out;
- /* don't touch the original skb, make a new skb to extract each records
- * and send them separately.
- */
- local_skb = alloc_skb(0, GFP_KERNEL);
-
- if (unlikely(!local_skb))
- return NETDEV_TX_BUSY;
-
adap = tx_info->adap;
stats = &adap->ch_ktls_stats;
port_stats = &stats->ktls_port[tx_info->port_id];
@@ -1914,20 +2010,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
if (ret)
return NETDEV_TX_BUSY;
}
- /* update tcb */
- ret = chcr_ktls_xmit_tcb_cpls(tx_info, q, ntohl(th->seq),
- ntohl(th->ack_seq),
- ntohs(th->window));
- if (ret) {
- dev_kfree_skb_any(local_skb);
- return NETDEV_TX_BUSY;
- }
- /* copy skb contents into local skb */
- chcr_ktls_skb_copy(skb, local_skb);
-
- /* go through the skb and send only one record at a time. */
- data_len = skb->data_len;
	/* TCP segments can be received either complete or partial.
	 * chcr_end_part_handler will handle the cases where a complete
	 * record or the end part of a record is received. In case of a
	 * partial end part of a record,
@@ -1952,10 +2035,64 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
goto out;
}
+ tls_end_offset = record->end_seq - tcp_seq;
+
+ pr_debug("seq 0x%x, end_seq 0x%x prev_seq 0x%x, datalen 0x%x\n",
+ tcp_seq, record->end_seq, tx_info->prev_seq, data_len);
+ /* update tcb for the skb */
+ if (skb_data_len == data_len) {
+ u32 tx_max = tcp_seq;
+
+ if (!tls_record_is_start_marker(record) &&
+ tls_end_offset < TLS_CIPHER_AES_GCM_128_TAG_SIZE)
+ tx_max = record->end_seq -
+ TLS_CIPHER_AES_GCM_128_TAG_SIZE;
+
+ ret = chcr_ktls_xmit_tcb_cpls(tx_info, q, tx_max,
+ ntohl(th->ack_seq),
+ ntohs(th->window),
+ tls_end_offset !=
+ record->len);
+ if (ret) {
+ spin_unlock_irqrestore(&tx_ctx->base.lock,
+ flags);
+ goto out;
+ }
+
+ if (th->fin)
+ skb_get(skb);
+ }
+
if (unlikely(tls_record_is_start_marker(record))) {
- spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
atomic64_inc(&port_stats->ktls_tx_skip_no_sync_data);
- goto out;
+			/* If tls_end_offset < data_len, there is some data
+			 * after the start marker which needs encryption; send
+			 * the plaintext first and take an skb refcount. Else
+			 * send out the complete pkt as plaintext.
+			 */
+ if (tls_end_offset < data_len)
+ skb_get(skb);
+ else
+ tls_end_offset = data_len;
+
+ ret = chcr_ktls_tx_plaintxt(tx_info, skb, tcp_seq, mss,
+ (!th->fin && th->psh), q,
+ tx_info->port_id, NULL,
+ tls_end_offset, skb_offset,
+ 0);
+
+ spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
+ if (ret) {
+ /* free the refcount taken earlier */
+ if (tls_end_offset < data_len)
+ dev_kfree_skb_any(skb);
+ goto out;
+ }
+
+ data_len -= tls_end_offset;
+ tcp_seq = record->end_seq;
+ skb_offset += tls_end_offset;
+ continue;
}
/* increase page reference count of the record, so that there
@@ -1967,73 +2104,64 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
/* lock cleared */
spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
- tls_end_offset = record->end_seq - tcp_seq;
- pr_debug("seq 0x%x, end_seq 0x%x prev_seq 0x%x, datalen 0x%x\n",
- tcp_seq, record->end_seq, tx_info->prev_seq, data_len);
/* if a tls record is finishing in this SKB */
if (tls_end_offset <= data_len) {
- struct sk_buff *nskb = NULL;
-
- if (tls_end_offset < data_len) {
- nskb = alloc_skb(0, GFP_KERNEL);
- if (unlikely(!nskb)) {
- ret = -ENOMEM;
- goto clear_ref;
- }
-
- chcr_ktls_skb_shift(nskb, local_skb,
- tls_end_offset);
- } else {
- /* its the only record in this skb, directly
- * point it.
- */
- nskb = local_skb;
- }
- ret = chcr_end_part_handler(tx_info, nskb, record,
+ ret = chcr_end_part_handler(tx_info, skb, record,
tcp_seq, mss,
(!th->fin && th->psh), q,
+ skb_offset,
tls_end_offset,
- (nskb == local_skb));
-
- if (ret && nskb != local_skb)
- dev_kfree_skb_any(local_skb);
+ skb_offset +
+ tls_end_offset == skb->len);
data_len -= tls_end_offset;
/* tcp_seq increment is required to handle next record.
*/
tcp_seq += tls_end_offset;
+ skb_offset += tls_end_offset;
} else {
- ret = chcr_short_record_handler(tx_info, local_skb,
+ ret = chcr_short_record_handler(tx_info, skb,
record, tcp_seq, mss,
(!th->fin && th->psh),
+ data_len, skb_offset,
q, tls_end_offset);
data_len = 0;
}
-clear_ref:
+
		/* clear the frag ref count which was increased locally above */
for (i = 0; i < record->num_frags; i++) {
/* clear the frag ref count */
__skb_frag_unref(&record->frags[i]);
}
/* if any failure, come out from the loop. */
- if (ret)
- goto out;
+ if (ret) {
+ if (th->fin)
+ dev_kfree_skb_any(skb);
+
+ if (ret == FALLBACK)
+ return chcr_ktls_sw_fallback(skb, tx_info, q);
+
+ return NETDEV_TX_OK;
+ }
+
/* length should never be less than 0 */
WARN_ON(data_len < 0);
} while (data_len > 0);
- tx_info->prev_seq = ntohl(th->seq) + skb->data_len;
atomic64_inc(&port_stats->ktls_tx_encrypted_packets);
- atomic64_add(skb->data_len, &port_stats->ktls_tx_encrypted_bytes);
+ atomic64_add(skb_data_len, &port_stats->ktls_tx_encrypted_bytes);
/* tcp finish is set, send a separate tcp msg including all the options
* as well.
*/
- if (th->fin)
+ if (th->fin) {
chcr_ktls_write_tcp_options(tx_info, skb, q, tx_info->tx_chan);
+ dev_kfree_skb_any(skb);
+ }
+ return NETDEV_TX_OK;
out:
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h
index c1651b1431a0..18b3b1f02415 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h
@@ -26,6 +26,7 @@
#define CHCR_KTLS_WR_SIZE (CHCR_PLAIN_TX_DATA_LEN +\
sizeof(struct cpl_tx_sec_pdu))
+#define FALLBACK 35
enum ch_ktls_open_state {
CH_KTLS_OPEN_SUCCESS = 0,
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
index ec4f79049a06..96d561653496 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
@@ -212,7 +212,7 @@ static struct sk_buff *alloc_ctrl_skb(struct sk_buff *skb, int len)
{
if (likely(skb && !skb_shared(skb) && !skb_cloned(skb))) {
__skb_trim(skb, 0);
- refcount_add(2, &skb->users);
+ refcount_inc(&skb->users);
} else {
skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
	}
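
alloc_ctrl_skb() reuses a cached control skb by trimming it and taking one extra reference; refcount_inc() matches the single logical user, whereas refcount_add(2, ...) would require two balancing frees. Illustrative use of the one-reference helper:

#include <linux/skbuff.h>

/* skb_get() is the canonical single-reference bump; every get must be
 * balanced by exactly one kfree_skb()/consume_skb().
 */
static struct sk_buff *borrow_skb(struct sk_buff *skb)
{
	return skb_get(skb);	/* refcount_inc(&skb->users) */
}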
@@ -772,14 +772,13 @@ static int chtls_pass_open_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
if (rpl->status != CPL_ERR_NONE) {
pr_info("Unexpected PASS_OPEN_RPL status %u for STID %u\n",
rpl->status, stid);
- return CPL_RET_BUF_DONE;
+ } else {
+ cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
+ sock_put(listen_ctx->lsk);
+ kfree(listen_ctx);
+ module_put(THIS_MODULE);
}
- cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
- sock_put(listen_ctx->lsk);
- kfree(listen_ctx);
- module_put(THIS_MODULE);
-
- return 0;
+ return CPL_RET_BUF_DONE;
}
static int chtls_close_listsrv_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
@@ -796,15 +795,13 @@ static int chtls_close_listsrv_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
if (rpl->status != CPL_ERR_NONE) {
pr_info("Unexpected CLOSE_LISTSRV_RPL status %u for STID %u\n",
rpl->status, stid);
- return CPL_RET_BUF_DONE;
+ } else {
+ cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
+ sock_put(listen_ctx->lsk);
+ kfree(listen_ctx);
+ module_put(THIS_MODULE);
}
-
- cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
- sock_put(listen_ctx->lsk);
- kfree(listen_ctx);
- module_put(THIS_MODULE);
-
- return 0;
+ return CPL_RET_BUF_DONE;
}
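
Both RPL handlers now follow the same ownership convention: cleanup runs only on a successful status, and the handler always returns CPL_RET_BUF_DONE so the dispatcher frees the message skb. A hedged sketch of that convention (types trimmed to the essentials):

/* Returning CPL_RET_BUF_DONE tells the caller it still owns the skb
 * and must free it; returning 0 would mean the handler consumed it.
 */
static int example_rpl_handler(struct sk_buff *skb)
{
	/* ... act on the completion carried in skb ... */
	return CPL_RET_BUF_DONE;
}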
static void chtls_purge_wr_queue(struct sock *sk)
@@ -1514,7 +1511,6 @@ static void add_to_reap_list(struct sock *sk)
struct chtls_sock *csk = sk->sk_user_data;
local_bh_disable();
- bh_lock_sock(sk);
release_tcp_port(sk); /* release the port immediately */
spin_lock(&reap_list_lock);
@@ -1523,7 +1519,6 @@ static void add_to_reap_list(struct sock *sk)
if (!csk->passive_reap_next)
schedule_work(&reap_task);
spin_unlock(&reap_list_lock);
- bh_unlock_sock(sk);
local_bh_enable();
}
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c
index f1820aca0d33..62c829023da5 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c
@@ -383,6 +383,9 @@ int chtls_setkey(struct chtls_sock *csk, u32 keylen,
if (ret)
goto out_notcb;
+ if (unlikely(csk_flag(sk, CSK_ABORT_SHUTDOWN)))
+ goto out_notcb;
+
set_wr_txq(skb, CPL_PRIORITY_DATA, csk->tlshws.txqid);
csk->wr_credits -= DIV_ROUND_UP(len, 16);
csk->wr_unacked += DIV_ROUND_UP(len, 16);
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
index 9fb5ca6682ea..188d871f6b8c 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
@@ -1585,6 +1585,7 @@ skip_copy:
tp->urg_data = 0;
if ((avail + offset) >= skb->len) {
+ struct sk_buff *next_skb;
if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_TLS_HDR) {
tp->copied_seq += skb->len;
hws->rcvpld = skb->hdr_len;
@@ -1595,8 +1596,10 @@ skip_copy:
chtls_free_skb(sk, skb);
buffers_freed++;
hws->copied_seq = 0;
- if (copied >= target &&
- !skb_peek(&sk->sk_receive_queue))
+ next_skb = skb_peek(&sk->sk_receive_queue);
+ if (copied >= target && !next_skb)
+ break;
+ if (ULP_SKB_CB(next_skb)->flags & ULPCB_FLAG_TLS_HDR)
break;
}
} while (len > 0);
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 06cc863f4dd6..d9c285948fc2 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -174,12 +174,17 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
#define DPAA_PARSE_RESULTS_SIZE sizeof(struct fman_prs_result)
#define DPAA_TIME_STAMP_SIZE 8
#define DPAA_HASH_RESULTS_SIZE 8
+#define DPAA_HWA_SIZE (DPAA_PARSE_RESULTS_SIZE + DPAA_TIME_STAMP_SIZE \
+ + DPAA_HASH_RESULTS_SIZE)
+#define DPAA_RX_PRIV_DATA_DEFAULT_SIZE (DPAA_TX_PRIV_DATA_SIZE + \
+ dpaa_rx_extra_headroom)
#ifdef CONFIG_DPAA_ERRATUM_A050385
-#define DPAA_RX_PRIV_DATA_SIZE (DPAA_A050385_ALIGN - (DPAA_PARSE_RESULTS_SIZE\
- + DPAA_TIME_STAMP_SIZE + DPAA_HASH_RESULTS_SIZE))
+#define DPAA_RX_PRIV_DATA_A050385_SIZE (DPAA_A050385_ALIGN - DPAA_HWA_SIZE)
+#define DPAA_RX_PRIV_DATA_SIZE (fman_has_errata_a050385() ? \
+ DPAA_RX_PRIV_DATA_A050385_SIZE : \
+ DPAA_RX_PRIV_DATA_DEFAULT_SIZE)
#else
-#define DPAA_RX_PRIV_DATA_SIZE (u16)(DPAA_TX_PRIV_DATA_SIZE + \
- dpaa_rx_extra_headroom)
+#define DPAA_RX_PRIV_DATA_SIZE DPAA_RX_PRIV_DATA_DEFAULT_SIZE
#endif
#define DPAA_ETH_PCD_RXQ_NUM 128
@@ -2840,7 +2845,8 @@ out_error:
return err;
}
-static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl)
+static u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl,
+ enum port_type port)
{
u16 headroom;
@@ -2854,10 +2860,12 @@ static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl)
*
* Also make sure the headroom is a multiple of data_align bytes
*/
- headroom = (u16)(bl->priv_data_size + DPAA_PARSE_RESULTS_SIZE +
- DPAA_TIME_STAMP_SIZE + DPAA_HASH_RESULTS_SIZE);
+ headroom = (u16)(bl[port].priv_data_size + DPAA_HWA_SIZE);
- return ALIGN(headroom, DPAA_FD_DATA_ALIGNMENT);
+ if (port == RX)
+ return ALIGN(headroom, DPAA_FD_RX_DATA_ALIGNMENT);
+ else
+ return ALIGN(headroom, DPAA_FD_DATA_ALIGNMENT);
}
static int dpaa_eth_probe(struct platform_device *pdev)
@@ -3025,8 +3033,8 @@ static int dpaa_eth_probe(struct platform_device *pdev)
goto free_dpaa_fqs;
}
- priv->tx_headroom = dpaa_get_headroom(&priv->buf_layout[TX]);
- priv->rx_headroom = dpaa_get_headroom(&priv->buf_layout[RX]);
+ priv->tx_headroom = dpaa_get_headroom(priv->buf_layout, TX);
+ priv->rx_headroom = dpaa_get_headroom(priv->buf_layout, RX);
/* All real interfaces need their ports initialized */
err = dpaa_eth_init_ports(mac_dev, dpaa_bp, &port_fqs,
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index c81be32bcedf..827f74e86d34 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -402,7 +402,7 @@ struct enetc_psfp_gate {
u32 num_entries;
refcount_t refcount;
struct hlist_node node;
- struct action_gate_entry entries[0];
+ struct action_gate_entry entries[];
};
/* Only enable the green color frame now
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 832a2175636d..c527f4ee1d3a 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -456,6 +456,12 @@ struct bufdesc_ex {
*/
#define FEC_QUIRK_HAS_FRREG (1 << 16)
+/* Some FEC hardware blocks need the MMFR cleared at setup time to avoid
+ * generating a spurious MII event. The clear must be skipped on older
+ * FEC blocks, where it would stop MII events from being generated at all.
+ */
+#define FEC_QUIRK_CLEAR_SETUP_MII (1 << 17)
+
struct bufdesc_prop {
int qid;
/* Address of Rx and Tx buffers */
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 8f7eca1e7716..d7919555250d 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -100,14 +100,14 @@ static const struct fec_devinfo fec_imx27_info = {
static const struct fec_devinfo fec_imx28_info = {
.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
- FEC_QUIRK_HAS_FRREG,
+ FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII,
};
static const struct fec_devinfo fec_imx6q_info = {
.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
- FEC_QUIRK_HAS_RACC,
+ FEC_QUIRK_HAS_RACC | FEC_QUIRK_CLEAR_SETUP_MII,
};
static const struct fec_devinfo fec_mvf600_info = {
@@ -119,7 +119,8 @@ static const struct fec_devinfo fec_imx6x_info = {
FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
- FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
+ FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
+ FEC_QUIRK_CLEAR_SETUP_MII,
};
static const struct fec_devinfo fec_imx6ul_info = {
@@ -127,7 +128,7 @@ static const struct fec_devinfo fec_imx6ul_info = {
FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 |
FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC |
- FEC_QUIRK_HAS_COALESCE,
+ FEC_QUIRK_HAS_COALESCE | FEC_QUIRK_CLEAR_SETUP_MII,
};
static struct platform_device_id fec_devtype[] = {
@@ -2134,15 +2135,17 @@ static int fec_enet_mii_init(struct platform_device *pdev)
if (suppress_preamble)
fep->phy_speed |= BIT(7);
- /* Clear MMFR to avoid to generate MII event by writing MSCR.
- * MII event generation condition:
- * - writing MSCR:
- * - mmfr[31:0]_not_zero & mscr[7:0]_is_zero &
- * mscr_reg_data_in[7:0] != 0
- * - writing MMFR:
- * - mscr[7:0]_not_zero
- */
- writel(0, fep->hwp + FEC_MII_DATA);
+ if (fep->quirks & FEC_QUIRK_CLEAR_SETUP_MII) {
+		/* Clear MMFR to avoid generating an MII event when writing MSCR.
+ * MII event generation condition:
+ * - writing MSCR:
+ * - mmfr[31:0]_not_zero & mscr[7:0]_is_zero &
+ * mscr_reg_data_in[7:0] != 0
+ * - writing MMFR:
+ * - mscr[7:0]_not_zero
+ */
+ writel(0, fep->hwp + FEC_MII_DATA);
+ }
writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 41dd3d0f3452..d391a45cebb6 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1829,20 +1829,12 @@ static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
/* make space for additional header when fcb is needed */
- if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) {
- struct sk_buff *skb_new;
-
- skb_new = skb_realloc_headroom(skb, fcb_len);
- if (!skb_new) {
+ if (fcb_len) {
+ if (unlikely(skb_cow_head(skb, fcb_len))) {
dev->stats.tx_errors++;
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
-
- if (skb->sk)
- skb_set_owner_w(skb_new, skb->sk);
- dev_consume_skb_any(skb);
- skb = skb_new;
}
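
skb_cow_head() folds the old realloc-and-copy sequence into one call: it reallocates the header only when headroom is short or the header is shared, and it keeps the skb (and its socket ownership) intact. A sketch of the common xmit-path idiom, assuming fcb_len bytes must be prepended:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t xmit_with_header(struct sk_buff *skb,
				    struct net_device *dev,
				    unsigned int fcb_len)
{
	if (unlikely(skb_cow_head(skb, fcb_len))) {
		dev->stats.tx_errors++;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;	/* drop, don't stall the queue */
	}
	/* headroom is now writable: safe to skb_push(skb, fcb_len) */
	return NETDEV_TX_OK;
}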
/* total number of fragments in the SKB */
@@ -3380,7 +3372,7 @@ static int gfar_probe(struct platform_device *ofdev)
if (dev->features & NETIF_F_IP_CSUM ||
priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
- dev->needed_headroom = GMAC_FCB_LEN;
+ dev->needed_headroom = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
/* Initializing some of the rx/tx queue level parameters */
for (i = 0; i < priv->num_tx_queues; i++) {
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 714b501be7d0..ba8869c3d891 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -1358,7 +1358,7 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth)
(ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
upsmr |= UCC_GETH_UPSMR_TBIM;
}
- if ((ugeth->phy_interface == PHY_INTERFACE_MODE_SGMII))
+ if (ugeth->phy_interface == PHY_INTERFACE_MODE_SGMII)
upsmr |= UCC_GETH_UPSMR_SGMM;
out_be32(&uf_regs->upsmr, upsmr);
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h
index 281de8326bc5..015796a20118 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.h
+++ b/drivers/net/ethernet/google/gve/gve_adminq.h
@@ -198,7 +198,7 @@ static_assert(sizeof(struct stats) == 16);
struct gve_stats_report {
__be64 written_count;
- struct stats stats[0];
+ struct stats stats[];
};
static_assert(sizeof(struct gve_stats_report) == 8);
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 48a433154ce0..02e7d74779f4 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -116,9 +116,8 @@ static int gve_alloc_stats_report(struct gve_priv *priv)
priv->tx_cfg.num_queues;
rx_stats_num = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) *
priv->rx_cfg.num_queues;
- priv->stats_report_len = sizeof(struct gve_stats_report) +
- (tx_stats_num + rx_stats_num) *
- sizeof(struct stats);
+ priv->stats_report_len = struct_size(priv->stats_report, stats,
+ tx_stats_num + rx_stats_num);
priv->stats_report =
dma_alloc_coherent(&priv->pdev->dev, priv->stats_report_len,
&priv->stats_report_bus, GFP_KERNEL);
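
struct_size() computes sizeof(*p) plus n trailing array elements with overflow checking (it saturates rather than wraps, so an oversized request fails cleanly instead of under-allocating). A sketch with a hypothetical report type mirroring the shape above:

#include <linux/overflow.h>
#include <linux/slab.h>

struct example_report {
	__be64 written_count;
	struct { __be32 id; __be32 value; } entries[];	/* flexible array */
};

static struct example_report *alloc_report(size_t n)
{
	struct example_report *r;

	r = kzalloc(struct_size(r, entries, n), GFP_KERNEL);
	return r;	/* NULL on overflow or allocation failure */
}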
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 15f69fa86323..e8495f58a1a8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -1373,7 +1373,7 @@ static int hclge_tm_bp_setup(struct hclge_dev *hdev)
return ret;
}
- return ret;
+ return 0;
}
int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 50c84c5e65d2..c8e3fdd5999c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -3262,8 +3262,8 @@ static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
hclgevf_uninit_msi(hdev);
}
- hclgevf_pci_uninit(hdev);
hclgevf_cmd_uninit(hdev);
+ hclgevf_pci_uninit(hdev);
hclgevf_uninit_mac_list(hdev);
}
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 7ef3369953b6..c3ec9ceed833 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1031,12 +1031,6 @@ static int ibmveth_is_packet_unsupported(struct sk_buff *skb,
ret = -EOPNOTSUPP;
}
- if (!ether_addr_equal(ether_header->h_source, netdev->dev_addr)) {
- netdev_dbg(netdev, "source packet MAC address does not match veth device's, dropping packet.\n");
- netdev->stats.tx_dropped++;
- ret = -EOPNOTSUPP;
- }
-
return ret;
}
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 8148f796a807..da15913879f8 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1185,18 +1185,27 @@ static int ibmvnic_open(struct net_device *netdev)
if (adapter->state != VNIC_CLOSED) {
rc = ibmvnic_login(netdev);
if (rc)
- return rc;
+ goto out;
rc = init_resources(adapter);
if (rc) {
netdev_err(netdev, "failed to initialize resources\n");
release_resources(adapter);
- return rc;
+ goto out;
}
}
rc = __ibmvnic_open(netdev);
+out:
+ /*
+ * If open fails due to a pending failover, set device state and
+	 * return. Device operation will be handled by the reset routine.
+ */
+ if (rc && adapter->failover_pending) {
+ adapter->state = VNIC_OPEN;
+ rc = 0;
+ }
return rc;
}
@@ -1815,9 +1824,13 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
int rc;
rc = 0;
- ether_addr_copy(adapter->mac_addr, addr->sa_data);
- if (adapter->state != VNIC_PROBED)
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ if (adapter->state != VNIC_PROBED) {
+ ether_addr_copy(adapter->mac_addr, addr->sa_data);
rc = __ibmvnic_set_mac(netdev, addr->sa_data);
+ }
return rc;
}
@@ -1918,6 +1931,13 @@ static int do_reset(struct ibmvnic_adapter *adapter,
rwi->reset_reason);
rtnl_lock();
+ /*
+ * Now that we have the rtnl lock, clear any pending failover.
+ * This will ensure ibmvnic_open() has either completed or will
+ * block until failover is complete.
+ */
+ if (rwi->reset_reason == VNIC_RESET_FAILOVER)
+ adapter->failover_pending = false;
netif_carrier_off(netdev);
adapter->reset_reason = rwi->reset_reason;
@@ -2198,6 +2218,13 @@ static void __ibmvnic_reset(struct work_struct *work)
/* CHANGE_PARAM requestor holds rtnl_lock */
rc = do_change_param_reset(adapter, rwi, reset_state);
} else if (adapter->force_reset_recovery) {
+ /*
+ * Since we are doing a hard reset now, clear the
+ * failover_pending flag so we don't ignore any
+ * future MOBILITY or other resets.
+ */
+ adapter->failover_pending = false;
+
/* Transport event occurred during previous reset */
if (adapter->wait_for_reset) {
/* Previous was CHANGE_PARAM; caller locked */
@@ -2262,9 +2289,15 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
unsigned long flags;
int ret;
+ /*
+	 * If a failover is pending, don't schedule any other reset.
+	 * Instead let the failover complete. If there is already a
+	 * failover reset scheduled, we will detect and drop the
+ * duplicate reset when walking the ->rwi_list below.
+ */
if (adapter->state == VNIC_REMOVING ||
adapter->state == VNIC_REMOVED ||
- adapter->failover_pending) {
+ (adapter->failover_pending && reason != VNIC_RESET_FAILOVER)) {
ret = EBUSY;
netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
goto err;
@@ -4709,7 +4742,6 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
case IBMVNIC_CRQ_INIT:
dev_info(dev, "Partner initialized\n");
adapter->from_passive_init = true;
- adapter->failover_pending = false;
if (!completion_done(&adapter->init_done)) {
complete(&adapter->init_done);
adapter->init_done_rc = -EIO;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index c96e2f2d4cba..4919d22d7b6b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -2713,6 +2713,10 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
spin_unlock_bh(&vsi->mac_filter_hash_lock);
goto error_param;
}
+ if (is_valid_ether_addr(al->list[i].addr) &&
+ is_zero_ether_addr(vf->default_lan_addr.addr))
+ ether_addr_copy(vf->default_lan_addr.addr,
+ al->list[i].addr);
}
}
spin_unlock_bh(&vsi->mac_filter_hash_lock);
@@ -2740,6 +2744,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
{
struct virtchnl_ether_addr_list *al =
(struct virtchnl_ether_addr_list *)msg;
+ bool was_unimac_deleted = false;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
i40e_status ret = 0;
@@ -2759,6 +2764,8 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
ret = I40E_ERR_INVALID_MAC_ADDR;
goto error_param;
}
+ if (ether_addr_equal(al->list[i].addr, vf->default_lan_addr.addr))
+ was_unimac_deleted = true;
}
vsi = pf->vsi[vf->lan_vsi_idx];
@@ -2779,10 +2786,25 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
vf->vf_id, ret);
+ if (vf->trusted && was_unimac_deleted) {
+ struct i40e_mac_filter *f;
+ struct hlist_node *h;
+ u8 *macaddr = NULL;
+ int bkt;
+
+ /* set last unicast mac address as default */
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
+ if (is_valid_ether_addr(f->macaddr))
+ macaddr = f->macaddr;
+ }
+ if (macaddr)
+ ether_addr_copy(vf->default_lan_addr.addr, macaddr);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+ }
error_param:
/* send the response to the VF */
- return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
- ret);
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR, ret);
}
/**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 6acede0acdca..567fd67e900e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -281,8 +281,8 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
unsigned int xdp_res, xdp_xmit = 0;
+ bool failure = false;
struct sk_buff *skb;
- bool failure;
while (likely(total_rx_packets < (unsigned int)budget)) {
union i40e_rx_desc *rx_desc;
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 9112dff075cf..b673ac1199bb 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -3891,21 +3891,23 @@ static int igc_change_mtu(struct net_device *netdev, int new_mtu)
}
/**
- * igc_get_stats - Get System Network Statistics
+ * igc_get_stats64 - Get System Network Statistics
* @netdev: network interface device structure
+ * @stats: rtnl_link_stats64 pointer
*
 * Copies the current device statistics into @stats.
* The statistics are updated here and also from the timer callback.
*/
-static struct net_device_stats *igc_get_stats(struct net_device *netdev)
+static void igc_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
{
struct igc_adapter *adapter = netdev_priv(netdev);
+ spin_lock(&adapter->stats64_lock);
if (!test_bit(__IGC_RESETTING, &adapter->state))
igc_update_stats(adapter);
-
- /* only return the current stats */
- return &netdev->stats;
+ memcpy(stats, &adapter->stats64, sizeof(*stats));
+ spin_unlock(&adapter->stats64_lock);
}
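
.ndo_get_stats64 fills a caller-provided buffer instead of returning a pointer into the netdev, which lets the driver publish 64-bit counters under its own lock. A sketch of the contract with a hypothetical private struct:

#include <linux/netdevice.h>

struct example_priv {
	spinlock_t stats_lock;
	struct rtnl_link_stats64 stats64;
};

static void example_get_stats64(struct net_device *netdev,
				struct rtnl_link_stats64 *stats)
{
	struct example_priv *priv = netdev_priv(netdev);

	spin_lock(&priv->stats_lock);
	*stats = priv->stats64;		/* consistent snapshot */
	spin_unlock(&priv->stats_lock);
}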
static netdev_features_t igc_fix_features(struct net_device *netdev,
@@ -4855,7 +4857,7 @@ static const struct net_device_ops igc_netdev_ops = {
.ndo_set_rx_mode = igc_set_rx_mode,
.ndo_set_mac_address = igc_set_mac,
.ndo_change_mtu = igc_change_mtu,
- .ndo_get_stats = igc_get_stats,
+ .ndo_get_stats64 = igc_get_stats64,
.ndo_fix_features = igc_fix_features,
.ndo_set_features = igc_set_features,
.ndo_features_check = igc_features_check,
diff --git a/drivers/net/ethernet/marvell/prestera/Kconfig b/drivers/net/ethernet/marvell/prestera/Kconfig
index b1fcc44f566a..b6f20e2034c6 100644
--- a/drivers/net/ethernet/marvell/prestera/Kconfig
+++ b/drivers/net/ethernet/marvell/prestera/Kconfig
@@ -6,6 +6,7 @@
config PRESTERA
tristate "Marvell Prestera Switch ASICs support"
depends on NET_SWITCHDEV && VLAN_8021Q
+ depends on BRIDGE || BRIDGE=n
select NET_DEVLINK
help
This driver supports Marvell Prestera Switch ASICs family.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index e36e505d38ad..d29af7b9c695 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -107,12 +107,16 @@ void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
mlx5e_tc_encap_flows_del(priv, e, &flow_list);
if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
+ struct net_device *route_dev;
+
ether_addr_copy(e->h_dest, ha);
ether_addr_copy(eth->h_dest, ha);
		/* Update the encap source mac, so the flows can be
		 * deleted correctly when the encap source mac changes.
*/
- ether_addr_copy(eth->h_source, e->route_dev->dev_addr);
+ route_dev = __dev_get_by_index(dev_net(priv->netdev), e->route_dev_ifindex);
+ if (route_dev)
+ ether_addr_copy(eth->h_source, route_dev->dev_addr);
mlx5e_tc_encap_flows_add(priv, e, &flow_list);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index 7cce85faa16f..90930e54b6f2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -77,13 +77,13 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv,
return 0;
}
-static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
- struct net_device *mirred_dev,
- struct net_device **out_dev,
- struct net_device **route_dev,
- struct flowi4 *fl4,
- struct neighbour **out_n,
- u8 *out_ttl)
+static int mlx5e_route_lookup_ipv4_get(struct mlx5e_priv *priv,
+ struct net_device *mirred_dev,
+ struct net_device **out_dev,
+ struct net_device **route_dev,
+ struct flowi4 *fl4,
+ struct neighbour **out_n,
+ u8 *out_ttl)
{
struct neighbour *n;
struct rtable *rt;
@@ -117,18 +117,28 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
ip_rt_put(rt);
return ret;
}
+ dev_hold(*route_dev);
if (!(*out_ttl))
*out_ttl = ip4_dst_hoplimit(&rt->dst);
n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
ip_rt_put(rt);
- if (!n)
+ if (!n) {
+ dev_put(*route_dev);
return -ENOMEM;
+ }
*out_n = n;
return 0;
}
+static void mlx5e_route_lookup_ipv4_put(struct net_device *route_dev,
+ struct neighbour *n)
+{
+ neigh_release(n);
+ dev_put(route_dev);
+}
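
The rename to *_get/*_put makes the reference discipline explicit: the lookup returns the route device held (dev_hold()) alongside the neighbour, and one put on every exit path releases both, so neither object can be freed while the encap header is built. Caller-side shape, using the functions above:

	err = mlx5e_route_lookup_ipv4_get(priv, mirred_dev, &out_dev,
					  &route_dev, &fl4, &n, &ttl);
	if (err)
		return err;
	/* ... route_dev and n are safe to use here ... */
	mlx5e_route_lookup_ipv4_put(route_dev, n);	/* drops both refs */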
+
static const char *mlx5e_netdev_kind(struct net_device *dev)
{
if (dev->rtnl_link_ops)
@@ -193,8 +203,8 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
fl4.saddr = tun_key->u.ipv4.src;
ttl = tun_key->ttl;
- err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev, &route_dev,
- &fl4, &n, &ttl);
+ err = mlx5e_route_lookup_ipv4_get(priv, mirred_dev, &out_dev, &route_dev,
+ &fl4, &n, &ttl);
if (err)
return err;
@@ -223,7 +233,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
e->m_neigh.family = n->ops->family;
memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
e->out_dev = out_dev;
- e->route_dev = route_dev;
+ e->route_dev_ifindex = route_dev->ifindex;
/* It's important to add the neigh to the hash table before checking
* the neigh validity state. So if we'll get a notification, in case the
@@ -278,7 +288,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
e->flags |= MLX5_ENCAP_ENTRY_VALID;
mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
- neigh_release(n);
+ mlx5e_route_lookup_ipv4_put(route_dev, n);
return err;
destroy_neigh_entry:
@@ -286,18 +296,18 @@ destroy_neigh_entry:
free_encap:
kfree(encap_header);
release_neigh:
- neigh_release(n);
+ mlx5e_route_lookup_ipv4_put(route_dev, n);
return err;
}
#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
-static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
- struct net_device *mirred_dev,
- struct net_device **out_dev,
- struct net_device **route_dev,
- struct flowi6 *fl6,
- struct neighbour **out_n,
- u8 *out_ttl)
+static int mlx5e_route_lookup_ipv6_get(struct mlx5e_priv *priv,
+ struct net_device *mirred_dev,
+ struct net_device **out_dev,
+ struct net_device **route_dev,
+ struct flowi6 *fl6,
+ struct neighbour **out_n,
+ u8 *out_ttl)
{
struct dst_entry *dst;
struct neighbour *n;
@@ -318,15 +328,25 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
return ret;
}
+ dev_hold(*route_dev);
n = dst_neigh_lookup(dst, &fl6->daddr);
dst_release(dst);
- if (!n)
+ if (!n) {
+ dev_put(*route_dev);
return -ENOMEM;
+ }
*out_n = n;
return 0;
}
+static void mlx5e_route_lookup_ipv6_put(struct net_device *route_dev,
+ struct neighbour *n)
+{
+ neigh_release(n);
+ dev_put(route_dev);
+}
+
int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
struct net_device *mirred_dev,
struct mlx5e_encap_entry *e)
@@ -348,8 +368,8 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
fl6.daddr = tun_key->u.ipv6.dst;
fl6.saddr = tun_key->u.ipv6.src;
- err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev, &route_dev,
- &fl6, &n, &ttl);
+ err = mlx5e_route_lookup_ipv6_get(priv, mirred_dev, &out_dev, &route_dev,
+ &fl6, &n, &ttl);
if (err)
return err;
@@ -378,7 +398,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
e->m_neigh.family = n->ops->family;
memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
e->out_dev = out_dev;
- e->route_dev = route_dev;
+ e->route_dev_ifindex = route_dev->ifindex;
	/* It's important to add the neigh to the hash table before checking
* the neigh validity state. So if we'll get a notification, in case the
@@ -433,7 +453,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
e->flags |= MLX5_ENCAP_ENTRY_VALID;
mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
- neigh_release(n);
+ mlx5e_route_lookup_ipv6_put(route_dev, n);
return err;
destroy_neigh_entry:
@@ -441,7 +461,7 @@ destroy_neigh_entry:
free_encap:
kfree(encap_header);
release_neigh:
- neigh_release(n);
+ mlx5e_route_lookup_ipv6_put(route_dev, n);
return err;
}
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index 4e574ac73019..be3465ba38ca 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -122,9 +122,9 @@ void mlx5e_activate_xsk(struct mlx5e_channel *c)
set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
/* TX queue is created active. */
- spin_lock(&c->async_icosq_lock);
+ spin_lock_bh(&c->async_icosq_lock);
mlx5e_trigger_irq(&c->async_icosq);
- spin_unlock(&c->async_icosq_lock);
+ spin_unlock_bh(&c->async_icosq_lock);
}
void mlx5e_deactivate_xsk(struct mlx5e_channel *c)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
index fb671a457129..8e96260fce1d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
@@ -36,9 +36,9 @@ int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
if (test_and_set_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->async_icosq.state))
return 0;
- spin_lock(&c->async_icosq_lock);
+ spin_lock_bh(&c->async_icosq_lock);
mlx5e_trigger_irq(&c->async_icosq);
- spin_unlock(&c->async_icosq_lock);
+ spin_unlock_bh(&c->async_icosq_lock);
}
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
index ccaccb9fc2f7..7f6221b8b1f7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
@@ -188,7 +188,7 @@ static int post_rx_param_wqes(struct mlx5e_channel *c,
err = 0;
sq = &c->async_icosq;
- spin_lock(&c->async_icosq_lock);
+ spin_lock_bh(&c->async_icosq_lock);
cseg = post_static_params(sq, priv_rx);
if (IS_ERR(cseg))
@@ -199,7 +199,7 @@ static int post_rx_param_wqes(struct mlx5e_channel *c,
mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
unlock:
- spin_unlock(&c->async_icosq_lock);
+ spin_unlock_bh(&c->async_icosq_lock);
return err;
@@ -265,10 +265,10 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
BUILD_BUG_ON(MLX5E_KTLS_GET_PROGRESS_WQEBBS != 1);
- spin_lock(&sq->channel->async_icosq_lock);
+ spin_lock_bh(&sq->channel->async_icosq_lock);
if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1))) {
- spin_unlock(&sq->channel->async_icosq_lock);
+ spin_unlock_bh(&sq->channel->async_icosq_lock);
err = -ENOSPC;
goto err_dma_unmap;
}
@@ -299,7 +299,7 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
icosq_fill_wi(sq, pi, &wi);
sq->pc++;
mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
- spin_unlock(&sq->channel->async_icosq_lock);
+ spin_unlock_bh(&sq->channel->async_icosq_lock);
return 0;
@@ -360,7 +360,7 @@ static int resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx
err = 0;
sq = &c->async_icosq;
- spin_lock(&c->async_icosq_lock);
+ spin_lock_bh(&c->async_icosq_lock);
cseg = post_static_params(sq, priv_rx);
if (IS_ERR(cseg)) {
@@ -372,7 +372,7 @@ static int resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx
mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
priv_rx->stats->tls_resync_res_ok++;
unlock:
- spin_unlock(&c->async_icosq_lock);
+ spin_unlock_bh(&c->async_icosq_lock);
return err;
}
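
async_icosq_lock is also taken from NAPI (softirq) context, so the process-context paths must use the _bh lock variants: otherwise a softirq firing on the same CPU while the lock is held would deadlock. Minimal sketch of the rule:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(icosq_lock);

/* process-context side of a lock shared with softirq context */
static void process_ctx_kick(void)
{
	spin_lock_bh(&icosq_lock);	/* masks local bottom halves */
	/* ... post a NOP WQE / ring the doorbell ... */
	spin_unlock_bh(&icosq_lock);
}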
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index b3f02aac7f26..ebce97921e03 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -5253,6 +5253,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
mlx5e_disable_async_events(priv);
mlx5_lag_remove(mdev);
+ mlx5_vxlan_reset_to_default(mdev->vxlan);
}
int mlx5e_update_nic_rx(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index 9020d1419bcf..8932c387d46a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -186,7 +186,7 @@ struct mlx5e_encap_entry {
unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
struct net_device *out_dev;
- struct net_device *route_dev;
+ int route_dev_ifindex;
struct mlx5e_tc_tunnel *tunnel;
int reformat_type;
u8 flags;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 599f5b5ebc97..6628a0197b4e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1584,7 +1584,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
} while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq)));
out:
- if (rq->xdp_prog)
+ if (rcu_access_pointer(rq->xdp_prog))
mlx5e_xdp_rx_poll_complete(rq);
mlx5_cqwq_update_db_record(cqwq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index e3a968e9e2a0..2e2fa0440032 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -4658,6 +4658,7 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
return flow;
err_free:
+ dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
mlx5e_flow_put(priv, flow);
out:
return ERR_PTR(err);
@@ -4802,6 +4803,7 @@ mlx5e_add_nic_flow(struct mlx5e_priv *priv,
return 0;
err_free:
+ dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
mlx5e_flow_put(priv, flow);
out:
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 6e6a9a563992..e8e6294c7cca 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1902,8 +1902,6 @@ int mlx5_devlink_port_function_hw_addr_get(struct devlink *devlink,
ether_addr_copy(hw_addr, vport->info.mac);
*hw_addr_len = ETH_ALEN;
err = 0;
- } else {
- NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
}
mutex_unlock(&esw->state_lock);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 16091838bfcf..325a5b0d6829 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -2010,10 +2010,11 @@ void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
down_write_ref_node(&fte->node, false);
for (i = handle->num_rules - 1; i >= 0; i--)
tree_remove_node(&handle->rule[i]->node, true);
- if (fte->modify_mask && fte->dests_size) {
- modify_fte(fte);
+ if (fte->dests_size) {
+ if (fte->modify_mask)
+ modify_fte(fte);
up_write_ref_node(&fte->node, false);
- } else {
+ } else if (list_empty(&fte->node.children)) {
del_hw_fte(&fte->node);
/* Avoid double call to del_hw_fte */
fte->node.del_hw_func = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
index d046db7bb047..3a9fa629503f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
@@ -90,9 +90,4 @@ int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
u32 key_type, u32 *p_key_id);
void mlx5_destroy_encryption_key(struct mlx5_core_dev *mdev, u32 key_id);
-static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
-{
- return devlink_net(priv_to_devlink(dev));
-}
-
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
index 3315afe2f8dc..38084400ee8f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
@@ -168,6 +168,17 @@ struct mlx5_vxlan *mlx5_vxlan_create(struct mlx5_core_dev *mdev)
void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan)
{
+ if (!mlx5_vxlan_allowed(vxlan))
+ return;
+
+ mlx5_vxlan_del_port(vxlan, IANA_VXLAN_UDP_PORT);
+ WARN_ON(!hash_empty(vxlan->htable));
+
+ kfree(vxlan);
+}
+
+void mlx5_vxlan_reset_to_default(struct mlx5_vxlan *vxlan)
+{
struct mlx5_vxlan_port *vxlanp;
struct hlist_node *tmp;
int bkt;
@@ -175,12 +186,12 @@ void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan)
if (!mlx5_vxlan_allowed(vxlan))
return;
- /* Lockless since we are the only hash table consumers*/
hash_for_each_safe(vxlan->htable, bkt, tmp, vxlanp, hlist) {
- hash_del(&vxlanp->hlist);
- mlx5_vxlan_core_del_port_cmd(vxlan->mdev, vxlanp->udp_port);
- kfree(vxlanp);
+		/* Don't delete the default UDP port added by the HW.
+		 * Remove only user-configured ports.
+ */
+ if (vxlanp->udp_port == IANA_VXLAN_UDP_PORT)
+ continue;
+ mlx5_vxlan_del_port(vxlan, vxlanp->udp_port);
}
-
- kfree(vxlan);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h
index ec766529f49b..34ef662da35e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h
@@ -56,6 +56,7 @@ void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan);
int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port);
int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port);
bool mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port);
+void mlx5_vxlan_reset_to_default(struct mlx5_vxlan *vxlan);
#else
static inline struct mlx5_vxlan*
mlx5_vxlan_create(struct mlx5_core_dev *mdev) { return ERR_PTR(-EOPNOTSUPP); }
@@ -63,6 +64,7 @@ static inline void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan) { return; }
static inline int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port) { return -EOPNOTSUPP; }
static inline int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port) { return -EOPNOTSUPP; }
static inline bool mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port) { return false; }
+static inline void mlx5_vxlan_reset_to_default(struct mlx5_vxlan *vxlan) { return; }
#endif
#endif /* __MLX5_VXLAN_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 7f77c2a71d1c..937b8e46f8c7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -620,6 +620,9 @@ static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core,
err = mlxsw_emad_transmit(trans->core, trans);
if (err == 0)
return;
+
+ if (!atomic_dec_and_test(&trans->active))
+ return;
} else {
err = -EIO;
}
@@ -2064,6 +2067,8 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
if (!reload)
devlink_resources_unregister(devlink, NULL);
mlxsw_core->bus->fini(mlxsw_core->bus_priv);
+ if (!reload)
+ devlink_free(devlink);
return;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 16b47fce540b..b08853f71b2b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1174,11 +1174,14 @@ mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
const struct mlxsw_sp_port_type_speed_ops *ops;
char ptys_pl[MLXSW_REG_PTYS_LEN];
+ u32 eth_proto_cap_masked;
int err;
ops = mlxsw_sp->port_type_speed_ops;
- /* Set advertised speeds to supported speeds. */
+ /* Set advertised speeds to speeds supported by both the driver
+ * and the device.
+ */
ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
0, false);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
@@ -1187,8 +1190,10 @@ mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
&eth_proto_admin, &eth_proto_oper);
+ eth_proto_cap_masked = ops->ptys_proto_cap_masked_get(eth_proto_cap);
ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
- eth_proto_cap, mlxsw_sp_port->link.autoneg);
+ eth_proto_cap_masked,
+ mlxsw_sp_port->link.autoneg);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 3e26eb6cb140..74b3959b36d4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -342,6 +342,7 @@ struct mlxsw_sp_port_type_speed_ops {
u32 *p_eth_proto_cap,
u32 *p_eth_proto_admin,
u32 *p_eth_proto_oper);
+ u32 (*ptys_proto_cap_masked_get)(u32 eth_proto_cap);
};
static inline struct net_device *
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
index 2096b6478958..540616469e28 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
@@ -1303,6 +1303,20 @@ mlxsw_sp1_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload,
p_eth_proto_oper);
}
+static u32 mlxsw_sp1_ptys_proto_cap_masked_get(u32 eth_proto_cap)
+{
+ u32 ptys_proto_cap_masked = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
+ if (mlxsw_sp1_port_link_mode[i].mask & eth_proto_cap)
+ ptys_proto_cap_masked |=
+ mlxsw_sp1_port_link_mode[i].mask;
+ }
+
+ return ptys_proto_cap_masked;
+}
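
The masked-capability helper intersects what the device reports with the link modes the driver actually models, so autonegotiation never advertises a speed the driver cannot configure. Generic form of the loop (link_modes[] is a hypothetical mask table):

static u32 mask_to_known_modes(u32 eth_proto_cap,
			       const u32 *link_modes, int len)
{
	u32 masked = 0;
	int i;

	for (i = 0; i < len; i++)
		if (link_modes[i] & eth_proto_cap)
			masked |= link_modes[i];

	return masked;	/* only bits the driver knows how to program */
}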
+
const struct mlxsw_sp_port_type_speed_ops mlxsw_sp1_port_type_speed_ops = {
.from_ptys_supported_port = mlxsw_sp1_from_ptys_supported_port,
.from_ptys_link = mlxsw_sp1_from_ptys_link,
@@ -1313,6 +1327,7 @@ const struct mlxsw_sp_port_type_speed_ops mlxsw_sp1_port_type_speed_ops = {
.to_ptys_speed = mlxsw_sp1_to_ptys_speed,
.reg_ptys_eth_pack = mlxsw_sp1_reg_ptys_eth_pack,
.reg_ptys_eth_unpack = mlxsw_sp1_reg_ptys_eth_unpack,
+ .ptys_proto_cap_masked_get = mlxsw_sp1_ptys_proto_cap_masked_get,
};
static const enum ethtool_link_mode_bit_indices
@@ -1731,6 +1746,20 @@ mlxsw_sp2_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload,
p_eth_proto_admin, p_eth_proto_oper);
}
+static u32 mlxsw_sp2_ptys_proto_cap_masked_get(u32 eth_proto_cap)
+{
+ u32 ptys_proto_cap_masked = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
+ if (mlxsw_sp2_port_link_mode[i].mask & eth_proto_cap)
+ ptys_proto_cap_masked |=
+ mlxsw_sp2_port_link_mode[i].mask;
+ }
+
+ return ptys_proto_cap_masked;
+}
+
const struct mlxsw_sp_port_type_speed_ops mlxsw_sp2_port_type_speed_ops = {
.from_ptys_supported_port = mlxsw_sp2_from_ptys_supported_port,
.from_ptys_link = mlxsw_sp2_from_ptys_link,
@@ -1741,4 +1770,5 @@ const struct mlxsw_sp_port_type_speed_ops mlxsw_sp2_port_type_speed_ops = {
.to_ptys_speed = mlxsw_sp2_to_ptys_speed,
.reg_ptys_eth_pack = mlxsw_sp2_reg_ptys_eth_pack,
.reg_ptys_eth_unpack = mlxsw_sp2_reg_ptys_eth_unpack,
+ .ptys_proto_cap_masked_get = mlxsw_sp2_ptys_proto_cap_masked_get,
};
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index a1938842f828..e2c99d909247 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -674,14 +674,12 @@ clean_up:
static int lan743x_dp_write(struct lan743x_adapter *adapter,
u32 select, u32 addr, u32 length, u32 *buf)
{
- int ret = -EIO;
u32 dp_sel;
int i;
- mutex_lock(&adapter->dp_lock);
if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_,
1, 40, 100, 100))
- goto unlock;
+ return -EIO;
dp_sel = lan743x_csr_read(adapter, DP_SEL);
dp_sel &= ~DP_SEL_MASK_;
dp_sel |= select;
@@ -693,13 +691,10 @@ static int lan743x_dp_write(struct lan743x_adapter *adapter,
lan743x_csr_write(adapter, DP_CMD, DP_CMD_WRITE_);
if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_,
1, 40, 100, 100))
- goto unlock;
+ return -EIO;
}
- ret = 0;
-unlock:
- mutex_unlock(&adapter->dp_lock);
- return ret;
+ return 0;
}
static u32 lan743x_mac_mii_access(u16 id, u16 index, int read)
@@ -1019,16 +1014,16 @@ static void lan743x_phy_close(struct lan743x_adapter *adapter)
static int lan743x_phy_open(struct lan743x_adapter *adapter)
{
struct lan743x_phy *phy = &adapter->phy;
+ struct phy_device *phydev = NULL;
struct device_node *phynode;
- struct phy_device *phydev;
struct net_device *netdev;
int ret = -EIO;
netdev = adapter->netdev;
phynode = of_node_get(adapter->pdev->dev.of_node);
- adapter->phy_mode = PHY_INTERFACE_MODE_GMII;
if (phynode) {
+ /* try devicetree phy, or fixed link */
of_get_phy_mode(phynode, &adapter->phy_mode);
if (of_phy_is_fixed_link(phynode)) {
@@ -1044,13 +1039,15 @@ static int lan743x_phy_open(struct lan743x_adapter *adapter)
lan743x_phy_link_status_change, 0,
adapter->phy_mode);
of_node_put(phynode);
- if (!phydev)
- goto return_error;
- } else {
+ }
+
+ if (!phydev) {
+ /* try internal phy */
phydev = phy_find_first(adapter->mdiobus);
if (!phydev)
goto return_error;
+ adapter->phy_mode = PHY_INTERFACE_MODE_GMII;
ret = phy_connect_direct(netdev, phydev,
lan743x_phy_link_status_change,
adapter->phy_mode);
@@ -2733,7 +2730,6 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
adapter->intr.irq = adapter->pdev->irq;
lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF);
- mutex_init(&adapter->dp_lock);
ret = lan743x_gpio_init(adapter);
if (ret)
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index c61a40411317..a536f4a4994d 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -712,9 +712,6 @@ struct lan743x_adapter {
struct lan743x_csr csr;
struct lan743x_intr intr;
- /* lock, used to prevent concurrent access to data port */
- struct mutex dp_lock;
-
struct lan743x_gpio gpio;
struct lan743x_ptp ptp;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index 545c99b15df8..dc5fbc2704f3 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -333,7 +333,7 @@ int ionic_set_vf_config(struct ionic *ionic, int vf, u8 attr, u8 *data)
union ionic_dev_cmd cmd = {
.vf_setattr.opcode = IONIC_CMD_VF_SETATTR,
.vf_setattr.attr = attr,
- .vf_setattr.vf_index = vf,
+ .vf_setattr.vf_index = cpu_to_le16(vf),
};
int err;
@@ -391,7 +391,7 @@ void ionic_dev_cmd_queue_identify(struct ionic_dev *idev,
{
union ionic_dev_cmd cmd = {
.q_identify.opcode = IONIC_CMD_Q_IDENTIFY,
- .q_identify.lif_type = lif_type,
+ .q_identify.lif_type = cpu_to_le16(lif_type),
.q_identify.type = qtype,
.q_identify.ver = qver,
};
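
The ionic command structures are read by the device in little-endian order, so every store must go through cpu_to_le*() and the fields should carry __le types for sparse to check. A minimal sketch of the annotation pattern (hypothetical descriptor):

#include <linux/types.h>
#include <asm/byteorder.h>

struct example_cmd {
	__le16 vf_index;	/* device-visible: little-endian */
	__le32 length;
};

static void fill_cmd(struct example_cmd *cmd, u16 vf, u32 len)
{
	cmd->vf_index = cpu_to_le16(vf);	/* no-op on LE hosts */
	cmd->length   = cpu_to_le32(len);
}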
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index c109cd5a0471..6c243b17312c 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -29,6 +29,7 @@ struct ionic_dev_bar {
int res_index;
};
+#ifndef __CHECKER__
/* Registers */
static_assert(sizeof(struct ionic_intr) == 32);
@@ -119,6 +120,7 @@ static_assert(sizeof(struct ionic_vf_setattr_cmd) == 64);
static_assert(sizeof(struct ionic_vf_setattr_comp) == 16);
static_assert(sizeof(struct ionic_vf_getattr_cmd) == 64);
static_assert(sizeof(struct ionic_vf_getattr_comp) == 16);
+#endif /* __CHECKER__ */
struct ionic_devinfo {
u8 asic_type;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index ed9808fc743b..35c72d4a78b3 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -126,6 +126,11 @@ static int ionic_get_link_ksettings(struct net_device *netdev,
ethtool_link_ksettings_zero_link_mode(ks, supported);
+ if (!idev->port_info) {
+ netdev_err(netdev, "port_info not initialized\n");
+ return -EOPNOTSUPP;
+ }
+
/* The port_info data is found in a DMA space that the NIC keeps
* up-to-date, so there's no need to request the data from the
* NIC; we already have it in our memory space.
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_fw.c b/drivers/net/ethernet/pensando/ionic/ionic_fw.c
index f492ae406a60..d7bbf336c6f6 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_fw.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_fw.c
@@ -27,9 +27,9 @@ static void ionic_dev_cmd_firmware_download(struct ionic_dev *idev, u64 addr,
{
union ionic_dev_cmd cmd = {
.fw_download.opcode = IONIC_CMD_FW_DOWNLOAD,
- .fw_download.offset = offset,
- .fw_download.addr = addr,
- .fw_download.length = length
+ .fw_download.offset = cpu_to_le32(offset),
+ .fw_download.addr = cpu_to_le64(addr),
+ .fw_download.length = cpu_to_le32(length),
};
ionic_dev_cmd_go(idev, &cmd);
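
The ionic fixes above all add missing cpu_to_le*() conversions for fields the device defines as little-endian; without them a big-endian host would store swapped bytes into the command. A toy illustration of what a cpu_to_le16()-style helper does (not the kernel's implementation):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy stand-in for cpu_to_le16(): build the little-endian byte
 * representation explicitly, whatever the host byte order is. */
static uint16_t toy_cpu_to_le16(uint16_t v)
{
	uint8_t bytes[2] = { v & 0xff, v >> 8 };
	uint16_t out;

	memcpy(&out, bytes, sizeof(out));
	return out;
}

int main(void)
{
	/* On a little-endian host both values match; on a big-endian
	 * host a raw store would hand the device swapped bytes. */
	printf("raw=0x%04x wire=0x%04x\n", 0x1234, toy_cpu_to_le16(0x1234));
	return 0;
}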
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index d655a7ae3058..a12df3946a07 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -1656,7 +1656,6 @@ static void ionic_txrx_deinit(struct ionic_lif *lif)
if (lif->rxqcqs) {
for (i = 0; i < lif->nxqs && lif->rxqcqs[i]; i++) {
ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]);
- ionic_rx_flush(&lif->rxqcqs[i]->cq);
ionic_rx_empty(&lif->rxqcqs[i]->q);
}
}
@@ -1915,11 +1914,11 @@ static int ionic_get_vf_config(struct net_device *netdev,
ret = -EINVAL;
} else {
ivf->vf = vf;
- ivf->vlan = ionic->vfs[vf].vlanid;
+ ivf->vlan = le16_to_cpu(ionic->vfs[vf].vlanid);
ivf->qos = 0;
ivf->spoofchk = ionic->vfs[vf].spoofchk;
ivf->linkstate = ionic->vfs[vf].linkstate;
- ivf->max_tx_rate = ionic->vfs[vf].maxrate;
+ ivf->max_tx_rate = le32_to_cpu(ionic->vfs[vf].maxrate);
ivf->trusted = ionic->vfs[vf].trusted;
ether_addr_copy(ivf->mac, ionic->vfs[vf].macaddr);
}
@@ -2019,7 +2018,7 @@ static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
ret = ionic_set_vf_config(ionic, vf,
IONIC_VF_ATTR_VLAN, (u8 *)&vlan);
if (!ret)
- ionic->vfs[vf].vlanid = vlan;
+ ionic->vfs[vf].vlanid = cpu_to_le16(vlan);
}
up_write(&ionic->vf_op_lock);
@@ -2048,7 +2047,7 @@ static int ionic_set_vf_rate(struct net_device *netdev, int vf,
ret = ionic_set_vf_config(ionic, vf,
IONIC_VF_ATTR_RATE, (u8 *)&tx_max);
if (!ret)
- lif->ionic->vfs[vf].maxrate = tx_max;
+ lif->ionic->vfs[vf].maxrate = cpu_to_le32(tx_max);
}
up_write(&ionic->vf_op_lock);
@@ -2981,14 +2980,14 @@ void ionic_lif_unregister(struct ionic_lif *lif)
static void ionic_lif_queue_identify(struct ionic_lif *lif)
{
+ union ionic_q_identity __iomem *q_ident;
struct ionic *ionic = lif->ionic;
- union ionic_q_identity *q_ident;
struct ionic_dev *idev;
int qtype;
int err;
idev = &lif->ionic->idev;
- q_ident = (union ionic_q_identity *)&idev->dev_cmd_regs->data;
+ q_ident = (union ionic_q_identity __iomem *)&idev->dev_cmd_regs->data;
for (qtype = 0; qtype < ARRAY_SIZE(ionic_qtype_versions); qtype++) {
struct ionic_qtype_info *qti = &lif->qtype_info[qtype];
@@ -3011,14 +3010,14 @@ static void ionic_lif_queue_identify(struct ionic_lif *lif)
ionic_qtype_versions[qtype]);
err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
if (!err) {
- qti->version = q_ident->version;
- qti->supported = q_ident->supported;
- qti->features = le64_to_cpu(q_ident->features);
- qti->desc_sz = le16_to_cpu(q_ident->desc_sz);
- qti->comp_sz = le16_to_cpu(q_ident->comp_sz);
- qti->sg_desc_sz = le16_to_cpu(q_ident->sg_desc_sz);
- qti->max_sg_elems = le16_to_cpu(q_ident->max_sg_elems);
- qti->sg_desc_stride = le16_to_cpu(q_ident->sg_desc_stride);
+ qti->version = readb(&q_ident->version);
+ qti->supported = readb(&q_ident->supported);
+ qti->features = readq(&q_ident->features);
+ qti->desc_sz = readw(&q_ident->desc_sz);
+ qti->comp_sz = readw(&q_ident->comp_sz);
+ qti->sg_desc_sz = readw(&q_ident->sg_desc_sz);
+ qti->max_sg_elems = readw(&q_ident->max_sg_elems);
+ qti->sg_desc_stride = readw(&q_ident->sg_desc_stride);
}
mutex_unlock(&ionic->dev_cmd_lock);
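
The switch to readb()/readw()/readq() above reflects that dev_cmd_regs points at mapped device memory, which must go through the I/O accessors rather than plain dereferences. A rough userspace analogue, using volatile as a stand-in for what the real accessors guarantee (the kernel ones add __iomem typing and ordering on top):

#include <stdint.h>
#include <stdio.h>

/* Userspace analogue of readw(): force a real load through a
 * volatile-qualified pointer so the compiler cannot cache it. */
static inline uint16_t toy_readw(const volatile uint16_t *addr)
{
	return *addr;
}

int main(void)
{
	uint16_t fake_reg = 0x00f0;	/* stand-in for a device register */

	printf("0x%04x\n", toy_readw(&fake_reg));
	return 0;
}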
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index ee0740881af3..d355676f6c16 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -311,7 +311,7 @@ int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
static void ionic_dev_cmd_clean(struct ionic *ionic)
{
- union ionic_dev_cmd_regs *regs = ionic->idev.dev_cmd_regs;
+ union __iomem ionic_dev_cmd_regs *regs = ionic->idev.dev_cmd_regs;
iowrite32(0, &regs->doorbell);
memset_io(&regs->cmd, 0, sizeof(regs->cmd));
@@ -333,7 +333,7 @@ int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds)
*/
max_wait = jiffies + (max_seconds * HZ);
try_again:
- opcode = idev->dev_cmd_regs->cmd.cmd.opcode;
+ opcode = readb(&idev->dev_cmd_regs->cmd.cmd.opcode);
start_time = jiffies;
do {
done = ionic_dev_cmd_done(idev);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.h b/drivers/net/ethernet/pensando/ionic/ionic_stats.h
index 3f543512616e..2a725834f792 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_stats.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.h
@@ -49,7 +49,7 @@ extern const int ionic_num_stats_grps;
(*((u64 *)(((u8 *)(base_ptr)) + (desc_ptr)->offset)))
#define IONIC_READ_STAT_LE64(base_ptr, desc_ptr) \
- __le64_to_cpu(*((u64 *)(((u8 *)(base_ptr)) + (desc_ptr)->offset)))
+ __le64_to_cpu(*((__le64 *)(((u8 *)(base_ptr)) + (desc_ptr)->offset)))
struct ionic_stat_desc {
char name[ETH_GSTRING_LEN];
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 169ac4f54640..b3d2250c77d0 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -200,7 +200,7 @@ static void ionic_rx_clean(struct ionic_queue *q,
if (likely(netdev->features & NETIF_F_RXCSUM)) {
if (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
skb->ip_summed = CHECKSUM_COMPLETE;
- skb->csum = (__wsum)le16_to_cpu(comp->csum);
+ skb->csum = (__force __wsum)le16_to_cpu(comp->csum);
stats->csum_complete++;
}
} else {
@@ -253,19 +253,6 @@ static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
return true;
}
-void ionic_rx_flush(struct ionic_cq *cq)
-{
- struct ionic_dev *idev = &cq->lif->ionic->idev;
- u32 work_done;
-
- work_done = ionic_cq_service(cq, cq->num_descs,
- ionic_rx_service, NULL, NULL);
-
- if (work_done)
- ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
- work_done, IONIC_INTR_CRED_RESET_COALESCE);
-}
-
static int ionic_rx_page_alloc(struct ionic_queue *q,
struct ionic_page_info *page_info)
{
@@ -413,22 +400,20 @@ static void ionic_rx_fill_cb(void *arg)
void ionic_rx_empty(struct ionic_queue *q)
{
struct ionic_desc_info *desc_info;
- struct ionic_rxq_desc *desc;
- unsigned int i;
- u16 idx;
-
- idx = q->tail_idx;
- while (idx != q->head_idx) {
- desc_info = &q->info[idx];
- desc = desc_info->desc;
- desc->addr = 0;
- desc->len = 0;
+ struct ionic_page_info *page_info;
+ unsigned int i, j;
- for (i = 0; i < desc_info->npages; i++)
- ionic_rx_page_free(q, &desc_info->pages[i]);
+ for (i = 0; i < q->num_descs; i++) {
+ desc_info = &q->info[i];
+ for (j = 0; j < IONIC_RX_MAX_SG_ELEMS + 1; j++) {
+ page_info = &desc_info->pages[j];
+ if (page_info->page)
+ ionic_rx_page_free(q, page_info);
+ }
+ desc_info->npages = 0;
+ desc_info->cb = NULL;
desc_info->cb_arg = NULL;
- idx = (idx + 1) & (q->num_descs - 1);
}
}
@@ -812,6 +797,7 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
skb_frag_t *frag;
bool start, done;
bool outer_csum;
+ dma_addr_t addr;
bool has_vlan;
u16 desc_len;
u8 desc_nsge;
@@ -893,11 +879,10 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
if (frag_left > 0) {
len = min(frag_left, left);
frag_left -= len;
- elem->addr =
- cpu_to_le64(ionic_tx_map_frag(q, frag,
- offset, len));
- if (dma_mapping_error(dev, elem->addr))
+ addr = ionic_tx_map_frag(q, frag, offset, len);
+ if (dma_mapping_error(dev, addr))
goto err_out_abort;
+ elem->addr = cpu_to_le64(addr);
elem->len = cpu_to_le16(len);
elem++;
desc_nsge++;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
index a5883be0413f..7667b72232b8 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
@@ -4,7 +4,6 @@
#ifndef _IONIC_TXRX_H_
#define _IONIC_TXRX_H_
-void ionic_rx_flush(struct ionic_cq *cq);
void ionic_tx_flush(struct ionic_cq *cq);
void ionic_rx_fill(struct ionic_queue *q);
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 3b6ddc706e92..85d9c3e30c69 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -4080,9 +4080,17 @@ err_out:
return -EIO;
}
-static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp, struct sk_buff *skb)
+static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp)
{
- return skb->len < ETH_ZLEN && tp->mac_version == RTL_GIGA_MAC_VER_34;
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_34:
+ case RTL_GIGA_MAC_VER_60:
+ case RTL_GIGA_MAC_VER_61:
+ case RTL_GIGA_MAC_VER_63:
+ return true;
+ default:
+ return false;
+ }
}
static void rtl8169_tso_csum_v1(struct sk_buff *skb, u32 *opts)
@@ -4154,8 +4162,9 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
opts[1] |= transport_offset << TCPHO_SHIFT;
} else {
- if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
- return !eth_skb_pad(skb);
+ if (unlikely(skb->len < ETH_ZLEN && rtl_test_hw_pad_bug(tp)))
+ /* eth_skb_pad would free the skb on error */
+ return !__skb_put_padto(skb, ETH_ZLEN, false);
}
return true;
@@ -4334,18 +4343,9 @@ static netdev_features_t rtl8169_features_check(struct sk_buff *skb,
rtl_chip_supports_csum_v2(tp))
features &= ~NETIF_F_ALL_TSO;
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
- if (skb->len < ETH_ZLEN) {
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_11:
- case RTL_GIGA_MAC_VER_12:
- case RTL_GIGA_MAC_VER_17:
- case RTL_GIGA_MAC_VER_34:
- features &= ~NETIF_F_CSUM_MASK;
- break;
- default:
- break;
- }
- }
+ /* work around hw bug on some chip versions */
+ if (skb->len < ETH_ZLEN)
+ features &= ~NETIF_F_CSUM_MASK;
if (transport_offset > TCPHO_MAX &&
rtl_chip_supports_csum_v2(tp))
@@ -4573,7 +4573,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
}
rtl_irq_disable(tp);
- napi_schedule_irqoff(&tp->napi);
+ napi_schedule(&tp->napi);
out:
rtl_ack_events(tp, status);
@@ -4746,7 +4746,7 @@ static int rtl_open(struct net_device *dev)
rtl_request_firmware(tp);
retval = request_irq(pci_irq_vector(pdev, 0), rtl8169_interrupt,
- IRQF_NO_THREAD | IRQF_SHARED, dev->name, tp);
+ IRQF_SHARED, dev->name, tp);
if (retval < 0)
goto err_release_fw_2;
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 9c4df4ede011..bd30505fbc57 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1744,12 +1744,16 @@ static int ravb_hwtstamp_get(struct net_device *ndev, struct ifreq *req)
config.flags = 0;
config.tx_type = priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
HWTSTAMP_TX_OFF;
- if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_V2_L2_EVENT)
+ switch (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE) {
+ case RAVB_RXTSTAMP_TYPE_V2_L2_EVENT:
config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
- else if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_ALL)
+ break;
+ case RAVB_RXTSTAMP_TYPE_ALL:
config.rx_filter = HWTSTAMP_FILTER_ALL;
- else
+ break;
+ default:
config.rx_filter = HWTSTAMP_FILTER_NONE;
+ }
return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index b6e5e3e36b63..81ee0a071b4e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -625,13 +625,6 @@ static int intel_eth_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
- if (plat->eee_usecs_rate > 0) {
- u32 tx_lpi_usec;
-
- tx_lpi_usec = (plat->eee_usecs_rate / 1000000) - 1;
- writel(tx_lpi_usec, res.addr + GMAC_1US_TIC_COUNTER);
- }
-
ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
if (ret < 0)
return ret;
@@ -641,6 +634,13 @@ static int intel_eth_pci_probe(struct pci_dev *pdev,
res.wol_irq = pci_irq_vector(pdev, 0);
res.irq = pci_irq_vector(pdev, 0);
+ if (plat->eee_usecs_rate > 0) {
+ u32 tx_lpi_usec;
+
+ tx_lpi_usec = (plat->eee_usecs_rate / 1000000) - 1;
+ writel(tx_lpi_usec, res.addr + GMAC_1US_TIC_COUNTER);
+ }
+
ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
if (ret) {
pci_free_irq_vectors(pdev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 220626a8d499..d833908b660a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -4757,6 +4757,7 @@ static void stmmac_napi_add(struct net_device *dev)
ch->priv_data = priv;
ch->index = queue;
+ spin_lock_init(&ch->lock);
if (queue < priv->plat->rx_queues_to_use) {
netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx,
diff --git a/drivers/net/ethernet/ti/cpsw_ethtool.c b/drivers/net/ethernet/ti/cpsw_ethtool.c
index 4d02c5135611..4619c3a950b0 100644
--- a/drivers/net/ethernet/ti/cpsw_ethtool.c
+++ b/drivers/net/ethernet/ti/cpsw_ethtool.c
@@ -728,7 +728,6 @@ int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
(1 << HWTSTAMP_TX_ON);
info->rx_filters =
(1 << HWTSTAMP_FILTER_NONE) |
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
return 0;
}
diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
index 51cc29f39038..31c5e36ff706 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.c
+++ b/drivers/net/ethernet/ti/cpsw_priv.c
@@ -639,13 +639,10 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
break;
case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_NTP_ALL:
- return -ERANGE;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
- cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
- break;
+ return -ERANGE;
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 030a1a5afe05..dc668ed280b9 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -657,10 +657,6 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
gtp = netdev_priv(dev);
- err = gtp_encap_enable(gtp, data);
- if (err < 0)
- return err;
-
if (!data[IFLA_GTP_PDP_HASHSIZE]) {
hashsize = 1024;
} else {
@@ -671,12 +667,16 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
err = gtp_hashtable_new(gtp, hashsize);
if (err < 0)
- goto out_encap;
+ return err;
+
+ err = gtp_encap_enable(gtp, data);
+ if (err < 0)
+ goto out_hashtable;
err = register_netdevice(dev);
if (err < 0) {
netdev_dbg(dev, "failed to register new netdev %d\n", err);
- goto out_hashtable;
+ goto out_encap;
}
gn = net_generic(dev_net(dev), gtp_net_id);
@@ -687,11 +687,11 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
return 0;
+out_encap:
+ gtp_encap_disable(gtp);
out_hashtable:
kfree(gtp->addr_hash);
kfree(gtp->tid_hash);
-out_encap:
- gtp_encap_disable(gtp);
return err;
}
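
The reordering above restores the usual kernel unwind discipline: tear down in the reverse order of construction, with each error label undoing only what was already set up. A self-contained sketch of the pattern with toy resources (the failing register step is simulated):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int fake_register(void) { return -EIO; }	/* assume this step fails */

static int setup(void)
{
	void *table, *encap;
	int err;

	table = malloc(16);			/* step 1 */
	if (!table)
		return -ENOMEM;

	encap = malloc(16);			/* step 2 */
	if (!encap) {
		err = -ENOMEM;
		goto out_table;
	}

	err = fake_register();			/* step 3 */
	if (err)
		goto out_encap;
	return 0;				/* success: caller owns both */

out_encap:
	free(encap);				/* undo step 2 first */
out_table:
	free(table);				/* then undo step 1 */
	return err;
}

int main(void)
{
	printf("%d\n", setup());
	return 0;
}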
diff --git a/drivers/net/ipa/gsi_trans.c b/drivers/net/ipa/gsi_trans.c
index 43f5f5d93cb0..92642030e735 100644
--- a/drivers/net/ipa/gsi_trans.c
+++ b/drivers/net/ipa/gsi_trans.c
@@ -397,15 +397,24 @@ void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size,
/* assert(which < trans->tre_count); */
- /* Set the page information for the buffer. We also need to fill in
- * the DMA address and length for the buffer (something dma_map_sg()
- * normally does).
+ /* Commands are quite different from data transfer requests.
+ * Their payloads come from a pool whose memory is allocated
+ * using dma_alloc_coherent(). We therefore do *not* map them
+ * for DMA (unlike what we do for pages and skbs).
+ *
+ * When a transaction completes, the SGL is normally unmapped.
+ * A command transaction has direction DMA_NONE, which tells
+ * gsi_trans_complete() to skip the unmapping step.
+ *
+ * The only things we use directly in a command scatter/gather
+ * entry are the DMA address and length. We still need the SG
+ * table flags to be maintained though, so assign a NULL page
+ * pointer for that purpose.
*/
sg = &trans->sgl[which];
-
- sg_set_buf(sg, buf, size);
+ sg_assign_page(sg, NULL);
sg_dma_address(sg) = addr;
- sg_dma_len(sg) = sg->length;
+ sg_dma_len(sg) = size;
info = &trans->info[which];
info->opcode = opcode;
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index fb1db713b7fb..575580d3ffe0 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -551,6 +551,8 @@ static struct phy_driver realtek_drvs[] = {
{
PHY_ID_MATCH_EXACT(0x00008201),
.name = "RTL8201CP Ethernet",
+ .read_page = rtl821x_read_page,
+ .write_page = rtl821x_write_page,
}, {
PHY_ID_MATCH_EXACT(0x001cc816),
.name = "RTL8201F Fast Ethernet",
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 1d18c10e8f82..34aa196b7465 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -2389,7 +2389,8 @@ static int sfp_probe(struct platform_device *pdev)
continue;
sfp->gpio_irq[i] = gpiod_to_irq(sfp->gpio[i]);
- if (!sfp->gpio_irq[i]) {
+ if (sfp->gpio_irq[i] < 0) {
+ sfp->gpio_irq[i] = 0;
sfp->need_poll = true;
continue;
}
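
The check above matters because gpiod_to_irq() can return a negative errno, which the old !irq test would have treated as a valid IRQ number. A toy version of the corrected check, with toy_to_irq() standing in for the GPIO call:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for a gpiod_to_irq()-style call: positive IRQ number,
 * 0 for "no IRQ", or a negative errno on failure. */
static int toy_to_irq(void) { return -ENXIO; }

int main(void)
{
	bool need_poll = false;
	int irq = toy_to_irq();

	if (irq <= 0) {		/* catches -errno as well as "none" */
		irq = 0;
		need_poll = true;
	}
	printf("irq=%d need_poll=%d\n", irq, need_poll);
	return 0;
}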
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index a322f51873d0..581ed51abb53 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1309,6 +1309,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1230, 2)}, /* Telit LE910Cx */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1260, 2)}, /* Telit LE910Cx */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1261, 2)}, /* Telit LE910Cx */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1900, 1)}, /* Telit LN940 series */
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 60c1aadece89..f2793ffde191 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -608,8 +608,7 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
return ret;
}
-static int vrf_finish_direct(struct net *net, struct sock *sk,
- struct sk_buff *skb)
+static void vrf_finish_direct(struct sk_buff *skb)
{
struct net_device *vrf_dev = skb->dev;
@@ -628,7 +627,8 @@ static int vrf_finish_direct(struct net *net, struct sock *sk,
skb_pull(skb, ETH_HLEN);
}
- return 1;
+ /* reset conntrack state */
+ nf_reset_ct(skb);
}
#if IS_ENABLED(CONFIG_IPV6)
@@ -707,15 +707,41 @@ static struct sk_buff *vrf_ip6_out_redirect(struct net_device *vrf_dev,
return skb;
}
+static int vrf_output6_direct_finish(struct net *net, struct sock *sk,
+ struct sk_buff *skb)
+{
+ vrf_finish_direct(skb);
+
+ return vrf_ip6_local_out(net, sk, skb);
+}
+
static int vrf_output6_direct(struct net *net, struct sock *sk,
struct sk_buff *skb)
{
+ int err = 1;
+
skb->protocol = htons(ETH_P_IPV6);
- return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
- net, sk, skb, NULL, skb->dev,
- vrf_finish_direct,
- !(IPCB(skb)->flags & IPSKB_REROUTED));
+ if (!(IPCB(skb)->flags & IPSKB_REROUTED))
+ err = nf_hook(NFPROTO_IPV6, NF_INET_POST_ROUTING, net, sk, skb,
+ NULL, skb->dev, vrf_output6_direct_finish);
+
+ if (likely(err == 1))
+ vrf_finish_direct(skb);
+
+ return err;
+}
+
+static int vrf_ip6_out_direct_finish(struct net *net, struct sock *sk,
+ struct sk_buff *skb)
+{
+ int err;
+
+ err = vrf_output6_direct(net, sk, skb);
+ if (likely(err == 1))
+ err = vrf_ip6_local_out(net, sk, skb);
+
+ return err;
}
static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev,
@@ -728,18 +754,15 @@ static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev,
skb->dev = vrf_dev;
err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk,
- skb, NULL, vrf_dev, vrf_output6_direct);
+ skb, NULL, vrf_dev, vrf_ip6_out_direct_finish);
if (likely(err == 1))
err = vrf_output6_direct(net, sk, skb);
- /* reset skb device */
if (likely(err == 1))
- nf_reset_ct(skb);
- else
- skb = NULL;
+ return skb;
- return skb;
+ return NULL;
}
static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
@@ -919,15 +942,41 @@ static struct sk_buff *vrf_ip_out_redirect(struct net_device *vrf_dev,
return skb;
}
+static int vrf_output_direct_finish(struct net *net, struct sock *sk,
+ struct sk_buff *skb)
+{
+ vrf_finish_direct(skb);
+
+ return vrf_ip_local_out(net, sk, skb);
+}
+
static int vrf_output_direct(struct net *net, struct sock *sk,
struct sk_buff *skb)
{
+ int err = 1;
+
skb->protocol = htons(ETH_P_IP);
- return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
- net, sk, skb, NULL, skb->dev,
- vrf_finish_direct,
- !(IPCB(skb)->flags & IPSKB_REROUTED));
+ if (!(IPCB(skb)->flags & IPSKB_REROUTED))
+ err = nf_hook(NFPROTO_IPV4, NF_INET_POST_ROUTING, net, sk, skb,
+ NULL, skb->dev, vrf_output_direct_finish);
+
+ if (likely(err == 1))
+ vrf_finish_direct(skb);
+
+ return err;
+}
+
+static int vrf_ip_out_direct_finish(struct net *net, struct sock *sk,
+ struct sk_buff *skb)
+{
+ int err;
+
+ err = vrf_output_direct(net, sk, skb);
+ if (likely(err == 1))
+ err = vrf_ip_local_out(net, sk, skb);
+
+ return err;
}
static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev,
@@ -940,18 +989,15 @@ static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev,
skb->dev = vrf_dev;
err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
- skb, NULL, vrf_dev, vrf_output_direct);
+ skb, NULL, vrf_dev, vrf_ip_out_direct_finish);
if (likely(err == 1))
err = vrf_output_direct(net, sk, skb);
- /* reset skb device */
if (likely(err == 1))
- nf_reset_ct(skb);
- else
- skb = NULL;
+ return skb;
- return skb;
+ return NULL;
}
static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
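
The vrf rework above leans on the netfilter convention that a hook chain returns 1 when the caller should keep going (the continuation was not consumed) and something else when the skb was stolen or dropped. A toy sketch of that calling convention (illustrative names, not the netfilter API):

#include <stdio.h>

static int toy_hook(void) { return 1; }	/* 1: caller continues */

static void finish(void) { printf("finish step ran in the caller\n"); }

static int output_direct(void)
{
	int err = toy_hook();

	if (err == 1)
		finish();	/* only continue on the "pass" verdict */
	return err;
}

int main(void)
{
	return output_direct() == 1 ? 0 : 1;
}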
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index f8aed0696d77..2369ca250cd6 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -889,6 +889,7 @@ static ssize_t cosa_write(struct file *file,
chan->tx_status = 1;
spin_unlock_irqrestore(&cosa->lock, flags);
up(&chan->wsem);
+ kfree(kbuf);
return -ERESTARTSYS;
}
}
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 95ef4943d8bd..9b01afcb7777 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2060,8 +2060,6 @@ static void nvme_update_disk_info(struct gendisk *disk,
if (id->nsattr & NVME_NS_ATTR_RO)
set_disk_ro(disk, true);
- else
- set_disk_ro(disk, false);
}
static inline bool nvme_first_scan(struct gendisk *disk)
@@ -2125,7 +2123,7 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
if (blk_queue_is_zoned(ns->queue)) {
ret = nvme_revalidate_zones(ns);
- if (ret)
+ if (ret && !nvme_first_scan(ns->disk))
return ret;
}
@@ -4582,8 +4580,7 @@ void nvme_start_queues(struct nvme_ctrl *ctrl)
}
EXPORT_SYMBOL_GPL(nvme_start_queues);
-
-void nvme_sync_queues(struct nvme_ctrl *ctrl)
+void nvme_sync_io_queues(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
@@ -4591,7 +4588,12 @@ void nvme_sync_queues(struct nvme_ctrl *ctrl)
list_for_each_entry(ns, &ctrl->namespaces, list)
blk_sync_queue(ns->queue);
up_read(&ctrl->namespaces_rwsem);
+}
+EXPORT_SYMBOL_GPL(nvme_sync_io_queues);
+void nvme_sync_queues(struct nvme_ctrl *ctrl)
+{
+ nvme_sync_io_queues(ctrl);
if (ctrl->admin_q)
blk_sync_queue(ctrl->admin_q);
}
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 3c002bdcace3..f4c246462658 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -146,7 +146,8 @@ struct nvme_fc_rport {
/* fc_ctrl flags values - specified as bit positions */
#define ASSOC_ACTIVE 0
-#define FCCTRL_TERMIO 1
+#define ASSOC_FAILED 1
+#define FCCTRL_TERMIO 2
struct nvme_fc_ctrl {
spinlock_t lock;
@@ -157,7 +158,6 @@ struct nvme_fc_ctrl {
u32 cnum;
bool ioq_live;
- atomic_t err_work_active;
u64 association_id;
struct nvmefc_ls_rcv_op *rcv_disconn;
@@ -167,7 +167,6 @@ struct nvme_fc_ctrl {
struct blk_mq_tag_set tag_set;
struct delayed_work connect_work;
- struct work_struct err_work;
struct kref ref;
unsigned long flags;
@@ -2414,24 +2413,97 @@ nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
nvme_fc_ctrl_put(ctrl);
}
+/*
+ * This routine is used by the transport when it needs to find active
+ * io on a queue that is to be terminated. The transport uses
+ * blk_mq_tagset_busy_iter() to find the busy requests, which then
+ * invokes this routine to kill them one by one.
+ *
+ * As FC allocates an FC exchange for each io, the transport must contact
+ * the LLDD to terminate the exchange, thus releasing the FC exchange.
+ * After terminating the exchange the LLDD will call the transport's
+ * normal io done path for the request, but it will have an aborted
+ * status. The done path will return the io request back to the block
+ * layer with an error status.
+ */
+static bool
+nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
+{
+ struct nvme_ctrl *nctrl = data;
+ struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
+ struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
+
+ __nvme_fc_abort_op(ctrl, op);
+ return true;
+}
+
+/*
+ * This routine runs through all outstanding commands on the association
+ * and aborts them. This routine is typically called by the
+ * delete_association routine. It is also called due to an error during
+ * reconnect. In that scenario, it is most likely a command that initializes
+ * the controller, including fabric Connect commands on io queues, that
+ * may have timed out or failed, so the io must be killed for the connect
+ * thread to see the error.
+ */
static void
-nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
+__nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
{
- int active;
+ /*
+ * If io queues are present, stop them and terminate all outstanding
+ * ios on them. As FC allocates an FC exchange for each io, the
+ * transport must contact the LLDD to terminate the exchange,
+ * thus releasing the FC exchange. We use blk_mq_tagset_busy_iter()
+ * to tell us which ios are busy and invoke a transport routine
+ * to kill them with the LLDD. After terminating the exchange
+ * the LLDD will call the transport's normal io done path, but it
+ * will have an aborted status. The done path will return the
+ * io requests back to the block layer as part of normal completions
+ * (but with error status).
+ */
+ if (ctrl->ctrl.queue_count > 1) {
+ nvme_stop_queues(&ctrl->ctrl);
+ blk_mq_tagset_busy_iter(&ctrl->tag_set,
+ nvme_fc_terminate_exchange, &ctrl->ctrl);
+ blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
+ if (start_queues)
+ nvme_start_queues(&ctrl->ctrl);
+ }
+
+ /*
+ * Other transports, which don't have link-level contexts bound
+ * to sqe's, would try to gracefully shutdown the controller by
+ * writing the registers for shutdown and polling (call
+ * nvme_shutdown_ctrl()). Given a bunch of i/o was potentially
+ * just aborted and we will wait on those contexts, and given
+ * there was no indication of how live the controller is on the
+ * link, don't send more io to create more contexts for the
+ * shutdown. Let the controller fail via keepalive failure if
+ * it's still present.
+ */
/*
- * if an error (io timeout, etc) while (re)connecting,
- * it's an error on creating the new association.
- * Start the error recovery thread if it hasn't already
- * been started. It is expected there could be multiple
- * ios hitting this path before things are cleaned up.
+ * clean up the admin queue. Same thing as above.
+ */
+ blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+ blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
+ nvme_fc_terminate_exchange, &ctrl->ctrl);
+ blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
+}
+
+static void
+nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
+{
+ /*
+ * If an error (io timeout, etc) occurred while (re)connecting, the
+ * remote port requested termination of the association (disconnect_ls),
+ * or an error (timeout or abort) occurred on an io while creating
+ * the controller, abort any ios on the association and let the
+ * create_association error path resolve things.
*/
if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
- active = atomic_xchg(&ctrl->err_work_active, 1);
- if (!active && !queue_work(nvme_fc_wq, &ctrl->err_work)) {
- atomic_set(&ctrl->err_work_active, 0);
- WARN_ON(1);
- }
+ __nvme_fc_abort_outstanding_ios(ctrl, true);
+ set_bit(ASSOC_FAILED, &ctrl->flags);
return;
}
@@ -2745,30 +2817,6 @@ nvme_fc_complete_rq(struct request *rq)
nvme_fc_ctrl_put(ctrl);
}
-/*
- * This routine is used by the transport when it needs to find active
- * io on a queue that is to be terminated. The transport uses
- * blk_mq_tagset_busy_itr() to find the busy requests, which then invoke
- * this routine to kill them on a 1 by 1 basis.
- *
- * As FC allocates FC exchange for each io, the transport must contact
- * the LLDD to terminate the exchange, thus releasing the FC exchange.
- * After terminating the exchange the LLDD will call the transport's
- * normal io done path for the request, but it will have an aborted
- * status. The done path will return the io request back to the block
- * layer with an error status.
- */
-static bool
-nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
-{
- struct nvme_ctrl *nctrl = data;
- struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
- struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
-
- __nvme_fc_abort_op(ctrl, op);
- return true;
-}
-
static const struct blk_mq_ops nvme_fc_mq_ops = {
.queue_rq = nvme_fc_queue_rq,
@@ -2988,6 +3036,8 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
ctrl->cnum, ctrl->lport->localport.port_name,
ctrl->rport->remoteport.port_name, ctrl->ctrl.opts->subsysnqn);
+ clear_bit(ASSOC_FAILED, &ctrl->flags);
+
/*
* Create the admin queue
*/
@@ -3016,7 +3066,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
*/
ret = nvme_enable_ctrl(&ctrl->ctrl);
- if (ret)
+ if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
goto out_disconnect_admin_queue;
ctrl->ctrl.max_segments = ctrl->lport->ops->max_sgl_segments;
@@ -3026,7 +3076,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
ret = nvme_init_identify(&ctrl->ctrl);
- if (ret)
+ if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
goto out_disconnect_admin_queue;
/* sanity checks */
@@ -3071,9 +3121,9 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
ret = nvme_fc_create_io_queues(ctrl);
else
ret = nvme_fc_recreate_io_queues(ctrl);
- if (ret)
- goto out_term_aen_ops;
}
+ if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
+ goto out_term_aen_ops;
changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
@@ -3108,60 +3158,6 @@ out_free_queue:
/*
- * This routine runs through all outstanding commands on the association
- * and aborts them. This routine is typically be called by the
- * delete_association routine. It is also called due to an error during
- * reconnect. In that scenario, it is most likely a command that initializes
- * the controller, including fabric Connect commands on io queues, that
- * may have timed out or failed thus the io must be killed for the connect
- * thread to see the error.
- */
-static void
-__nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
-{
- /*
- * If io queues are present, stop them and terminate all outstanding
- * ios on them. As FC allocates FC exchange for each io, the
- * transport must contact the LLDD to terminate the exchange,
- * thus releasing the FC exchange. We use blk_mq_tagset_busy_itr()
- * to tell us what io's are busy and invoke a transport routine
- * to kill them with the LLDD. After terminating the exchange
- * the LLDD will call the transport's normal io done path, but it
- * will have an aborted status. The done path will return the
- * io requests back to the block layer as part of normal completions
- * (but with error status).
- */
- if (ctrl->ctrl.queue_count > 1) {
- nvme_stop_queues(&ctrl->ctrl);
- blk_mq_tagset_busy_iter(&ctrl->tag_set,
- nvme_fc_terminate_exchange, &ctrl->ctrl);
- blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
- if (start_queues)
- nvme_start_queues(&ctrl->ctrl);
- }
-
- /*
- * Other transports, which don't have link-level contexts bound
- * to sqe's, would try to gracefully shutdown the controller by
- * writing the registers for shutdown and polling (call
- * nvme_shutdown_ctrl()). Given a bunch of i/o was potentially
- * just aborted and we will wait on those contexts, and given
- * there was no indication of how live the controlelr is on the
- * link, don't send more io to create more contexts for the
- * shutdown. Let the controller fail via keepalive failure if
- * its still present.
- */
-
- /*
- * clean up the admin queue. Same thing as above.
- */
- blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
- blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
- nvme_fc_terminate_exchange, &ctrl->ctrl);
- blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
-}
-
-/*
* This routine stops operation of the controller on the host side.
* On the host os stack side: Admin and IO queues are stopped,
* outstanding ios on them terminated via FC ABTS.
@@ -3237,7 +3233,6 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
{
struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
- cancel_work_sync(&ctrl->err_work);
cancel_delayed_work_sync(&ctrl->connect_work);
/*
* kill the association on the link side. this will block
@@ -3292,78 +3287,34 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
}
static void
-__nvme_fc_terminate_io(struct nvme_fc_ctrl *ctrl)
+nvme_fc_reset_ctrl_work(struct work_struct *work)
{
- /*
- * if state is CONNECTING - the error occurred as part of a
- * reconnect attempt. Abort any ios on the association and
- * let the create_association error paths resolve things.
- */
- if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
- __nvme_fc_abort_outstanding_ios(ctrl, true);
- return;
- }
-
- /*
- * For any other state, kill the association. As this routine
- * is a common io abort routine for resetting and such, after
- * the association is terminated, ensure that the state is set
- * to CONNECTING.
- */
+ struct nvme_fc_ctrl *ctrl =
+ container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
- nvme_stop_keep_alive(&ctrl->ctrl);
+ nvme_stop_ctrl(&ctrl->ctrl);
/* will block while waiting for io to terminate */
nvme_fc_delete_association(ctrl);
- if (ctrl->ctrl.state != NVME_CTRL_CONNECTING &&
- !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
dev_err(ctrl->ctrl.device,
"NVME-FC{%d}: error_recovery: Couldn't change state "
"to CONNECTING\n", ctrl->cnum);
-}
-
-static void
-nvme_fc_reset_ctrl_work(struct work_struct *work)
-{
- struct nvme_fc_ctrl *ctrl =
- container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
- int ret;
- __nvme_fc_terminate_io(ctrl);
-
- nvme_stop_ctrl(&ctrl->ctrl);
-
- if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE)
- ret = nvme_fc_create_association(ctrl);
- else
- ret = -ENOTCONN;
-
- if (ret)
- nvme_fc_reconnect_or_delete(ctrl, ret);
- else
- dev_info(ctrl->ctrl.device,
- "NVME-FC{%d}: controller reset complete\n",
- ctrl->cnum);
+ if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE) {
+ if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) {
+ dev_err(ctrl->ctrl.device,
+ "NVME-FC{%d}: failed to schedule connect "
+ "after reset\n", ctrl->cnum);
+ } else {
+ flush_delayed_work(&ctrl->connect_work);
+ }
+ } else {
+ nvme_fc_reconnect_or_delete(ctrl, -ENOTCONN);
+ }
}
-static void
-nvme_fc_connect_err_work(struct work_struct *work)
-{
- struct nvme_fc_ctrl *ctrl =
- container_of(work, struct nvme_fc_ctrl, err_work);
-
- __nvme_fc_terminate_io(ctrl);
-
- atomic_set(&ctrl->err_work_active, 0);
-
- /*
- * Rescheduling the connection after recovering
- * from the io error is left to the reconnect work
- * item, which is what should have stalled waiting on
- * the io that had the error that scheduled this work.
- */
-}
static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
.name = "fc",
@@ -3491,7 +3442,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
ctrl->dev = lport->dev;
ctrl->cnum = idx;
ctrl->ioq_live = false;
- atomic_set(&ctrl->err_work_active, 0);
init_waitqueue_head(&ctrl->ioabort_wait);
get_device(ctrl->dev);
@@ -3499,7 +3449,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
- INIT_WORK(&ctrl->err_work, nvme_fc_connect_err_work);
spin_lock_init(&ctrl->lock);
/* io queue count */
@@ -3592,7 +3541,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
fail_ctrl:
nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
cancel_work_sync(&ctrl->ctrl.reset_work);
- cancel_work_sync(&ctrl->err_work);
cancel_delayed_work_sync(&ctrl->connect_work);
ctrl->ctrl.opts = NULL;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index cc111136a981..bc330bf0d3bd 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -602,6 +602,7 @@ void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);
void nvme_kill_queues(struct nvme_ctrl *ctrl);
void nvme_sync_queues(struct nvme_ctrl *ctrl);
+void nvme_sync_io_queues(struct nvme_ctrl *ctrl);
void nvme_unfreeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze(struct nvme_ctrl *ctrl);
int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index df8f3612107f..0578ff253c47 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -198,6 +198,7 @@ struct nvme_queue {
u32 q_depth;
u16 cq_vector;
u16 sq_tail;
+ u16 last_sq_tail;
u16 cq_head;
u16 qid;
u8 cq_phase;
@@ -455,11 +456,24 @@ static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
return 0;
}
-static inline void nvme_write_sq_db(struct nvme_queue *nvmeq)
+/*
+ * Write sq tail if we are asked to, or if the next command would wrap.
+ */
+static inline void nvme_write_sq_db(struct nvme_queue *nvmeq, bool write_sq)
{
+ if (!write_sq) {
+ u16 next_tail = nvmeq->sq_tail + 1;
+
+ if (next_tail == nvmeq->q_depth)
+ next_tail = 0;
+ if (next_tail != nvmeq->last_sq_tail)
+ return;
+ }
+
if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail,
nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei))
writel(nvmeq->sq_tail, nvmeq->q_db);
+ nvmeq->last_sq_tail = nvmeq->sq_tail;
}
/**
@@ -476,8 +490,7 @@ static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
cmd, sizeof(*cmd));
if (++nvmeq->sq_tail == nvmeq->q_depth)
nvmeq->sq_tail = 0;
- if (write_sq)
- nvme_write_sq_db(nvmeq);
+ nvme_write_sq_db(nvmeq, write_sq);
spin_unlock(&nvmeq->sq_lock);
}
@@ -486,7 +499,8 @@ static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx)
struct nvme_queue *nvmeq = hctx->driver_data;
spin_lock(&nvmeq->sq_lock);
- nvme_write_sq_db(nvmeq);
+ if (nvmeq->sq_tail != nvmeq->last_sq_tail)
+ nvme_write_sq_db(nvmeq, true);
spin_unlock(&nvmeq->sq_lock);
}
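
The last_sq_tail bookkeeping above lets the driver batch several submissions behind a single doorbell write, ringing early only when the queue is about to wrap onto entries the hardware has not yet seen. A standalone toy of the same batching rule (toy structures, not the driver's):

#include <stdbool.h>
#include <stdio.h>

#define QDEPTH 4

struct toyq {
	unsigned int tail;
	unsigned int last_tail;	/* last value the "hardware" saw */
};

static void ring_doorbell(struct toyq *q)
{
	printf("doorbell tail=%u\n", q->tail);	/* stands in for writel() */
	q->last_tail = q->tail;
}

static void submit(struct toyq *q, bool write_sq)
{
	q->tail = (q->tail + 1) % QDEPTH;
	if (!write_sq) {
		unsigned int next = (q->tail + 1) % QDEPTH;

		if (next != q->last_tail)
			return;		/* defer the MMIO write */
	}
	ring_doorbell(q);
}

int main(void)
{
	struct toyq q = { 0, 0 };

	submit(&q, false);	/* batched */
	submit(&q, false);	/* batched */
	submit(&q, true);	/* end of batch: ring once for all three */
	return 0;
}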
@@ -1496,6 +1510,7 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
struct nvme_dev *dev = nvmeq->dev;
nvmeq->sq_tail = 0;
+ nvmeq->last_sq_tail = 0;
nvmeq->cq_head = 0;
nvmeq->cq_phase = 1;
nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index aad829a2b50d..65e3d0ef36e1 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -122,7 +122,6 @@ struct nvme_rdma_ctrl {
struct sockaddr_storage src_addr;
struct nvme_ctrl ctrl;
- struct mutex teardown_lock;
bool use_inline_data;
u32 io_queues[HCTX_MAX_TYPES];
};
@@ -1010,8 +1009,8 @@ out_free_io_queues:
static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
bool remove)
{
- mutex_lock(&ctrl->teardown_lock);
blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+ blk_sync_queue(ctrl->ctrl.admin_q);
nvme_rdma_stop_queue(&ctrl->queues[0]);
if (ctrl->ctrl.admin_tagset) {
blk_mq_tagset_busy_iter(ctrl->ctrl.admin_tagset,
@@ -1021,16 +1020,15 @@ static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
if (remove)
blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
nvme_rdma_destroy_admin_queue(ctrl, remove);
- mutex_unlock(&ctrl->teardown_lock);
}
static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
bool remove)
{
- mutex_lock(&ctrl->teardown_lock);
if (ctrl->ctrl.queue_count > 1) {
nvme_start_freeze(&ctrl->ctrl);
nvme_stop_queues(&ctrl->ctrl);
+ nvme_sync_io_queues(&ctrl->ctrl);
nvme_rdma_stop_io_queues(ctrl);
if (ctrl->ctrl.tagset) {
blk_mq_tagset_busy_iter(ctrl->ctrl.tagset,
@@ -1041,7 +1039,6 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
nvme_start_queues(&ctrl->ctrl);
nvme_rdma_destroy_io_queues(ctrl, remove);
}
- mutex_unlock(&ctrl->teardown_lock);
}
static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
@@ -1768,6 +1765,14 @@ static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
return;
}
+ /* sanity checking for received data length */
+ if (unlikely(wc->byte_len < len)) {
+ dev_err(queue->ctrl->ctrl.device,
+ "Unexpected nvme completion length(%d)\n", wc->byte_len);
+ nvme_rdma_error_recovery(queue->ctrl);
+ return;
+ }
+
ib_dma_sync_single_for_cpu(ibdev, qe->dma, len, DMA_FROM_DEVICE);
/*
* AEN requests are special as they don't time out and can
@@ -1890,10 +1895,10 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize);
}
- ret = rdma_connect(queue->cm_id, &param);
+ ret = rdma_connect_locked(queue->cm_id, &param);
if (ret) {
dev_err(ctrl->ctrl.device,
- "rdma_connect failed (%d).\n", ret);
+ "rdma_connect_locked failed (%d).\n", ret);
goto out_destroy_queue_ib;
}
@@ -1968,16 +1973,12 @@ static void nvme_rdma_complete_timed_out(struct request *rq)
{
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
struct nvme_rdma_queue *queue = req->queue;
- struct nvme_rdma_ctrl *ctrl = queue->ctrl;
- /* fence other contexts that may complete the command */
- mutex_lock(&ctrl->teardown_lock);
nvme_rdma_stop_queue(queue);
- if (!blk_mq_request_completed(rq)) {
+ if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) {
nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
blk_mq_complete_request(rq);
}
- mutex_unlock(&ctrl->teardown_lock);
}
static enum blk_eh_timer_return
@@ -2312,7 +2313,6 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
return ERR_PTR(-ENOMEM);
ctrl->ctrl.opts = opts;
INIT_LIST_HEAD(&ctrl->list);
- mutex_init(&ctrl->teardown_lock);
if (!(opts->mask & NVMF_OPT_TRSVCID)) {
opts->trsvcid =
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index d6a3e1487354..c0c33320fe65 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -124,7 +124,6 @@ struct nvme_tcp_ctrl {
struct sockaddr_storage src_addr;
struct nvme_ctrl ctrl;
- struct mutex teardown_lock;
struct work_struct err_work;
struct delayed_work connect_work;
struct nvme_tcp_request async_req;
@@ -1886,8 +1885,8 @@ out_free_queue:
static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
bool remove)
{
- mutex_lock(&to_tcp_ctrl(ctrl)->teardown_lock);
blk_mq_quiesce_queue(ctrl->admin_q);
+ blk_sync_queue(ctrl->admin_q);
nvme_tcp_stop_queue(ctrl, 0);
if (ctrl->admin_tagset) {
blk_mq_tagset_busy_iter(ctrl->admin_tagset,
@@ -1897,18 +1896,17 @@ static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
if (remove)
blk_mq_unquiesce_queue(ctrl->admin_q);
nvme_tcp_destroy_admin_queue(ctrl, remove);
- mutex_unlock(&to_tcp_ctrl(ctrl)->teardown_lock);
}
static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
bool remove)
{
- mutex_lock(&to_tcp_ctrl(ctrl)->teardown_lock);
if (ctrl->queue_count <= 1)
- goto out;
+ return;
blk_mq_quiesce_queue(ctrl->admin_q);
nvme_start_freeze(ctrl);
nvme_stop_queues(ctrl);
+ nvme_sync_io_queues(ctrl);
nvme_tcp_stop_io_queues(ctrl);
if (ctrl->tagset) {
blk_mq_tagset_busy_iter(ctrl->tagset,
@@ -1918,8 +1916,6 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
if (remove)
nvme_start_queues(ctrl);
nvme_tcp_destroy_io_queues(ctrl, remove);
-out:
- mutex_unlock(&to_tcp_ctrl(ctrl)->teardown_lock);
}
static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
@@ -2171,14 +2167,11 @@ static void nvme_tcp_complete_timed_out(struct request *rq)
struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
- /* fence other contexts that may complete the command */
- mutex_lock(&to_tcp_ctrl(ctrl)->teardown_lock);
nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue));
- if (!blk_mq_request_completed(rq)) {
+ if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) {
nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
blk_mq_complete_request(rq);
}
- mutex_unlock(&to_tcp_ctrl(ctrl)->teardown_lock);
}
static enum blk_eh_timer_return
@@ -2455,7 +2448,6 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
nvme_tcp_reconnect_ctrl_work);
INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);
- mutex_init(&ctrl->teardown_lock);
if (!(opts->mask & NVMF_OPT_TRSVCID)) {
opts->trsvcid =
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index aafcbc424b7a..957b39a82431 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -907,8 +907,6 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
req->error_loc = NVMET_NO_ERROR_LOC;
req->error_slba = 0;
- trace_nvmet_req_init(req, req->cmd);
-
/* no support for fused commands yet */
if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
req->error_loc = offsetof(struct nvme_common_command, flags);
@@ -938,6 +936,8 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
if (status)
goto fail;
+ trace_nvmet_req_init(req, req->cmd);
+
if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
goto fail;
diff --git a/drivers/nvme/target/trace.h b/drivers/nvme/target/trace.h
index 0458046d6501..c14e3249a14d 100644
--- a/drivers/nvme/target/trace.h
+++ b/drivers/nvme/target/trace.h
@@ -46,19 +46,12 @@ static inline struct nvmet_ctrl *nvmet_req_to_ctrl(struct nvmet_req *req)
return req->sq->ctrl;
}
-static inline void __assign_disk_name(char *name, struct nvmet_req *req,
- bool init)
+static inline void __assign_req_name(char *name, struct nvmet_req *req)
{
- struct nvmet_ctrl *ctrl = nvmet_req_to_ctrl(req);
- struct nvmet_ns *ns;
-
- if ((init && req->sq->qid) || (!init && req->cq->qid)) {
- ns = nvmet_find_namespace(ctrl, req->cmd->rw.nsid);
- strncpy(name, ns->device_path, DISK_NAME_LEN);
- return;
- }
-
- memset(name, 0, DISK_NAME_LEN);
+ if (req->ns)
+ strncpy(name, req->ns->device_path, DISK_NAME_LEN);
+ else
+ memset(name, 0, DISK_NAME_LEN);
}
#endif
@@ -81,7 +74,7 @@ TRACE_EVENT(nvmet_req_init,
TP_fast_assign(
__entry->cmd = cmd;
__entry->ctrl = nvmet_req_to_ctrl(req);
- __assign_disk_name(__entry->disk, req, true);
+ __assign_req_name(__entry->disk, req);
__entry->qid = req->sq->qid;
__entry->cid = cmd->common.command_id;
__entry->opcode = cmd->common.opcode;
@@ -121,7 +114,7 @@ TRACE_EVENT(nvmet_req_complete,
__entry->cid = req->cqe->command_id;
__entry->result = le64_to_cpu(req->cqe->result.u64);
__entry->status = le16_to_cpu(req->cqe->status) >> 1;
- __assign_disk_name(__entry->disk, req, false);
+ __assign_req_name(__entry->disk, req);
),
TP_printk("nvmet%s: %sqid=%d, cmdid=%u, res=%#llx, status=%#x",
__print_ctrl_name(__entry->ctrl),
diff --git a/drivers/of/address.c b/drivers/of/address.c
index eb9ab4f1e80b..1c3257a2d4e3 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -1034,11 +1034,13 @@ out:
*/
bool of_dma_is_coherent(struct device_node *np)
{
- struct device_node *node = of_node_get(np);
+ struct device_node *node;
if (IS_ENABLED(CONFIG_OF_DMA_DEFAULT_COHERENT))
return true;
+ node = of_node_get(np);
+
while (node) {
if (of_property_read_bool(node, "dma-coherent")) {
of_node_put(node);
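
Moving of_node_get() below the early return above fixes a reference leak: the reference is now taken only on the path that also puts it. A toy counter makes the get/put balance visible:

#include <stdbool.h>
#include <stdio.h>

static int refs;	/* toy reference counter */

static void toy_node_get(void) { refs++; }
static void toy_node_put(void) { refs--; }

static bool toy_is_coherent(bool default_coherent)
{
	if (default_coherent)
		return true;	/* no reference taken, none to leak */

	toy_node_get();		/* take the reference only on this path */
	toy_node_put();		/* ...where it is also released */
	return false;
}

int main(void)
{
	toy_is_coherent(true);
	printf("refs=%d\n", refs);	/* 0: balanced on every path */
	return 0;
}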
diff --git a/drivers/of/device.c b/drivers/of/device.c
index 655dee422563..aedfaaafd3e7 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -93,7 +93,7 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
{
const struct iommu_ops *iommu;
const struct bus_dma_region *map = NULL;
- dma_addr_t dma_start = 0;
+ u64 dma_start = 0;
u64 mask, end, size = 0;
bool coherent;
int ret;
@@ -109,10 +109,10 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
return ret == -ENODEV ? 0 : ret;
} else {
const struct bus_dma_region *r = map;
- dma_addr_t dma_end = 0;
+ u64 dma_end = 0;
/* Determine the overall bounds of all DMA regions */
- for (dma_start = ~(dma_addr_t)0; r->size; r++) {
+ for (dma_start = ~0; r->size; r++) {
/* Take lower and upper limits */
if (r->dma_start < dma_start)
dma_start = r->dma_start;
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index bcd154485972..a7fbc5e37e19 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -200,6 +200,16 @@ static int __init __rmem_cmp(const void *a, const void *b)
if (ra->base > rb->base)
return 1;
+ /*
+ * Put the dynamic allocations (address == 0, size == 0) before static
+ * allocations at address 0x0 so that overlap detection works
+ * correctly.
+ */
+ if (ra->size < rb->size)
+ return -1;
+ if (ra->size > rb->size)
+ return 1;
+
return 0;
}
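
The added size comparison makes the sort a two-key ordering, so zero-sized dynamic placeholders land before a static region that also starts at address 0. A standalone sketch of the comparator idea (toy types, not the kernel's):

#include <stdio.h>
#include <stdlib.h>

struct toy_rmem {
	unsigned long base;
	unsigned long size;
};

/* Two-key comparison: base first, then size, so a zero-sized dynamic
 * placeholder sorts ahead of a static region at the same base. */
static int toy_rmem_cmp(const void *a, const void *b)
{
	const struct toy_rmem *ra = a, *rb = b;

	if (ra->base != rb->base)
		return ra->base < rb->base ? -1 : 1;
	if (ra->size != rb->size)
		return ra->size < rb->size ? -1 : 1;
	return 0;
}

int main(void)
{
	struct toy_rmem r[2] = { { 0, 0x1000 }, { 0, 0 } };

	qsort(r, 2, sizeof(r[0]), toy_rmem_cmp);
	printf("first entry size=%lu\n", r[0].size);	/* 0: dynamic first */
	return 0;
}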
@@ -217,8 +227,7 @@ static void __init __rmem_check_for_overlap(void)
this = &reserved_mem[i];
next = &reserved_mem[i + 1];
- if (!(this->base && next->base))
- continue;
+
if (this->base + this->size > next->base) {
phys_addr_t this_end, next_end;
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index 2483e765318a..0e0a5269dc82 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -1181,6 +1181,10 @@ static void _opp_table_kref_release(struct kref *kref)
struct opp_device *opp_dev, *temp;
int i;
+ /* Drop the lock as soon as we can */
+ list_del(&opp_table->node);
+ mutex_unlock(&opp_table_lock);
+
_of_clear_opp_table(opp_table);
/* Release clk */
@@ -1208,10 +1212,7 @@ static void _opp_table_kref_release(struct kref *kref)
mutex_destroy(&opp_table->genpd_virt_dev_lock);
mutex_destroy(&opp_table->lock);
- list_del(&opp_table->node);
kfree(opp_table);
-
- mutex_unlock(&opp_table_lock);
}
void dev_pm_opp_put_opp_table(struct opp_table *opp_table)
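
The reordering above unlinks the table and drops the global opp_table_lock before the slow teardown, so other lookups never block behind the heavy cleanup. A toy pthread sketch of that release shape (toy types, not the OPP structures):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

struct toy_table {
	int on_list;	/* pretend list membership */
};

static void toy_release(struct toy_table *t)
{
	/* Unlink while holding the global lock so nobody can find the
	 * table anymore, then drop the lock before the slow teardown. */
	pthread_mutex_lock(&table_lock);
	t->on_list = 0;
	pthread_mutex_unlock(&table_lock);

	free(t);	/* heavy cleanup runs without the global lock */
}

int main(void)
{
	struct toy_table *t = calloc(1, sizeof(*t));

	if (t)
		toy_release(t);
	puts("released");
	return 0;
}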
@@ -1930,7 +1931,7 @@ struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev,
return ERR_PTR(-EINVAL);
opp_table = dev_pm_opp_get_opp_table(dev);
- if (!IS_ERR(opp_table))
+ if (IS_ERR(opp_table))
return opp_table;
/* This should be called before OPPs are initialized */
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
index 874b58756220..9faeb83e4b32 100644
--- a/drivers/opp/of.c
+++ b/drivers/opp/of.c
@@ -944,6 +944,8 @@ static int _of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table)
nr -= 2;
}
+ return 0;
+
remove_static_opp:
_opp_remove_all_static(opp_table);
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 674f32db85ca..44c2a6572199 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -586,8 +586,12 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
* ATU, so we should not program the ATU here.
*/
if (pp->bridge->child_ops == &dw_child_pcie_ops) {
- struct resource_entry *entry =
- resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM);
+ struct resource_entry *tmp, *entry = NULL;
+
+ /* Get last memory resource entry */
+ resource_list_for_each_entry(tmp, &pp->bridge->windows)
+ if (resource_type(tmp->res) == IORESOURCE_MEM)
+ entry = tmp;
dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0,
PCIE_ATU_TYPE_MEM, entry->res->start,
diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
index eee82838f4ba..ed13e81cd691 100644
--- a/drivers/pci/controller/pci-mvebu.c
+++ b/drivers/pci/controller/pci-mvebu.c
@@ -958,25 +958,16 @@ static void mvebu_pcie_powerdown(struct mvebu_pcie_port *port)
}
/*
- * We can't use devm_of_pci_get_host_bridge_resources() because we
- * need to parse our special DT properties encoding the MEM and IO
- * apertures.
+ * devm_of_pci_get_host_bridge_resources() only sets up translatable resources,
+ * so we need extra resource setup parsing our special DT properties encoding
+ * the MEM and IO apertures.
*/
static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
- struct device_node *np = dev->of_node;
struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
int ret;
- /* Get the bus range */
- ret = of_pci_parse_bus_range(np, &pcie->busn);
- if (ret) {
- dev_err(dev, "failed to parse bus-range property: %d\n", ret);
- return ret;
- }
- pci_add_resource(&bridge->windows, &pcie->busn);
-
/* Get the PCIe memory aperture */
mvebu_mbus_get_pcie_mem_aperture(&pcie->mem);
if (resource_size(&pcie->mem) == 0) {
@@ -986,6 +977,9 @@ static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie)
pcie->mem.name = "PCI MEM";
pci_add_resource(&bridge->windows, &pcie->mem);
+ ret = devm_request_resource(dev, &iomem_resource, &pcie->mem);
+ if (ret)
+ return ret;
/* Get the PCIe IO aperture */
mvebu_mbus_get_pcie_io_aperture(&pcie->io);
@@ -999,9 +993,12 @@ static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie)
pcie->realio.name = "PCI I/O";
pci_add_resource(&bridge->windows, &pcie->realio);
+ ret = devm_request_resource(dev, &ioport_resource, &pcie->realio);
+ if (ret)
+ return ret;
}
- return devm_request_pci_bus_resources(dev, &bridge->windows);
+ return 0;
}
/*
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 6d4d5a2f923d..e578d34095e9 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -3516,8 +3516,13 @@ void pci_acs_init(struct pci_dev *dev)
{
dev->acs_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
- if (dev->acs_cap)
- pci_enable_acs(dev);
+ /*
+ * Attempt to enable ACS regardless of capability because some Root
+ * Ports (e.g. those quirked with *_intel_pch_acs_*) do not have
+ * the standard ACS capability but still support ACS via those
+ * quirks.
+ */
+ pci_enable_acs(dev);
}
/**
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
index 6a94eaecf638..d6b849552a1e 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
@@ -286,13 +286,14 @@ int aspeed_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function,
static bool aspeed_expr_is_gpio(const struct aspeed_sig_expr *expr)
{
/*
- * The signal type is GPIO if the signal name has "GPIO" as a prefix.
+ * The signal type is GPIO if the signal name has "GPI" as a prefix.
* strncmp (rather than strcmp) is used to implement the prefix
* requirement.
*
- * expr->signal might look like "GPIOT3" in the GPIO case.
+ * expr->signal might look like "GPIOB1" in the GPIO case.
+ * expr->signal might look like "GPIT0" in the GPI case.
*/
- return strncmp(expr->signal, "GPIO", 4) == 0;
+ return strncmp(expr->signal, "GPI", 3) == 0;
}
static bool aspeed_gpio_in_exprs(const struct aspeed_sig_expr **exprs)
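Editor's note: the hunk above widens a prefix test, which is easy to get wrong by matching on the longer name. A minimal standalone C sketch of the check, with signal names taken from the new comment; the `main()` harness and the "SD1CLK" name are illustrative only:

```c
#include <assert.h>
#include <stdbool.h>
#include <string.h>

/* A signal is GPIO-capable if its name starts with "GPI": this covers
 * both bidirectional "GPIOxy" pins and input-only "GPITn" pins, which
 * the old four-character "GPIO" comparison missed. */
static bool sig_is_gpio(const char *signal)
{
	return strncmp(signal, "GPI", 3) == 0;
}

int main(void)
{
	assert(sig_is_gpio("GPIOB1"));	/* bidirectional GPIO */
	assert(sig_is_gpio("GPIT0"));	/* input-only GPI */
	assert(!sig_is_gpio("SD1CLK"));	/* unrelated signal */
	return 0;
}
```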
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 154ce3f908cd..1c10ab184783 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -62,10 +62,10 @@
#define PADCFG1_TERM_UP BIT(13)
#define PADCFG1_TERM_SHIFT 10
#define PADCFG1_TERM_MASK GENMASK(12, 10)
-#define PADCFG1_TERM_20K 4
-#define PADCFG1_TERM_2K 3
-#define PADCFG1_TERM_5K 2
-#define PADCFG1_TERM_1K 1
+#define PADCFG1_TERM_20K BIT(2)
+#define PADCFG1_TERM_5K BIT(1)
+#define PADCFG1_TERM_1K BIT(0)
+#define PADCFG1_TERM_833 (BIT(1) | BIT(0))
#define PADCFG2 0x008
#define PADCFG2_DEBEN BIT(0)
@@ -549,12 +549,12 @@ static int intel_config_get_pull(struct intel_pinctrl *pctrl, unsigned int pin,
return -EINVAL;
switch (term) {
+ case PADCFG1_TERM_833:
+ *arg = 833;
+ break;
case PADCFG1_TERM_1K:
*arg = 1000;
break;
- case PADCFG1_TERM_2K:
- *arg = 2000;
- break;
case PADCFG1_TERM_5K:
*arg = 5000;
break;
@@ -570,6 +570,11 @@ static int intel_config_get_pull(struct intel_pinctrl *pctrl, unsigned int pin,
return -EINVAL;
switch (term) {
+ case PADCFG1_TERM_833:
+ if (!(community->features & PINCTRL_FEATURE_1K_PD))
+ return -EINVAL;
+ *arg = 833;
+ break;
case PADCFG1_TERM_1K:
if (!(community->features & PINCTRL_FEATURE_1K_PD))
return -EINVAL;
@@ -678,6 +683,10 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned int pin,
value |= PADCFG1_TERM_UP;
+ /* Set default strength value in case none is given */
+ if (arg == 1)
+ arg = 5000;
+
switch (arg) {
case 20000:
value |= PADCFG1_TERM_20K << PADCFG1_TERM_SHIFT;
@@ -685,12 +694,12 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned int pin,
case 5000:
value |= PADCFG1_TERM_5K << PADCFG1_TERM_SHIFT;
break;
- case 2000:
- value |= PADCFG1_TERM_2K << PADCFG1_TERM_SHIFT;
- break;
case 1000:
value |= PADCFG1_TERM_1K << PADCFG1_TERM_SHIFT;
break;
+ case 833:
+ value |= PADCFG1_TERM_833 << PADCFG1_TERM_SHIFT;
+ break;
default:
ret = -EINVAL;
}
@@ -700,6 +709,10 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned int pin,
case PIN_CONFIG_BIAS_PULL_DOWN:
value &= ~(PADCFG1_TERM_UP | PADCFG1_TERM_MASK);
+ /* Set default strength value in case none is given */
+ if (arg == 1)
+ arg = 5000;
+
switch (arg) {
case 20000:
value |= PADCFG1_TERM_20K << PADCFG1_TERM_SHIFT;
@@ -714,6 +727,13 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned int pin,
}
value |= PADCFG1_TERM_1K << PADCFG1_TERM_SHIFT;
break;
+ case 833:
+ if (!(community->features & PINCTRL_FEATURE_1K_PD)) {
+ ret = -EINVAL;
+ break;
+ }
+ value |= PADCFG1_TERM_833 << PADCFG1_TERM_SHIFT;
+ break;
default:
ret = -EINVAL;
}
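Editor's note: the new `PADCFG1_TERM_*` values treat the 3-bit TERM field as a bitmask rather than a plain enumeration, which is consistent with 833 Ohm being the 1 kOhm and 5 kOhm terminations in parallel (1/1000 + 1/5000 = 1/833). A standalone sketch of the ohms-to-field mapping, including the pinconf convention that an argument of 1 means "enable with default strength"; the defines are copied from the hunk and the `main()` harness is illustrative:

```c
#include <stdint.h>
#include <stdio.h>

#define BIT(n)			(1u << (n))
#define PADCFG1_TERM_SHIFT	10

/* Field values from the hunk above. */
#define PADCFG1_TERM_20K	BIT(2)
#define PADCFG1_TERM_5K		BIT(1)
#define PADCFG1_TERM_1K		BIT(0)
#define PADCFG1_TERM_833	(BIT(1) | BIT(0))

static int term_field_for_ohms(unsigned int arg, uint32_t *field)
{
	if (arg == 1)		/* pinconf shorthand: default strength */
		arg = 5000;

	switch (arg) {
	case 20000: *field = PADCFG1_TERM_20K << PADCFG1_TERM_SHIFT; return 0;
	case 5000:  *field = PADCFG1_TERM_5K << PADCFG1_TERM_SHIFT;  return 0;
	case 1000:  *field = PADCFG1_TERM_1K << PADCFG1_TERM_SHIFT;  return 0;
	case 833:   *field = PADCFG1_TERM_833 << PADCFG1_TERM_SHIFT; return 0;
	default:    return -1;
	}
}

int main(void)
{
	uint32_t field;

	if (term_field_for_ohms(1, &field) == 0)
		printf("default strength -> PADCFG1 bits 0x%x\n", field);
	return 0;
}
```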
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 9a760f5cd7ed..4aea3e05e8c6 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -156,7 +156,7 @@ static int amd_gpio_set_debounce(struct gpio_chip *gc, unsigned offset,
pin_reg |= BIT(DB_TMR_OUT_UNIT_OFF);
pin_reg &= ~BIT(DB_TMR_LARGE_OFF);
} else if (debounce < 250000) {
- time = debounce / 15600;
+ time = debounce / 15625;
pin_reg |= time & DB_TMR_OUT_MASK;
pin_reg &= ~BIT(DB_TMR_OUT_UNIT_OFF);
pin_reg |= BIT(DB_TMR_LARGE_OFF);
@@ -166,14 +166,14 @@ static int amd_gpio_set_debounce(struct gpio_chip *gc, unsigned offset,
pin_reg |= BIT(DB_TMR_OUT_UNIT_OFF);
pin_reg |= BIT(DB_TMR_LARGE_OFF);
} else {
- pin_reg &= ~DB_CNTRl_MASK;
+ pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF);
ret = -EINVAL;
}
} else {
pin_reg &= ~BIT(DB_TMR_OUT_UNIT_OFF);
pin_reg &= ~BIT(DB_TMR_LARGE_OFF);
pin_reg &= ~DB_TMR_OUT_MASK;
- pin_reg &= ~DB_CNTRl_MASK;
+ pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF);
}
writel(pin_reg, gpio_dev->base + offset * 4);
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
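Editor's note: the debounce hunk fixes two things. The divisor becomes 15625 us (15.625 ms, i.e. 1/64 s, presumably the large-timer tick), and the control field is now cleared with the mask shifted to its register offset; masking `DB_CNTRl_MASK` at bit 0 cleared unrelated low bits while leaving the field itself intact. A standalone sketch of the mask bug, with an illustrative `DB_CNTRL_OFF` (the real offset lives in the driver header):

```c
#include <assert.h>
#include <stdint.h>

#define DB_CNTRL_OFF	5	/* illustrative field offset */
#define DB_CNTRl_MASK	0x3UL

int main(void)
{
	uint32_t pin_reg = 0x3 << DB_CNTRL_OFF;	/* debounce control set */

	/* Buggy clear: masks bits 1:0, leaving the field untouched. */
	uint32_t buggy = pin_reg & ~DB_CNTRl_MASK;
	assert(buggy == pin_reg);

	/* Fixed clear: shift the mask to the field's offset first. */
	uint32_t fixed = pin_reg & ~(DB_CNTRl_MASK << DB_CNTRL_OFF);
	assert(fixed == 0);
	return 0;
}
```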
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index c8e50a58a5e5..621909b01deb 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -635,44 +635,44 @@ static int jz4770_uart3_data_pins[] = { 0x6c, 0x85, };
static int jz4770_uart3_hwflow_pins[] = { 0x88, 0x89, };
static int jz4770_ssi0_dt_a_pins[] = { 0x15, };
static int jz4770_ssi0_dt_b_pins[] = { 0x35, };
-static int jz4770_ssi0_dt_d_pins[] = { 0x55, };
-static int jz4770_ssi0_dt_e_pins[] = { 0x71, };
+static int jz4770_ssi0_dt_d_pins[] = { 0x75, };
+static int jz4770_ssi0_dt_e_pins[] = { 0x91, };
static int jz4770_ssi0_dr_a_pins[] = { 0x14, };
static int jz4770_ssi0_dr_b_pins[] = { 0x34, };
-static int jz4770_ssi0_dr_d_pins[] = { 0x54, };
-static int jz4770_ssi0_dr_e_pins[] = { 0x6e, };
+static int jz4770_ssi0_dr_d_pins[] = { 0x74, };
+static int jz4770_ssi0_dr_e_pins[] = { 0x8e, };
static int jz4770_ssi0_clk_a_pins[] = { 0x12, };
static int jz4770_ssi0_clk_b_pins[] = { 0x3c, };
-static int jz4770_ssi0_clk_d_pins[] = { 0x58, };
-static int jz4770_ssi0_clk_e_pins[] = { 0x6f, };
+static int jz4770_ssi0_clk_d_pins[] = { 0x78, };
+static int jz4770_ssi0_clk_e_pins[] = { 0x8f, };
static int jz4770_ssi0_gpc_b_pins[] = { 0x3e, };
-static int jz4770_ssi0_gpc_d_pins[] = { 0x56, };
-static int jz4770_ssi0_gpc_e_pins[] = { 0x73, };
+static int jz4770_ssi0_gpc_d_pins[] = { 0x76, };
+static int jz4770_ssi0_gpc_e_pins[] = { 0x93, };
static int jz4770_ssi0_ce0_a_pins[] = { 0x13, };
static int jz4770_ssi0_ce0_b_pins[] = { 0x3d, };
-static int jz4770_ssi0_ce0_d_pins[] = { 0x59, };
-static int jz4770_ssi0_ce0_e_pins[] = { 0x70, };
+static int jz4770_ssi0_ce0_d_pins[] = { 0x79, };
+static int jz4770_ssi0_ce0_e_pins[] = { 0x90, };
static int jz4770_ssi0_ce1_b_pins[] = { 0x3f, };
-static int jz4770_ssi0_ce1_d_pins[] = { 0x57, };
-static int jz4770_ssi0_ce1_e_pins[] = { 0x72, };
+static int jz4770_ssi0_ce1_d_pins[] = { 0x77, };
+static int jz4770_ssi0_ce1_e_pins[] = { 0x92, };
static int jz4770_ssi1_dt_b_pins[] = { 0x35, };
-static int jz4770_ssi1_dt_d_pins[] = { 0x55, };
-static int jz4770_ssi1_dt_e_pins[] = { 0x71, };
+static int jz4770_ssi1_dt_d_pins[] = { 0x75, };
+static int jz4770_ssi1_dt_e_pins[] = { 0x91, };
static int jz4770_ssi1_dr_b_pins[] = { 0x34, };
-static int jz4770_ssi1_dr_d_pins[] = { 0x54, };
-static int jz4770_ssi1_dr_e_pins[] = { 0x6e, };
+static int jz4770_ssi1_dr_d_pins[] = { 0x74, };
+static int jz4770_ssi1_dr_e_pins[] = { 0x8e, };
static int jz4770_ssi1_clk_b_pins[] = { 0x3c, };
-static int jz4770_ssi1_clk_d_pins[] = { 0x58, };
-static int jz4770_ssi1_clk_e_pins[] = { 0x6f, };
+static int jz4770_ssi1_clk_d_pins[] = { 0x78, };
+static int jz4770_ssi1_clk_e_pins[] = { 0x8f, };
static int jz4770_ssi1_gpc_b_pins[] = { 0x3e, };
-static int jz4770_ssi1_gpc_d_pins[] = { 0x56, };
-static int jz4770_ssi1_gpc_e_pins[] = { 0x73, };
+static int jz4770_ssi1_gpc_d_pins[] = { 0x76, };
+static int jz4770_ssi1_gpc_e_pins[] = { 0x93, };
static int jz4770_ssi1_ce0_b_pins[] = { 0x3d, };
-static int jz4770_ssi1_ce0_d_pins[] = { 0x59, };
-static int jz4770_ssi1_ce0_e_pins[] = { 0x70, };
+static int jz4770_ssi1_ce0_d_pins[] = { 0x79, };
+static int jz4770_ssi1_ce0_e_pins[] = { 0x90, };
static int jz4770_ssi1_ce1_b_pins[] = { 0x3f, };
-static int jz4770_ssi1_ce1_d_pins[] = { 0x57, };
-static int jz4770_ssi1_ce1_e_pins[] = { 0x72, };
+static int jz4770_ssi1_ce1_d_pins[] = { 0x77, };
+static int jz4770_ssi1_ce1_e_pins[] = { 0x92, };
static int jz4770_mmc0_1bit_a_pins[] = { 0x12, 0x13, 0x14, };
static int jz4770_mmc0_4bit_a_pins[] = { 0x15, 0x16, 0x17, };
static int jz4770_mmc0_1bit_e_pins[] = { 0x9c, 0x9d, 0x94, };
@@ -1050,35 +1050,35 @@ static int jz4780_ssi0_dt_a_19_pins[] = { 0x13, };
static int jz4780_ssi0_dt_a_21_pins[] = { 0x15, };
static int jz4780_ssi0_dt_a_28_pins[] = { 0x1c, };
static int jz4780_ssi0_dt_b_pins[] = { 0x3d, };
-static int jz4780_ssi0_dt_d_pins[] = { 0x59, };
+static int jz4780_ssi0_dt_d_pins[] = { 0x79, };
static int jz4780_ssi0_dr_a_20_pins[] = { 0x14, };
static int jz4780_ssi0_dr_a_27_pins[] = { 0x1b, };
static int jz4780_ssi0_dr_b_pins[] = { 0x34, };
-static int jz4780_ssi0_dr_d_pins[] = { 0x54, };
+static int jz4780_ssi0_dr_d_pins[] = { 0x74, };
static int jz4780_ssi0_clk_a_pins[] = { 0x12, };
static int jz4780_ssi0_clk_b_5_pins[] = { 0x25, };
static int jz4780_ssi0_clk_b_28_pins[] = { 0x3c, };
-static int jz4780_ssi0_clk_d_pins[] = { 0x58, };
+static int jz4780_ssi0_clk_d_pins[] = { 0x78, };
static int jz4780_ssi0_gpc_b_pins[] = { 0x3e, };
-static int jz4780_ssi0_gpc_d_pins[] = { 0x56, };
+static int jz4780_ssi0_gpc_d_pins[] = { 0x76, };
static int jz4780_ssi0_ce0_a_23_pins[] = { 0x17, };
static int jz4780_ssi0_ce0_a_25_pins[] = { 0x19, };
static int jz4780_ssi0_ce0_b_pins[] = { 0x3f, };
-static int jz4780_ssi0_ce0_d_pins[] = { 0x57, };
+static int jz4780_ssi0_ce0_d_pins[] = { 0x77, };
static int jz4780_ssi0_ce1_b_pins[] = { 0x35, };
-static int jz4780_ssi0_ce1_d_pins[] = { 0x55, };
+static int jz4780_ssi0_ce1_d_pins[] = { 0x75, };
static int jz4780_ssi1_dt_b_pins[] = { 0x3d, };
-static int jz4780_ssi1_dt_d_pins[] = { 0x59, };
+static int jz4780_ssi1_dt_d_pins[] = { 0x79, };
static int jz4780_ssi1_dr_b_pins[] = { 0x34, };
-static int jz4780_ssi1_dr_d_pins[] = { 0x54, };
+static int jz4780_ssi1_dr_d_pins[] = { 0x74, };
static int jz4780_ssi1_clk_b_pins[] = { 0x3c, };
-static int jz4780_ssi1_clk_d_pins[] = { 0x58, };
+static int jz4780_ssi1_clk_d_pins[] = { 0x78, };
static int jz4780_ssi1_gpc_b_pins[] = { 0x3e, };
-static int jz4780_ssi1_gpc_d_pins[] = { 0x56, };
+static int jz4780_ssi1_gpc_d_pins[] = { 0x76, };
static int jz4780_ssi1_ce0_b_pins[] = { 0x3f, };
-static int jz4780_ssi1_ce0_d_pins[] = { 0x57, };
+static int jz4780_ssi1_ce0_d_pins[] = { 0x77, };
static int jz4780_ssi1_ce1_b_pins[] = { 0x35, };
-static int jz4780_ssi1_ce1_d_pins[] = { 0x55, };
+static int jz4780_ssi1_ce1_d_pins[] = { 0x75, };
static int jz4780_mmc0_8bit_a_pins[] = { 0x04, 0x05, 0x06, 0x07, 0x18, };
static int jz4780_i2c3_pins[] = { 0x6a, 0x6b, };
static int jz4780_i2c4_e_pins[] = { 0x8c, 0x8d, };
diff --git a/drivers/pinctrl/pinctrl-mcp23s08_spi.c b/drivers/pinctrl/pinctrl-mcp23s08_spi.c
index 1f47a661b0a7..9ae10318f6f3 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08_spi.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08_spi.c
@@ -119,13 +119,15 @@ static int mcp23s08_spi_regmap_init(struct mcp23s08 *mcp, struct device *dev,
return -EINVAL;
}
- copy = devm_kmemdup(dev, &config, sizeof(config), GFP_KERNEL);
+ copy = devm_kmemdup(dev, config, sizeof(*config), GFP_KERNEL);
if (!copy)
return -ENOMEM;
copy->name = name;
mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp, copy);
+ if (IS_ERR(mcp->regmap))
+ dev_err(dev, "regmap init failed for %s\n", mcp->chip.label);
return PTR_ERR_OR_ZERO(mcp->regmap);
}
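Editor's note: `config` in this function is already a `const struct regmap_config *`, so the old `devm_kmemdup(dev, &config, sizeof(config), ...)` duplicated the pointer variable itself rather than the structure it points at. A standalone sketch of the difference, using plain `malloc`/`memcpy` as a stand-in for `devm_kmemdup()` and an invented demo struct:

```c
#include <assert.h>
#include <stdlib.h>
#include <string.h>

struct regmap_config_demo {
	const char *name;
	int max_register;
};

/* Plain memdup, standing in for devm_kmemdup() in this sketch. */
static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}

int main(void)
{
	static const struct regmap_config_demo tmpl = { "tmpl", 16 };
	const struct regmap_config_demo *config = &tmpl;

	/* Buggy: duplicates the pointer variable, not the struct. */
	void *wrong = memdup(&config, sizeof(config));

	/* Fixed: duplicate what the pointer points at. */
	struct regmap_config_demo *copy = memdup(config, sizeof(*config));

	assert(copy && copy->max_register == 16);
	free(wrong);
	free(copy);
	return 0;
}
```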
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 0401c1da79dd..aa1a1c850d05 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -3155,7 +3155,9 @@ static int rockchip_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
if (!bank->domain)
return -ENXIO;
+ clk_enable(bank->clk);
virq = irq_create_mapping(bank->domain, offset);
+ clk_disable(bank->clk);
return (virq) ? : -ENXIO;
}
@@ -3194,7 +3196,7 @@ static void rockchip_irq_demux(struct irq_desc *desc)
irq = __ffs(pend);
pend &= ~BIT(irq);
- virq = irq_linear_revmap(bank->domain, irq);
+ virq = irq_find_mapping(bank->domain, irq);
if (!virq) {
dev_err(bank->drvdata->dev, "unmapped irq %d\n", irq);
@@ -3373,7 +3375,7 @@ static int rockchip_interrupts_register(struct platform_device *pdev,
unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
struct irq_chip_generic *gc;
int ret;
- int i, j;
+ int i;
for (i = 0; i < ctrl->nr_banks; ++i, ++bank) {
if (!bank->valid) {
@@ -3400,7 +3402,7 @@ static int rockchip_interrupts_register(struct platform_device *pdev,
ret = irq_alloc_domain_generic_chips(bank->domain, 32, 1,
"rockchip_gpio_irq", handle_level_irq,
- clr, 0, IRQ_GC_INIT_MASK_CACHE);
+ clr, 0, 0);
if (ret) {
dev_err(&pdev->dev, "could not alloc generic chips for bank %s\n",
bank->name);
@@ -3409,14 +3411,6 @@ static int rockchip_interrupts_register(struct platform_device *pdev,
continue;
}
- /*
- * Linux assumes that all interrupts start out disabled/masked.
- * Our driver only uses the concept of masked and always keeps
- * things enabled, so for us that's all masked and all enabled.
- */
- writel_relaxed(0xffffffff, bank->reg_base + GPIO_INTMASK);
- writel_relaxed(0xffffffff, bank->reg_base + GPIO_INTEN);
-
gc = irq_get_domain_generic_chip(bank->domain, 0);
gc->reg_base = bank->reg_base;
gc->private = bank;
@@ -3433,13 +3427,17 @@ static int rockchip_interrupts_register(struct platform_device *pdev,
gc->chip_types[0].chip.irq_set_type = rockchip_irq_set_type;
gc->wake_enabled = IRQ_MSK(bank->nr_pins);
+ /*
+ * Linux assumes that all interrupts start out disabled/masked.
+ * Our driver only uses the concept of masked and always keeps
+ * things enabled, so for us that's all masked and all enabled.
+ */
+ writel_relaxed(0xffffffff, bank->reg_base + GPIO_INTMASK);
+ writel_relaxed(0xffffffff, bank->reg_base + GPIO_INTEN);
+ gc->mask_cache = 0xffffffff;
+
irq_set_chained_handler_and_data(bank->irq,
rockchip_irq_demux, bank);
-
- /* map the gpio irqs here, when the clock is still running */
- for (j = 0 ; j < 32 ; j++)
- irq_create_mapping(bank->domain, j);
-
clk_disable(bank->clk);
}
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index c4bcda90aac4..77a25bdf0da7 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -815,21 +815,14 @@ static void msm_gpio_irq_clear_unmask(struct irq_data *d, bool status_clear)
static void msm_gpio_irq_enable(struct irq_data *d)
{
- /*
- * Clear the interrupt that may be pending before we enable
- * the line.
- * This is especially a problem with the GPIOs routed to the
- * PDC. These GPIOs are direct-connect interrupts to the GIC.
- * Disabling the interrupt line at the PDC does not prevent
- * the interrupt from being latched at the GIC. The state at
- * GIC needs to be cleared before enabling.
- */
- if (d->parent_data) {
- irq_chip_set_parent_state(d, IRQCHIP_STATE_PENDING, 0);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
+
+ if (d->parent_data)
irq_chip_enable_parent(d);
- }
- msm_gpio_irq_clear_unmask(d, true);
+ if (!test_bit(d->hwirq, pctrl->skip_wake_irqs))
+ msm_gpio_irq_clear_unmask(d, true);
}
static void msm_gpio_irq_disable(struct irq_data *d)
@@ -1104,6 +1097,19 @@ static int msm_gpio_irq_reqres(struct irq_data *d)
ret = -EINVAL;
goto out;
}
+
+ /*
+ * Clear the interrupt that may be pending before we enable
+ * the line.
+ * This is especially a problem with the GPIOs routed to the
+ * PDC. These GPIOs are direct-connect interrupts to the GIC.
+ * Disabling the interrupt line at the PDC does not prevent
+ * the interrupt from being latched at the GIC. The state at
+ * GIC needs to be cleared before enabling.
+ */
+ if (d->parent_data && test_bit(d->hwirq, pctrl->skip_wake_irqs))
+ irq_chip_set_parent_state(d, IRQCHIP_STATE_PENDING, 0);
+
return 0;
out:
module_put(gc->owner);
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8250.c b/drivers/pinctrl/qcom/pinctrl-sm8250.c
index 826df0d637ea..af144e724bd9 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8250.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8250.c
@@ -1313,6 +1313,22 @@ static const struct msm_pingroup sm8250_groups[] = {
[183] = SDC_PINGROUP(sdc2_data, 0xb7000, 9, 0),
};
+static const struct msm_gpio_wakeirq_map sm8250_pdc_map[] = {
+ { 0, 79 }, { 1, 84 }, { 2, 80 }, { 3, 82 }, { 4, 107 }, { 7, 43 },
+ { 11, 42 }, { 14, 44 }, { 15, 52 }, { 19, 67 }, { 23, 68 }, { 24, 105 },
+ { 27, 92 }, { 28, 106 }, { 31, 69 }, { 35, 70 }, { 39, 37 },
+ { 40, 108 }, { 43, 71 }, { 45, 72 }, { 47, 83 }, { 51, 74 }, { 55, 77 },
+ { 59, 78 }, { 63, 75 }, { 64, 81 }, { 65, 87 }, { 66, 88 }, { 67, 89 },
+ { 68, 54 }, { 70, 85 }, { 77, 46 }, { 80, 90 }, { 81, 91 }, { 83, 97 },
+ { 84, 98 }, { 86, 99 }, { 87, 100 }, { 88, 101 }, { 89, 102 },
+ { 92, 103 }, { 93, 104 }, { 100, 53 }, { 103, 47 }, { 104, 48 },
+ { 108, 49 }, { 109, 94 }, { 110, 95 }, { 111, 96 }, { 112, 55 },
+ { 113, 56 }, { 118, 50 }, { 121, 51 }, { 122, 57 }, { 123, 58 },
+ { 124, 45 }, { 126, 59 }, { 128, 76 }, { 129, 86 }, { 132, 93 },
+ { 133, 65 }, { 134, 66 }, { 136, 62 }, { 137, 63 }, { 138, 64 },
+ { 142, 60 }, { 143, 61 }
+};
+
static const struct msm_pinctrl_soc_data sm8250_pinctrl = {
.pins = sm8250_pins,
.npins = ARRAY_SIZE(sm8250_pins),
@@ -1323,6 +1339,8 @@ static const struct msm_pinctrl_soc_data sm8250_pinctrl = {
.ngpios = 181,
.tiles = sm8250_tiles,
.ntiles = ARRAY_SIZE(sm8250_tiles),
+ .wakeirq_map = sm8250_pdc_map,
+ .nwakeirq_map = ARRAY_SIZE(sm8250_pdc_map),
};
static int sm8250_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pnp/core.c b/drivers/pnp/core.c
index 3bf18d718975..a50ab002e9e4 100644
--- a/drivers/pnp/core.c
+++ b/drivers/pnp/core.c
@@ -51,7 +51,7 @@ static void pnp_remove_protocol(struct pnp_protocol *protocol)
}
/**
- * pnp_protocol_register - adds a pnp protocol to the pnp layer
+ * pnp_register_protocol - adds a pnp protocol to the pnp layer
* @protocol: pointer to the corresponding pnp_protocol structure
*
* Ex protocols: ISAPNP, PNPBIOS, etc
@@ -91,7 +91,7 @@ int pnp_register_protocol(struct pnp_protocol *protocol)
}
/**
- * pnp_protocol_unregister - removes a pnp protocol from the pnp layer
+ * pnp_unregister_protocol - removes a pnp protocol from the pnp layer
* @protocol: pointer to the corresponding pnp_protocol structure
*/
void pnp_unregister_protocol(struct pnp_protocol *protocol)
diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
index 0b2830efc574..70d6d52bc1e2 100644
--- a/drivers/powercap/intel_rapl_common.c
+++ b/drivers/powercap/intel_rapl_common.c
@@ -620,7 +620,7 @@ static u64 rapl_unit_xlate(struct rapl_domain *rd, enum unit_type type,
case ARBITRARY_UNIT:
default:
return value;
- };
+ }
if (to_raw)
return div64_u64(value, units) * scale;
diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
index f808c5fa9838..3f0b8e2ef3d4 100644
--- a/drivers/powercap/powercap_sys.c
+++ b/drivers/powercap/powercap_sys.c
@@ -367,9 +367,9 @@ static void create_power_zone_common_attributes(
&dev_attr_max_energy_range_uj.attr;
if (power_zone->ops->get_energy_uj) {
if (power_zone->ops->reset_energy_uj)
- dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
+ dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUSR;
else
- dev_attr_energy_uj.attr.mode = S_IRUGO;
+ dev_attr_energy_uj.attr.mode = S_IRUSR;
power_zone->zone_dev_attrs[count++] =
&dev_attr_energy_uj.attr;
}
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index a4ffd71696da..a5ad553da8cd 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -4165,6 +4165,8 @@ int regulator_get_voltage_rdev(struct regulator_dev *rdev)
ret = rdev->desc->fixed_uV;
} else if (rdev->supply) {
ret = regulator_get_voltage_rdev(rdev->supply->rdev);
+ } else if (rdev->supply_name) {
+ return -EPROBE_DEFER;
} else {
return -EINVAL;
}
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 485cbfcbf06e..ef738b42a092 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -680,7 +680,10 @@ static int ap_device_probe(struct device *dev)
{
struct ap_device *ap_dev = to_ap_dev(dev);
struct ap_driver *ap_drv = to_ap_drv(dev->driver);
- int card, queue, devres, drvres, rc;
+ int card, queue, devres, drvres, rc = -ENODEV;
+
+ if (!get_device(dev))
+ return rc;
if (is_queue_dev(dev)) {
/*
@@ -697,7 +700,7 @@ static int ap_device_probe(struct device *dev)
mutex_unlock(&ap_perms_mutex);
drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT;
if (!!devres != !!drvres)
- return -ENODEV;
+ goto out;
}
/* Add queue/card to list of active queues/cards */
@@ -718,6 +721,9 @@ static int ap_device_probe(struct device *dev)
ap_dev->drv = NULL;
}
+out:
+ if (rc)
+ put_device(dev);
return rc;
}
@@ -744,6 +750,8 @@ static int ap_device_remove(struct device *dev)
hash_del(&to_ap_queue(dev)->hnode);
spin_unlock_bh(&ap_queues_lock);
+ put_device(dev);
+
return 0;
}
@@ -1371,6 +1379,8 @@ static inline void ap_scan_domains(struct ap_card *ac)
__func__, ac->id, dom);
goto put_dev_and_continue;
}
+ /* get it and thus adjust the reference counter */
+ get_device(dev);
if (decfg)
AP_DBF_INFO("%s(%d,%d) new (decfg) queue device created\n",
__func__, ac->id, dom);
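Editor's note: the ap_bus changes pin the device for the lifetime of the bound driver: probe takes a reference up front, every probe failure path releases it, the scan path takes one for the newly created queue device, and remove drops the probe-time reference. A hedged sketch of that discipline; the `demo_*` names and the empty setup/teardown bodies are placeholders, not the driver's code:

```c
#include <linux/device.h>

static int demo_probe(struct device *dev)
{
	int rc = -ENODEV;

	if (!get_device(dev))	/* pin the device for the driver */
		return rc;

	rc = 0;			/* ... driver setup, may set rc ... */

	if (rc)
		put_device(dev);	/* unwind the pin on failure */
	return rc;
}

static int demo_remove(struct device *dev)
{
	/* ... driver teardown ... */
	put_device(dev);	/* balance the get_device() in probe */
	return 0;
}
```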
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index 99cb60ea663d..dd84995049b9 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -35,9 +35,6 @@ MODULE_DESCRIPTION("s390 protected key interface");
#define PROTKEYBLOBBUFSIZE 256 /* protected key buffer size used internal */
#define MAXAPQNSINLIST 64 /* max 64 apqns within a apqn list */
-/* mask of available pckmo subfunctions, fetched once at module init */
-static cpacf_mask_t pckmo_functions;
-
/*
* debug feature data and functions
*/
@@ -91,6 +88,9 @@ static int pkey_clr2protkey(u32 keytype,
const struct pkey_clrkey *clrkey,
struct pkey_protkey *protkey)
{
+ /* mask of available pckmo subfunctions */
+ static cpacf_mask_t pckmo_functions;
+
long fc;
int keysize;
u8 paramblock[64];
@@ -114,11 +114,13 @@ static int pkey_clr2protkey(u32 keytype,
return -EINVAL;
}
- /*
- * Check if the needed pckmo subfunction is available.
- * These subfunctions can be enabled/disabled by customers
- * in the LPAR profile or may even change on the fly.
- */
+ /* Did we already check for PCKMO? */
+ if (!pckmo_functions.bytes[0]) {
+ /* no, so check now */
+ if (!cpacf_query(CPACF_PCKMO, &pckmo_functions))
+ return -ENODEV;
+ }
+ /* check for the pckmo subfunction we need now */
if (!cpacf_test_func(&pckmo_functions, fc)) {
DEBUG_ERR("%s pckmo functions not available\n", __func__);
return -ENODEV;
@@ -2058,7 +2060,7 @@ static struct miscdevice pkey_dev = {
*/
static int __init pkey_init(void)
{
- cpacf_mask_t kmc_functions;
+ cpacf_mask_t func_mask;
/*
* The pckmo instruction should be available - even if we don't
@@ -2066,15 +2068,15 @@ static int __init pkey_init(void)
* is also the minimum level for the kmc instructions which
* are able to work with protected keys.
*/
- if (!cpacf_query(CPACF_PCKMO, &pckmo_functions))
+ if (!cpacf_query(CPACF_PCKMO, &func_mask))
return -ENODEV;
/* check for kmc instructions available */
- if (!cpacf_query(CPACF_KMC, &kmc_functions))
+ if (!cpacf_query(CPACF_KMC, &func_mask))
return -ENODEV;
- if (!cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_128) ||
- !cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_192) ||
- !cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_256))
+ if (!cpacf_test_func(&func_mask, CPACF_KMC_PAES_128) ||
+ !cpacf_test_func(&func_mask, CPACF_KMC_PAES_192) ||
+ !cpacf_test_func(&func_mask, CPACF_KMC_PAES_256))
return -ENODEV;
pkey_debug_init();
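Editor's note: moving the PCKMO query from module init into `pkey_clr2protkey()` makes it lazy. The subfunction mask can change at runtime (it is controlled by the LPAR profile), so probing on first use rather than at load time picks up facilities enabled after boot. A standalone sketch of the function-local-static pattern; `query_functions()` and the mask layout are invented stand-ins, and a racing first call is harmless here because the query result is stable:

```c
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for cpacf_query(): pretend to probe the facility mask. */
static bool query_functions(unsigned char *mask)
{
	mask[0] = 0x80;		/* some subfunction available */
	return true;
}

static int use_subfunction(void)
{
	static unsigned char functions[16];	/* zeroed => not yet queried */

	if (!functions[0] && !query_functions(functions))
		return -1;	/* facility genuinely unavailable */

	return (functions[0] & 0x80) ? 0 : -1;
}

int main(void)
{
	printf("first call: %d, second call: %d\n",
	       use_subfunction(), use_subfunction());
	return 0;
}
```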
diff --git a/drivers/s390/crypto/zcrypt_card.c b/drivers/s390/crypto/zcrypt_card.c
index e342eb86acd1..33b23884b133 100644
--- a/drivers/s390/crypto/zcrypt_card.c
+++ b/drivers/s390/crypto/zcrypt_card.c
@@ -157,11 +157,6 @@ int zcrypt_card_register(struct zcrypt_card *zc)
{
int rc;
- rc = sysfs_create_group(&zc->card->ap_dev.device.kobj,
- &zcrypt_card_attr_group);
- if (rc)
- return rc;
-
spin_lock(&zcrypt_list_lock);
list_add_tail(&zc->list, &zcrypt_card_list);
spin_unlock(&zcrypt_list_lock);
@@ -170,6 +165,14 @@ int zcrypt_card_register(struct zcrypt_card *zc)
ZCRYPT_DBF(DBF_INFO, "card=%02x register online=1\n", zc->card->id);
+ rc = sysfs_create_group(&zc->card->ap_dev.device.kobj,
+ &zcrypt_card_attr_group);
+ if (rc) {
+ spin_lock(&zcrypt_list_lock);
+ list_del_init(&zc->list);
+ spin_unlock(&zcrypt_list_lock);
+ }
+
return rc;
}
EXPORT_SYMBOL(zcrypt_card_register);
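Editor's note: reordering the registration so that `sysfs_create_group()` runs last means the only step that can fail is also the final one, and the rollback shrinks to a single `list_del_init()` instead of a full unwind. A hedged sketch of the shape; the parameter names are placeholders for `zcrypt_card_list` and friends:

```c
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/sysfs.h>

static int demo_register(struct kobject *kobj,
			 const struct attribute_group *grp,
			 struct list_head *entry, struct list_head *list,
			 spinlock_t *lock)
{
	int rc;

	spin_lock(lock);
	list_add_tail(entry, list);	/* cannot fail */
	spin_unlock(lock);

	rc = sysfs_create_group(kobj, grp);	/* the only failable step */
	if (rc) {
		spin_lock(lock);
		list_del_init(entry);	/* single-step rollback */
		spin_unlock(lock);
	}
	return rc;
}
```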
diff --git a/drivers/s390/crypto/zcrypt_queue.c b/drivers/s390/crypto/zcrypt_queue.c
index 3c207066313c..5062eae73d4a 100644
--- a/drivers/s390/crypto/zcrypt_queue.c
+++ b/drivers/s390/crypto/zcrypt_queue.c
@@ -180,7 +180,6 @@ int zcrypt_queue_register(struct zcrypt_queue *zq)
&zcrypt_queue_attr_group);
if (rc)
goto out;
- get_device(&zq->queue->ap_dev.device);
if (zq->ops->rng) {
rc = zcrypt_rng_device_add();
@@ -192,7 +191,6 @@ int zcrypt_queue_register(struct zcrypt_queue *zq)
out_unregister:
sysfs_remove_group(&zq->queue->ap_dev.device.kobj,
&zcrypt_queue_attr_group);
- put_device(&zq->queue->ap_dev.device);
out:
spin_lock(&zcrypt_list_lock);
list_del_init(&zq->list);
@@ -220,12 +218,10 @@ void zcrypt_queue_unregister(struct zcrypt_queue *zq)
list_del_init(&zq->list);
zcrypt_device_count--;
spin_unlock(&zcrypt_list_lock);
- zcrypt_card_put(zc);
if (zq->ops->rng)
zcrypt_rng_device_remove();
sysfs_remove_group(&zq->queue->ap_dev.device.kobj,
&zcrypt_queue_attr_group);
- put_device(&zq->queue->ap_dev.device);
- zcrypt_queue_put(zq);
+ zcrypt_card_put(zc);
}
EXPORT_SYMBOL(zcrypt_queue_unregister);
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
index fe96ca3c88a5..26cc943d2034 100644
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -390,7 +390,7 @@ static int ism_move(struct smcd_dev *smcd, u64 dmb_tok, unsigned int idx,
}
static struct ism_systemeid SYSTEM_EID = {
- .seid_string = "IBM-SYSZ-IBMSEID00000000",
+ .seid_string = "IBM-SYSZ-ISMSEID00000000",
.serial_number = "0000",
.type = "0000",
};
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index f32da0ca529e..308bda2e9c00 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -658,8 +658,8 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
rcu_read_lock();
list_for_each_entry_rcu(h,
&tmp_pg->dh_list, node) {
- /* h->sdev should always be valid */
- BUG_ON(!h->sdev);
+ if (!h->sdev)
+ continue;
h->sdev->access_state = desc[0];
}
rcu_read_unlock();
@@ -705,7 +705,8 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
pg->expiry = 0;
rcu_read_lock();
list_for_each_entry_rcu(h, &pg->dh_list, node) {
- BUG_ON(!h->sdev);
+ if (!h->sdev)
+ continue;
h->sdev->access_state =
(pg->state & SCSI_ACCESS_STATE_MASK);
if (pg->pref)
@@ -1147,7 +1148,6 @@ static void alua_bus_detach(struct scsi_device *sdev)
spin_lock(&h->pg_lock);
pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock));
rcu_assign_pointer(h->pg, NULL);
- h->sdev = NULL;
spin_unlock(&h->pg_lock);
if (pg) {
spin_lock_irq(&pg->lock);
@@ -1156,6 +1156,7 @@ static void alua_bus_detach(struct scsi_device *sdev)
kref_put(&pg->kref, release_port_group);
}
sdev->handler_data = NULL;
+ synchronize_rcu();
kfree(h);
}
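Editor's note: two RCU rules are being applied in the alua hunks. A reader must tolerate entries it can observe in a half-initialised state (hence `continue` instead of `BUG_ON`), and memory reachable from an RCU-protected list may only be freed after `synchronize_rcu()` guarantees no reader still holds it. A hedged sketch of both sides, with an invented `demo_entry` type:

```c
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_entry {
	struct list_head node;
	void *payload;		/* may be NULL while being set up */
};

/* Reader: skip half-initialised entries instead of crashing. */
static void demo_reader(struct list_head *head)
{
	struct demo_entry *e;

	rcu_read_lock();
	list_for_each_entry_rcu(e, head, node) {
		if (!e->payload)
			continue;
		/* ... use e->payload ... */
	}
	rcu_read_unlock();
}

/* Writer: unpublish first, wait for readers, then free. */
static void demo_detach(struct demo_entry *e)
{
	list_del_rcu(&e->node);
	synchronize_rcu();	/* no reader can still hold e */
	kfree(e);
}
```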
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 128583dfccf2..c8dd8588f800 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -445,7 +445,7 @@ static int hisi_sas_task_prep(struct sas_task *task,
}
}
- if (scmd) {
+ if (scmd && hisi_hba->shost->nr_hw_queues) {
unsigned int dq_index;
u32 blk_tag;
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 83ce4f11a589..8df70c92911d 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -8855,7 +8855,7 @@ reinit_after_soft_reset:
/* hook into SCSI subsystem */
rc = hpsa_scsi_add_host(h);
if (rc)
- goto clean7; /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
+ goto clean8; /* lastlogicals, perf, sg, cmd, irq, shost, pci, lu, aer/h */
/* Monitor the controller for firmware lockups */
h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
@@ -8870,6 +8870,8 @@ reinit_after_soft_reset:
HPSA_EVENT_MONITOR_INTERVAL);
return 0;
+clean8: /* lastlogicals, perf, sg, cmd, irq, shost, pci, lu, aer/h */
+ kfree(h->lastlogicals);
clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
hpsa_free_performant_mode(h);
h->access.set_intr_mask(h, HPSA_INTR_OFF);
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index b1f3017b6547..29fcc44be2d5 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -807,13 +807,29 @@ static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
}
/**
+ * ibmvscsi_set_request_limit - Set the adapter request_limit in response to
+ * an adapter failure, reset, or SRP login. Done under the host lock to
+ * prevent a race with SCSI command submission.
+ * @hostdata: adapter to adjust
+ * @limit: new request limit
+ */
+static void ibmvscsi_set_request_limit(struct ibmvscsi_host_data *hostdata, int limit)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(hostdata->host->host_lock, flags);
+ atomic_set(&hostdata->request_limit, limit);
+ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+}
+
+/**
* ibmvscsi_reset_host - Reset the connection to the server
* @hostdata: struct ibmvscsi_host_data to reset
*/
static void ibmvscsi_reset_host(struct ibmvscsi_host_data *hostdata)
{
scsi_block_requests(hostdata->host);
- atomic_set(&hostdata->request_limit, 0);
+ ibmvscsi_set_request_limit(hostdata, 0);
purge_requests(hostdata, DID_ERROR);
hostdata->action = IBMVSCSI_HOST_ACTION_RESET;
@@ -1146,13 +1162,13 @@ static void login_rsp(struct srp_event_struct *evt_struct)
dev_info(hostdata->dev, "SRP_LOGIN_REJ reason %u\n",
evt_struct->xfer_iu->srp.login_rej.reason);
/* Login failed. */
- atomic_set(&hostdata->request_limit, -1);
+ ibmvscsi_set_request_limit(hostdata, -1);
return;
default:
dev_err(hostdata->dev, "Invalid login response typecode 0x%02x!\n",
evt_struct->xfer_iu->srp.login_rsp.opcode);
/* Login failed. */
- atomic_set(&hostdata->request_limit, -1);
+ ibmvscsi_set_request_limit(hostdata, -1);
return;
}
@@ -1163,7 +1179,7 @@ static void login_rsp(struct srp_event_struct *evt_struct)
* This value is set rather than added to request_limit because
* request_limit could have been set to -1 by this client.
*/
- atomic_set(&hostdata->request_limit,
+ ibmvscsi_set_request_limit(hostdata,
be32_to_cpu(evt_struct->xfer_iu->srp.login_rsp.req_lim_delta));
/* If we had any pending I/Os, kick them */
@@ -1195,13 +1211,13 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata)
login->req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
SRP_BUF_FORMAT_INDIRECT);
- spin_lock_irqsave(hostdata->host->host_lock, flags);
/* Start out with a request limit of 0, since this is negotiated in
* the login request we are just sending and login requests always
* get sent by the driver regardless of request_limit.
*/
- atomic_set(&hostdata->request_limit, 0);
+ ibmvscsi_set_request_limit(hostdata, 0);
+ spin_lock_irqsave(hostdata->host->host_lock, flags);
rc = ibmvscsi_send_srp_event(evt_struct, hostdata, login_timeout * 2);
spin_unlock_irqrestore(hostdata->host->host_lock, flags);
dev_info(hostdata->dev, "sent SRP login\n");
@@ -1781,7 +1797,7 @@ static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
return;
case VIOSRP_CRQ_XPORT_EVENT: /* Hypervisor telling us the connection is closed */
scsi_block_requests(hostdata->host);
- atomic_set(&hostdata->request_limit, 0);
+ ibmvscsi_set_request_limit(hostdata, 0);
if (crq->format == 0x06) {
/* We need to re-setup the interpartition connection */
dev_info(hostdata->dev, "Re-enabling adapter!\n");
@@ -2137,12 +2153,12 @@ static void ibmvscsi_do_work(struct ibmvscsi_host_data *hostdata)
}
hostdata->action = IBMVSCSI_HOST_ACTION_NONE;
+ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
if (rc) {
- atomic_set(&hostdata->request_limit, -1);
+ ibmvscsi_set_request_limit(hostdata, -1);
dev_err(hostdata->dev, "error after %s\n", action);
}
- spin_unlock_irqrestore(hostdata->host->host_lock, flags);
scsi_unblock_requests(hostdata->host);
}
@@ -2226,7 +2242,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
init_waitqueue_head(&hostdata->work_wait_q);
hostdata->host = host;
hostdata->dev = dev;
- atomic_set(&hostdata->request_limit, -1);
+ ibmvscsi_set_request_limit(hostdata, -1);
hostdata->host->max_sectors = IBMVSCSI_MAX_SECTORS_DEFAULT;
if (map_persist_bufs(hostdata)) {
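Editor's note: `request_limit` is an `atomic_t`, but the submission path checks it and then issues the command while holding the host lock, so a reset that calls bare `atomic_set()` can land between the check and the send. Wrapping the store in the same lock closes that window. A minimal sketch of the helper, with placeholder parameter names:

```c
#include <linux/atomic.h>
#include <linux/spinlock.h>

static void demo_set_limit(spinlock_t *host_lock, atomic_t *limit, int val)
{
	unsigned long flags;

	/* Serialise against check-then-send done under the same lock. */
	spin_lock_irqsave(host_lock, flags);
	atomic_set(limit, val);
	spin_unlock_irqrestore(host_lock, flags);
}
```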
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 93230cd1982f..e4cc92bc4d94 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -1740,6 +1740,13 @@ _base_irqpoll(struct irq_poll *irqpoll, int budget)
reply_q->irq_poll_scheduled = false;
reply_q->irq_line_enable = true;
enable_irq(reply_q->os_irq);
+ /*
+ * Go for one more round of processing the
+ * reply descriptor post queue in case the HBA
+ * firmware has posted some reply descriptors
+ * while re-enabling the IRQ.
+ */
+ _base_process_reply_queue(reply_q);
}
return num_entries;
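Editor's note: the extra `_base_process_reply_queue()` call closes a window in the irq-poll handoff: descriptors the firmware posts between the final queue scan and `enable_irq()` raise no new interrupt edge, so the handler scans once more after re-enabling the line. A hedged sketch of that shape; `process_queue()` and `irq_for()` are hypothetical stand-ins for the driver's own helpers:

```c
#include <linux/interrupt.h>
#include <linux/irq_poll.h>

/* Hypothetical helpers standing in for the driver's own routines. */
int process_queue(int budget);
int irq_for(struct irq_poll *iop);

static int demo_irqpoll(struct irq_poll *iop, int budget)
{
	int done = process_queue(budget);

	if (done < budget) {
		irq_poll_complete(iop);
		enable_irq(irq_for(iop));
		/* Catch descriptors posted while the IRQ was off. */
		process_queue(budget);
	}
	return done;
}
```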
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 1f9005125313..b7a1dc24db38 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -554,10 +554,12 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
fcport = qla_rport->fcport;
- if (!qpair || !fcport || (qpair && !qpair->fw_started) ||
- (fcport && fcport->deleted))
+ if (!qpair || !fcport)
return -ENODEV;
+ if (!qpair->fw_started || fcport->deleted)
+ return -EBUSY;
+
vha = fcport->vha;
if (!(fcport->nvme_flag & NVME_FLAG_REGISTERED))
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index 84f4416d366f..bd8623ee156a 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -1001,10 +1001,8 @@ qla27xx_mpi_fwdump(scsi_qla_host_t *vha, int hardware_locked)
{
ulong flags = 0;
-#ifndef __CHECKER__
if (!hardware_locked)
spin_lock_irqsave(&vha->hw->hardware_lock, flags);
-#endif
if (!vha->hw->mpi_fw_dump) {
ql_log(ql_log_warn, vha, 0x02f3, "-> mpi_fwdump no buffer\n");
} else {
@@ -1050,10 +1048,8 @@ qla27xx_mpi_fwdump(scsi_qla_host_t *vha, int hardware_locked)
}
bailout:
-#ifndef __CHECKER__
if (!hardware_locked)
spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
-#endif
}
void
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index f2437a7570ce..9af50e6f94c4 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -1714,15 +1714,16 @@ static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
*/
static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
{
- struct async_scan_data *data;
+ struct async_scan_data *data = NULL;
unsigned long flags;
if (strncmp(scsi_scan_type, "sync", 4) == 0)
return NULL;
+ mutex_lock(&shost->scan_mutex);
if (shost->async_scan) {
shost_printk(KERN_DEBUG, shost, "%s called twice\n", __func__);
- return NULL;
+ goto err;
}
data = kmalloc(sizeof(*data), GFP_KERNEL);
@@ -1733,7 +1734,6 @@ static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
goto err;
init_completion(&data->prev_finished);
- mutex_lock(&shost->scan_mutex);
spin_lock_irqsave(shost->host_lock, flags);
shost->async_scan = 1;
spin_unlock_irqrestore(shost->host_lock, flags);
@@ -1748,6 +1748,7 @@ static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
return data;
err:
+ mutex_unlock(&shost->scan_mutex);
kfree(data);
return NULL;
}
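Editor's note: taking `scan_mutex` before the `shost->async_scan` test makes the check-and-set atomic; previously two async scans could both pass the test before either set the flag, and the new `err:` path must now drop the mutex as well. A standalone sketch of the check-then-set rule using a pthread mutex (simplified: the real function holds the mutex across more setup):

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t scan_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool async_scan;

static int prep_scan(void)
{
	pthread_mutex_lock(&scan_mutex);
	if (async_scan) {
		/* error path: every exit must release the lock */
		pthread_mutex_unlock(&scan_mutex);
		return -1;
	}
	async_scan = true;	/* published under the same hold */
	pthread_mutex_unlock(&scan_mutex);
	return 0;
}

int main(void)
{
	printf("first: %d, second: %d\n", prep_scan(), prep_scan());
	return 0;
}
```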
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index b8f573a02713..7a160b86adc6 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -1627,12 +1627,12 @@ start:
*/
fallthrough;
case CLKS_OFF:
- ufshcd_scsi_block_requests(hba);
hba->clk_gating.state = REQ_CLKS_ON;
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
- queue_work(hba->clk_gating.clk_gating_workq,
- &hba->clk_gating.ungate_work);
+ if (queue_work(hba->clk_gating.clk_gating_workq,
+ &hba->clk_gating.ungate_work))
+ ufshcd_scsi_block_requests(hba);
/*
* fall through to check if we should wait for this
* work to be done or not.
@@ -2115,10 +2115,20 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
unsigned long flags;
if (wait_for_completion_timeout(&uic_cmd->done,
- msecs_to_jiffies(UIC_CMD_TIMEOUT)))
+ msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
- else
+ } else {
ret = -ETIMEDOUT;
+ dev_err(hba->dev,
+ "uic cmd 0x%x with arg3 0x%x completion timeout\n",
+ uic_cmd->command, uic_cmd->argument3);
+
+ if (!uic_cmd->cmd_active) {
+ dev_err(hba->dev, "%s: UIC cmd has been completed, return the result\n",
+ __func__);
+ ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
+ }
+ }
spin_lock_irqsave(hba->host->host_lock, flags);
hba->active_uic_cmd = NULL;
@@ -2150,6 +2160,7 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
if (completion)
init_completion(&uic_cmd->done);
+ uic_cmd->cmd_active = 1;
ufshcd_dispatch_uic_cmd(hba, uic_cmd);
return 0;
@@ -3807,10 +3818,18 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
dev_err(hba->dev,
"pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
cmd->command, cmd->argument3);
+
+ if (!cmd->cmd_active) {
+ dev_err(hba->dev, "%s: Power Mode Change operation has been completed, go check UPMCRS\n",
+ __func__);
+ goto check_upmcrs;
+ }
+
ret = -ETIMEDOUT;
goto out;
}
+check_upmcrs:
status = ufshcd_get_upmcrs(hba);
if (status != PWR_LOCAL) {
dev_err(hba->dev,
@@ -4902,11 +4921,14 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
ufshcd_get_uic_cmd_result(hba);
hba->active_uic_cmd->argument3 =
ufshcd_get_dme_attr_val(hba);
+ if (!hba->uic_async_done)
+ hba->active_uic_cmd->cmd_active = 0;
complete(&hba->active_uic_cmd->done);
retval = IRQ_HANDLED;
}
if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) {
+ hba->active_uic_cmd->cmd_active = 0;
complete(hba->uic_async_done);
retval = IRQ_HANDLED;
}
@@ -8906,6 +8928,7 @@ void ufshcd_remove(struct ufs_hba *hba)
blk_mq_free_tag_set(&hba->tmf_tag_set);
blk_cleanup_queue(hba->cmd_queue);
scsi_remove_host(hba->host);
+ destroy_workqueue(hba->eh_wq);
/* disable interrupts */
ufshcd_disable_intr(hba, hba->intr_mask);
ufshcd_hba_stop(hba);
@@ -9206,6 +9229,7 @@ out_remove_scsi_host:
exit_gating:
ufshcd_exit_clk_scaling(hba);
ufshcd_exit_clk_gating(hba);
+ destroy_workqueue(hba->eh_wq);
out_disable:
hba->is_irq_enabled = false;
ufshcd_hba_exit(hba);
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 47eb1430274c..e0f00a42371c 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -64,6 +64,7 @@ enum dev_cmd_type {
* @argument1: UIC command argument 1
* @argument2: UIC command argument 2
* @argument3: UIC command argument 3
+ * @cmd_active: Indicate if UIC command is outstanding
* @done: UIC command completion
*/
struct uic_command {
@@ -71,6 +72,7 @@ struct uic_command {
u32 argument1;
u32 argument2;
u32 argument3;
+ int cmd_active;
struct completion done;
};
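Editor's note: the new `cmd_active` flag resolves a classic completion/timeout race: the interrupt can fire after `wait_for_completion_timeout()` has expired but before the caller tears the command down, in which case the result is valid and should be returned instead of `-ETIMEDOUT`. A hedged sketch with an invented `demo_cmd` type; the ISR is assumed to clear `cmd_active` before completing `done`:

```c
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

struct demo_cmd {
	struct completion done;
	int cmd_active;		/* set at dispatch, cleared by the ISR */
	int result;
};

static int demo_wait(struct demo_cmd *cmd, unsigned long timeout_ms)
{
	if (wait_for_completion_timeout(&cmd->done,
					msecs_to_jiffies(timeout_ms)))
		return cmd->result;

	/* Timed out -- but the ISR may have completed the command in
	 * the meantime; a cleared flag means the result is usable. */
	if (!cmd->cmd_active)
		return cmd->result;

	return -ETIMEDOUT;
}
```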
diff --git a/drivers/soc/ti/ti_sci_pm_domains.c b/drivers/soc/ti/ti_sci_pm_domains.c
index af2126d2b2ff..8afb3f45d263 100644
--- a/drivers/soc/ti/ti_sci_pm_domains.c
+++ b/drivers/soc/ti/ti_sci_pm_domains.c
@@ -91,7 +91,7 @@ static struct generic_pm_domain *ti_sci_pd_xlate(
struct genpd_onecell_data *genpd_data = data;
unsigned int idx = genpdspec->args[0];
- if (genpdspec->args_count < 2)
+ if (genpdspec->args_count != 1 && genpdspec->args_count != 2)
return ERR_PTR(-EINVAL);
if (idx >= genpd_data->num_domains) {
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index b87116e9b413..7104cf17b848 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -1193,7 +1193,6 @@ static int bcm2835_spi_setup(struct spi_device *spi)
struct spi_controller *ctlr = spi->controller;
struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
struct gpio_chip *chip;
- enum gpio_lookup_flags lflags;
u32 cs;
/*
@@ -1259,21 +1258,9 @@ static int bcm2835_spi_setup(struct spi_device *spi)
if (!chip)
return 0;
- /*
- * Retrieve the corresponding GPIO line used for CS.
- * The inversion semantics will be handled by the GPIO core
- * code, so we pass GPIOD_OUT_LOW for "unasserted" and
- * the correct flag for inversion semantics. The SPI_CS_HIGH
- * on spi->mode cannot be checked for polarity in this case
- * as the flag use_gpio_descriptors enforces SPI_CS_HIGH.
- */
- if (of_property_read_bool(spi->dev.of_node, "spi-cs-high"))
- lflags = GPIO_ACTIVE_HIGH;
- else
- lflags = GPIO_ACTIVE_LOW;
spi->cs_gpiod = gpiochip_request_own_desc(chip, 8 - spi->chip_select,
DRV_NAME,
- lflags,
+ GPIO_LOOKUP_FLAGS_DEFAULT,
GPIOD_OUT_LOW);
if (IS_ERR(spi->cs_gpiod))
return PTR_ERR(spi->cs_gpiod);
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 3967afa465f0..1a08c1d584ab 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -1080,12 +1080,11 @@ MODULE_DEVICE_TABLE(of, fsl_dspi_dt_ids);
#ifdef CONFIG_PM_SLEEP
static int dspi_suspend(struct device *dev)
{
- struct spi_controller *ctlr = dev_get_drvdata(dev);
- struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);
+ struct fsl_dspi *dspi = dev_get_drvdata(dev);
if (dspi->irq)
disable_irq(dspi->irq);
- spi_controller_suspend(ctlr);
+ spi_controller_suspend(dspi->ctlr);
clk_disable_unprepare(dspi->clk);
pinctrl_pm_select_sleep_state(dev);
@@ -1095,8 +1094,7 @@ static int dspi_suspend(struct device *dev)
static int dspi_resume(struct device *dev)
{
- struct spi_controller *ctlr = dev_get_drvdata(dev);
- struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);
+ struct fsl_dspi *dspi = dev_get_drvdata(dev);
int ret;
pinctrl_pm_select_default_state(dev);
@@ -1104,7 +1102,7 @@ static int dspi_resume(struct device *dev)
ret = clk_prepare_enable(dspi->clk);
if (ret)
return ret;
- spi_controller_resume(ctlr);
+ spi_controller_resume(dspi->ctlr);
if (dspi->irq)
enable_irq(dspi->irq);
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 060b1f5c9b04..4b80e27ecdbf 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -1676,15 +1676,18 @@ static int spi_imx_probe(struct platform_device *pdev)
goto out_master_put;
}
- pm_runtime_enable(spi_imx->dev);
+ ret = clk_prepare_enable(spi_imx->clk_per);
+ if (ret)
+ goto out_master_put;
+
+ ret = clk_prepare_enable(spi_imx->clk_ipg);
+ if (ret)
+ goto out_put_per;
+
pm_runtime_set_autosuspend_delay(spi_imx->dev, MXC_RPM_TIMEOUT);
pm_runtime_use_autosuspend(spi_imx->dev);
-
- ret = pm_runtime_get_sync(spi_imx->dev);
- if (ret < 0) {
- dev_err(spi_imx->dev, "failed to enable clock\n");
- goto out_runtime_pm_put;
- }
+ pm_runtime_set_active(spi_imx->dev);
+ pm_runtime_enable(spi_imx->dev);
spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per);
/*
@@ -1722,8 +1725,12 @@ out_bitbang_start:
spi_imx_sdma_exit(spi_imx);
out_runtime_pm_put:
pm_runtime_dont_use_autosuspend(spi_imx->dev);
- pm_runtime_put_sync(spi_imx->dev);
+ pm_runtime_set_suspended(&pdev->dev);
pm_runtime_disable(spi_imx->dev);
+
+ clk_disable_unprepare(spi_imx->clk_ipg);
+out_put_per:
+ clk_disable_unprepare(spi_imx->clk_per);
out_master_put:
spi_master_put(master);
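Editor's note: the spi-imx probe reordering enables the clocks by hand first, then tells runtime PM the device is already active before enabling it, so the PM core's bookkeeping matches the hardware and the first runtime suspend balances the manual enable; the error path unwinds the clocks in reverse order. A hedged sketch of that sequence, with placeholder names:

```c
#include <linux/clk.h>
#include <linux/pm_runtime.h>

static int demo_probe_clocks(struct device *dev, struct clk *per,
			     struct clk *ipg)
{
	int ret;

	ret = clk_prepare_enable(per);
	if (ret)
		return ret;

	ret = clk_prepare_enable(ipg);
	if (ret)
		goto out_per;

	pm_runtime_set_active(dev);	/* PM state matches the hardware */
	pm_runtime_enable(dev);
	return 0;

out_per:
	clk_disable_unprepare(per);	/* unwind in reverse order */
	return ret;
}
```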
diff --git a/drivers/staging/comedi/drivers/cb_pcidas.c b/drivers/staging/comedi/drivers/cb_pcidas.c
index 48ec2ee953dc..d740c4782775 100644
--- a/drivers/staging/comedi/drivers/cb_pcidas.c
+++ b/drivers/staging/comedi/drivers/cb_pcidas.c
@@ -1342,6 +1342,7 @@ static int cb_pcidas_auto_attach(struct comedi_device *dev,
if (dev->irq && board->has_ao_fifo) {
dev->write_subdev = s;
s->subdev_flags |= SDF_CMD_WRITE;
+ s->len_chanlist = s->n_chan;
s->do_cmdtest = cb_pcidas_ao_cmdtest;
s->do_cmd = cb_pcidas_ao_cmd;
s->cancel = cb_pcidas_ao_cancel;
diff --git a/drivers/staging/fieldbus/anybuss/arcx-anybus.c b/drivers/staging/fieldbus/anybuss/arcx-anybus.c
index 5b8d0bae9ff3..b5fded15e8a6 100644
--- a/drivers/staging/fieldbus/anybuss/arcx-anybus.c
+++ b/drivers/staging/fieldbus/anybuss/arcx-anybus.c
@@ -293,7 +293,7 @@ static int controller_probe(struct platform_device *pdev)
regulator = devm_regulator_register(dev, &can_power_desc, &config);
if (IS_ERR(regulator)) {
err = PTR_ERR(regulator);
- goto out_reset;
+ goto out_ida;
}
/* make controller info visible to userspace */
cd->class_dev = kzalloc(sizeof(*cd->class_dev), GFP_KERNEL);
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
index cfb673a52b25..0bf545849b11 100644
--- a/drivers/staging/octeon/ethernet-mdio.c
+++ b/drivers/staging/octeon/ethernet-mdio.c
@@ -147,12 +147,6 @@ int cvm_oct_phy_setup_device(struct net_device *dev)
phy_node = of_parse_phandle(priv->of_node, "phy-handle", 0);
if (!phy_node && of_phy_is_fixed_link(priv->of_node)) {
- int rc;
-
- rc = of_phy_register_fixed_link(priv->of_node);
- if (rc)
- return rc;
-
phy_node = of_node_get(priv->of_node);
}
if (!phy_node)
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index 2c16230f993c..9ebd665e5d42 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -69,15 +69,17 @@ static inline int cvm_oct_check_rcv_error(struct cvmx_wqe *work)
else
port = work->word1.cn38xx.ipprt;
- if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64)) {
+ if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64))
/*
* Ignore length errors on min size packets. Some
* equipment incorrectly pads packets to 64+4FCS
* instead of 60+4FCS. Note these packets still get
* counted as frame errors.
*/
- } else if (work->word2.snoip.err_code == 5 ||
- work->word2.snoip.err_code == 7) {
+ return 0;
+
+ if (work->word2.snoip.err_code == 5 ||
+ work->word2.snoip.err_code == 7) {
/*
* We received a packet with either an alignment error
* or a FCS error. This may be signalling that we are
@@ -108,7 +110,10 @@ static inline int cvm_oct_check_rcv_error(struct cvmx_wqe *work)
/* Port received 0xd5 preamble */
work->packet_ptr.s.addr += i + 1;
work->word1.len -= i + 5;
- } else if ((*ptr & 0xf) == 0xd) {
+ return 0;
+ }
+
+ if ((*ptr & 0xf) == 0xd) {
/* Port received 0xd preamble */
work->packet_ptr.s.addr += i;
work->word1.len -= i + 4;
@@ -118,21 +123,20 @@ static inline int cvm_oct_check_rcv_error(struct cvmx_wqe *work)
((*(ptr + 1) & 0xf) << 4);
ptr++;
}
- } else {
- printk_ratelimited("Port %d unknown preamble, packet dropped\n",
- port);
- cvm_oct_free_work(work);
- return 1;
+ return 0;
}
+
+ printk_ratelimited("Port %d unknown preamble, packet dropped\n",
+ port);
+ cvm_oct_free_work(work);
+ return 1;
}
- } else {
- printk_ratelimited("Port %d receive error code %d, packet dropped\n",
- port, work->word2.snoip.err_code);
- cvm_oct_free_work(work);
- return 1;
}
- return 0;
+ printk_ratelimited("Port %d receive error code %d, packet dropped\n",
+ port, work->word2.snoip.err_code);
+ cvm_oct_free_work(work);
+ return 1;
}
static void copy_segments_to_skb(struct cvmx_wqe *work, struct sk_buff *skb)
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index 204f0b1e2739..5dea6e96ec90 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -13,6 +13,7 @@
#include <linux/phy.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
+#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
@@ -892,6 +893,14 @@ static int cvm_oct_probe(struct platform_device *pdev)
break;
}
+ if (priv->of_node && of_phy_is_fixed_link(priv->of_node)) {
+ if (of_phy_register_fixed_link(priv->of_node)) {
+ netdev_err(dev, "Failed to register fixed link for interface %d, port %d\n",
+ interface, priv->port);
+ dev->netdev_ops = NULL;
+ }
+ }
+
if (!dev->netdev_ops) {
free_netdev(dev);
} else if (register_netdev(dev) < 0) {
diff --git a/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c b/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
index 3a4202551cfc..9097bcbd67d8 100644
--- a/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
+++ b/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
@@ -179,6 +179,9 @@ struct vchiq_mmal_instance {
/* ordered workqueue to process all bulk operations */
struct workqueue_struct *bulk_wq;
+
+ /* handle for a vchiq instance */
+ struct vchiq_instance *vchiq_instance;
};
static struct mmal_msg_context *
@@ -1840,6 +1843,7 @@ int vchiq_mmal_finalise(struct vchiq_mmal_instance *instance)
mutex_unlock(&instance->vchiq_mutex);
+ vchiq_shutdown(instance->vchiq_instance);
flush_workqueue(instance->bulk_wq);
destroy_workqueue(instance->bulk_wq);
@@ -1856,6 +1860,7 @@ EXPORT_SYMBOL_GPL(vchiq_mmal_finalise);
int vchiq_mmal_init(struct vchiq_mmal_instance **out_instance)
{
int status;
+ int err = -ENODEV;
struct vchiq_mmal_instance *instance;
static struct vchiq_instance *vchiq_instance;
struct vchiq_service_params_kernel params = {
@@ -1890,17 +1895,21 @@ int vchiq_mmal_init(struct vchiq_mmal_instance **out_instance)
status = vchiq_connect(vchiq_instance);
if (status) {
pr_err("Failed to connect VCHI instance (status=%d)\n", status);
- return -EIO;
+ err = -EIO;
+ goto err_shutdown_vchiq;
}
instance = kzalloc(sizeof(*instance), GFP_KERNEL);
- if (!instance)
- return -ENOMEM;
+ if (!instance) {
+ err = -ENOMEM;
+ goto err_shutdown_vchiq;
+ }
mutex_init(&instance->vchiq_mutex);
instance->bulk_scratch = vmalloc(PAGE_SIZE);
+ instance->vchiq_instance = vchiq_instance;
mutex_init(&instance->context_map_lock);
idr_init_base(&instance->context_map, 1);
@@ -1932,7 +1941,9 @@ err_close_services:
err_free:
vfree(instance->bulk_scratch);
kfree(instance);
- return -ENODEV;
+err_shutdown_vchiq:
+ vchiq_shutdown(vchiq_instance);
+ return err;
}
EXPORT_SYMBOL_GPL(vchiq_mmal_init);
diff --git a/drivers/staging/wfx/Documentation/devicetree/bindings/net/wireless/silabs,wfx.yaml b/drivers/staging/wfx/Documentation/devicetree/bindings/net/wireless/silabs,wfx.yaml
index 43b5630c0407..510edd12ed19 100644
--- a/drivers/staging/wfx/Documentation/devicetree/bindings/net/wireless/silabs,wfx.yaml
+++ b/drivers/staging/wfx/Documentation/devicetree/bindings/net/wireless/silabs,wfx.yaml
@@ -24,7 +24,7 @@ description:
In addition, it is recommended to declare a mmc-pwrseq on SDIO host above
WFx. Without it, you may encounter issues with warm boot. The mmc-pwrseq
should be compatible with mmc-pwrseq-simple. Please consult
- Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.txt for more
+ Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.yaml for more
information.
For SPI':'
diff --git a/drivers/staging/wfx/bh.c b/drivers/staging/wfx/bh.c
index 2ffa587aefaa..ed53d0b45592 100644
--- a/drivers/staging/wfx/bh.c
+++ b/drivers/staging/wfx/bh.c
@@ -21,7 +21,7 @@ static void device_wakeup(struct wfx_dev *wdev)
if (!wdev->pdata.gpio_wakeup)
return;
- if (gpiod_get_value_cansleep(wdev->pdata.gpio_wakeup) >= 0)
+ if (gpiod_get_value_cansleep(wdev->pdata.gpio_wakeup) > 0)
return;
if (wfx_api_older_than(wdev, 1, 4)) {
diff --git a/drivers/staging/wfx/data_tx.c b/drivers/staging/wfx/data_tx.c
index 41f6a604a697..36b36ef39d05 100644
--- a/drivers/staging/wfx/data_tx.c
+++ b/drivers/staging/wfx/data_tx.c
@@ -31,13 +31,13 @@ static int wfx_get_hw_rate(struct wfx_dev *wdev,
}
return rate->idx + 14;
}
+ // WFx only supports 2GHz; otherwise the band information should be
+ // retrieved from ieee80211_tx_info
+ band = wdev->hw->wiphy->bands[NL80211_BAND_2GHZ];
if (rate->idx >= band->n_bitrates) {
WARN(1, "wrong rate->idx value: %d", rate->idx);
return -1;
}
- // WFx only support 2GHz, else band information should be retrieved
- // from ieee80211_tx_info
- band = wdev->hw->wiphy->bands[NL80211_BAND_2GHZ];
return band->bitrates[rate->idx].hw_value;
}
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index ea84d08747df..590e6d072228 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -194,7 +194,7 @@ struct tcmu_tmr {
uint8_t tmr_type;
uint32_t tmr_cmd_cnt;
- int16_t tmr_cmd_ids[0];
+ int16_t tmr_cmd_ids[];
};
/*
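Editor's note: `tmr_cmd_ids[0]` is the old GNU zero-length-array idiom; `tmr_cmd_ids[]` is the C99 flexible array member, which compilers and fortifiers can reason about. Such a struct is allocated in one chunk, header plus trailing elements; in the kernel, `struct_size(tmr, tmr_cmd_ids, n)` computes that size with overflow checking. A standalone sketch with a demo type:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_tmr {
	uint8_t tmr_type;
	uint32_t tmr_cmd_cnt;
	int16_t tmr_cmd_ids[];	/* flexible array member */
};

int main(void)
{
	uint32_t n = 4;
	struct demo_tmr *tmr;

	/* One allocation covers the header plus n trailing elements. */
	tmr = malloc(sizeof(*tmr) + n * sizeof(tmr->tmr_cmd_ids[0]));
	if (!tmr)
		return 1;

	tmr->tmr_cmd_cnt = n;
	for (uint32_t i = 0; i < n; i++)
		tmr->tmr_cmd_ids[i] = (int16_t)i;

	printf("%u ids stored\n", tmr->tmr_cmd_cnt);
	free(tmr);
	return 0;
}
```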
diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
index f53bf336c0a2..6ade4a5c4840 100644
--- a/drivers/tee/tee_core.c
+++ b/drivers/tee/tee_core.c
@@ -200,7 +200,8 @@ int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method,
int name_len;
int rc;
- if (connection_method == TEE_IOCTL_LOGIN_PUBLIC) {
+ if (connection_method == TEE_IOCTL_LOGIN_PUBLIC ||
+ connection_method == TEE_IOCTL_LOGIN_REE_KERNEL) {
/* Nil UUID to be passed to TEE environment */
uuid_copy(uuid, &uuid_null);
return 0;
diff --git a/drivers/tty/serial/21285.c b/drivers/tty/serial/21285.c
index 718e010fcb04..09baef4ccc39 100644
--- a/drivers/tty/serial/21285.c
+++ b/drivers/tty/serial/21285.c
@@ -50,25 +50,25 @@ static const char serial21285_name[] = "Footbridge UART";
static bool is_enabled(struct uart_port *port, int bit)
{
- unsigned long private_data = (unsigned long)port->private_data;
+ unsigned long *private_data = (unsigned long *)&port->private_data;
- if (test_bit(bit, &private_data))
+ if (test_bit(bit, private_data))
return true;
return false;
}
static void enable(struct uart_port *port, int bit)
{
- unsigned long private_data = (unsigned long)port->private_data;
+ unsigned long *private_data = (unsigned long *)&port->private_data;
- set_bit(bit, &private_data);
+ set_bit(bit, private_data);
}
static void disable(struct uart_port *port, int bit)
{
- unsigned long private_data = (unsigned long)port->private_data;
+ unsigned long *private_data = (unsigned long *)&port->private_data;
- clear_bit(bit, &private_data);
+ clear_bit(bit, private_data);
}
#define is_tx_enabled(port) is_enabled(port, tx_enabled_bit)
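Editor's note: the old helpers cast `port->private_data` into a local `unsigned long` and ran `set_bit()`/`clear_bit()` on that copy, so every update was silently thrown away; the fix points the bitops at the field's own storage. A standalone sketch of the bug, with minimal stand-ins for the kernel bitops and assuming `sizeof(void *) == sizeof(unsigned long)` as the driver does:

```c
#include <assert.h>

/* Minimal stand-ins for the kernel's set_bit()/test_bit(). */
static void set_bit_ul(int bit, unsigned long *addr)
{
	*addr |= 1UL << bit;
}

static int test_bit_ul(int bit, const unsigned long *addr)
{
	return (*addr >> bit) & 1;
}

int main(void)
{
	void *private_data = 0;	/* stands in for port->private_data */

	/* Buggy: the cast makes a local copy; the update is lost. */
	unsigned long copy = (unsigned long)private_data;
	set_bit_ul(0, &copy);
	assert(!test_bit_ul(0, (unsigned long *)&private_data));

	/* Fixed: operate on the field's storage itself. */
	set_bit_ul(0, (unsigned long *)&private_data);
	assert(test_bit_ul(0, (unsigned long *)&private_data));
	return 0;
}
```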
diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
index 41f4120abdf2..fa876e2c13e5 100644
--- a/drivers/tty/serial/8250/8250_mtk.c
+++ b/drivers/tty/serial/8250/8250_mtk.c
@@ -317,7 +317,7 @@ mtk8250_set_termios(struct uart_port *port, struct ktermios *termios,
*/
baud = tty_termios_baud_rate(termios);
- serial8250_do_set_termios(port, termios, old);
+ serial8250_do_set_termios(port, termios, NULL);
tty_termios_encode_baud_rate(termios, baud, baud);
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 1044fc387691..28f22e58639c 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -522,6 +522,7 @@ config SERIAL_IMX_EARLYCON
depends on OF
select SERIAL_EARLYCON
select SERIAL_CORE_CONSOLE
+ default y if SERIAL_IMX_CONSOLE
help
If you have enabled the earlycon on the Freescale IMX
CPU you can make it the earlycon by answering Y to this option.
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index ff4b88c637d0..bd047e1f9bea 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -314,9 +314,10 @@ MODULE_DEVICE_TABLE(of, lpuart_dt_ids);
/* Forward declare this for the dma callbacks*/
static void lpuart_dma_tx_complete(void *arg);
-static inline bool is_ls1028a_lpuart(struct lpuart_port *sport)
+static inline bool is_layerscape_lpuart(struct lpuart_port *sport)
{
- return sport->devtype == LS1028A_LPUART;
+ return (sport->devtype == LS1021A_LPUART ||
+ sport->devtype == LS1028A_LPUART);
}
static inline bool is_imx8qxp_lpuart(struct lpuart_port *sport)
@@ -1701,11 +1702,11 @@ static int lpuart32_startup(struct uart_port *port)
UARTFIFO_FIFOSIZE_MASK);
/*
- * The LS1028A has a fixed length of 16 words. Although it supports the
- * RX/TXSIZE fields their encoding is different. Eg the reference manual
- * states 0b101 is 16 words.
+ * The LS1021A and LS1028A have a fixed FIFO depth of 16 words.
+ * Although they support the RX/TXSIZE fields, their encoding is
+ * different. E.g. the reference manual states 0b101 is 16 words.
*/
- if (is_ls1028a_lpuart(sport)) {
+ if (is_layerscape_lpuart(sport)) {
sport->rxfifo_size = 16;
sport->txfifo_size = 16;
sport->port.fifosize = sport->txfifo_size;
diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c
index b4d89e31730e..7a07e7272de1 100644
--- a/drivers/tty/serial/serial_txx9.c
+++ b/drivers/tty/serial/serial_txx9.c
@@ -1280,6 +1280,9 @@ static int __init serial_txx9_init(void)
#ifdef ENABLE_SERIAL_TXX9_PCI
ret = pci_register_driver(&serial_txx9_pci_driver);
+ if (ret)
+ platform_driver_unregister(&serial_txx9_plat_driver);
#endif
if (ret == 0)
goto out;
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 7a4c02548fb3..9f8b9a567b35 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1515,10 +1515,12 @@ static void release_tty(struct tty_struct *tty, int idx)
tty->ops->shutdown(tty);
tty_save_termios(tty);
tty_driver_remove_tty(tty->driver, tty);
- tty->port->itty = NULL;
+ if (tty->port)
+ tty->port->itty = NULL;
if (tty->link)
tty->link->port->itty = NULL;
- tty_buffer_cancel_work(tty->port);
+ if (tty->port)
+ tty_buffer_cancel_work(tty->port);
if (tty->link)
tty_buffer_cancel_work(tty->link->port);
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index 0db53b5b3acf..78acc270e39a 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -743,8 +743,13 @@ static void k_fn(struct vc_data *vc, unsigned char value, char up_flag)
return;
if ((unsigned)value < ARRAY_SIZE(func_table)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&func_buf_lock, flags);
if (func_table[value])
puts_queue(vc, func_table[value]);
+ spin_unlock_irqrestore(&func_buf_lock, flags);
+
} else
pr_err("k_fn called with value=%d\n", value);
}
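The hunk above extends func_buf_lock to readers of func_table. A minimal sketch of the pattern with a hypothetical helper name: the _irqsave variant is needed because k_fn() can run from interrupt context, and holding the lock guarantees the entry is not freed by a concurrent KDSKBSENT:

	static void demo_emit(struct vc_data *vc, unsigned int i)
	{
		unsigned long flags;

		spin_lock_irqsave(&func_buf_lock, flags);
		if (func_table[i])
			/* entry cannot be freed while the lock is held */
			puts_queue(vc, func_table[i]);
		spin_unlock_irqrestore(&func_buf_lock, flags);
	}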
@@ -1991,13 +1996,11 @@ out:
#undef s
#undef v
-/* FIXME: This one needs untangling and locking */
+/* FIXME: This one needs untangling */
int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
{
struct kbsentry *kbs;
- char *p;
u_char *q;
- u_char __user *up;
int sz, fnw_sz;
int delta;
char *first_free, *fj, *fnw;
@@ -2023,23 +2026,19 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
i = array_index_nospec(kbs->kb_func, MAX_NR_FUNC);
switch (cmd) {
- case KDGKBSENT:
- sz = sizeof(kbs->kb_string) - 1; /* sz should have been
- a struct member */
- up = user_kdgkb->kb_string;
- p = func_table[i];
- if(p)
- for ( ; *p && sz; p++, sz--)
- if (put_user(*p, up++)) {
- ret = -EFAULT;
- goto reterr;
- }
- if (put_user('\0', up)) {
- ret = -EFAULT;
- goto reterr;
- }
- kfree(kbs);
- return ((p && *p) ? -EOVERFLOW : 0);
+ case KDGKBSENT: {
+ /* size should have been a struct member */
+ ssize_t len = sizeof(user_kdgkb->kb_string);
+
+ spin_lock_irqsave(&func_buf_lock, flags);
+ len = strlcpy(kbs->kb_string, func_table[i] ? : "", len);
+ spin_unlock_irqrestore(&func_buf_lock, flags);
+
+ ret = copy_to_user(user_kdgkb->kb_string, kbs->kb_string,
+ len + 1) ? -EFAULT : 0;
+
+ goto reterr;
+ }
case KDSKBSENT:
if (!perm) {
ret = -EPERM;
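The KDGKBSENT rewrite above uses a snapshot-then-copy pattern: the shared string is copied into a kernel buffer under the spinlock, and copy_to_user(), which may fault and sleep, runs only after the lock is dropped. A minimal sketch under those assumptions, with a hypothetical helper name:

	static int demo_get_string(unsigned int i, char __user *ubuf)
	{
		char kbuf[512];
		unsigned long flags;
		size_t len;

		spin_lock_irqsave(&func_buf_lock, flags);
		len = strlcpy(kbuf, func_table[i] ? : "", sizeof(kbuf));
		spin_unlock_irqrestore(&func_buf_lock, flags);

		if (len >= sizeof(kbuf))	/* strlcpy() returns the source length */
			len = sizeof(kbuf) - 1;

		return copy_to_user(ubuf, kbuf, len + 1) ? -EFAULT : 0;
	}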
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 9506a76f3ab6..d04a162939a4 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -4704,27 +4704,6 @@ static int con_font_default(struct vc_data *vc, struct console_font_op *op)
return rc;
}
-static int con_font_copy(struct vc_data *vc, struct console_font_op *op)
-{
- int con = op->height;
- int rc;
-
-
- console_lock();
- if (vc->vc_mode != KD_TEXT)
- rc = -EINVAL;
- else if (!vc->vc_sw->con_font_copy)
- rc = -ENOSYS;
- else if (con < 0 || !vc_cons_allocated(con))
- rc = -ENOTTY;
- else if (con == vc->vc_num) /* nothing to do */
- rc = 0;
- else
- rc = vc->vc_sw->con_font_copy(vc, con);
- console_unlock();
- return rc;
-}
-
int con_font_op(struct vc_data *vc, struct console_font_op *op)
{
switch (op->op) {
@@ -4735,7 +4714,8 @@ int con_font_op(struct vc_data *vc, struct console_font_op *op)
case KD_FONT_OP_SET_DEFAULT:
return con_font_default(vc, op);
case KD_FONT_OP_COPY:
- return con_font_copy(vc, op);
+ /* was buggy and never really used */
+ return -EINVAL;
}
return -ENOSYS;
}
diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
index 0a33b8ababe3..5f61b25a9aaa 100644
--- a/drivers/tty/vt/vt_ioctl.c
+++ b/drivers/tty/vt/vt_ioctl.c
@@ -484,7 +484,7 @@ static int vt_k_ioctl(struct tty_struct *tty, unsigned int cmd,
return 0;
}
-static inline int do_fontx_ioctl(int cmd,
+static inline int do_fontx_ioctl(struct vc_data *vc, int cmd,
struct consolefontdesc __user *user_cfd,
struct console_font_op *op)
{
@@ -502,15 +502,16 @@ static inline int do_fontx_ioctl(int cmd,
op->height = cfdarg.charheight;
op->charcount = cfdarg.charcount;
op->data = cfdarg.chardata;
- return con_font_op(vc_cons[fg_console].d, op);
- case GIO_FONTX: {
+ return con_font_op(vc, op);
+
+ case GIO_FONTX:
op->op = KD_FONT_OP_GET;
op->flags = KD_FONT_FLAG_OLD;
op->width = 8;
op->height = cfdarg.charheight;
op->charcount = cfdarg.charcount;
op->data = cfdarg.chardata;
- i = con_font_op(vc_cons[fg_console].d, op);
+ i = con_font_op(vc, op);
if (i)
return i;
cfdarg.charheight = op->height;
@@ -518,12 +519,11 @@ static inline int do_fontx_ioctl(int cmd,
if (copy_to_user(user_cfd, &cfdarg, sizeof(struct consolefontdesc)))
return -EFAULT;
return 0;
- }
}
return -EINVAL;
}
-static int vt_io_fontreset(struct console_font_op *op)
+static int vt_io_fontreset(struct vc_data *vc, struct console_font_op *op)
{
int ret;
@@ -537,19 +537,19 @@ static int vt_io_fontreset(struct console_font_op *op)
op->op = KD_FONT_OP_SET_DEFAULT;
op->data = NULL;
- ret = con_font_op(vc_cons[fg_console].d, op);
+ ret = con_font_op(vc, op);
if (ret)
return ret;
console_lock();
- con_set_default_unimap(vc_cons[fg_console].d);
+ con_set_default_unimap(vc);
console_unlock();
return 0;
}
static inline int do_unimap_ioctl(int cmd, struct unimapdesc __user *user_ud,
- struct vc_data *vc)
+ bool perm, struct vc_data *vc)
{
struct unimapdesc tmp;
@@ -557,9 +557,11 @@ static inline int do_unimap_ioctl(int cmd, struct unimapdesc __user *user_ud,
return -EFAULT;
switch (cmd) {
case PIO_UNIMAP:
+ if (!perm)
+ return -EPERM;
return con_set_unimap(vc, tmp.entry_ct, tmp.entries);
case GIO_UNIMAP:
- if (fg_console != vc->vc_num)
+ if (!perm && fg_console != vc->vc_num)
return -EPERM;
return con_get_unimap(vc, tmp.entry_ct, &(user_ud->entry_ct),
tmp.entries);
@@ -582,7 +584,7 @@ static int vt_io_ioctl(struct vc_data *vc, unsigned int cmd, void __user *up,
op.height = 0;
op.charcount = 256;
op.data = up;
- return con_font_op(vc_cons[fg_console].d, &op);
+ return con_font_op(vc, &op);
case GIO_FONT:
op.op = KD_FONT_OP_GET;
@@ -591,7 +593,7 @@ static int vt_io_ioctl(struct vc_data *vc, unsigned int cmd, void __user *up,
op.height = 32;
op.charcount = 256;
op.data = up;
- return con_font_op(vc_cons[fg_console].d, &op);
+ return con_font_op(vc, &op);
case PIO_CMAP:
if (!perm)
@@ -607,13 +609,13 @@ static int vt_io_ioctl(struct vc_data *vc, unsigned int cmd, void __user *up,
fallthrough;
case GIO_FONTX:
- return do_fontx_ioctl(cmd, up, &op);
+ return do_fontx_ioctl(vc, cmd, up, &op);
case PIO_FONTRESET:
if (!perm)
return -EPERM;
- return vt_io_fontreset(&op);
+ return vt_io_fontreset(vc, &op);
case PIO_SCRNMAP:
if (!perm)
@@ -639,10 +641,7 @@ static int vt_io_ioctl(struct vc_data *vc, unsigned int cmd, void __user *up,
case PIO_UNIMAP:
case GIO_UNIMAP:
- if (!perm)
- return -EPERM;
-
- return do_unimap_ioctl(cmd, up, vc);
+ return do_unimap_ioctl(cmd, up, perm, vc);
default:
return -ENOIOCTLCMD;
@@ -1067,8 +1066,9 @@ struct compat_consolefontdesc {
};
static inline int
-compat_fontx_ioctl(int cmd, struct compat_consolefontdesc __user *user_cfd,
- int perm, struct console_font_op *op)
+compat_fontx_ioctl(struct vc_data *vc, int cmd,
+ struct compat_consolefontdesc __user *user_cfd,
+ int perm, struct console_font_op *op)
{
struct compat_consolefontdesc cfdarg;
int i;
@@ -1086,7 +1086,8 @@ compat_fontx_ioctl(int cmd, struct compat_consolefontdesc __user *user_cfd,
op->height = cfdarg.charheight;
op->charcount = cfdarg.charcount;
op->data = compat_ptr(cfdarg.chardata);
- return con_font_op(vc_cons[fg_console].d, op);
+ return con_font_op(vc, op);
+
case GIO_FONTX:
op->op = KD_FONT_OP_GET;
op->flags = KD_FONT_FLAG_OLD;
@@ -1094,7 +1095,7 @@ compat_fontx_ioctl(int cmd, struct compat_consolefontdesc __user *user_cfd,
op->height = cfdarg.charheight;
op->charcount = cfdarg.charcount;
op->data = compat_ptr(cfdarg.chardata);
- i = con_font_op(vc_cons[fg_console].d, op);
+ i = con_font_op(vc, op);
if (i)
return i;
cfdarg.charheight = op->height;
@@ -1184,7 +1185,7 @@ long vt_compat_ioctl(struct tty_struct *tty,
*/
case PIO_FONTX:
case GIO_FONTX:
- return compat_fontx_ioctl(cmd, up, perm, &op);
+ return compat_fontx_ioctl(vc, cmd, up, perm, &op);
case KDFONTOP:
return compat_kdfontop_ioctl(up, perm, &op, vc);
diff --git a/drivers/usb/cdns3/ep0.c b/drivers/usb/cdns3/ep0.c
index 4761c852d9c4..d3121a32cc68 100644
--- a/drivers/usb/cdns3/ep0.c
+++ b/drivers/usb/cdns3/ep0.c
@@ -137,48 +137,36 @@ static int cdns3_req_ep0_set_configuration(struct cdns3_device *priv_dev,
struct usb_ctrlrequest *ctrl_req)
{
enum usb_device_state device_state = priv_dev->gadget.state;
- struct cdns3_endpoint *priv_ep;
u32 config = le16_to_cpu(ctrl_req->wValue);
int result = 0;
- int i;
switch (device_state) {
case USB_STATE_ADDRESS:
- /* Configure non-control EPs */
- for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++) {
- priv_ep = priv_dev->eps[i];
- if (!priv_ep)
- continue;
-
- if (priv_ep->flags & EP_CLAIMED)
- cdns3_ep_config(priv_ep);
- }
-
result = cdns3_ep0_delegate_req(priv_dev, ctrl_req);
- if (result)
- return result;
-
- if (!config) {
- cdns3_hw_reset_eps_config(priv_dev);
- usb_gadget_set_state(&priv_dev->gadget,
- USB_STATE_ADDRESS);
- }
+ if (result || !config)
+ goto reset_config;
break;
case USB_STATE_CONFIGURED:
result = cdns3_ep0_delegate_req(priv_dev, ctrl_req);
+ if (!config && !result)
+ goto reset_config;
- if (!config && !result) {
- cdns3_hw_reset_eps_config(priv_dev);
- usb_gadget_set_state(&priv_dev->gadget,
- USB_STATE_ADDRESS);
- }
break;
default:
- result = -EINVAL;
+ return -EINVAL;
}
+ return 0;
+
+reset_config:
+ if (result != USB_GADGET_DELAYED_STATUS)
+ cdns3_hw_reset_eps_config(priv_dev);
+
+ usb_gadget_set_state(&priv_dev->gadget,
+ USB_STATE_ADDRESS);
+
return result;
}
@@ -705,6 +693,7 @@ static int cdns3_gadget_ep0_queue(struct usb_ep *ep,
unsigned long flags;
int ret = 0;
u8 zlp = 0;
+ int i;
spin_lock_irqsave(&priv_dev->lock, flags);
trace_cdns3_ep0_queue(priv_dev, request);
@@ -720,6 +709,17 @@ static int cdns3_gadget_ep0_queue(struct usb_ep *ep,
u32 val;
cdns3_select_ep(priv_dev, 0x00);
+
+ /*
+	 * Configure all non-control EPs that are not enabled by the
+	 * class driver.
+ */
+ for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++) {
+ priv_ep = priv_dev->eps[i];
+ if (priv_ep && priv_ep->flags & EP_CLAIMED &&
+ !(priv_ep->flags & EP_ENABLED))
+ cdns3_ep_config(priv_ep, 0);
+ }
+
cdns3_set_hw_configuration(priv_dev);
cdns3_ep0_complete_setup(priv_dev, 0, 1);
/* wait until configuration set */
@@ -811,6 +811,7 @@ void cdns3_ep0_config(struct cdns3_device *priv_dev)
struct cdns3_usb_regs __iomem *regs;
struct cdns3_endpoint *priv_ep;
u32 max_packet_size = 64;
+ u32 ep_cfg;
regs = priv_dev->regs;
@@ -842,8 +843,10 @@ void cdns3_ep0_config(struct cdns3_device *priv_dev)
BIT(0) | BIT(16));
}
- writel(EP_CFG_ENABLE | EP_CFG_MAXPKTSIZE(max_packet_size),
- &regs->ep_cfg);
+ ep_cfg = EP_CFG_ENABLE | EP_CFG_MAXPKTSIZE(max_packet_size);
+
+ if (!(priv_ep->flags & EP_CONFIGURED))
+ writel(ep_cfg, &regs->ep_cfg);
writel(EP_STS_EN_SETUPEN | EP_STS_EN_DESCMISEN | EP_STS_EN_TRBERREN,
&regs->ep_sts_en);
@@ -851,8 +854,10 @@ void cdns3_ep0_config(struct cdns3_device *priv_dev)
/* init ep in */
cdns3_select_ep(priv_dev, USB_DIR_IN);
- writel(EP_CFG_ENABLE | EP_CFG_MAXPKTSIZE(max_packet_size),
- &regs->ep_cfg);
+ if (!(priv_ep->flags & EP_CONFIGURED))
+ writel(ep_cfg, &regs->ep_cfg);
+
+ priv_ep->flags |= EP_CONFIGURED;
writel(EP_STS_EN_SETUPEN | EP_STS_EN_TRBERREN, &regs->ep_sts_en);
diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c
index 6e7b70a2e352..66c1e6723eb1 100644
--- a/drivers/usb/cdns3/gadget.c
+++ b/drivers/usb/cdns3/gadget.c
@@ -296,6 +296,8 @@ static void cdns3_ep_stall_flush(struct cdns3_endpoint *priv_ep)
*/
void cdns3_hw_reset_eps_config(struct cdns3_device *priv_dev)
{
+ int i;
+
writel(USB_CONF_CFGRST, &priv_dev->regs->usb_conf);
cdns3_allow_enable_l1(priv_dev, 0);
@@ -304,6 +306,10 @@ void cdns3_hw_reset_eps_config(struct cdns3_device *priv_dev)
priv_dev->out_mem_is_allocated = 0;
priv_dev->wait_for_setup = 0;
priv_dev->using_streams = 0;
+
+ for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++)
+ if (priv_dev->eps[i])
+ priv_dev->eps[i]->flags &= ~EP_CONFIGURED;
}
/**
@@ -506,7 +512,6 @@ static void cdns3_wa2_descmiss_copy_data(struct cdns3_endpoint *priv_ep,
while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
int chunk_end;
- int length;
descmiss_priv_req =
cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);
@@ -517,7 +522,6 @@ static void cdns3_wa2_descmiss_copy_data(struct cdns3_endpoint *priv_ep,
break;
chunk_end = descmiss_priv_req->flags & REQUEST_INTERNAL_CH;
- length = request->actual + descmiss_req->actual;
request->status = descmiss_req->status;
__cdns3_descmiss_copy_data(request, descmiss_req);
list_del_init(&descmiss_priv_req->list);
@@ -1746,11 +1750,8 @@ static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep)
static void cdns3_disconnect_gadget(struct cdns3_device *priv_dev)
{
- if (priv_dev->gadget_driver && priv_dev->gadget_driver->disconnect) {
- spin_unlock(&priv_dev->lock);
+ if (priv_dev->gadget_driver && priv_dev->gadget_driver->disconnect)
priv_dev->gadget_driver->disconnect(&priv_dev->gadget);
- spin_lock(&priv_dev->lock);
- }
}
/**
@@ -1761,6 +1762,7 @@ static void cdns3_disconnect_gadget(struct cdns3_device *priv_dev)
*/
static void cdns3_check_usb_interrupt_proceed(struct cdns3_device *priv_dev,
u32 usb_ists)
+__must_hold(&priv_dev->lock)
{
int speed = 0;
@@ -1785,7 +1787,9 @@ static void cdns3_check_usb_interrupt_proceed(struct cdns3_device *priv_dev,
/* Disconnection detected */
if (usb_ists & (USB_ISTS_DIS2I | USB_ISTS_DISI)) {
+ spin_unlock(&priv_dev->lock);
cdns3_disconnect_gadget(priv_dev);
+ spin_lock(&priv_dev->lock);
priv_dev->gadget.speed = USB_SPEED_UNKNOWN;
usb_gadget_set_state(&priv_dev->gadget, USB_STATE_NOTATTACHED);
cdns3_hw_reset_eps_config(priv_dev);
@@ -1979,27 +1983,6 @@ static int cdns3_ep_onchip_buffer_reserve(struct cdns3_device *priv_dev,
return 0;
}
-static void cdns3_stream_ep_reconfig(struct cdns3_device *priv_dev,
- struct cdns3_endpoint *priv_ep)
-{
- if (!priv_ep->use_streams || priv_dev->gadget.speed < USB_SPEED_SUPER)
- return;
-
- if (priv_dev->dev_ver >= DEV_VER_V3) {
- u32 mask = BIT(priv_ep->num + (priv_ep->dir ? 16 : 0));
-
- /*
- * Stream capable endpoints are handled by using ep_tdl
- * register. Other endpoints use TDL from TRB feature.
- */
- cdns3_clear_register_bit(&priv_dev->regs->tdl_from_trb, mask);
- }
-
- /* Enable Stream Bit TDL chk and SID chk */
- cdns3_set_register_bit(&priv_dev->regs->ep_cfg, EP_CFG_STREAM_EN |
- EP_CFG_TDL_CHK | EP_CFG_SID_CHK);
-}
-
static void cdns3_configure_dmult(struct cdns3_device *priv_dev,
struct cdns3_endpoint *priv_ep)
{
@@ -2037,8 +2020,9 @@ static void cdns3_configure_dmult(struct cdns3_device *priv_dev,
/**
* cdns3_ep_config - Configure hardware endpoint
* @priv_ep: extended endpoint object
+ * @enable: set EP_CFG_ENABLE bit in ep_cfg register.
*/
-void cdns3_ep_config(struct cdns3_endpoint *priv_ep)
+int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
{
bool is_iso_ep = (priv_ep->type == USB_ENDPOINT_XFER_ISOC);
struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
@@ -2099,7 +2083,7 @@ void cdns3_ep_config(struct cdns3_endpoint *priv_ep)
break;
default:
/* all other speed are not supported */
- return;
+ return -EINVAL;
}
if (max_packet_size == 1024)
@@ -2109,11 +2093,33 @@ void cdns3_ep_config(struct cdns3_endpoint *priv_ep)
else
priv_ep->trb_burst_size = 16;
- ret = cdns3_ep_onchip_buffer_reserve(priv_dev, buffering + 1,
- !!priv_ep->dir);
- if (ret) {
- dev_err(priv_dev->dev, "onchip mem is full, ep is invalid\n");
- return;
+ /* onchip buffer is only allocated before configuration */
+ if (!priv_dev->hw_configured_flag) {
+ ret = cdns3_ep_onchip_buffer_reserve(priv_dev, buffering + 1,
+ !!priv_ep->dir);
+ if (ret) {
+ dev_err(priv_dev->dev, "onchip mem is full, ep is invalid\n");
+ return ret;
+ }
+ }
+
+ if (enable)
+ ep_cfg |= EP_CFG_ENABLE;
+
+ if (priv_ep->use_streams && priv_dev->gadget.speed >= USB_SPEED_SUPER) {
+ if (priv_dev->dev_ver >= DEV_VER_V3) {
+ u32 mask = BIT(priv_ep->num + (priv_ep->dir ? 16 : 0));
+
+ /*
+ * Stream capable endpoints are handled by using ep_tdl
+ * register. Other endpoints use TDL from TRB feature.
+ */
+ cdns3_clear_register_bit(&priv_dev->regs->tdl_from_trb,
+ mask);
+ }
+
+ /* Enable Stream Bit TDL chk and SID chk */
+ ep_cfg |= EP_CFG_STREAM_EN | EP_CFG_TDL_CHK | EP_CFG_SID_CHK;
}
ep_cfg |= EP_CFG_MAXPKTSIZE(max_packet_size) |
@@ -2123,9 +2129,12 @@ void cdns3_ep_config(struct cdns3_endpoint *priv_ep)
cdns3_select_ep(priv_dev, bEndpointAddress);
writel(ep_cfg, &priv_dev->regs->ep_cfg);
+ priv_ep->flags |= EP_CONFIGURED;
dev_dbg(priv_dev->dev, "Configure %s: with val %08x\n",
priv_ep->name, ep_cfg);
+
+ return 0;
}
/* Find correct direction for HW endpoint according to description */
@@ -2266,7 +2275,7 @@ static int cdns3_gadget_ep_enable(struct usb_ep *ep,
u32 bEndpointAddress;
unsigned long flags;
int enable = 1;
- int ret;
+ int ret = 0;
int val;
priv_ep = ep_to_cdns3_ep(ep);
@@ -2305,6 +2314,17 @@ static int cdns3_gadget_ep_enable(struct usb_ep *ep,
bEndpointAddress = priv_ep->num | priv_ep->dir;
cdns3_select_ep(priv_dev, bEndpointAddress);
+	/*
+	 * On some controller versions, during ISO OUT traffic the DMA may
+	 * read the transfer ring of an endpoint that has never received a
+	 * doorbell. The issue was only observed in simulation, but the
+	 * driver protects against it anyway: the ISO OUT endpoint is
+	 * enabled only later, just before the doorbell is set. This
+	 * special treatment of ISO OUT endpoints is recommended by the
+	 * controller specification.
+	 */
+ if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir)
+ enable = 0;
+
if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
/*
* Enable stream support (SS mode) related interrupts
@@ -2315,13 +2335,17 @@ static int cdns3_gadget_ep_enable(struct usb_ep *ep,
EP_STS_EN_SIDERREN | EP_STS_EN_MD_EXITEN |
EP_STS_EN_STREAMREN;
priv_ep->use_streams = true;
- cdns3_stream_ep_reconfig(priv_dev, priv_ep);
+ ret = cdns3_ep_config(priv_ep, enable);
priv_dev->using_streams |= true;
}
+ } else {
+ ret = cdns3_ep_config(priv_ep, enable);
}
- ret = cdns3_allocate_trb_pool(priv_ep);
+ if (ret)
+ goto exit;
+ ret = cdns3_allocate_trb_pool(priv_ep);
if (ret)
goto exit;
@@ -2351,20 +2375,6 @@ static int cdns3_gadget_ep_enable(struct usb_ep *ep,
writel(reg, &priv_dev->regs->ep_sts_en);
- /*
- * For some versions of controller at some point during ISO OUT traffic
- * DMA reads Transfer Ring for the EP which has never got doorbell.
- * This issue was detected only on simulation, but to avoid this issue
- * driver add protection against it. To fix it driver enable ISO OUT
- * endpoint before setting DRBL. This special treatment of ISO OUT
- * endpoints are recommended by controller specification.
- */
- if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir)
- enable = 0;
-
- if (enable)
- cdns3_set_register_bit(&priv_dev->regs->ep_cfg, EP_CFG_ENABLE);
-
ep->desc = desc;
priv_ep->flags &= ~(EP_PENDING_REQUEST | EP_STALLED | EP_STALL_PENDING |
EP_QUIRK_ISO_OUT_EN | EP_QUIRK_EXTRA_BUF_EN);
@@ -3265,10 +3275,13 @@ err0:
}
static int cdns3_gadget_suspend(struct cdns3 *cdns, bool do_wakeup)
+__must_hold(&cdns->lock)
{
struct cdns3_device *priv_dev = cdns->gadget_dev;
+ spin_unlock(&cdns->lock);
cdns3_disconnect_gadget(priv_dev);
+ spin_lock(&cdns->lock);
priv_dev->gadget.speed = USB_SPEED_UNKNOWN;
usb_gadget_set_state(&priv_dev->gadget, USB_STATE_NOTATTACHED);
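Both hunks above use the same lock-drop idiom: a callback that may sleep is invoked with the spinlock temporarily released, and __must_hold() documents for sparse that the lock is held on entry and on exit. A sketch with hypothetical types:

	static void demo_notify(struct demo_dev *d)
	__must_hold(&d->lock)
	{
		spin_unlock(&d->lock);
		d->driver->disconnect(&d->gadget);	/* may sleep, must not run under the lock */
		spin_lock(&d->lock);
	}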
diff --git a/drivers/usb/cdns3/gadget.h b/drivers/usb/cdns3/gadget.h
index 1ccecd237530..21fa461c518e 100644
--- a/drivers/usb/cdns3/gadget.h
+++ b/drivers/usb/cdns3/gadget.h
@@ -1072,7 +1072,7 @@ struct cdns3_trb {
#define TRB_TDL_SS_SIZE_GET(p) (((p) & GENMASK(23, 17)) >> 17)
/* transfer_len bitmasks - bits 31:24 */
-#define TRB_BURST_LEN(p) (((p) << 24) & GENMASK(31, 24))
+#define TRB_BURST_LEN(p) ((unsigned int)((p) << 24) & GENMASK(31, 24))
#define TRB_BURST_LEN_GET(p) (((p) & GENMASK(31, 24)) >> 24)
/* Data buffer pointer bitmasks*/
@@ -1159,6 +1159,7 @@ struct cdns3_endpoint {
#define EP_QUIRK_EXTRA_BUF_DET BIT(12)
#define EP_QUIRK_EXTRA_BUF_EN BIT(13)
#define EP_TDLCHK_EN BIT(15)
+#define EP_CONFIGURED BIT(16)
u32 flags;
struct cdns3_request *descmis_req;
@@ -1360,7 +1361,7 @@ void cdns3_gadget_giveback(struct cdns3_endpoint *priv_ep,
int cdns3_init_ep0(struct cdns3_device *priv_dev,
struct cdns3_endpoint *priv_ep);
void cdns3_ep0_config(struct cdns3_device *priv_dev);
-void cdns3_ep_config(struct cdns3_endpoint *priv_ep);
+int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable);
void cdns3_check_ep0_interrupt_proceed(struct cdns3_device *priv_dev, int dir);
int __cdns3_gadget_wakeup(struct cdns3_device *priv_dev);
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 30ef946a8e1a..1e7568867910 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -508,6 +508,7 @@ static void acm_read_bulk_callback(struct urb *urb)
"%s - cooling babbling device\n", __func__);
usb_mark_last_busy(acm->dev);
set_bit(rb->index, &acm->urbs_in_error_delay);
+ set_bit(ACM_ERROR_DELAY, &acm->flags);
cooldown = true;
break;
default:
@@ -533,7 +534,7 @@ static void acm_read_bulk_callback(struct urb *urb)
if (stopped || stalled || cooldown) {
if (stalled)
- schedule_work(&acm->work);
+ schedule_delayed_work(&acm->dwork, 0);
else if (cooldown)
schedule_delayed_work(&acm->dwork, HZ / 2);
return;
@@ -563,13 +564,13 @@ static void acm_write_bulk(struct urb *urb)
acm_write_done(acm, wb);
spin_unlock_irqrestore(&acm->write_lock, flags);
set_bit(EVENT_TTY_WAKEUP, &acm->flags);
- schedule_work(&acm->work);
+ schedule_delayed_work(&acm->dwork, 0);
}
static void acm_softint(struct work_struct *work)
{
int i;
- struct acm *acm = container_of(work, struct acm, work);
+ struct acm *acm = container_of(work, struct acm, dwork.work);
if (test_bit(EVENT_RX_STALL, &acm->flags)) {
smp_mb(); /* against acm_suspend() */
@@ -585,7 +586,7 @@ static void acm_softint(struct work_struct *work)
if (test_and_clear_bit(ACM_ERROR_DELAY, &acm->flags)) {
for (i = 0; i < acm->rx_buflimit; i++)
if (test_and_clear_bit(i, &acm->urbs_in_error_delay))
- acm_submit_read_urb(acm, i, GFP_NOIO);
+ acm_submit_read_urb(acm, i, GFP_KERNEL);
}
if (test_and_clear_bit(EVENT_TTY_WAKEUP, &acm->flags))
@@ -1351,7 +1352,6 @@ made_compressed_probe:
acm->ctrlsize = ctrlsize;
acm->readsize = readsize;
acm->rx_buflimit = num_rx_buf;
- INIT_WORK(&acm->work, acm_softint);
INIT_DELAYED_WORK(&acm->dwork, acm_softint);
init_waitqueue_head(&acm->wioctl);
spin_lock_init(&acm->write_lock);
@@ -1561,7 +1561,6 @@ static void acm_disconnect(struct usb_interface *intf)
}
acm_kill_urbs(acm);
- cancel_work_sync(&acm->work);
cancel_delayed_work_sync(&acm->dwork);
tty_unregister_device(acm_tty_driver, acm->minor);
@@ -1604,7 +1603,6 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
return 0;
acm_kill_urbs(acm);
- cancel_work_sync(&acm->work);
cancel_delayed_work_sync(&acm->dwork);
acm->urbs_in_error_delay = 0;
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index 9dce179d031b..8aef5eb769a0 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -112,8 +112,7 @@ struct acm {
# define ACM_ERROR_DELAY 3
unsigned long urbs_in_error_delay; /* these need to be restarted after a delay */
struct usb_cdc_line_coding line; /* bits, stop, parity */
- struct work_struct work; /* work queue entry for various purposes*/
- struct delayed_work dwork; /* for cool downs needed in error recovery */
+ struct delayed_work dwork; /* work queue entry for various purposes */
unsigned int ctrlin; /* input control lines (DCD, DSR, RI, break, overruns) */
unsigned int ctrlout; /* output control lines (DTR, RTS) */
struct async_icount iocount; /* counters for control line changes */
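The cdc-acm changes above fold the plain work_struct into the existing delayed_work: queueing a delayed_work with a delay of 0 runs it as soon as possible, so one item covers both the immediate and the cooldown cases. Illustrative usage, as in the patch:

	INIT_DELAYED_WORK(&acm->dwork, acm_softint);

	schedule_delayed_work(&acm->dwork, 0);		/* immediate, replaces schedule_work() */
	schedule_delayed_work(&acm->dwork, HZ / 2);	/* error-recovery cooldown */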
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 98b7449c11f3..4dfa44d6cc3c 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -839,6 +839,22 @@ const struct usb_device_id *usb_device_match_id(struct usb_device *udev,
return NULL;
}
+bool usb_driver_applicable(struct usb_device *udev,
+ struct usb_device_driver *udrv)
+{
+ if (udrv->id_table && udrv->match)
+ return usb_device_match_id(udev, udrv->id_table) != NULL &&
+ udrv->match(udev);
+
+ if (udrv->id_table)
+ return usb_device_match_id(udev, udrv->id_table) != NULL;
+
+ if (udrv->match)
+ return udrv->match(udev);
+
+ return false;
+}
+
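The combined matching rules of the new helper, summarized as a sketch (not part of the patch):

	/*
	 *   id_table   match()   usb_driver_applicable()
	 *   --------   -------   ------------------------------------------
	 *   present    present   id_table matches AND match() returns true
	 *   present    absent    id_table decides
	 *   absent     present   match() decides
	 *   absent     absent    false (usb_device_match() returns 1 instead)
	 */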
static int usb_device_match(struct device *dev, struct device_driver *drv)
{
/* devices and interfaces are handled separately */
@@ -853,17 +869,14 @@ static int usb_device_match(struct device *dev, struct device_driver *drv)
udev = to_usb_device(dev);
udrv = to_usb_device_driver(drv);
- if (udrv->id_table)
- return usb_device_match_id(udev, udrv->id_table) != NULL;
-
- if (udrv->match)
- return udrv->match(udev);
-
/* If the device driver under consideration does not have a
* id_table or a match function, then let the driver's probe
* function decide.
*/
- return 1;
+ if (!udrv->id_table && !udrv->match)
+ return 1;
+
+ return usb_driver_applicable(udev, udrv);
} else if (is_usb_interface(dev)) {
struct usb_interface *intf;
@@ -941,8 +954,7 @@ static int __usb_bus_reprobe_drivers(struct device *dev, void *data)
return 0;
udev = to_usb_device(dev);
- if (usb_device_match_id(udev, new_udriver->id_table) == NULL &&
- (!new_udriver->match || new_udriver->match(udev) == 0))
+ if (!usb_driver_applicable(udev, new_udriver))
return 0;
ret = device_reprobe(dev);
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index 22c887f5c497..26f9fb9f67ca 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -205,9 +205,7 @@ static int __check_for_non_generic_match(struct device_driver *drv, void *data)
udrv = to_usb_device_driver(drv);
if (udrv == &usb_generic_driver)
return 0;
- if (usb_device_match_id(udev, udrv->id_table) != NULL)
- return 1;
- return (udrv->match && udrv->match(udev));
+ return usb_driver_applicable(udev, udrv);
}
static bool usb_generic_driver_match(struct usb_device *udev)
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 10574fa3f927..a1e3a037a289 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -378,6 +378,9 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x0926, 0x3333), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
+ /* Kingston DataTraveler 3.0 */
+ { USB_DEVICE(0x0951, 0x1666), .driver_info = USB_QUIRK_NO_LPM },
+
/* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */
{ USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF },
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index c893f54a3420..82538daac8b8 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -74,6 +74,8 @@ extern int usb_match_device(struct usb_device *dev,
const struct usb_device_id *id);
extern const struct usb_device_id *usb_device_match_id(struct usb_device *udev,
const struct usb_device_id *id);
+extern bool usb_driver_applicable(struct usb_device *udev,
+ struct usb_device_driver *udrv);
extern void usb_forced_unbind_intf(struct usb_interface *intf);
extern void usb_unbind_and_rebind_marked_interfaces(struct usb_device *udev);
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index e2820676beb1..5f18acac7406 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -608,10 +608,13 @@ static int dwc2_driver_probe(struct platform_device *dev)
#endif /* CONFIG_USB_DWC2_PERIPHERAL || CONFIG_USB_DWC2_DUAL_ROLE */
return 0;
+#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
+ IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
error_debugfs:
dwc2_debugfs_exit(hsotg);
if (hsotg->hcd_enabled)
dwc2_hcd_remove(hsotg);
+#endif
error_drd:
dwc2_drd_exit(hsotg);
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index bdf0925da6b6..841daec70b6e 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/**
+/*
* core.c - DesignWare USB3 DRD Controller Core file
*
* Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 74323b10a64a..2f95f08ca511 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -1277,7 +1277,7 @@ struct dwc3_event_type {
#define DWC3_DEPEVT_EPCMDCMPLT 0x07
/**
- * struct dwc3_event_depvt - Device Endpoint Events
+ * struct dwc3_event_depevt - Device Endpoint Events
* @one_bit: indicates this is an endpoint event (not used)
* @endpoint_number: number of the endpoint
* @endpoint_event: The event we have:
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 242b6210380a..bae6a70664c8 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -40,6 +40,7 @@
#define PCI_DEVICE_ID_INTEL_TGPLP 0xa0ee
#define PCI_DEVICE_ID_INTEL_TGPH 0x43ee
#define PCI_DEVICE_ID_INTEL_JSP 0x4dee
+#define PCI_DEVICE_ID_INTEL_ADLS 0x7ae1
#define PCI_INTEL_BXT_DSM_GUID "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511"
#define PCI_INTEL_BXT_FUNC_PMU_PWR 4
@@ -367,6 +368,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_JSP),
(kernel_ulong_t) &dwc3_pci_intel_properties, },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLS),
+ (kernel_ulong_t) &dwc3_pci_intel_properties, },
+
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_NL_USB),
(kernel_ulong_t) &dwc3_pci_amd_properties, },
{ } /* Terminating Entry */
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 7be3903cb842..8b668ef46f7f 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -1058,10 +1058,11 @@ void dwc3_ep0_send_delayed_status(struct dwc3 *dwc)
{
unsigned int direction = !dwc->ep0_expect_in;
+ dwc->delayed_status = false;
+
if (dwc->ep0state != EP0_STATUS_PHASE)
return;
- dwc->delayed_status = false;
__dwc3_ep0_do_control_status(dwc, dwc->eps[direction]);
}
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 05b176c82cc5..c6d455f2bb92 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1245,7 +1245,7 @@ int usb_string_id(struct usb_composite_dev *cdev)
EXPORT_SYMBOL_GPL(usb_string_id);
/**
- * usb_string_ids() - allocate unused string IDs in batch
+ * usb_string_ids_tab() - allocate unused string IDs in batch
* @cdev: the device whose string descriptor IDs are being allocated
* @str: an array of usb_string objects to assign numbers to
* Context: single threaded during gadget setup
diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c
index e01e366d89cd..062dfac30399 100644
--- a/drivers/usb/gadget/legacy/raw_gadget.c
+++ b/drivers/usb/gadget/legacy/raw_gadget.c
@@ -564,9 +564,12 @@ static int raw_ioctl_event_fetch(struct raw_dev *dev, unsigned long value)
return -ENODEV;
}
length = min(arg.length, event->length);
- if (copy_to_user((void __user *)value, event, sizeof(*event) + length))
+ if (copy_to_user((void __user *)value, event, sizeof(*event) + length)) {
+ kfree(event);
return -EFAULT;
+ }
+ kfree(event);
return 0;
}
diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
index de528e3b0662..ad6ff9c4188e 100644
--- a/drivers/usb/gadget/udc/fsl_udc_core.c
+++ b/drivers/usb/gadget/udc/fsl_udc_core.c
@@ -1051,7 +1051,7 @@ static int fsl_ep_fifo_status(struct usb_ep *_ep)
u32 bitmask;
struct ep_queue_head *qh;
- if (!_ep || _ep->desc || !(_ep->desc->bEndpointAddress&0xF))
+ if (!_ep || !_ep->desc || !(_ep->desc->bEndpointAddress&0xF))
return -ENODEV;
ep = container_of(_ep, struct fsl_ep, ep);
diff --git a/drivers/usb/gadget/udc/goku_udc.c b/drivers/usb/gadget/udc/goku_udc.c
index 25c1d6ab5adb..3e1267d38774 100644
--- a/drivers/usb/gadget/udc/goku_udc.c
+++ b/drivers/usb/gadget/udc/goku_udc.c
@@ -1760,6 +1760,7 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err;
}
+ pci_set_drvdata(pdev, dev);
spin_lock_init(&dev->lock);
dev->pdev = pdev;
dev->gadget.ops = &goku_ops;
@@ -1793,7 +1794,6 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
dev->regs = (struct goku_udc_regs __iomem *) base;
- pci_set_drvdata(pdev, dev);
INFO(dev, "%s\n", driver_desc);
INFO(dev, "version: " DRIVER_VERSION " %s\n", dmastr());
INFO(dev, "irq %d, pci mem %p\n", pdev->irq, base);
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index e077b2ca53c5..869d9c4de5fc 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -479,8 +479,8 @@ static int tegra_ehci_probe(struct platform_device *pdev)
u_phy->otg->host = hcd_to_bus(hcd);
irq = platform_get_irq(pdev, 0);
- if (!irq) {
- err = -ENODEV;
+ if (irq < 0) {
+ err = irq;
goto cleanup_phy;
}
diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
index ae8f60f6e6a5..44a7e58a26e3 100644
--- a/drivers/usb/host/fsl-mph-dr-of.c
+++ b/drivers/usb/host/fsl-mph-dr-of.c
@@ -94,10 +94,13 @@ static struct platform_device *fsl_usb2_device_register(
pdev->dev.coherent_dma_mask = ofdev->dev.coherent_dma_mask;
- if (!pdev->dev.dma_mask)
+ if (!pdev->dev.dma_mask) {
pdev->dev.dma_mask = &ofdev->dev.coherent_dma_mask;
- else
- dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ } else {
+ retval = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (retval)
+ goto error;
+ }
retval = platform_device_add_data(pdev, pdata, sizeof(*pdata));
if (retval)
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index fe405cd38dbc..138ba4528dd3 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -2252,8 +2252,8 @@ static void xhci_create_rhub_port_array(struct xhci_hcd *xhci,
if (!rhub->num_ports)
return;
- rhub->ports = kcalloc_node(rhub->num_ports, sizeof(rhub->ports), flags,
- dev_to_node(dev));
+ rhub->ports = kcalloc_node(rhub->num_ports, sizeof(*rhub->ports),
+ flags, dev_to_node(dev));
for (i = 0; i < HCS_MAX_PORTS(xhci->hcs_params1); i++) {
if (xhci->hw_ports[i].rhub != rhub ||
xhci->hw_ports[i].hcd_portnum == DUPLICATE_ENTRY)
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index c26c06e5c88c..bf89172c43ca 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -23,6 +23,8 @@
#define SSIC_PORT_CFG2_OFFSET 0x30
#define PROG_DONE (1 << 30)
#define SSIC_PORT_UNUSED (1 << 31)
+#define SPARSE_DISABLE_BIT 17
+#define SPARSE_CNTL_ENABLE 0xC12C
/* Device for a quirk */
#define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73
@@ -161,6 +163,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
(pdev->device == 0x15e0 || pdev->device == 0x15e1))
xhci->quirks |= XHCI_SNPS_BROKEN_SUSPEND;
+ if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x15e5)
+ xhci->quirks |= XHCI_DISABLE_SPARSE;
+
if (pdev->vendor == PCI_VENDOR_ID_AMD)
xhci->quirks |= XHCI_TRUST_TX_LENGTH;
@@ -498,6 +503,15 @@ static void xhci_pme_quirk(struct usb_hcd *hcd)
readl(reg);
}
+static void xhci_sparse_control_quirk(struct usb_hcd *hcd)
+{
+ u32 reg;
+
+ reg = readl(hcd->regs + SPARSE_CNTL_ENABLE);
+ reg &= ~BIT(SPARSE_DISABLE_BIT);
+ writel(reg, hcd->regs + SPARSE_CNTL_ENABLE);
+}
+
static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
@@ -517,6 +531,9 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
if (xhci->quirks & XHCI_SSIC_PORT_UNUSED)
xhci_ssic_port_unused_quirk(hcd, true);
+ if (xhci->quirks & XHCI_DISABLE_SPARSE)
+ xhci_sparse_control_quirk(hcd);
+
ret = xhci_suspend(xhci, do_wakeup);
if (ret && (xhci->quirks & XHCI_SSIC_PORT_UNUSED))
xhci_ssic_port_unused_quirk(hcd, false);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 482fe8c5e3b4..d4a8d0efbbc4 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -3533,11 +3533,14 @@ static int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
udev->slot_id, ep_index);
vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
- xhci_debugfs_create_stream_files(xhci, vdev, ep_index);
}
xhci_free_command(xhci, config_cmd);
spin_unlock_irqrestore(&xhci->lock, flags);
+ for (i = 0; i < num_eps; i++) {
+ ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+ xhci_debugfs_create_stream_files(xhci, vdev, ep_index);
+ }
/* Subtract 1 for stream 0, which drivers can't use */
return num_streams - 1;
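The xhci hunk above moves debugfs file creation out of the locked region, since debugfs allocates memory and may sleep while xhci->lock is a spinlock. The resulting shape, as a sketch:

	spin_lock_irqsave(&xhci->lock, flags);
	vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;	/* state update stays under the lock */
	spin_unlock_irqrestore(&xhci->lock, flags);

	xhci_debugfs_create_stream_files(xhci, vdev, ep_index);	/* may sleep: only after unlock */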
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 8be88379c0fb..ebb359ebb261 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1877,6 +1877,7 @@ struct xhci_hcd {
#define XHCI_SNPS_BROKEN_SUSPEND BIT_ULL(35)
#define XHCI_RENESAS_FW_QUIRK BIT_ULL(36)
#define XHCI_SKIP_PHY_INIT BIT_ULL(37)
+#define XHCI_DISABLE_SPARSE BIT_ULL(38)
unsigned int num_active_eps;
unsigned int limit_active_eps;
diff --git a/drivers/usb/misc/apple-mfi-fastcharge.c b/drivers/usb/misc/apple-mfi-fastcharge.c
index b403094a6b3a..9de0171b5177 100644
--- a/drivers/usb/misc/apple-mfi-fastcharge.c
+++ b/drivers/usb/misc/apple-mfi-fastcharge.c
@@ -120,8 +120,10 @@ static int apple_mfi_fc_set_property(struct power_supply *psy,
dev_dbg(&mfi->udev->dev, "prop: %d\n", psp);
ret = pm_runtime_get_sync(&mfi->udev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(&mfi->udev->dev);
return ret;
+ }
switch (psp) {
case POWER_SUPPLY_PROP_CHARGE_TYPE:
@@ -163,17 +165,23 @@ static const struct power_supply_desc apple_mfi_fc_desc = {
.property_is_writeable = apple_mfi_fc_property_is_writeable
};
+static bool mfi_fc_match(struct usb_device *udev)
+{
+ int idProduct;
+
+ idProduct = le16_to_cpu(udev->descriptor.idProduct);
+ /* See comment above mfi_fc_id_table[] */
+ return (idProduct >= 0x1200 && idProduct <= 0x12ff);
+}
+
static int mfi_fc_probe(struct usb_device *udev)
{
struct power_supply_config battery_cfg = {};
struct mfi_device *mfi = NULL;
- int err, idProduct;
+ int err;
- idProduct = le16_to_cpu(udev->descriptor.idProduct);
- /* See comment above mfi_fc_id_table[] */
- if (idProduct < 0x1200 || idProduct > 0x12ff) {
+ if (!mfi_fc_match(udev))
return -ENODEV;
- }
mfi = kzalloc(sizeof(struct mfi_device), GFP_KERNEL);
if (!mfi) {
@@ -220,6 +228,7 @@ static struct usb_device_driver mfi_fc_driver = {
.probe = mfi_fc_probe,
.disconnect = mfi_fc_disconnect,
.id_table = mfi_fc_id_table,
+ .match = mfi_fc_match,
.generic_subclass = 1,
};
diff --git a/drivers/usb/mtu3/mtu3_gadget.c b/drivers/usb/mtu3/mtu3_gadget.c
index 1de5c9a1d20a..38f17d66d5bc 100644
--- a/drivers/usb/mtu3/mtu3_gadget.c
+++ b/drivers/usb/mtu3/mtu3_gadget.c
@@ -564,6 +564,7 @@ static int mtu3_gadget_stop(struct usb_gadget *g)
spin_unlock_irqrestore(&mtu->lock, flags);
+ synchronize_irq(mtu->irq);
return 0;
}
diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
index 821970609695..2e40908963da 100644
--- a/drivers/usb/serial/cyberjack.c
+++ b/drivers/usb/serial/cyberjack.c
@@ -357,11 +357,12 @@ static void cyberjack_write_bulk_callback(struct urb *urb)
struct device *dev = &port->dev;
int status = urb->status;
unsigned long flags;
+ bool resubmitted = false;
- set_bit(0, &port->write_urbs_free);
if (status) {
dev_dbg(dev, "%s - nonzero write bulk status received: %d\n",
__func__, status);
+ set_bit(0, &port->write_urbs_free);
return;
}
@@ -394,6 +395,8 @@ static void cyberjack_write_bulk_callback(struct urb *urb)
goto exit;
}
+ resubmitted = true;
+
dev_dbg(dev, "%s - priv->wrsent=%d\n", __func__, priv->wrsent);
dev_dbg(dev, "%s - priv->wrfilled=%d\n", __func__, priv->wrfilled);
@@ -410,6 +413,8 @@ static void cyberjack_write_bulk_callback(struct urb *urb)
exit:
spin_unlock_irqrestore(&priv->lock, flags);
+ if (!resubmitted)
+ set_bit(0, &port->write_urbs_free);
usb_serial_port_softint(port);
}
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 2a3bfd6f867e..54ca85cc920d 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -250,6 +250,7 @@ static void option_instat_callback(struct urb *urb);
#define QUECTEL_PRODUCT_EP06 0x0306
#define QUECTEL_PRODUCT_EM12 0x0512
#define QUECTEL_PRODUCT_RM500Q 0x0800
+#define QUECTEL_PRODUCT_EC200T 0x6026
#define CMOTECH_VENDOR_ID 0x16d8
#define CMOTECH_PRODUCT_6001 0x6001
@@ -1117,6 +1118,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
.driver_info = ZLP },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
@@ -1189,6 +1191,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1054, 0xff), /* Telit FT980-KS */
.driver_info = NCTRL(2) | RSVD(3) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1055, 0xff), /* Telit FN980 (PCIe) */
+ .driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
@@ -1201,6 +1205,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(0) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1203, 0xff), /* Telit LE910Cx (RNDIS) */
+ .driver_info = NCTRL(2) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
@@ -1215,6 +1221,10 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1230, 0xff), /* Telit LE910Cx (rmnet) */
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1231, 0xff), /* Telit LE910Cx (RNDIS) */
+ .driver_info = NCTRL(2) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, 0x1260),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
{ USB_DEVICE(TELIT_VENDOR_ID, 0x1261),
diff --git a/drivers/usb/typec/mux.c b/drivers/usb/typec/mux.c
index b069a5122aaa..cf720e944aaa 100644
--- a/drivers/usb/typec/mux.c
+++ b/drivers/usb/typec/mux.c
@@ -71,7 +71,7 @@ struct typec_switch *fwnode_typec_switch_get(struct fwnode_handle *fwnode)
EXPORT_SYMBOL_GPL(fwnode_typec_switch_get);
/**
- * typec_put_switch - Release USB Type-C orientation switch
+ * typec_switch_put - Release USB Type-C orientation switch
* @sw: USB Type-C orientation switch
*
* Decrement reference count for @sw.
diff --git a/drivers/usb/typec/stusb160x.c b/drivers/usb/typec/stusb160x.c
index ce0bd7b3ad88..2a618f02f4f1 100644
--- a/drivers/usb/typec/stusb160x.c
+++ b/drivers/usb/typec/stusb160x.c
@@ -544,11 +544,10 @@ static int stusb160x_get_fw_caps(struct stusb160x *chip,
*/
ret = fwnode_property_read_string(fwnode, "power-role", &cap_str);
if (!ret) {
- chip->port_type = typec_find_port_power_role(cap_str);
- if (chip->port_type < 0) {
- ret = chip->port_type;
+ ret = typec_find_port_power_role(cap_str);
+ if (ret < 0)
return ret;
- }
+ chip->port_type = ret;
}
chip->capability.type = chip->port_type;
@@ -565,15 +564,13 @@ static int stusb160x_get_fw_caps(struct stusb160x *chip,
*/
ret = fwnode_property_read_string(fwnode, "power-opmode", &cap_str);
if (!ret) {
- chip->pwr_opmode = typec_find_pwr_opmode(cap_str);
+ ret = typec_find_pwr_opmode(cap_str);
/* Power delivery not yet supported */
- if (chip->pwr_opmode < 0 ||
- chip->pwr_opmode == TYPEC_PWR_MODE_PD) {
- ret = chip->pwr_opmode < 0 ? chip->pwr_opmode : -EINVAL;
- dev_err(chip->dev, "bad power operation mode: %d\n",
- chip->pwr_opmode);
- return ret;
+ if (ret < 0 || ret == TYPEC_PWR_MODE_PD) {
+ dev_err(chip->dev, "bad power operation mode: %d\n", ret);
+ return -EINVAL;
}
+ chip->pwr_opmode = ret;
}
return 0;
@@ -632,6 +629,7 @@ static const struct of_device_id stusb160x_of_match[] = {
{ .compatible = "st,stusb1600", .data = &stusb1600_regmap_config},
{},
};
+MODULE_DEVICE_TABLE(of, stusb160x_of_match);
static int stusb160x_probe(struct i2c_client *client)
{
@@ -729,8 +727,8 @@ static int stusb160x_probe(struct i2c_client *client)
}
chip->port = typec_register_port(chip->dev, &chip->capability);
- if (!chip->port) {
- ret = -ENODEV;
+ if (IS_ERR(chip->port)) {
+ ret = PTR_ERR(chip->port);
goto all_reg_disable;
}
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 55535c4f66bf..a6fae1f86505 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -2890,6 +2890,9 @@ static void tcpm_reset_port(struct tcpm_port *port)
static void tcpm_detach(struct tcpm_port *port)
{
+ if (tcpm_port_is_disconnected(port))
+ port->hard_reset_count = 0;
+
if (!port->attached)
return;
@@ -2898,9 +2901,6 @@ static void tcpm_detach(struct tcpm_port *port)
port->tcpc->set_bist_data(port->tcpc, false);
}
- if (tcpm_port_is_disconnected(port))
- port->hard_reset_count = 0;
-
tcpm_reset_port(port);
}
diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
index ef1c550f8266..4b6195666c58 100644
--- a/drivers/vdpa/mlx5/core/mr.c
+++ b/drivers/vdpa/mlx5/core/mr.c
@@ -239,7 +239,6 @@ static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr
u64 paend;
struct scatterlist *sg;
struct device *dma = mvdev->mdev->device;
- int ret;
for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1);
map; map = vhost_iotlb_itree_next(map, start, mr->end - 1)) {
@@ -277,8 +276,8 @@ static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr
done:
mr->log_size = log_entity_size;
mr->nsg = nsg;
- ret = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
- if (!ret)
+ err = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
+ if (!err)
goto err_map;
err = create_direct_mr(mvdev, mr);
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index 2629911c29bb..6a90fdb9cbfc 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -38,6 +38,10 @@ static int batch_mapping = 1;
module_param(batch_mapping, int, 0444);
MODULE_PARM_DESC(batch_mapping, "Batched mapping: 1 - Enable; 0 - Disable");
+static char *macaddr;
+module_param(macaddr, charp, 0);
+MODULE_PARM_DESC(macaddr, "Ethernet MAC address");
+
struct vdpasim_virtqueue {
struct vringh vring;
struct vringh_kiov iov;
@@ -60,7 +64,8 @@ struct vdpasim_virtqueue {
static u64 vdpasim_features = (1ULL << VIRTIO_F_ANY_LAYOUT) |
(1ULL << VIRTIO_F_VERSION_1) |
- (1ULL << VIRTIO_F_ACCESS_PLATFORM);
+ (1ULL << VIRTIO_F_ACCESS_PLATFORM) |
+ (1ULL << VIRTIO_NET_F_MAC);
/* State of each vdpasim device */
struct vdpasim {
@@ -361,7 +366,9 @@ static struct vdpasim *vdpasim_create(void)
spin_lock_init(&vdpasim->iommu_lock);
dev = &vdpasim->vdpa.dev;
- dev->coherent_dma_mask = DMA_BIT_MASK(64);
+ dev->dma_mask = &dev->coherent_dma_mask;
+ if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
+ goto err_iommu;
set_dma_ops(dev, &vdpasim_dma_ops);
vdpasim->iommu = vhost_iotlb_alloc(2048, 0);
@@ -372,7 +379,15 @@ static struct vdpasim *vdpasim_create(void)
if (!vdpasim->buffer)
goto err_iommu;
- eth_random_addr(vdpasim->config.mac);
+ if (macaddr) {
+ mac_pton(macaddr, vdpasim->config.mac);
+ if (!is_valid_ether_addr(vdpasim->config.mac)) {
+ ret = -EADDRNOTAVAIL;
+ goto err_iommu;
+ }
+ } else {
+ eth_random_addr(vdpasim->config.mac);
+ }
vringh_set_iotlb(&vdpasim->vqs[0].vring, vdpasim->iommu);
vringh_set_iotlb(&vdpasim->vqs[1].vring, vdpasim->iommu);
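The vdpasim hunks above add a "macaddr" module parameter that overrides the random default, e.g. "modprobe vdpa_sim macaddr=52:54:00:12:34:56". A minimal sketch of the parsing, with a hypothetical helper name:

	static int demo_set_mac(struct virtio_net_config *config)
	{
		if (!macaddr) {
			eth_random_addr(config->mac);	/* default: random locally administered MAC */
			return 0;
		}

		if (!mac_pton(macaddr, config->mac) ||
		    !is_valid_ether_addr(config->mac))
			return -EADDRNOTAVAIL;		/* malformed, zero or multicast address */

		return 0;
	}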
@@ -574,6 +589,16 @@ static u32 vdpasim_get_generation(struct vdpa_device *vdpa)
return vdpasim->generation;
}
+static struct vdpa_iova_range vdpasim_get_iova_range(struct vdpa_device *vdpa)
+{
+ struct vdpa_iova_range range = {
+ .first = 0ULL,
+ .last = ULLONG_MAX,
+ };
+
+ return range;
+}
+
static int vdpasim_set_map(struct vdpa_device *vdpa,
struct vhost_iotlb *iotlb)
{
@@ -657,6 +682,7 @@ static const struct vdpa_config_ops vdpasim_net_config_ops = {
.get_config = vdpasim_get_config,
.set_config = vdpasim_set_config,
.get_generation = vdpasim_get_generation,
+ .get_iova_range = vdpasim_get_iova_range,
.dma_map = vdpasim_dma_map,
.dma_unmap = vdpasim_dma_unmap,
.free = vdpasim_free,
@@ -683,6 +709,7 @@ static const struct vdpa_config_ops vdpasim_net_batch_config_ops = {
.get_config = vdpasim_get_config,
.set_config = vdpasim_set_config,
.get_generation = vdpasim_get_generation,
+ .get_iova_range = vdpasim_get_iova_range,
.set_map = vdpasim_set_map,
.free = vdpasim_free,
};
diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc.c b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
index 0113a980f974..f27e25112c40 100644
--- a/drivers/vfio/fsl-mc/vfio_fsl_mc.c
+++ b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
@@ -248,7 +248,9 @@ static long vfio_fsl_mc_ioctl(void *device_data, unsigned int cmd,
info.size = vdev->regions[info.index].size;
info.flags = vdev->regions[info.index].flags;
- return copy_to_user((void __user *)arg, &info, minsz);
+ if (copy_to_user((void __user *)arg, &info, minsz))
+ return -EFAULT;
+ return 0;
}
case VFIO_DEVICE_GET_IRQ_INFO:
{
@@ -267,7 +269,9 @@ static long vfio_fsl_mc_ioctl(void *device_data, unsigned int cmd,
info.flags = VFIO_IRQ_INFO_EVENTFD;
info.count = 1;
- return copy_to_user((void __user *)arg, &info, minsz);
+ if (copy_to_user((void __user *)arg, &info, minsz))
+ return -EFAULT;
+ return 0;
}
case VFIO_DEVICE_SET_IRQS:
{
@@ -468,7 +472,7 @@ static int vfio_fsl_mc_mmap(void *device_data, struct vm_area_struct *vma)
{
struct vfio_fsl_mc_device *vdev = device_data;
struct fsl_mc_device *mc_dev = vdev->mc_dev;
- int index;
+ unsigned int index;
index = vma->vm_pgoff >> (VFIO_FSL_MC_OFFSET_SHIFT - PAGE_SHIFT);
diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
index c80dceb46f79..0d9f3002df7f 100644
--- a/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
+++ b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
@@ -13,7 +13,7 @@
#include "linux/fsl/mc.h"
#include "vfio_fsl_mc_private.h"
-int vfio_fsl_mc_irqs_allocate(struct vfio_fsl_mc_device *vdev)
+static int vfio_fsl_mc_irqs_allocate(struct vfio_fsl_mc_device *vdev)
{
struct fsl_mc_device *mc_dev = vdev->mc_dev;
struct vfio_fsl_mc_irq *mc_irq;
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index fbd2b3404184..e6190173482c 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -385,7 +385,7 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev)
pdev->vendor == PCI_VENDOR_ID_INTEL &&
IS_ENABLED(CONFIG_VFIO_PCI_IGD)) {
ret = vfio_pci_igd_init(vdev);
- if (ret) {
+ if (ret && ret != -ENODEV) {
pci_warn(pdev, "Failed to setup Intel IGD regions\n");
goto disable_exit;
}
diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c
index 9e353c484ace..a0b5fc8e46f4 100644
--- a/drivers/vfio/pci/vfio_pci_rdwr.c
+++ b/drivers/vfio/pci/vfio_pci_rdwr.c
@@ -356,34 +356,60 @@ ssize_t vfio_pci_vga_rw(struct vfio_pci_device *vdev, char __user *buf,
return done;
}
-static int vfio_pci_ioeventfd_handler(void *opaque, void *unused)
+static void vfio_pci_ioeventfd_do_write(struct vfio_pci_ioeventfd *ioeventfd,
+ bool test_mem)
{
- struct vfio_pci_ioeventfd *ioeventfd = opaque;
-
switch (ioeventfd->count) {
case 1:
- vfio_pci_iowrite8(ioeventfd->vdev, ioeventfd->test_mem,
+ vfio_pci_iowrite8(ioeventfd->vdev, test_mem,
ioeventfd->data, ioeventfd->addr);
break;
case 2:
- vfio_pci_iowrite16(ioeventfd->vdev, ioeventfd->test_mem,
+ vfio_pci_iowrite16(ioeventfd->vdev, test_mem,
ioeventfd->data, ioeventfd->addr);
break;
case 4:
- vfio_pci_iowrite32(ioeventfd->vdev, ioeventfd->test_mem,
+ vfio_pci_iowrite32(ioeventfd->vdev, test_mem,
ioeventfd->data, ioeventfd->addr);
break;
#ifdef iowrite64
case 8:
- vfio_pci_iowrite64(ioeventfd->vdev, ioeventfd->test_mem,
+ vfio_pci_iowrite64(ioeventfd->vdev, test_mem,
ioeventfd->data, ioeventfd->addr);
break;
#endif
}
+}
+
+static int vfio_pci_ioeventfd_handler(void *opaque, void *unused)
+{
+ struct vfio_pci_ioeventfd *ioeventfd = opaque;
+ struct vfio_pci_device *vdev = ioeventfd->vdev;
+
+ if (ioeventfd->test_mem) {
+ if (!down_read_trylock(&vdev->memory_lock))
+ return 1; /* Lock contended, use thread */
+ if (!__vfio_pci_memory_enabled(vdev)) {
+ up_read(&vdev->memory_lock);
+ return 0;
+ }
+ }
+
+ vfio_pci_ioeventfd_do_write(ioeventfd, false);
+
+ if (ioeventfd->test_mem)
+ up_read(&vdev->memory_lock);
return 0;
}
+static void vfio_pci_ioeventfd_thread(void *opaque, void *unused)
+{
+ struct vfio_pci_ioeventfd *ioeventfd = opaque;
+
+ vfio_pci_ioeventfd_do_write(ioeventfd, ioeventfd->test_mem);
+}
+
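The split above implements the trylock-or-defer idiom: the eventfd handler runs in atomic context, so it may only trylock the rwsem; returning 1 tells the virqfd core to re-run the operation from its thread handler, where blocking is allowed. The essential shape (the memory-enabled check from the patch is elided here):

	static int demo_handler(void *opaque, void *unused)
	{
		struct vfio_pci_ioeventfd *ioeventfd = opaque;

		if (!down_read_trylock(&ioeventfd->vdev->memory_lock))
			return 1;		/* contended: defer to the thread handler */

		vfio_pci_ioeventfd_do_write(ioeventfd, false);
		up_read(&ioeventfd->vdev->memory_lock);
		return 0;
	}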
long vfio_pci_ioeventfd(struct vfio_pci_device *vdev, loff_t offset,
uint64_t data, int count, int fd)
{
@@ -457,7 +483,8 @@ long vfio_pci_ioeventfd(struct vfio_pci_device *vdev, loff_t offset,
ioeventfd->test_mem = vdev->pdev->resource[bar].flags & IORESOURCE_MEM;
ret = vfio_virqfd_enable(ioeventfd, vfio_pci_ioeventfd_handler,
- NULL, NULL, &ioeventfd->virqfd, fd);
+ vfio_pci_ioeventfd_thread, NULL,
+ &ioeventfd->virqfd, fd);
if (ret) {
kfree(ioeventfd);
goto out_unlock;
diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
index c0771a9567fb..fb4b385191f2 100644
--- a/drivers/vfio/platform/vfio_platform_common.c
+++ b/drivers/vfio/platform/vfio_platform_common.c
@@ -267,7 +267,7 @@ static int vfio_platform_open(void *device_data)
ret = pm_runtime_get_sync(vdev->device);
if (ret < 0)
- goto err_pm;
+ goto err_rst;
ret = vfio_platform_call_reset(vdev, &extra_dbg);
if (ret && vdev->reset_required) {
@@ -284,7 +284,6 @@ static int vfio_platform_open(void *device_data)
err_rst:
pm_runtime_put(vdev->device);
-err_pm:
vfio_platform_irq_cleanup(vdev);
err_irq:
vfio_platform_regions_cleanup(vdev);
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index bb2684cc245e..67e827638995 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -1993,6 +1993,7 @@ static void vfio_iommu_iova_insert_copy(struct vfio_iommu *iommu,
list_splice_tail(iova_copy, iova);
}
+
static int vfio_iommu_type1_attach_group(void *iommu_data,
struct iommu_group *iommu_group)
{
@@ -2009,18 +2010,10 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
mutex_lock(&iommu->lock);
- list_for_each_entry(d, &iommu->domain_list, next) {
- if (find_iommu_group(d, iommu_group)) {
- mutex_unlock(&iommu->lock);
- return -EINVAL;
- }
- }
-
- if (iommu->external_domain) {
- if (find_iommu_group(iommu->external_domain, iommu_group)) {
- mutex_unlock(&iommu->lock);
- return -EINVAL;
- }
+ /* Check for duplicates */
+ if (vfio_iommu_find_iommu_group(iommu, iommu_group)) {
+ mutex_unlock(&iommu->lock);
+ return -EINVAL;
}
group = kzalloc(sizeof(*group), GFP_KERNEL);
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index a2dbc85e0b0d..2754f3069738 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -47,6 +47,7 @@ struct vhost_vdpa {
int minor;
struct eventfd_ctx *config_ctx;
int in_batch;
+ struct vdpa_iova_range range;
};
static DEFINE_IDA(vhost_vdpa_ida);
@@ -103,6 +104,9 @@ static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
vq->call_ctx.producer.token = vq->call_ctx.ctx;
vq->call_ctx.producer.irq = irq;
ret = irq_bypass_register_producer(&vq->call_ctx.producer);
+ if (unlikely(ret))
+		dev_info(&v->dev, "vq %u, irq bypass producer (token %p) registration failed, ret = %d\n",
+ qid, vq->call_ctx.producer.token, ret);
}
static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
@@ -337,6 +341,16 @@ static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
return 0;
}
+static long vhost_vdpa_get_iova_range(struct vhost_vdpa *v, u32 __user *argp)
+{
+ struct vhost_vdpa_iova_range range = {
+ .first = v->range.first,
+ .last = v->range.last,
+ };
+
+	return copy_to_user(argp, &range, sizeof(range)) ? -EFAULT : 0;
+}
+
static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
void __user *argp)
{
@@ -421,12 +435,11 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
void __user *argp = (void __user *)arg;
u64 __user *featurep = argp;
u64 features;
- long r;
+ long r = 0;
if (cmd == VHOST_SET_BACKEND_FEATURES) {
- r = copy_from_user(&features, featurep, sizeof(features));
- if (r)
- return r;
+ if (copy_from_user(&features, featurep, sizeof(features)))
+ return -EFAULT;
if (features & ~VHOST_VDPA_BACKEND_FEATURES)
return -EOPNOTSUPP;
vhost_set_backend_features(&v->vdev, features);
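A minimal sketch of the convention behind the fix above: copy_from_user()/copy_to_user() return the number of bytes that could NOT be copied, never an errno, so the result must be translated before it is handed back to the ioctl caller.

	if (copy_from_user(&features, featurep, sizeof(features)))
		return -EFAULT;		/* do not return the raw byte count */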
@@ -469,7 +482,11 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
break;
case VHOST_GET_BACKEND_FEATURES:
features = VHOST_VDPA_BACKEND_FEATURES;
- r = copy_to_user(featurep, &features, sizeof(features));
+ if (copy_to_user(featurep, &features, sizeof(features)))
+ r = -EFAULT;
+ break;
+ case VHOST_VDPA_GET_IOVA_RANGE:
+ r = vhost_vdpa_get_iova_range(v, argp);
break;
default:
r = vhost_dev_ioctl(&v->vdev, cmd, argp);
@@ -588,19 +605,25 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
struct vhost_dev *dev = &v->vdev;
struct vhost_iotlb *iotlb = dev->iotlb;
struct page **page_list;
- struct vm_area_struct **vmas;
+ unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
unsigned int gup_flags = FOLL_LONGTERM;
- unsigned long map_pfn, last_pfn = 0;
- unsigned long npages, lock_limit;
- unsigned long i, nmap = 0;
+ unsigned long npages, cur_base, map_pfn, last_pfn = 0;
+ unsigned long locked, lock_limit, pinned, i;
u64 iova = msg->iova;
- long pinned;
int ret = 0;
+ if (msg->iova < v->range.first ||
+ msg->iova + msg->size - 1 > v->range.last)
+ return -EINVAL;
+
if (vhost_iotlb_itree_first(iotlb, msg->iova,
msg->iova + msg->size - 1))
return -EEXIST;
+ page_list = (struct page **) __get_free_page(GFP_KERNEL);
+ if (!page_list)
+ return -ENOMEM;
+
if (msg->perm & VHOST_ACCESS_WO)
gup_flags |= FOLL_WRITE;
@@ -608,86 +631,61 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
-	if (!npages)
-		return -EINVAL;
+	if (!npages) {
+		free_page((unsigned long)page_list);
+		return -EINVAL;
+	}
- page_list = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
- vmas = kvmalloc_array(npages, sizeof(struct vm_area_struct *),
- GFP_KERNEL);
- if (!page_list || !vmas) {
- ret = -ENOMEM;
- goto free;
- }
-
mmap_read_lock(dev->mm);
+ locked = atomic64_add_return(npages, &dev->mm->pinned_vm);
lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
- if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
- ret = -ENOMEM;
- goto unlock;
- }
- pinned = pin_user_pages(msg->uaddr & PAGE_MASK, npages, gup_flags,
- page_list, vmas);
- if (npages != pinned) {
- if (pinned < 0) {
- ret = pinned;
- } else {
- unpin_user_pages(page_list, pinned);
- ret = -ENOMEM;
- }
- goto unlock;
+ if (locked > lock_limit) {
+ ret = -ENOMEM;
+ goto out;
}
+ cur_base = msg->uaddr & PAGE_MASK;
iova &= PAGE_MASK;
- map_pfn = page_to_pfn(page_list[0]);
-
- /* One more iteration to avoid extra vdpa_map() call out of loop. */
- for (i = 0; i <= npages; i++) {
- unsigned long this_pfn;
- u64 csize;
-
- /* The last chunk may have no valid PFN next to it */
- this_pfn = i < npages ? page_to_pfn(page_list[i]) : -1UL;
-
- if (last_pfn && (this_pfn == -1UL ||
- this_pfn != last_pfn + 1)) {
- /* Pin a contiguous chunk of memory */
- csize = last_pfn - map_pfn + 1;
- ret = vhost_vdpa_map(v, iova, csize << PAGE_SHIFT,
- map_pfn << PAGE_SHIFT,
- msg->perm);
- if (ret) {
- /*
- * Unpin the rest chunks of memory on the
- * flight with no corresponding vdpa_map()
- * calls having been made yet. On the other
- * hand, vdpa_unmap() in the failure path
- * is in charge of accounting the number of
- * pinned pages for its own.
- * This asymmetrical pattern of accounting
- * is for efficiency to pin all pages at
- * once, while there is no other callsite
- * of vdpa_map() than here above.
- */
- unpin_user_pages(&page_list[nmap],
- npages - nmap);
- goto out;
+
+ while (npages) {
+ pinned = min_t(unsigned long, npages, list_size);
+		ret = pin_user_pages(cur_base, pinned,
+				     gup_flags, page_list, NULL);
+		if (ret != pinned) {
+			/* Unpin any partial batch before failing */
+			if (ret > 0)
+				unpin_user_pages(page_list, ret);
+			ret = ret < 0 ? ret : -ENOMEM;
+			goto out;
+		}
+
+ if (!last_pfn)
+ map_pfn = page_to_pfn(page_list[0]);
+
+ for (i = 0; i < ret; i++) {
+ unsigned long this_pfn = page_to_pfn(page_list[i]);
+ u64 csize;
+
+ if (last_pfn && (this_pfn != last_pfn + 1)) {
+ /* Pin a contiguous chunk of memory */
+ csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
+ if (vhost_vdpa_map(v, iova, csize,
+ map_pfn << PAGE_SHIFT,
+ msg->perm))
+ goto out;
+ map_pfn = this_pfn;
+ iova += csize;
}
- atomic64_add(csize, &dev->mm->pinned_vm);
- nmap += csize;
- iova += csize << PAGE_SHIFT;
- map_pfn = this_pfn;
+
+ last_pfn = this_pfn;
}
- last_pfn = this_pfn;
+
+ cur_base += ret << PAGE_SHIFT;
+ npages -= ret;
}
- WARN_ON(nmap != npages);
+	/* Map the remaining contiguous chunk */
+ ret = vhost_vdpa_map(v, iova, (last_pfn - map_pfn + 1) << PAGE_SHIFT,
+ map_pfn << PAGE_SHIFT, msg->perm);
out:
- if (ret)
+ if (ret) {
vhost_vdpa_unmap(v, msg->iova, msg->size);
-unlock:
+ atomic64_sub(npages, &dev->mm->pinned_vm);
+ }
mmap_read_unlock(dev->mm);
-free:
- kvfree(vmas);
- kvfree(page_list);
+ free_page((unsigned long)page_list);
return ret;
}
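The rewritten loop above caps its bookkeeping at one page of struct page pointers
(list_size entries per pin_user_pages() batch) instead of allocating arrays sized
by the whole request, and it coalesces physically contiguous pages into one
vhost_vdpa_map() call per run. The coalescing logic in isolation, as a
self-contained userspace sketch over an array of PFNs (map_run() and the sample
data are illustrative only):

	#include <stdio.h>

	/* Stand-in for vhost_vdpa_map(): "maps" one contiguous PFN run. */
	static void map_run(unsigned long iova_pfn, unsigned long first_pfn,
			    unsigned long npfns)
	{
		printf("map iova pfn %lu -> pfn [%lu, %lu]\n",
		       iova_pfn, first_pfn, first_pfn + npfns - 1);
	}

	/* Coalesce contiguous PFNs into runs, as the batched loop does. */
	static void map_coalesced(unsigned long iova_pfn,
				  const unsigned long *pfns, unsigned long n)
	{
		unsigned long map_pfn = pfns[0], last_pfn = pfns[0], i;

		for (i = 1; i < n; i++) {
			if (pfns[i] != last_pfn + 1) {
				unsigned long run = last_pfn - map_pfn + 1;

				map_run(iova_pfn, map_pfn, run);
				iova_pfn += run;
				map_pfn = pfns[i];
			}
			last_pfn = pfns[i];
		}
		/* Final run, mirroring the map call after the while loop. */
		map_run(iova_pfn, map_pfn, last_pfn - map_pfn + 1);
	}

	int main(void)
	{
		unsigned long pfns[] = { 10, 11, 12, 40, 41, 100 };

		map_coalesced(0, pfns, sizeof(pfns) / sizeof(pfns[0]));
		return 0;
	}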
@@ -783,6 +781,27 @@ static void vhost_vdpa_free_domain(struct vhost_vdpa *v)
v->domain = NULL;
}
+static void vhost_vdpa_set_iova_range(struct vhost_vdpa *v)
+{
+ struct vdpa_iova_range *range = &v->range;
+ struct iommu_domain_geometry geo;
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ if (ops->get_iova_range) {
+ *range = ops->get_iova_range(vdpa);
+ } else if (v->domain &&
+ !iommu_domain_get_attr(v->domain,
+ DOMAIN_ATTR_GEOMETRY, &geo) &&
+ geo.force_aperture) {
+ range->first = geo.aperture_start;
+ range->last = geo.aperture_end;
+ } else {
+ range->first = 0;
+ range->last = ULLONG_MAX;
+ }
+}
+
static int vhost_vdpa_open(struct inode *inode, struct file *filep)
{
struct vhost_vdpa *v;
@@ -823,6 +842,8 @@ static int vhost_vdpa_open(struct inode *inode, struct file *filep)
if (r)
goto err_init_iotlb;
+ vhost_vdpa_set_iova_range(v);
+
filep->private_data = v;
return 0;
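vhost_vdpa_set_iova_range() above resolves the window in order of preference: the
parent driver's own report, then a forced IOMMU aperture, then the full 64-bit
space. A parent vdpa driver opts in by implementing the get_iova_range config op;
a hypothetical sketch (the driver name and the 40-bit limit are illustrative, the
by-value return matches how ops->get_iova_range() is consumed above):

	/* Hypothetical parent driver advertising a 40-bit IOVA space. */
	static struct vdpa_iova_range my_vdpa_get_iova_range(struct vdpa_device *vdpa)
	{
		struct vdpa_iova_range range = {
			.first = 0,
			.last = (1ULL << 40) - 1,
		};

		return range;
	}

	static const struct vdpa_config_ops my_vdpa_config_ops = {
		/* ... mandatory ops elided ... */
		.get_iova_range = my_vdpa_get_iova_range,
	};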
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index 02411d89cb46..5bc86f481a78 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -47,6 +47,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/fb.h>
@@ -1114,8 +1115,15 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
getmem_done:
remove_conflicting_framebuffers(info->apertures,
KBUILD_MODNAME, false);
- if (!gen2vm)
+
+ if (gen2vm) {
+		/* Framebuffer was reallocated; clear screen_info so a kexec'd kernel will not reuse the stale address */
+ screen_info.lfb_size = 0;
+ screen_info.lfb_base = 0;
+ screen_info.orig_video_isVGA = 0;
+ } else {
pci_dev_put(pdev);
+ }
kfree(info->apertures);
return 0;
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 71ce1b7a23d1..2b385c1b4a99 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -395,8 +395,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
*/
trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
- map = swiotlb_tbl_map_single(dev, virt_to_phys(xen_io_tlb_start),
- phys, size, size, dir, attrs);
+ map = swiotlb_tbl_map_single(dev, phys, size, size, dir, attrs);
if (map == (phys_addr_t)DMA_MAPPING_ERROR)
return DMA_MAPPING_ERROR;