Diffstat (limited to 'drivers')
-rw-r--r-- drivers/acpi/acpi_video.c | 16
-rw-r--r-- drivers/acpi/bus.c | 11
-rw-r--r-- drivers/acpi/cppc_acpi.c | 26
-rw-r--r-- drivers/amba/bus.c | 8
-rw-r--r-- drivers/ata/libata-core.c | 25
-rw-r--r-- drivers/ata/libata-scsi.c | 2
-rw-r--r-- drivers/ata/libata-transport.c | 2
-rw-r--r-- drivers/ata/pata_cs5535.c | 4
-rw-r--r-- drivers/ata/pata_octeon_cf.c | 3
-rw-r--r-- drivers/base/core.c | 13
-rw-r--r-- drivers/base/cpu.c | 16
-rw-r--r-- drivers/base/init.c | 2
-rw-r--r-- drivers/base/memory.c | 2
-rw-r--r-- drivers/base/power/runtime.c | 34
-rw-r--r-- drivers/base/regmap/regmap-irq.c | 8
-rw-r--r-- drivers/base/regmap/regmap.c | 15
-rw-r--r-- drivers/block/xen-blkfront.c | 73
-rw-r--r-- drivers/bus/bt1-apb.c | 14
-rw-r--r-- drivers/bus/bt1-axi.c | 14
-rw-r--r-- drivers/bus/fsl-mc/fsl-mc-bus.c | 6
-rw-r--r-- drivers/char/Kconfig | 50
-rw-r--r-- drivers/char/hw_random/virtio-rng.c | 2
-rw-r--r-- drivers/char/lp.c | 2
-rw-r--r-- drivers/char/random.c | 47
-rw-r--r-- drivers/clk/clk-lan966x.c | 2
-rw-r--r-- drivers/clk/stm32/reset-stm32.c | 1
-rw-r--r-- drivers/clocksource/hyperv_timer.c | 1
-rw-r--r-- drivers/comedi/drivers/vmk80xx.c | 2
-rw-r--r-- drivers/cpufreq/amd-pstate.c | 24
-rw-r--r-- drivers/cpufreq/cpufreq-dt-platdev.c | 1
-rw-r--r-- drivers/cpufreq/mediatek-cpufreq.c | 8
-rw-r--r-- drivers/cpufreq/pmac32-cpufreq.c | 4
-rw-r--r-- drivers/cpufreq/qcom-cpufreq-hw.c | 6
-rw-r--r-- drivers/cpufreq/qoriq-cpufreq.c | 1
-rw-r--r-- drivers/crypto/Kconfig | 115
-rw-r--r-- drivers/crypto/ccp/sp-platform.c | 12
-rw-r--r-- drivers/cxl/core/hdm.c | 2
-rw-r--r-- drivers/cxl/core/mbox.c | 6
-rw-r--r-- drivers/cxl/core/port.c | 2
-rw-r--r-- drivers/cxl/cxl.h | 1
-rw-r--r-- drivers/cxl/cxlmem.h | 8
-rw-r--r-- drivers/cxl/mem.c | 7
-rw-r--r-- drivers/cxl/pmem.c | 6
-rw-r--r-- drivers/devfreq/devfreq.c | 76
-rw-r--r-- drivers/devfreq/event/exynos-ppmu.c | 8
-rw-r--r-- drivers/devfreq/exynos-bus.c | 6
-rw-r--r-- drivers/devfreq/governor_passive.c | 62
-rw-r--r-- drivers/dma-buf/dma-resv.c | 2
-rw-r--r-- drivers/dma-buf/udmabuf.c | 5
-rw-r--r-- drivers/dma/at_xdmac.c | 5
-rw-r--r-- drivers/dma/dmatest.c | 13
-rw-r--r-- drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c | 8
-rw-r--r-- drivers/dma/idxd/device.c | 5
-rw-r--r-- drivers/dma/idxd/init.c | 13
-rw-r--r-- drivers/dma/imx-sdma.c | 4
-rw-r--r-- drivers/dma/lgm/lgm-dma.c | 3
-rw-r--r-- drivers/dma/pl330.c | 2
-rw-r--r-- drivers/dma/qcom/bam_dma.c | 39
-rw-r--r-- drivers/dma/ti/dma-crossbar.c | 5
-rw-r--r-- drivers/firewire/core-cdev.c | 2
-rw-r--r-- drivers/firewire/core-device.c | 6
-rw-r--r-- drivers/firmware/arm_scmi/base.c | 24
-rw-r--r-- drivers/firmware/arm_scmi/bus.c | 6
-rw-r--r-- drivers/firmware/arm_scmi/clock.c | 33
-rw-r--r-- drivers/firmware/arm_scmi/driver.c | 1
-rw-r--r-- drivers/firmware/arm_scmi/optee.c | 7
-rw-r--r-- drivers/firmware/arm_scmi/perf.c | 6
-rw-r--r-- drivers/firmware/arm_scmi/power.c | 2
-rw-r--r-- drivers/firmware/arm_scmi/protocols.h | 5
-rw-r--r-- drivers/firmware/arm_scmi/reset.c | 2
-rw-r--r-- drivers/firmware/arm_scmi/sensors.c | 68
-rw-r--r-- drivers/firmware/arm_scmi/voltage.c | 15
-rw-r--r-- drivers/firmware/efi/reboot.c | 21
-rw-r--r-- drivers/firmware/efi/sysfb_efi.c | 2
-rw-r--r-- drivers/firmware/sysfb.c | 58
-rw-r--r-- drivers/firmware/sysfb_simplefb.c | 16
-rw-r--r-- drivers/gpio/gpio-crystalcove.c | 70
-rw-r--r-- drivers/gpio/gpio-dln2.c | 23
-rw-r--r-- drivers/gpio/gpio-dwapb.c | 7
-rw-r--r-- drivers/gpio/gpio-grgpio.c | 14
-rw-r--r-- drivers/gpio/gpio-merrifield.c | 22
-rw-r--r-- drivers/gpio/gpio-mxs.c | 2
-rw-r--r-- drivers/gpio/gpio-pca953x.c | 22
-rw-r--r-- drivers/gpio/gpio-realtek-otto.c | 10
-rw-r--r-- drivers/gpio/gpio-sch.c | 35
-rw-r--r-- drivers/gpio/gpio-sim.c | 16
-rw-r--r-- drivers/gpio/gpio-vf610.c | 1
-rw-r--r-- drivers/gpio/gpio-vr41xx.c | 2
-rw-r--r-- drivers/gpio/gpio-wcove.c | 10
-rw-r--r-- drivers/gpio/gpio-winbond.c | 7
-rw-r--r-- drivers/gpio/gpio-xilinx.c | 2
-rw-r--r-- drivers/gpio/gpiolib-cdev.c | 11
-rw-r--r-- drivers/gpu/drm/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 19
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 16
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 25
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 32
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h | 97
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 20
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 10
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 13
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 359
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h | 89
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/imu_v11_0.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 36
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nv.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c | 109
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c | 17
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 20
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/Kconfig | 2
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 125
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 8
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 17
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c | 11
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c | 9
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 108
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 11
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc.h | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 24
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c | 68
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h | 18
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c | 19
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c | 11
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/include/ddc_service_types.h | 2
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_7_pptable.h | 9
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_pptable.h | 9
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_7_pptable.h | 5
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_pptable.h | 10
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c | 2
-rw-r--r-- drivers/gpu/drm/ast/ast_dp.c | 5
-rw-r--r-- drivers/gpu/drm/ast/ast_dp501.c | 2
-rw-r--r-- drivers/gpu/drm/ast/ast_drv.h | 9
-rw-r--r-- drivers/gpu/drm/ast/ast_main.c | 21
-rw-r--r-- drivers/gpu/drm/ast/ast_mode.c | 38
-rw-r--r-- drivers/gpu/drm/ast/ast_post.c | 2
-rw-r--r-- drivers/gpu/drm/bridge/analogix/analogix_dp_core.c | 42
-rw-r--r-- drivers/gpu/drm/bridge/fsl-ldb.c | 21
-rw-r--r-- drivers/gpu/drm/bridge/ti-sn65dsi83.c | 2
-rw-r--r-- drivers/gpu/drm/drm_aperture.c | 26
-rw-r--r-- drivers/gpu/drm/drm_atomic_helper.c | 16
-rw-r--r-- drivers/gpu/drm/drm_gem_ttm_helper.c | 9
-rw-r--r-- drivers/gpu/drm/drm_panel_orientation_quirks.c | 21
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_drv.c | 6
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_mic.c | 42
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp.c | 32
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_mst.c | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_context.c | 5
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_domain.c | 6
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 3
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_region.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 11
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_wait.c | 34
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_context_types.h | 11
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_execlists_submission.c | 12
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt.c | 16
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_sysfs.c | 29
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_sysfs.h | 6
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_types.h | 3
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_lrc.h | 10
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_reset.c | 37
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_lrc.c | 8
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h | 3
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc.h | 5
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h | 45
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 374
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 67
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h | 7
-rw-r--r-- drivers/gpu/drm/i915/gvt/cmd_parser.c | 6
-rw-r--r-- drivers/gpu/drm/i915/i915_driver.c | 34
-rw-r--r-- drivers/gpu/drm/i915/i915_drm_client.c | 5
-rw-r--r-- drivers/gpu/drm/i915/i915_scatterlist.c | 27
-rw-r--r-- drivers/gpu/drm/i915/i915_scatterlist.h | 6
-rw-r--r-- drivers/gpu/drm/i915/i915_sysfs.c | 17
-rw-r--r-- drivers/gpu/drm/i915/i915_vma.c | 60
-rw-r--r-- drivers/gpu/drm/i915/intel_region_ttm.c | 10
-rw-r--r-- drivers/gpu/drm/i915/intel_region_ttm.h | 3
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 2
-rw-r--r-- drivers/gpu/drm/i915/selftests/intel_memory_region.c | 21
-rw-r--r-- drivers/gpu/drm/i915/selftests/mock_region.c | 3
-rw-r--r-- drivers/gpu/drm/imx/dcss/dcss-dev.c | 3
-rw-r--r-- drivers/gpu/drm/imx/ipuv3-crtc.c | 2
-rw-r--r-- drivers/gpu/drm/msm/adreno/adreno_gpu.c | 14
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 3
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c | 10
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c | 9
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c | 2
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_ctrl.c | 33
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_ctrl.h | 2
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_display.c | 18
-rw-r--r-- drivers/gpu/drm/msm/msm_drv.c | 2
-rw-r--r-- drivers/gpu/drm/msm/msm_drv.h | 1
-rw-r--r-- drivers/gpu/drm/msm/msm_fence.c | 8
-rw-r--r-- drivers/gpu/drm/msm/msm_gem.c | 7
-rw-r--r-- drivers/gpu/drm/msm/msm_gem.h | 11
-rw-r--r-- drivers/gpu/drm/msm/msm_gem_prime.c | 15
-rw-r--r-- drivers/gpu/drm/msm/msm_gem_submit.c | 20
-rw-r--r-- drivers/gpu/drm/msm/msm_gem_vma.c | 6
-rw-r--r-- drivers/gpu/drm/msm/msm_gpu.c | 27
-rw-r--r-- drivers/gpu/drm/msm/msm_iommu.c | 2
-rw-r--r-- drivers/gpu/drm/msm/msm_ringbuffer.c | 2
-rw-r--r-- drivers/gpu/drm/panel/panel-edp.c | 2
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_drv.c | 9
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_job.c | 6
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_job.h | 2
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_mmu.c | 2
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 17
-rw-r--r-- drivers/gpu/drm/scheduler/sched_entity.c | 6
-rw-r--r-- drivers/gpu/drm/solomon/ssd130x.c | 2
-rw-r--r-- drivers/gpu/drm/sun4i/sun4i_drv.c | 12
-rw-r--r-- drivers/gpu/drm/sun4i/sun4i_layer.c | 2
-rw-r--r-- drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c | 54
-rw-r--r-- drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h | 2
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo.c | 22
-rw-r--r-- drivers/gpu/drm/ttm/ttm_device.c | 6
-rw-r--r-- drivers/gpu/drm/ttm/ttm_resource.c | 52
-rw-r--r-- drivers/gpu/drm/vc4/vc4_bo.c | 62
-rw-r--r-- drivers/gpu/drm/vc4/vc4_crtc.c | 196
-rw-r--r-- drivers/gpu/drm/vc4/vc4_drv.c | 97
-rw-r--r-- drivers/gpu/drm/vc4/vc4_drv.h | 19
-rw-r--r-- drivers/gpu/drm/vc4/vc4_gem.c | 40
-rw-r--r-- drivers/gpu/drm/vc4/vc4_hdmi.c | 2
-rw-r--r-- drivers/gpu/drm/vc4/vc4_hvs.c | 18
-rw-r--r-- drivers/gpu/drm/vc4/vc4_irq.c | 16
-rw-r--r-- drivers/gpu/drm/vc4/vc4_kms.c | 24
-rw-r--r-- drivers/gpu/drm/vc4/vc4_perfmon.c | 54
-rw-r--r-- drivers/gpu/drm/vc4/vc4_plane.c | 29
-rw-r--r-- drivers/gpu/drm/vc4/vc4_render_cl.c | 4
-rw-r--r-- drivers/gpu/drm/vc4/vc4_v3d.c | 15
-rw-r--r-- drivers/gpu/drm/vc4/vc4_validate.c | 16
-rw-r--r-- drivers/gpu/drm/vc4/vc4_validate_shaders.c | 4
-rw-r--r-- drivers/gpu/drm/xen/xen_drm_front_gem.c | 2
-rw-r--r-- drivers/hid/hid-hyperv.c | 5
-rw-r--r-- drivers/hv/channel_mgmt.c | 18
-rw-r--r-- drivers/hv/hv_kvp.c | 2
-rw-r--r-- drivers/hv/vmbus_drv.c | 4
-rw-r--r-- drivers/hwmon/asus-ec-sensors.c | 2
-rw-r--r-- drivers/hwmon/ibmaem.c | 12
-rw-r--r-- drivers/hwmon/occ/common.c | 10
-rw-r--r-- drivers/hwmon/occ/common.h | 3
-rw-r--r-- drivers/hwmon/occ/p8_i2c.c | 13
-rw-r--r-- drivers/hwmon/occ/p9_sbe.c | 7
-rw-r--r-- drivers/hwmon/pmbus/ucd9200.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-cadence.c | 31
-rw-r--r-- drivers/i2c/busses/i2c-designware-common.c | 3
-rw-r--r-- drivers/i2c/busses/i2c-designware-platdrv.c | 13
-rw-r--r-- drivers/i2c/busses/i2c-imx.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-mlxcpld.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-mt65xx.c | 9
-rw-r--r-- drivers/i2c/busses/i2c-npcm7xx.c | 3
-rw-r--r-- drivers/i2c/busses/i2c-piix4.c | 16
-rw-r--r-- drivers/idle/intel_idle.c | 76
-rw-r--r-- drivers/iio/accel/bma180.c | 3
-rw-r--r-- drivers/iio/accel/kxcjk-1013.c | 4
-rw-r--r-- drivers/iio/accel/mma8452.c | 22
-rw-r--r-- drivers/iio/accel/mxc4005.c | 4
-rw-r--r-- drivers/iio/adc/adi-axi-adc.c | 3
-rw-r--r-- drivers/iio/adc/aspeed_adc.c | 1
-rw-r--r-- drivers/iio/adc/axp288_adc.c | 8
-rw-r--r-- drivers/iio/adc/rzg2l_adc.c | 8
-rw-r--r-- drivers/iio/adc/stm32-adc-core.c | 9
-rw-r--r-- drivers/iio/adc/stm32-adc.c | 37
-rw-r--r-- drivers/iio/adc/ti-ads131e08.c | 10
-rw-r--r-- drivers/iio/adc/xilinx-ams.c | 2
-rw-r--r-- drivers/iio/afe/iio-rescale.c | 2
-rw-r--r-- drivers/iio/chemical/ccs811.c | 4
-rw-r--r-- drivers/iio/frequency/admv1014.c | 6
-rw-r--r-- drivers/iio/gyro/mpu3050-core.c | 1
-rw-r--r-- drivers/iio/humidity/hts221_buffer.c | 5
-rw-r--r-- drivers/iio/imu/inv_icm42600/inv_icm42600.h | 1
-rw-r--r-- drivers/iio/imu/inv_icm42600/inv_icm42600_core.c | 2
-rw-r--r-- drivers/iio/magnetometer/yamaha-yas530.c | 2
-rw-r--r-- drivers/iio/proximity/sx9324.c | 3
-rw-r--r-- drivers/iio/test/Kconfig | 2
-rw-r--r-- drivers/iio/test/Makefile | 2
-rw-r--r-- drivers/iio/trigger/iio-trig-sysfs.c | 1
-rw-r--r-- drivers/infiniband/core/cm.c | 4
-rw-r--r-- drivers/infiniband/hw/irdma/cm.c | 50
-rw-r--r-- drivers/infiniband/hw/irdma/i40iw_hw.c | 1
-rw-r--r-- drivers/infiniband/hw/irdma/icrdma_hw.c | 1
-rw-r--r-- drivers/infiniband/hw/irdma/irdma.h | 1
-rw-r--r-- drivers/infiniband/hw/irdma/verbs.c | 4
-rw-r--r-- drivers/infiniband/hw/qedr/qedr.h | 1
-rw-r--r-- drivers/infiniband/hw/qedr/verbs.c | 4
-rw-r--r-- drivers/input/joystick/Kconfig | 1
-rw-r--r-- drivers/input/misc/soc_button_array.c | 4
-rw-r--r-- drivers/input/mouse/bcm5974.c | 7
-rw-r--r-- drivers/input/touchscreen/goodix.c | 5
-rw-r--r-- drivers/input/touchscreen/usbtouchscreen.c | 3
-rw-r--r-- drivers/input/touchscreen/wm97xx-core.c | 4
-rw-r--r-- drivers/iommu/intel/dmar.c | 2
-rw-r--r-- drivers/iommu/intel/iommu.c | 24
-rw-r--r-- drivers/iommu/intel/pasid.c | 69
-rw-r--r-- drivers/iommu/intel/pasid.h | 1
-rw-r--r-- drivers/iommu/ipmmu-vmsa.c | 2
-rw-r--r-- drivers/irqchip/Kconfig | 2
-rw-r--r-- drivers/irqchip/irq-apple-aic.c | 4
-rw-r--r-- drivers/irqchip/irq-gic-realview.c | 1
-rw-r--r-- drivers/irqchip/irq-gic-v3.c | 48
-rw-r--r-- drivers/irqchip/irq-loongson-liointc.c | 8
-rw-r--r-- drivers/irqchip/irq-or1k-pic.c | 1
-rw-r--r-- drivers/irqchip/irq-realtek-rtl.c | 2
-rw-r--r-- drivers/irqchip/irq-uniphier-aidet.c | 1
-rw-r--r-- drivers/md/dm-core.h | 12
-rw-r--r-- drivers/md/dm-era-target.c | 8
-rw-r--r-- drivers/md/dm-log.c | 5
-rw-r--r-- drivers/md/dm-raid.c | 36
-rw-r--r-- drivers/md/dm-rq.c | 2
-rw-r--r-- drivers/md/dm-table.c | 11
-rw-r--r-- drivers/md/dm.c | 149
-rw-r--r-- drivers/md/dm.h | 2
-rw-r--r-- drivers/md/md.c | 14
-rw-r--r-- drivers/md/md.h | 2
-rw-r--r-- drivers/md/raid5-ppl.c | 4
-rw-r--r-- drivers/md/raid5.c | 10
-rw-r--r-- drivers/memory/Kconfig | 1
-rw-r--r-- drivers/memory/mtk-smi.c | 5
-rw-r--r-- drivers/memory/samsung/exynos5422-dmc.c | 29
-rw-r--r-- drivers/misc/atmel-ssc.c | 4
-rw-r--r-- drivers/misc/cardreader/rts5261.c | 2
-rw-r--r-- drivers/misc/cardreader/rtsx_usb.c | 27
-rw-r--r-- drivers/misc/eeprom/at25.c | 93
-rw-r--r-- drivers/misc/lkdtm/Makefile | 9
-rw-r--r-- drivers/misc/mei/hbm.c | 3
-rw-r--r-- drivers/misc/mei/hw-me-regs.h | 2
-rw-r--r-- drivers/misc/mei/hw-me.c | 2
-rw-r--r-- drivers/misc/mei/pci-me.c | 2
-rw-r--r-- drivers/mmc/core/block.c | 3
-rw-r--r-- drivers/mmc/host/mtk-sd.c | 20
-rw-r--r-- drivers/mmc/host/sdhci-omap.c | 14
-rw-r--r-- drivers/mmc/host/sdhci-pci-gli.c | 3
-rw-r--r-- drivers/mmc/host/sdhci-pci-o2micro.c | 2
-rw-r--r-- drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c | 8
-rw-r--r-- drivers/mtd/nand/raw/nand_ids.c | 3
-rw-r--r-- drivers/net/Kconfig | 1
-rw-r--r-- drivers/net/amt.c | 298
-rw-r--r-- drivers/net/bonding/bond_3ad.c | 3
-rw-r--r-- drivers/net/bonding/bond_alb.c | 2
-rw-r--r-- drivers/net/bonding/bond_main.c | 4
-rw-r--r-- drivers/net/caif/caif_virtio.c | 10
-rw-r--r-- drivers/net/can/grcan.c | 1
-rw-r--r-- drivers/net/can/m_can/m_can.c | 8
-rw-r--r-- drivers/net/can/rcar/rcar_canfd.c | 6
-rw-r--r-- drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c | 24
-rw-r--r-- drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c | 22
-rw-r--r-- drivers/net/can/usb/gs_usb.c | 23
-rw-r--r-- drivers/net/can/usb/kvaser_usb/kvaser_usb.h | 25
-rw-r--r-- drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c | 285
-rw-r--r-- drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c | 4
-rw-r--r-- drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c | 119
-rw-r--r-- drivers/net/can/xilinx_can.c | 4
-rw-r--r-- drivers/net/dsa/bcm_sf2.c | 5
-rw-r--r-- drivers/net/dsa/hirschmann/hellcreek_ptp.c | 1
-rw-r--r-- drivers/net/dsa/lantiq_gswip.c | 4
-rw-r--r-- drivers/net/dsa/microchip/ksz_common.c | 5
-rw-r--r-- drivers/net/dsa/mv88e6xxx/serdes.c | 35
-rw-r--r-- drivers/net/dsa/ocelot/felix_vsc9959.c | 4
-rw-r--r-- drivers/net/dsa/qca8k.c | 22
-rw-r--r-- drivers/net/dsa/qca8k.h | 2
-rw-r--r-- drivers/net/dsa/realtek/rtl8365mb.c | 38
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_main.c | 16
-rw-r--r-- drivers/net/dsa/vitesse-vsc73xx-spi.c | 10
-rw-r--r-- drivers/net/ethernet/altera/altera_tse_main.c | 6
-rw-r--r-- drivers/net/ethernet/amd/au1000_eth.c | 22
-rw-r--r-- drivers/net/ethernet/amd/au1000_eth.h | 4
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 2
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-platform.c | 4
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c | 23
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac-bcma.c | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 5
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 8
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c | 13
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 7
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 10
-rw-r--r-- drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c | 8
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_cmds.c | 10
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_cmds.h | 2
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_ethtool.c | 31
-rw-r--r-- drivers/net/ethernet/faraday/ftgmac100.c | 15
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hnae3.h | 1
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 18
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 101
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h | 1
-rw-r--r-- drivers/net/ethernet/huawei/hinic/hinic_devlink.c | 4
-rw-r--r-- drivers/net/ethernet/ibm/ibmvnic.c | 9
-rw-r--r-- drivers/net/ethernet/intel/e1000e/hw.h | 1
-rw-r--r-- drivers/net/ethernet/intel/e1000e/ich8lan.c | 4
-rw-r--r-- drivers/net/ethernet/intel/e1000e/ich8lan.h | 1
-rw-r--r-- drivers/net/ethernet/intel/e1000e/netdev.c | 30
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e.h | 16
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 25
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_main.c | 91
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_register.h | 13
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_type.h | 1
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 6
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf.h | 14
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_ethtool.c | 10
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_main.c | 13
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_txrx.c | 7
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_virtchnl.c | 65
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_devids.h | 1
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_devlink.c | 59
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ethtool.c | 49
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_fw_update.c | 96
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_lib.c | 42
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_main.c | 50
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ptp.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ptp.h | 31
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_tc_lib.c | 5
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_vf_lib.c | 5
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_virtchnl.c | 53
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_main.c | 19
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_main.c | 3
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_regs.h | 5
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 1
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 3
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 14
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h | 2
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c | 3
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_flower.c | 6
-rw-r--r-- drivers/net/ethernet/marvell/prestera/prestera_router.c | 1
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 21
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_ppe_offload.c | 3
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_wed.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/dev.c | 18
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/params.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 20
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_common.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 31
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 15
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 39
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 35
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c | 14
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c | 20
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h | 13
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 23
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_mac.c | 112
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_main.c | 8
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_main.h | 1
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c | 6
-rw-r--r-- drivers/net/ethernet/mscc/ocelot_fdma.c | 17
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/action.c | 2
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/conntrack.c | 32
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/match.c | 16
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c | 18
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfdk/dp.c | 33
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c | 28
-rw-r--r-- drivers/net/ethernet/realtek/r8169_main.c | 10
-rw-r--r-- drivers/net/ethernet/sfc/ef10.c | 3
-rw-r--r-- drivers/net/ethernet/sfc/ef10_sriov.c | 10
-rw-r--r-- drivers/net/ethernet/smsc/epic100.c | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c | 1
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c | 6
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c | 29
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c | 49
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4.h | 3
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 7
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac.h | 1
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 8
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c | 5
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 22
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 8
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c | 12
-rw-r--r-- drivers/net/ethernet/sun/sunhme.c | 43
-rw-r--r-- drivers/net/ethernet/ti/am65-cpsw-nuss.c | 17
-rw-r--r-- drivers/net/ethernet/xilinx/xilinx_axienet.h | 51
-rw-r--r-- drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 29
-rw-r--r-- drivers/net/hamradio/6pack.c | 9
-rw-r--r-- drivers/net/phy/aquantia_main.c | 15
-rw-r--r-- drivers/net/phy/at803x.c | 6
-rw-r--r-- drivers/net/phy/ax88796b.c | 6
-rw-r--r-- drivers/net/phy/dp83822.c | 4
-rw-r--r-- drivers/net/phy/dp83867.c | 29
-rw-r--r-- drivers/net/phy/mdio_bus.c | 1
-rw-r--r-- drivers/net/phy/phy.c | 23
-rw-r--r-- drivers/net/phy/phy_device.c | 23
-rw-r--r-- drivers/net/phy/sfp.c | 2
-rw-r--r-- drivers/net/phy/smsc.c | 6
-rw-r--r-- drivers/net/tun.c | 15
-rw-r--r-- drivers/net/usb/asix.h | 3
-rw-r--r-- drivers/net/usb/asix_common.c | 1
-rw-r--r-- drivers/net/usb/ax88179_178a.c | 127
-rw-r--r-- drivers/net/usb/catc.c | 2
-rw-r--r-- drivers/net/usb/r8152.c | 30
-rw-r--r-- drivers/net/usb/usbnet.c | 21
-rw-r--r-- drivers/net/veth.c | 4
-rw-r--r-- drivers/net/virtio_net.c | 33
-rw-r--r-- drivers/net/wireless/ath/ath11k/wmi.c | 3
-rw-r--r-- drivers/net/wireless/mac80211_hwsim.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtw88/main.h | 6
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8821c.c | 14
-rw-r--r-- drivers/net/xen-netback/rx.c | 1
-rw-r--r-- drivers/net/xen-netfront.c | 56
-rw-r--r-- drivers/nfc/nfcmrvl/i2c.c | 6
-rw-r--r-- drivers/nfc/nfcmrvl/spi.c | 6
-rw-r--r-- drivers/nfc/nfcmrvl/usb.c | 16
-rw-r--r-- drivers/nfc/nxp-nci/i2c.c | 11
-rw-r--r-- drivers/nfc/st21nfca/se.c | 53
-rw-r--r-- drivers/nvdimm/bus.c | 4
-rw-r--r-- drivers/nvme/host/core.c | 27
-rw-r--r-- drivers/nvme/host/nvme.h | 29
-rw-r--r-- drivers/nvme/host/pci.c | 66
-rw-r--r-- drivers/nvme/host/rdma.c | 12
-rw-r--r-- drivers/nvme/host/tcp.c | 13
-rw-r--r-- drivers/nvme/host/trace.h | 2
-rw-r--r-- drivers/nvme/target/configfs.c | 20
-rw-r--r-- drivers/nvme/target/core.c | 6
-rw-r--r-- drivers/nvme/target/nvmet.h | 1
-rw-r--r-- drivers/nvme/target/passthru.c | 55
-rw-r--r-- drivers/nvme/target/tcp.c | 23
-rw-r--r-- drivers/pinctrl/Kconfig | 2
-rw-r--r-- drivers/pinctrl/aspeed/pinctrl-aspeed.c | 4
-rw-r--r-- drivers/pinctrl/freescale/pinctrl-imx93.c | 1
-rw-r--r-- drivers/pinctrl/mvebu/pinctrl-armada-37xx.c | 65
-rw-r--r-- drivers/pinctrl/pinctrl-ocelot.c | 214
-rw-r--r-- drivers/pinctrl/ralink/pinctrl-ralink.c | 2
-rw-r--r-- drivers/pinctrl/stm32/pinctrl-stm32.c | 20
-rw-r--r-- drivers/pinctrl/sunplus/sppctl.c | 3
-rw-r--r-- drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c | 10
-rw-r--r-- drivers/pinctrl/sunxi/pinctrl-sunxi.c | 2
-rw-r--r-- drivers/platform/mellanox/Kconfig | 2
-rw-r--r-- drivers/platform/mellanox/nvsw-sn2201.c | 6
-rw-r--r-- drivers/platform/mips/Kconfig | 2
-rw-r--r-- drivers/platform/x86/Kconfig | 2
-rw-r--r-- drivers/platform/x86/amd-pmc.c | 14
-rw-r--r-- drivers/platform/x86/asus-nb-wmi.c | 2
-rw-r--r-- drivers/platform/x86/barco-p50-gpio.c | 5
-rw-r--r-- drivers/platform/x86/gigabyte-wmi.c | 3
-rw-r--r-- drivers/platform/x86/hp-wmi.c | 32
-rw-r--r-- drivers/platform/x86/ideapad-laptop.c | 29
-rw-r--r-- drivers/platform/x86/intel/atomisp2/led.c | 3
-rw-r--r-- drivers/platform/x86/intel/hid.c | 6
-rw-r--r-- drivers/platform/x86/intel/ifs/Kconfig | 3
-rw-r--r-- drivers/platform/x86/intel/pmc/core.c | 2
-rw-r--r-- drivers/platform/x86/intel/pmt/crashlog.c | 2
-rw-r--r-- drivers/platform/x86/panasonic-laptop.c | 84
-rw-r--r-- drivers/platform/x86/thinkpad_acpi.c | 51
-rw-r--r-- drivers/platform/x86/x86-android-tablets.c | 17
-rw-r--r-- drivers/power/reset/arm-versatile-reboot.c | 1
-rw-r--r-- drivers/power/supply/ab8500_fg.c | 9
-rw-r--r-- drivers/power/supply/power_supply_core.c | 24
-rw-r--r-- drivers/regulator/qcom_smd-regulator.c | 8
-rw-r--r-- drivers/s390/char/sclp.c | 2
-rw-r--r-- drivers/s390/crypto/ap_bus.c | 2
-rw-r--r-- drivers/s390/virtio/virtio_ccw.c | 9
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 7
-rw-r--r-- drivers/scsi/ibmvscsi/ibmvfc.c | 82
-rw-r--r-- drivers/scsi/ibmvscsi/ibmvfc.h | 2
-rw-r--r-- drivers/scsi/ipr.c | 4
-rw-r--r-- drivers/scsi/lpfc/lpfc_crtn.h | 4
-rw-r--r-- drivers/scsi/lpfc/lpfc_ct.c | 2
-rw-r--r-- drivers/scsi/lpfc/lpfc_els.c | 21
-rw-r--r-- drivers/scsi/lpfc/lpfc_hw4.h | 3
-rw-r--r-- drivers/scsi/lpfc/lpfc_init.c | 2
-rw-r--r-- drivers/scsi/lpfc/lpfc_nportdisc.c | 3
-rw-r--r-- drivers/scsi/lpfc/lpfc_nvme.c | 52
-rw-r--r-- drivers/scsi/lpfc/lpfc_scsi.c | 6
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli.c | 25
-rw-r--r-- drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_base.c | 3
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_base.c | 23
-rw-r--r-- drivers/scsi/pm8001/pm8001_hwi.c | 19
-rw-r--r-- drivers/scsi/pm8001/pm8001_init.c | 2
-rw-r--r-- drivers/scsi/pm8001/pm80xx_hwi.c | 6
-rw-r--r-- drivers/scsi/pmcraid.c | 2
-rw-r--r-- drivers/scsi/scsi_debug.c | 22
-rw-r--r-- drivers/scsi/scsi_transport_iscsi.c | 7
-rw-r--r-- drivers/scsi/sd.c | 2
-rw-r--r-- drivers/scsi/storvsc_drv.c | 27
-rw-r--r-- drivers/scsi/vmw_pvscsi.h | 4
-rw-r--r-- drivers/soc/atmel/soc.c | 12
-rw-r--r-- drivers/soc/bcm/brcmstb/pm/pm-arm.c | 1
-rw-r--r-- drivers/soc/imx/imx8m-blk-ctrl.c | 2
-rw-r--r-- drivers/soc/ixp4xx/ixp4xx-npe.c | 2
-rw-r--r-- drivers/soc/qcom/smem.c | 6
-rw-r--r-- drivers/spi/spi-amd.c | 8
-rw-r--r-- drivers/spi/spi-aspeed-smc.c | 12
-rw-r--r-- drivers/spi/spi-bcm2835.c | 12
-rw-r--r-- drivers/spi/spi-cadence-quadspi.c | 19
-rw-r--r-- drivers/spi/spi-cadence.c | 37
-rw-r--r-- drivers/spi/spi-mem.c | 2
-rw-r--r-- drivers/spi/spi-rockchip.c | 11
-rw-r--r-- drivers/spi/spi-rspi.c | 4
-rw-r--r-- drivers/staging/olpc_dcon/Kconfig | 2
-rw-r--r-- drivers/staging/r8188eu/core/rtw_xmit.c | 20
-rw-r--r-- drivers/staging/r8188eu/os_dep/ioctl_linux.c | 2
-rw-r--r-- drivers/staging/rtl8723bs/os_dep/ioctl_linux.c | 6
-rw-r--r-- drivers/staging/wlan-ng/hfa384x_usb.c | 2
-rw-r--r-- drivers/target/target_core_file.c | 3
-rw-r--r-- drivers/target/target_core_iblock.c | 4
-rw-r--r-- drivers/target/target_core_sbc.c | 6
-rw-r--r-- drivers/tee/optee/optee_smc.h | 2
-rw-r--r-- drivers/tee/optee/smc_abi.c | 4
-rw-r--r-- drivers/tee/tee_core.c | 2
-rw-r--r-- drivers/thermal/intel/intel_tcc_cooling.c | 1
-rw-r--r-- drivers/tty/goldfish.c | 2
-rw-r--r-- drivers/tty/n_gsm.c | 2
-rw-r--r-- drivers/tty/pty.c | 14
-rw-r--r-- drivers/tty/serial/8250/8250_core.c | 4
-rw-r--r-- drivers/tty/serial/8250/8250_dma.c | 6
-rw-r--r-- drivers/tty/serial/8250/8250_dw.c | 26
-rw-r--r-- drivers/tty/serial/8250/8250_port.c | 8
-rw-r--r-- drivers/tty/serial/amba-pl011.c | 23
-rw-r--r-- drivers/tty/serial/mvebu-uart.c | 25
-rw-r--r-- drivers/tty/serial/qcom_geni_serial.c | 1
-rw-r--r-- drivers/tty/serial/samsung_tty.c | 5
-rw-r--r-- drivers/tty/serial/serial_core.c | 14
-rw-r--r-- drivers/tty/serial/stm32-usart.c | 2
-rw-r--r-- drivers/tty/sysrq.c | 2
-rw-r--r-- drivers/tty/tty.h | 3
-rw-r--r-- drivers/tty/tty_buffer.c | 46
-rw-r--r-- drivers/tty/vt/vt.c | 2
-rw-r--r-- drivers/ufs/core/ufshcd.c | 80
-rw-r--r-- drivers/usb/cdns3/cdnsp-ring.c | 19
-rw-r--r-- drivers/usb/chipidea/udc.c | 3
-rw-r--r-- drivers/usb/dwc2/hcd.c | 2
-rw-r--r-- drivers/usb/dwc3/core.c | 9
-rw-r--r-- drivers/usb/dwc3/dwc3-am62.c | 8
-rw-r--r-- drivers/usb/dwc3/dwc3-pci.c | 1
-rw-r--r-- drivers/usb/dwc3/gadget.c | 30
-rw-r--r-- drivers/usb/gadget/function/f_fs.c | 40
-rw-r--r-- drivers/usb/gadget/function/u_ether.c | 12
-rw-r--r-- drivers/usb/gadget/function/uvc_configfs.c | 6
-rw-r--r-- drivers/usb/gadget/function/uvc_video.c | 3
-rw-r--r-- drivers/usb/gadget/legacy/raw_gadget.c | 63
-rw-r--r-- drivers/usb/gadget/udc/lpc32xx_udc.c | 1
-rw-r--r-- drivers/usb/host/ehci-fsl.c | 11
-rw-r--r-- drivers/usb/host/fsl-mph-dr-of.c | 3
-rw-r--r-- drivers/usb/host/xhci-hub.c | 2
-rw-r--r-- drivers/usb/host/xhci-pci.c | 6
-rw-r--r-- drivers/usb/host/xhci.c | 65
-rw-r--r-- drivers/usb/host/xhci.h | 2
-rw-r--r-- drivers/usb/serial/ftdi_sio.c | 3
-rw-r--r-- drivers/usb/serial/ftdi_sio_ids.h | 6
-rw-r--r-- drivers/usb/serial/io_ti.c | 2
-rw-r--r-- drivers/usb/serial/io_usbvend.h | 1
-rw-r--r-- drivers/usb/serial/option.c | 12
-rw-r--r-- drivers/usb/serial/pl2303.c | 29
-rw-r--r-- drivers/usb/typec/class.c | 1
-rw-r--r-- drivers/usb/typec/tcpm/Kconfig | 1
-rw-r--r-- drivers/vdpa/mlx5/net/mlx5_vnet.c | 42
-rw-r--r-- drivers/vdpa/vdpa_user/vduse_dev.c | 67
-rw-r--r-- drivers/vfio/vfio.c | 17
-rw-r--r-- drivers/vhost/vdpa.c | 4
-rw-r--r-- drivers/vhost/vringh.c | 10
-rw-r--r-- drivers/video/console/sticore.c | 2
-rw-r--r-- drivers/video/fbdev/au1100fb.c | 6
-rw-r--r-- drivers/video/fbdev/cirrusfb.c | 6
-rw-r--r-- drivers/video/fbdev/core/fbcon.c | 33
-rw-r--r-- drivers/video/fbdev/core/fbmem.c | 28
-rw-r--r-- drivers/video/fbdev/intelfb/intelfbdrv.c | 4
-rw-r--r-- drivers/video/fbdev/intelfb/intelfbhw.c | 12
-rw-r--r-- drivers/video/fbdev/omap/sossi.c | 2
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c | 2
-rw-r--r-- drivers/video/fbdev/pxa3xx-gcu.c | 2
-rw-r--r-- drivers/video/fbdev/simplefb.c | 3
-rw-r--r-- drivers/video/fbdev/skeletonfb.c | 15
-rw-r--r-- drivers/virt/coco/sev-guest/sev-guest.c | 9
-rw-r--r-- drivers/virtio/Kconfig | 19
-rw-r--r-- drivers/virtio/virtio.c | 7
-rw-r--r-- drivers/virtio/virtio_mmio.c | 29
-rw-r--r-- drivers/virtio/virtio_pci_modern_dev.c | 4
-rw-r--r-- drivers/virtio/virtio_ring.c | 89
-rw-r--r-- drivers/watchdog/gxp-wdt.c | 1
-rw-r--r-- drivers/xen/Kconfig | 20
-rw-r--r-- drivers/xen/Makefile | 2
-rw-r--r-- drivers/xen/features.c | 2
-rw-r--r-- drivers/xen/gntdev-common.h | 7
-rw-r--r-- drivers/xen/gntdev.c | 159
-rw-r--r-- drivers/xen/grant-dma-iommu.c | 78
-rw-r--r-- drivers/xen/grant-dma-ops.c | 346
-rw-r--r-- drivers/xen/grant-table.c | 251
-rw-r--r-- drivers/xen/xlate_mmu.c | 1
715 files changed, 8468 insertions, 4416 deletions
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index e07782b1fbb6..eaea733b368a 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -73,6 +73,7 @@ module_param(device_id_scheme, bool, 0444);
static int only_lcd = -1;
module_param(only_lcd, int, 0444);
+static bool may_report_brightness_keys;
static int register_count;
static DEFINE_MUTEX(register_count_mutex);
static DEFINE_MUTEX(video_list_lock);
@@ -1222,6 +1223,9 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
acpi_video_device_bind(video, data);
acpi_video_device_find_cap(data);
+ if (data->cap._BCM && data->cap._BCL)
+ may_report_brightness_keys = true;
+
mutex_lock(&video->device_list_lock);
list_add_tail(&data->entry, &video->video_device_list);
mutex_unlock(&video->device_list_lock);
@@ -1689,6 +1693,9 @@ static void acpi_video_device_notify(acpi_handle handle, u32 event, void *data)
break;
}
+ if (keycode)
+ may_report_brightness_keys = true;
+
acpi_notifier_call_chain(device, event, 0);
if (keycode && (report_key_events & REPORT_BRIGHTNESS_KEY_EVENTS)) {
@@ -2249,6 +2256,7 @@ void acpi_video_unregister(void)
if (register_count) {
acpi_bus_unregister_driver(&acpi_video_bus);
register_count = 0;
+ may_report_brightness_keys = false;
}
mutex_unlock(&register_count_mutex);
}
@@ -2270,13 +2278,7 @@ void acpi_video_unregister_backlight(void)
bool acpi_video_handles_brightness_key_presses(void)
{
- bool have_video_busses;
-
- mutex_lock(&video_list_lock);
- have_video_busses = !list_empty(&video_bus_head);
- mutex_unlock(&video_list_lock);
-
- return have_video_busses &&
+ return may_report_brightness_keys &&
(report_key_events & REPORT_BRIGHTNESS_KEY_EVENTS);
}
EXPORT_SYMBOL(acpi_video_handles_brightness_key_presses);
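Note: the acpi_video change above replaces a mutex-protected list walk on every query with a boolean that is set once while devices are enumerated. A minimal userspace sketch of that caching pattern, assuming a single writer during enumeration (names here are illustrative, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

static bool may_report_keys;	/* set once during device enumeration */

static void enumerate_device(bool has_backlight_methods)
{
	if (has_backlight_methods)
		may_report_keys = true;
}

/* The hot-path query becomes a plain flag test: no lock, no list walk. */
static bool handles_brightness_keys(void)
{
	return may_report_keys;
}

int main(void)
{
	enumerate_device(true);
	printf("handles keys: %d\n", handles_brightness_keys());
	return 0;
}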
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 86fa61a21826..e2db1bdd9dd2 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -298,7 +298,7 @@ EXPORT_SYMBOL_GPL(osc_cpc_flexible_adr_space_confirmed);
bool osc_sb_native_usb4_support_confirmed;
EXPORT_SYMBOL_GPL(osc_sb_native_usb4_support_confirmed);
-bool osc_sb_cppc_not_supported;
+bool osc_sb_cppc2_support_acked;
static u8 sb_uuid_str[] = "0811B06E-4A27-44F9-8D60-3CBBC22E7B48";
static void acpi_bus_osc_negotiate_platform_control(void)
@@ -358,11 +358,6 @@ static void acpi_bus_osc_negotiate_platform_control(void)
return;
}
-#ifdef CONFIG_ACPI_CPPC_LIB
- osc_sb_cppc_not_supported = !(capbuf_ret[OSC_SUPPORT_DWORD] &
- (OSC_SB_CPC_SUPPORT | OSC_SB_CPCV2_SUPPORT));
-#endif
-
/*
* Now run _OSC again with query flag clear and with the caps
* supported by both the OS and the platform.
@@ -376,6 +371,10 @@ static void acpi_bus_osc_negotiate_platform_control(void)
capbuf_ret = context.ret.pointer;
if (context.ret.length > OSC_SUPPORT_DWORD) {
+#ifdef CONFIG_ACPI_CPPC_LIB
+ osc_sb_cppc2_support_acked = capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_CPCV2_SUPPORT;
+#endif
+
osc_sb_apei_support_acked =
capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_APEI_SUPPORT;
osc_pc_lpi_support_confirmed =
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 903528f7e187..3c6d4ef87be0 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -578,6 +578,19 @@ bool __weak cpc_ffh_supported(void)
}
/**
+ * cpc_supported_by_cpu() - check if CPPC is supported by CPU
+ *
+ * Check if the architectural support for CPPC is present even
+ * if the _OSC hasn't prescribed it
+ *
+ * Return: true for supported, false for not supported
+ */
+bool __weak cpc_supported_by_cpu(void)
+{
+ return false;
+}
+
+/**
* pcc_data_alloc() - Allocate the pcc_data memory for pcc subspace
*
* Check and allocate the cppc_pcc_data memory.
@@ -684,8 +697,11 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
acpi_status status;
int ret = -ENODATA;
- if (osc_sb_cppc_not_supported)
- return -ENODEV;
+ if (!osc_sb_cppc2_support_acked) {
+ pr_debug("CPPC v2 _OSC not acked\n");
+ if (!cpc_supported_by_cpu())
+ return -ENODEV;
+ }
/* Parse the ACPI _CPC table for this CPU. */
status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
@@ -766,7 +782,8 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
if (!osc_cpc_flexible_adr_space_confirmed) {
pr_debug("Flexible address space capability not supported\n");
- goto out_free;
+ if (!cpc_supported_by_cpu())
+ goto out_free;
}
addr = ioremap(gas_t->address, gas_t->bit_width/8);
@@ -793,7 +810,8 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
}
if (!osc_cpc_flexible_adr_space_confirmed) {
pr_debug("Flexible address space capability not supported\n");
- goto out_free;
+ if (!cpc_supported_by_cpu())
+ goto out_free;
}
} else {
if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
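Note: cpc_supported_by_cpu() above is declared __weak, so this default returning false is used unless an architecture links in a strong definition. A small standalone C sketch of the weak-symbol override mechanism (the helper named in the comment is illustrative):

#include <stdbool.h>
#include <stdio.h>

/* Weak default: assume no architectural CPPC support. */
__attribute__((weak)) bool cpc_supported_by_cpu(void)
{
	return false;
}

/*
 * An arch-specific file could provide a strong definition, which the
 * linker would pick over the weak one, e.g.:
 *
 *	bool cpc_supported_by_cpu(void) { return cpu_has_cppc_feature(); }
 */

int main(void)
{
	printf("CPPC supported by CPU: %s\n",
	       cpc_supported_by_cpu() ? "yes" : "no");
	return 0;
}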
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 0e3ed5eb367b..0cb20324da16 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -493,13 +493,8 @@ static int amba_device_try_add(struct amba_device *dev, struct resource *parent)
goto skip_probe;
ret = amba_read_periphid(dev);
- if (ret) {
- if (ret != -EPROBE_DEFER) {
- amba_device_put(dev);
- goto err_out;
- }
+ if (ret)
goto err_release;
- }
skip_probe:
ret = device_add(&dev->dev);
@@ -546,6 +541,7 @@ static int amba_deferred_retry(void)
continue;
list_del_init(&ddev->node);
+ amba_device_put(ddev->dev);
kfree(ddev);
}
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 40e816419f48..9601fa92950a 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2010,16 +2010,16 @@ retry:
return err_mask;
}
-static bool ata_log_supported(struct ata_device *dev, u8 log)
+static int ata_log_supported(struct ata_device *dev, u8 log)
{
struct ata_port *ap = dev->link->ap;
if (dev->horkage & ATA_HORKAGE_NO_LOG_DIR)
- return false;
+ return 0;
if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, ap->sector_buf, 1))
- return false;
- return get_unaligned_le16(&ap->sector_buf[log * 2]) ? true : false;
+ return 0;
+ return get_unaligned_le16(&ap->sector_buf[log * 2]);
}
static bool ata_identify_page_supported(struct ata_device *dev, u8 page)
@@ -2455,15 +2455,20 @@ static void ata_dev_config_cpr(struct ata_device *dev)
struct ata_cpr_log *cpr_log = NULL;
u8 *desc, *buf = NULL;
- if (ata_id_major_version(dev->id) < 11 ||
- !ata_log_supported(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES))
+ if (ata_id_major_version(dev->id) < 11)
+ goto out;
+
+ buf_len = ata_log_supported(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES);
+ if (buf_len == 0)
goto out;
/*
* Read the concurrent positioning ranges log (0x47). We can have at
- * most 255 32B range descriptors plus a 64B header.
+ * most 255 32B range descriptors plus a 64B header. This log varies in
+ * size, so use the size reported in the GPL directory. Reading beyond
+ * the supported length will result in an error.
*/
- buf_len = (64 + 255 * 32 + 511) & ~511;
+ buf_len <<= 9;
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
goto out;
@@ -5462,7 +5467,7 @@ struct ata_host *ata_host_alloc_pinfo(struct device *dev,
const struct ata_port_info * const * ppi,
int n_ports)
{
- const struct ata_port_info *pi;
+ const struct ata_port_info *pi = &ata_dummy_port_info;
struct ata_host *host;
int i, j;
@@ -5470,7 +5475,7 @@ struct ata_host *ata_host_alloc_pinfo(struct device *dev,
if (!host)
return NULL;
- for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
+ for (i = 0, j = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
if (ppi[j])
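Note: in the ata_dev_config_cpr() hunk above, the GPL log directory reports the log size in 512-byte sectors, and buf_len <<= 9 converts sectors to bytes. A quick check of that arithmetic against the old hard-coded size (values follow the comment in the hunk):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* Old fixed size: 64B header + 255 * 32B descriptors, 512B-aligned. */
	uint32_t old_len = (64 + 255 * 32 + 511) & ~511;	/* 8704 bytes */

	/* New scheme: the directory reports sectors; the shift converts. */
	uint32_t log_sectors = 17;
	uint32_t buf_len = log_sectors << 9;			/* 1 << 9 == 512 */

	assert(old_len == 8704);
	assert(buf_len == old_len);
	return 0;
}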
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 42cecf95a4e5..86dbb1cdfabd 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -2125,7 +2125,7 @@ static unsigned int ata_scsiop_inq_b9(struct ata_scsi_args *args, u8 *rbuf)
/* SCSI Concurrent Positioning Ranges VPD page: SBC-5 rev 1 or later */
rbuf[1] = 0xb9;
- put_unaligned_be16(64 + (int)cpr_log->nr_cpr * 32 - 4, &rbuf[3]);
+ put_unaligned_be16(64 + (int)cpr_log->nr_cpr * 32 - 4, &rbuf[2]);
for (i = 0; i < cpr_log->nr_cpr; i++, desc += 32) {
desc[0] = cpr_log->cpr[i].num;
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index ca129854a88c..c38027887499 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -196,7 +196,7 @@ static struct {
{ XFER_PIO_0, "XFER_PIO_0" },
{ XFER_PIO_SLOW, "XFER_PIO_SLOW" }
};
-ata_bitfield_name_match(xfer,ata_xfer_names)
+ata_bitfield_name_search(xfer, ata_xfer_names)
/*
* ATA Port attributes
diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
index 6725931f3c35..c2c3238ff84b 100644
--- a/drivers/ata/pata_cs5535.c
+++ b/drivers/ata/pata_cs5535.c
@@ -90,7 +90,7 @@ static void cs5535_set_piomode(struct ata_port *ap, struct ata_device *adev)
static const u16 pio_cmd_timings[5] = {
0xF7F4, 0x53F3, 0x13F1, 0x5131, 0x1131
};
- u32 reg, dummy;
+ u32 reg, __maybe_unused dummy;
struct ata_device *pair = ata_dev_pair(adev);
int mode = adev->pio_mode - XFER_PIO_0;
@@ -129,7 +129,7 @@ static void cs5535_set_dmamode(struct ata_port *ap, struct ata_device *adev)
static const u32 mwdma_timings[3] = {
0x7F0FFFF3, 0x7F035352, 0x7F024241
};
- u32 reg, dummy;
+ u32 reg, __maybe_unused dummy;
int mode = adev->dma_mode;
rdmsr(ATAC_CH0D0_DMA + 2 * adev->devno, reg, dummy);
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index 6b5ed3046b44..35608a0cf552 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -856,12 +856,14 @@ static int octeon_cf_probe(struct platform_device *pdev)
int i;
res_dma = platform_get_resource(dma_dev, IORESOURCE_MEM, 0);
if (!res_dma) {
+ put_device(&dma_dev->dev);
of_node_put(dma_node);
return -EINVAL;
}
cf_port->dma_base = (u64)devm_ioremap(&pdev->dev, res_dma->start,
resource_size(res_dma));
if (!cf_port->dma_base) {
+ put_device(&dma_dev->dev);
of_node_put(dma_node);
return -EINVAL;
}
@@ -871,6 +873,7 @@ static int octeon_cf_probe(struct platform_device *pdev)
irq = i;
irq_handler = octeon_cf_interrupt;
}
+ put_device(&dma_dev->dev);
}
of_node_put(dma_node);
}
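Note: the pata_octeon_cf hunks above add put_device() on each exit path to balance the reference taken when the DMA platform device was looked up. A simplified userspace sketch of that acquire/release discipline (the struct and helpers are stand-ins, not the driver-model API):

#include <stdlib.h>

struct dev {
	int refs;
};

static struct dev *dev_get(struct dev *d) { d->refs++; return d; }

static void dev_put(struct dev *d)
{
	if (--d->refs == 0)
		free(d);
}

static int probe_dma(struct dev *dma_dev, int have_resource)
{
	dev_get(dma_dev);		/* lookup took a reference */

	if (!have_resource) {
		dev_put(dma_dev);	/* must drop it on early exits too */
		return -1;
	}

	/* ... use the device ... */
	dev_put(dma_dev);		/* done with it on the normal path */
	return 0;
}

int main(void)
{
	struct dev *d = calloc(1, sizeof(*d));

	d->refs = 1;
	probe_dma(d, 0);
	dev_put(d);			/* final reference frees it */
	return 0;
}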
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 7cd789c4985d..460d6f163e41 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -486,7 +486,18 @@ static void device_link_release_fn(struct work_struct *work)
/* Ensure that all references to the link object have been dropped. */
device_link_synchronize_removal();
- pm_runtime_release_supplier(link, true);
+ pm_runtime_release_supplier(link);
+ /*
+ * If supplier_preactivated is set, the link has been dropped between
+ * the pm_runtime_get_suppliers() and pm_runtime_put_suppliers() calls
+ * in __driver_probe_device(). In that case, drop the supplier's
+ * PM-runtime usage counter to remove the reference taken by
+ * pm_runtime_get_suppliers().
+ */
+ if (link->supplier_preactivated)
+ pm_runtime_put_noidle(link->supplier);
+
+ pm_request_idle(link->supplier);
put_device(link->consumer);
put_device(link->supplier);
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 2ef23fce0860..4c98849577d4 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -564,6 +564,18 @@ ssize_t __weak cpu_show_srbds(struct device *dev,
return sysfs_emit(buf, "Not affected\n");
}
+ssize_t __weak cpu_show_mmio_stale_data(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "Not affected\n");
+}
+
+ssize_t __weak cpu_show_retbleed(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "Not affected\n");
+}
+
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
@@ -573,6 +585,8 @@ static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL);
static DEVICE_ATTR(tsx_async_abort, 0444, cpu_show_tsx_async_abort, NULL);
static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL);
static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL);
+static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL);
+static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,
@@ -584,6 +598,8 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_tsx_async_abort.attr,
&dev_attr_itlb_multihit.attr,
&dev_attr_srbds.attr,
+ &dev_attr_mmio_stale_data.attr,
+ &dev_attr_retbleed.attr,
NULL
};
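Note: the new vulnerability entries above follow the usual sysfs pattern: a weak default show() routine, a DEVICE_ATTR pairing a name with that routine, and a NULL-terminated attribute array. A stripped-down userspace analogue of the name-to-callback table (the structure is illustrative, not the sysfs API):

#include <stdio.h>

struct attr {
	const char *name;
	int (*show)(char *buf, unsigned long len);
};

static int show_not_affected(char *buf, unsigned long len)
{
	return snprintf(buf, len, "Not affected\n");
}

/* NULL-terminated, like cpu_root_vulnerabilities_attrs[]. */
static const struct attr vuln_attrs[] = {
	{ "mmio_stale_data",	show_not_affected },
	{ "retbleed",		show_not_affected },
	{ NULL,			NULL },
};

int main(void)
{
	char buf[64];
	const struct attr *a;

	for (a = vuln_attrs; a->name; a++) {
		a->show(buf, sizeof(buf));
		printf("%s: %s", a->name, buf);
	}
	return 0;
}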
diff --git a/drivers/base/init.c b/drivers/base/init.c
index d8d0fe687111..397eb9880cec 100644
--- a/drivers/base/init.c
+++ b/drivers/base/init.c
@@ -8,6 +8,7 @@
#include <linux/init.h>
#include <linux/memory.h>
#include <linux/of.h>
+#include <linux/backing-dev.h>
#include "base.h"
@@ -20,6 +21,7 @@
void __init driver_init(void)
{
/* These are the core pieces */
+ bdi_init(&noop_backing_dev_info);
devtmpfs_init();
devices_init();
buses_init();
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 084d67fd55cc..bc60c9cd3230 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -558,7 +558,7 @@ static ssize_t hard_offline_page_store(struct device *dev,
if (kstrtoull(buf, 0, &pfn) < 0)
return -EINVAL;
pfn >>= PAGE_SHIFT;
- ret = memory_failure(pfn, 0);
+ ret = memory_failure(pfn, MF_SW_SIMULATED);
if (ret == -EOPNOTSUPP)
ret = 0;
return ret ? ret : count;
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 676dc72d912d..949907e2e242 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -308,13 +308,10 @@ static int rpm_get_suppliers(struct device *dev)
/**
* pm_runtime_release_supplier - Drop references to device link's supplier.
* @link: Target device link.
- * @check_idle: Whether or not to check if the supplier device is idle.
*
- * Drop all runtime PM references associated with @link to its supplier device
- * and if @check_idle is set, check if that device is idle (and so it can be
- * suspended).
+ * Drop all runtime PM references associated with @link to its supplier device.
*/
-void pm_runtime_release_supplier(struct device_link *link, bool check_idle)
+void pm_runtime_release_supplier(struct device_link *link)
{
struct device *supplier = link->supplier;
@@ -327,9 +324,6 @@ void pm_runtime_release_supplier(struct device_link *link, bool check_idle)
while (refcount_dec_not_one(&link->rpm_active) &&
atomic_read(&supplier->power.usage_count) > 0)
pm_runtime_put_noidle(supplier);
-
- if (check_idle)
- pm_request_idle(supplier);
}
static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
@@ -337,8 +331,11 @@ static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
struct device_link *link;
list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
- device_links_read_lock_held())
- pm_runtime_release_supplier(link, try_to_suspend);
+ device_links_read_lock_held()) {
+ pm_runtime_release_supplier(link);
+ if (try_to_suspend)
+ pm_request_idle(link->supplier);
+ }
}
static void rpm_put_suppliers(struct device *dev)
@@ -1771,7 +1768,6 @@ void pm_runtime_get_suppliers(struct device *dev)
if (link->flags & DL_FLAG_PM_RUNTIME) {
link->supplier_preactivated = true;
pm_runtime_get_sync(link->supplier);
- refcount_inc(&link->rpm_active);
}
device_links_read_unlock(idx);
@@ -1791,19 +1787,8 @@ void pm_runtime_put_suppliers(struct device *dev)
list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
device_links_read_lock_held())
if (link->supplier_preactivated) {
- bool put;
-
link->supplier_preactivated = false;
-
- spin_lock_irq(&dev->power.lock);
-
- put = pm_runtime_status_suspended(dev) &&
- refcount_dec_not_one(&link->rpm_active);
-
- spin_unlock_irq(&dev->power.lock);
-
- if (put)
- pm_runtime_put(link->supplier);
+ pm_runtime_put(link->supplier);
}
device_links_read_unlock(idx);
@@ -1838,7 +1823,8 @@ void pm_runtime_drop_link(struct device_link *link)
return;
pm_runtime_drop_link_count(link->consumer);
- pm_runtime_release_supplier(link, true);
+ pm_runtime_release_supplier(link);
+ pm_request_idle(link->supplier);
}
static bool pm_runtime_need_not_resume(struct device *dev)
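Note: pm_runtime_release_supplier() above drains the link's rpm_active count down to one while the supplier still holds usage counts; idling the supplier is now the caller's job via pm_request_idle(). A simplified model of the drain-to-one loop (plain ints stand in for refcount_t/atomic_t, so this is single-threaded only):

#include <stdio.h>

static int rpm_active = 4;	/* link's active references; floor is 1 */
static int usage_count = 2;	/* supplier's runtime-PM usage counter */

/* Models refcount_dec_not_one(): decrement unless already at one. */
static int dec_not_one(int *c)
{
	if (*c == 1)
		return 0;
	(*c)--;
	return 1;
}

int main(void)
{
	while (dec_not_one(&rpm_active) && usage_count > 0)
		usage_count--;	/* stands in for pm_runtime_put_noidle() */

	/* rpm_active never drops below 1; usage_count never goes negative. */
	printf("rpm_active=%d usage_count=%d\n", rpm_active, usage_count);
	return 0;
}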
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 400c7412a7dc..a6db605707b0 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -252,6 +252,7 @@ static void regmap_irq_enable(struct irq_data *data)
struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
struct regmap *map = d->map;
const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
+ unsigned int reg = irq_data->reg_offset / map->reg_stride;
unsigned int mask, type;
type = irq_data->type.type_falling_val | irq_data->type.type_rising_val;
@@ -268,14 +269,14 @@ static void regmap_irq_enable(struct irq_data *data)
* at the corresponding offset in regmap_irq_set_type().
*/
if (d->chip->type_in_mask && type)
- mask = d->type_buf[irq_data->reg_offset / map->reg_stride];
+ mask = d->type_buf[reg] & irq_data->mask;
else
mask = irq_data->mask;
if (d->chip->clear_on_unmask)
d->clear_status = true;
- d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~mask;
+ d->mask_buf[reg] &= ~mask;
}
static void regmap_irq_disable(struct irq_data *data)
@@ -386,6 +387,7 @@ static inline int read_sub_irq_data(struct regmap_irq_chip_data *data,
subreg = &chip->sub_reg_offsets[b];
for (i = 0; i < subreg->num_regs; i++) {
unsigned int offset = subreg->offset[i];
+ unsigned int index = offset / map->reg_stride;
if (chip->not_fixed_stride)
ret = regmap_read(map,
@@ -394,7 +396,7 @@ static inline int read_sub_irq_data(struct regmap_irq_chip_data *data,
else
ret = regmap_read(map,
chip->status_base + offset,
- &data->status_buf[offset]);
+ &data->status_buf[index]);
if (ret)
break;
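Note: both regmap-irq hunks above index per-register buffers by offset / reg_stride instead of by the raw register offset; with a stride larger than one, the raw offset lands in the wrong slot or past the end of the buffer. A small demonstration (the stride and buffer size are made up):

#include <assert.h>

int main(void)
{
	unsigned int reg_stride = 4;	/* registers at offsets 0x0, 0x4, 0x8 */
	unsigned int status_buf[3] = { 0, 0, 0 };

	unsigned int offset = 8;	/* third register's offset */
	unsigned int index = offset / reg_stride;

	status_buf[index] = 0xff;	/* slot 2: correct */
	assert(index == 2);
	/* status_buf[offset] would write at slot 8: out of bounds. */
	return 0;
}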
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 2221d9863831..c3517ccc3159 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -1880,8 +1880,7 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
*/
bool regmap_can_raw_write(struct regmap *map)
{
- return map->bus && map->bus->write && map->format.format_val &&
- map->format.format_reg;
+ return map->write && map->format.format_val && map->format.format_reg;
}
EXPORT_SYMBOL_GPL(regmap_can_raw_write);
@@ -2155,10 +2154,9 @@ int regmap_noinc_write(struct regmap *map, unsigned int reg,
size_t write_len;
int ret;
- if (!map->bus)
- return -EINVAL;
- if (!map->bus->write)
+ if (!map->write)
return -ENOTSUPP;
+
if (val_len % map->format.val_bytes)
return -EINVAL;
if (!IS_ALIGNED(reg, map->reg_stride))
@@ -2278,7 +2276,7 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
* Some devices don't support bulk write, for them we have a series of
* single write operations.
*/
- if (!map->bus || !map->format.parse_inplace) {
+ if (!map->write || !map->format.parse_inplace) {
map->lock(map->lock_arg);
for (i = 0; i < val_count; i++) {
unsigned int ival;
@@ -2904,6 +2902,9 @@ int regmap_noinc_read(struct regmap *map, unsigned int reg,
size_t read_len;
int ret;
+ if (!map->read)
+ return -ENOTSUPP;
+
if (val_len % map->format.val_bytes)
return -EINVAL;
if (!IS_ALIGNED(reg, map->reg_stride))
@@ -3017,7 +3018,7 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
if (val_count == 0)
return -EINVAL;
- if (map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
+ if (map->read && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
if (ret != 0)
return ret;
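Note: the regmap.c hunks above gate raw access on the presence of the map->read/map->write callbacks rather than on a bus pointer; the idiom is a NULL test on an optional function pointer before dispatching through it. A minimal sketch (the struct layout is illustrative, and EOPNOTSUPP stands in for the kernel's -ENOTSUPP):

#include <errno.h>
#include <stdio.h>

struct map {
	int (*write)(unsigned int reg, unsigned int val);
};

static int map_raw_write(struct map *m, unsigned int reg, unsigned int val)
{
	if (!m->write)
		return -EOPNOTSUPP;	/* capability not wired up */
	return m->write(reg, val);
}

int main(void)
{
	struct map m = { .write = NULL };

	printf("write -> %d\n", map_raw_write(&m, 0x10, 0xab));
	return 0;
}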
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index a88ce4426400..3646c0cae672 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -152,6 +152,10 @@ static unsigned int xen_blkif_max_ring_order;
module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, 0444);
MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");
+static bool __read_mostly xen_blkif_trusted = true;
+module_param_named(trusted, xen_blkif_trusted, bool, 0644);
+MODULE_PARM_DESC(trusted, "Is the backend trusted");
+
#define BLK_RING_SIZE(info) \
__CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * (info)->nr_ring_pages)
@@ -210,6 +214,7 @@ struct blkfront_info
unsigned int feature_discard:1;
unsigned int feature_secdiscard:1;
unsigned int feature_persistent:1;
+ unsigned int bounce:1;
unsigned int discard_granularity;
unsigned int discard_alignment;
/* Number of 4KB segments handled */
@@ -310,8 +315,8 @@ static int fill_grant_buffer(struct blkfront_ring_info *rinfo, int num)
if (!gnt_list_entry)
goto out_of_memory;
- if (info->feature_persistent) {
- granted_page = alloc_page(GFP_NOIO);
+ if (info->bounce) {
+ granted_page = alloc_page(GFP_NOIO | __GFP_ZERO);
if (!granted_page) {
kfree(gnt_list_entry);
goto out_of_memory;
@@ -330,7 +335,7 @@ out_of_memory:
list_for_each_entry_safe(gnt_list_entry, n,
&rinfo->grants, node) {
list_del(&gnt_list_entry->node);
- if (info->feature_persistent)
+ if (info->bounce)
__free_page(gnt_list_entry->page);
kfree(gnt_list_entry);
i--;
@@ -376,7 +381,7 @@ static struct grant *get_grant(grant_ref_t *gref_head,
/* Assign a gref to this page */
gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
BUG_ON(gnt_list_entry->gref == -ENOSPC);
- if (info->feature_persistent)
+ if (info->bounce)
grant_foreign_access(gnt_list_entry, info);
else {
/* Grant access to the GFN passed by the caller */
@@ -400,7 +405,7 @@ static struct grant *get_indirect_grant(grant_ref_t *gref_head,
/* Assign a gref to this page */
gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
BUG_ON(gnt_list_entry->gref == -ENOSPC);
- if (!info->feature_persistent) {
+ if (!info->bounce) {
struct page *indirect_page;
/* Fetch a pre-allocated page to use for indirect grefs */
@@ -703,7 +708,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
.grant_idx = 0,
.segments = NULL,
.rinfo = rinfo,
- .need_copy = rq_data_dir(req) && info->feature_persistent,
+ .need_copy = rq_data_dir(req) && info->bounce,
};
/*
@@ -981,11 +986,12 @@ static void xlvbd_flush(struct blkfront_info *info)
{
blk_queue_write_cache(info->rq, info->feature_flush ? true : false,
info->feature_fua ? true : false);
- pr_info("blkfront: %s: %s %s %s %s %s\n",
+ pr_info("blkfront: %s: %s %s %s %s %s %s %s\n",
info->gd->disk_name, flush_info(info),
"persistent grants:", info->feature_persistent ?
"enabled;" : "disabled;", "indirect descriptors:",
- info->max_indirect_segments ? "enabled;" : "disabled;");
+ info->max_indirect_segments ? "enabled;" : "disabled;",
+ "bounce buffer:", info->bounce ? "enabled" : "disabled;");
}
static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
@@ -1207,7 +1213,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
if (!list_empty(&rinfo->indirect_pages)) {
struct page *indirect_page, *n;
- BUG_ON(info->feature_persistent);
+ BUG_ON(info->bounce);
list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) {
list_del(&indirect_page->lru);
__free_page(indirect_page);
@@ -1224,7 +1230,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
NULL);
rinfo->persistent_gnts_c--;
}
- if (info->feature_persistent)
+ if (info->bounce)
__free_page(persistent_gnt->page);
kfree(persistent_gnt);
}
@@ -1245,7 +1251,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
for (j = 0; j < segs; j++) {
persistent_gnt = rinfo->shadow[i].grants_used[j];
gnttab_end_foreign_access(persistent_gnt->gref, NULL);
- if (info->feature_persistent)
+ if (info->bounce)
__free_page(persistent_gnt->page);
kfree(persistent_gnt);
}
@@ -1428,7 +1434,7 @@ static int blkif_completion(unsigned long *id,
data.s = s;
num_sg = s->num_sg;
- if (bret->operation == BLKIF_OP_READ && info->feature_persistent) {
+ if (bret->operation == BLKIF_OP_READ && info->bounce) {
for_each_sg(s->sg, sg, num_sg, i) {
BUG_ON(sg->offset + sg->length > PAGE_SIZE);
@@ -1487,7 +1493,7 @@ static int blkif_completion(unsigned long *id,
* Add the used indirect page back to the list of
* available pages for indirect grefs.
*/
- if (!info->feature_persistent) {
+ if (!info->bounce) {
indirect_page = s->indirect_grants[i]->page;
list_add(&indirect_page->lru, &rinfo->indirect_pages);
}
@@ -1764,6 +1770,10 @@ static int talk_to_blkback(struct xenbus_device *dev,
if (!info)
return -ENODEV;
+ /* Check if backend is trusted. */
+ info->bounce = !xen_blkif_trusted ||
+ !xenbus_read_unsigned(dev->nodename, "trusted", 1);
+
max_page_order = xenbus_read_unsigned(info->xbdev->otherend,
"max-ring-page-order", 0);
ring_page_order = min(xen_blkif_max_ring_order, max_page_order);
@@ -2114,9 +2124,11 @@ static void blkfront_closing(struct blkfront_info *info)
return;
/* No more blkif_request(). */
- blk_mq_stop_hw_queues(info->rq);
- blk_mark_disk_dead(info->gd);
- set_capacity(info->gd, 0);
+ if (info->rq && info->gd) {
+ blk_mq_stop_hw_queues(info->rq);
+ blk_mark_disk_dead(info->gd);
+ set_capacity(info->gd, 0);
+ }
for_each_rinfo(info, rinfo, i) {
/* No more gnttab callback work. */
@@ -2171,17 +2183,18 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
if (err)
goto out_of_memory;
- if (!info->feature_persistent && info->max_indirect_segments) {
+ if (!info->bounce && info->max_indirect_segments) {
/*
- * We are using indirect descriptors but not persistent
- * grants, we need to allocate a set of pages that can be
+ * We are using indirect descriptors but don't have a bounce
+ * buffer, so we need to allocate a set of pages that can be
* used for mapping indirect grefs
*/
int num = INDIRECT_GREFS(grants) * BLK_RING_SIZE(info);
BUG_ON(!list_empty(&rinfo->indirect_pages));
for (i = 0; i < num; i++) {
- struct page *indirect_page = alloc_page(GFP_KERNEL);
+ struct page *indirect_page = alloc_page(GFP_KERNEL |
+ __GFP_ZERO);
if (!indirect_page)
goto out_of_memory;
list_add(&indirect_page->lru, &rinfo->indirect_pages);
@@ -2274,6 +2287,8 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
info->feature_persistent =
!!xenbus_read_unsigned(info->xbdev->otherend,
"feature-persistent", 0);
+ if (info->feature_persistent)
+ info->bounce = true;
indirect_segments = xenbus_read_unsigned(info->xbdev->otherend,
"feature-max-indirect-segments", 0);
@@ -2457,16 +2472,19 @@ static int blkfront_remove(struct xenbus_device *xbdev)
dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename);
- del_gendisk(info->gd);
+ if (info->gd)
+ del_gendisk(info->gd);
mutex_lock(&blkfront_mutex);
list_del(&info->info_list);
mutex_unlock(&blkfront_mutex);
blkif_free(info, 0);
- xlbd_release_minors(info->gd->first_minor, info->gd->minors);
- blk_cleanup_disk(info->gd);
- blk_mq_free_tag_set(&info->tag_set);
+ if (info->gd) {
+ xlbd_release_minors(info->gd->first_minor, info->gd->minors);
+ blk_cleanup_disk(info->gd);
+ blk_mq_free_tag_set(&info->tag_set);
+ }
kfree(info);
return 0;
@@ -2542,6 +2560,13 @@ static void blkfront_delay_work(struct work_struct *work)
struct blkfront_info *info;
bool need_schedule_work = false;
+ /*
+ * Note that when using bounce buffers but not persistent grants
+ * there's no need to run blkfront_delay_work because grants are
+ * revoked in blkif_completion or else an error is reported and the
+ * connection is closed.
+ */
+
mutex_lock(&blkfront_mutex);
list_for_each_entry(info, &info_list, info_list) {
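The hunks above wire the new bouncing logic to two inputs: a module-wide "trusted" parameter and an optional per-device "trusted" xenstore node (default 1). A minimal sketch of that decision pattern, with the mydev_* names as hypothetical stand-ins rather than blkfront's own symbols:

#include <linux/module.h>
#include <xen/xenbus.h>

struct mydev_info {
        unsigned int bounce:1;  /* copy I/O through zeroed scratch pages */
};

static bool __read_mostly mydev_trusted = true;
module_param_named(trusted, mydev_trusted, bool, 0644);
MODULE_PARM_DESC(trusted, "Is the backend trusted");

static void mydev_check_trust(struct xenbus_device *dev,
                              struct mydev_info *info)
{
        /* Bounce unless both the global knob and the per-device
         * xenstore node agree the backend is trustworthy. */
        info->bounce = !mydev_trusted ||
                       !xenbus_read_unsigned(dev->nodename, "trusted", 1);
}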
diff --git a/drivers/bus/bt1-apb.c b/drivers/bus/bt1-apb.c
index b25ff941e7c7..63b1b4a76671 100644
--- a/drivers/bus/bt1-apb.c
+++ b/drivers/bus/bt1-apb.c
@@ -175,10 +175,9 @@ static int bt1_apb_request_rst(struct bt1_apb *apb)
int ret;
apb->prst = devm_reset_control_get_optional_exclusive(apb->dev, "prst");
- if (IS_ERR(apb->prst)) {
- dev_warn(apb->dev, "Couldn't get reset control line\n");
- return PTR_ERR(apb->prst);
- }
+ if (IS_ERR(apb->prst))
+ return dev_err_probe(apb->dev, PTR_ERR(apb->prst),
+ "Couldn't get reset control line\n");
ret = reset_control_deassert(apb->prst);
if (ret)
@@ -199,10 +198,9 @@ static int bt1_apb_request_clk(struct bt1_apb *apb)
int ret;
apb->pclk = devm_clk_get(apb->dev, "pclk");
- if (IS_ERR(apb->pclk)) {
- dev_err(apb->dev, "Couldn't get APB clock descriptor\n");
- return PTR_ERR(apb->pclk);
- }
+ if (IS_ERR(apb->pclk))
+ return dev_err_probe(apb->dev, PTR_ERR(apb->pclk),
+ "Couldn't get APB clock descriptor\n");
ret = clk_prepare_enable(apb->pclk);
if (ret) {
diff --git a/drivers/bus/bt1-axi.c b/drivers/bus/bt1-axi.c
index e7a6744acc7b..70e49a6e5374 100644
--- a/drivers/bus/bt1-axi.c
+++ b/drivers/bus/bt1-axi.c
@@ -135,10 +135,9 @@ static int bt1_axi_request_rst(struct bt1_axi *axi)
int ret;
axi->arst = devm_reset_control_get_optional_exclusive(axi->dev, "arst");
- if (IS_ERR(axi->arst)) {
- dev_warn(axi->dev, "Couldn't get reset control line\n");
- return PTR_ERR(axi->arst);
- }
+ if (IS_ERR(axi->arst))
+ return dev_err_probe(axi->dev, PTR_ERR(axi->arst),
+ "Couldn't get reset control line\n");
ret = reset_control_deassert(axi->arst);
if (ret)
@@ -159,10 +158,9 @@ static int bt1_axi_request_clk(struct bt1_axi *axi)
int ret;
axi->aclk = devm_clk_get(axi->dev, "aclk");
- if (IS_ERR(axi->aclk)) {
- dev_err(axi->dev, "Couldn't get AXI Interconnect clock\n");
- return PTR_ERR(axi->aclk);
- }
+ if (IS_ERR(axi->aclk))
+ return dev_err_probe(axi->dev, PTR_ERR(axi->aclk),
+ "Couldn't get AXI Interconnect clock\n");
ret = clk_prepare_enable(axi->aclk);
if (ret) {
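Both Baikal-T1 hunks fold the "log, then return PTR_ERR()" pair into dev_err_probe(), which additionally demotes -EPROBE_DEFER to a debug note and records the deferral reason for devices_deferred. A sketch of the idiom for a hypothetical clock consumer:

#include <linux/clk.h>
#include <linux/device.h>

static int mydrv_request_clk(struct device *dev, struct clk **pclk)
{
        *pclk = devm_clk_get(dev, "pclk");
        if (IS_ERR(*pclk))
                /* One statement: log (quietly for probe deferrals)
                 * and return the error code unchanged. */
                return dev_err_probe(dev, PTR_ERR(*pclk),
                                     "Couldn't get the clock\n");

        return clk_prepare_enable(*pclk);
}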
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index e81a9700cfd0..6143dbf31f31 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -1239,14 +1239,14 @@ error_cleanup_mc_io:
static int fsl_mc_bus_remove(struct platform_device *pdev)
{
struct fsl_mc *mc = platform_get_drvdata(pdev);
+ struct fsl_mc_io *mc_io;
if (!fsl_mc_is_root_dprc(&mc->root_mc_bus_dev->dev))
return -EINVAL;
+ mc_io = mc->root_mc_bus_dev->mc_io;
fsl_mc_device_remove(mc->root_mc_bus_dev);
-
- fsl_destroy_mc_io(mc->root_mc_bus_dev->mc_io);
- mc->root_mc_bus_dev->mc_io = NULL;
+ fsl_destroy_mc_io(mc_io);
bus_unregister_notifier(&fsl_mc_bus_type, &fsl_mc_nb);
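The fsl-mc change is a use-after-free fix: fsl_mc_device_remove() frees root_mc_bus_dev, so the old code reached into freed memory to find the mc_io to destroy. The general rule, reduced to a sketch with hypothetical types and helpers:

struct io_ctx;                          /* hypothetical */
struct child_dev { struct io_ctx *io; };

static void remove_child(struct child_dev *child);     /* frees *child */
static void destroy_io(struct io_ctx *io);

static void bus_teardown(struct child_dev *child)
{
        struct io_ctx *io = child->io;  /* save before the free */

        remove_child(child);            /* *child is gone after this */
        destroy_io(io);                 /* safe: uses the saved copy */
}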
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 69fd31ffb847..0b6c03643ddc 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -429,28 +429,40 @@ config ADI
driver include crash and makedumpfile.
config RANDOM_TRUST_CPU
- bool "Trust the CPU manufacturer to initialize Linux's CRNG"
+ bool "Initialize RNG using CPU RNG instructions"
+ default y
depends on ARCH_RANDOM
- default n
help
- Assume that CPU manufacturer (e.g., Intel or AMD for RDSEED or
- RDRAND, IBM for the S390 and Power PC architectures) is trustworthy
- for the purposes of initializing Linux's CRNG. Since this is not
- something that can be independently audited, this amounts to trusting
- that CPU manufacturer (perhaps with the insistence or mandate
- of a Nation State's intelligence or law enforcement agencies)
- has not installed a hidden back door to compromise the CPU's
- random number generation facilities. This can also be configured
- at boot with "random.trust_cpu=on/off".
+ Initialize the RNG using random numbers supplied by the CPU's
+ RNG instructions (e.g. RDRAND), if supported and available. These
+ random numbers are never used directly, but are rather hashed into
+ the main input pool, and this happens regardless of whether or not
+ this option is enabled. Instead, this option controls whether
+ they are credited and hence can initialize the RNG. Additionally,
+ other sources of randomness are always used, regardless of this
+ setting. Enabling this implies trusting that the CPU can supply high
+ quality and non-backdoored random numbers.
+
+ Say Y here unless you have reason to mistrust your CPU or believe
+ its RNG facilities may be faulty. This may also be configured at
+ boot time with "random.trust_cpu=on/off".
config RANDOM_TRUST_BOOTLOADER
- bool "Trust the bootloader to initialize Linux's CRNG"
- help
- Some bootloaders can provide entropy to increase the kernel's initial
- device randomness. Say Y here to assume the entropy provided by the
- booloader is trustworthy so it will be added to the kernel's entropy
- pool. Otherwise, say N here so it will be regarded as device input that
- only mixes the entropy pool. This can also be configured at boot with
- "random.trust_bootloader=on/off".
+ bool "Initialize RNG using bootloader-supplied seed"
+ default y
+ help
+ Initialize the RNG using a seed supplied by the bootloader or boot
+ environment (e.g. EFI or a bootloader-generated device tree). This
+ seed is not used directly, but is rather hashed into the main input
+ pool, and this happens regardless of whether or not this option is
+ enabled. Instead, this option controls whether the seed is credited
+ and hence can initialize the RNG. Additionally, other sources of
+ randomness are always used, regardless of this setting. Enabling
+ this implies trusting that the bootloader can supply high quality and
+ non-backdoored seeds.
+
+ Say Y here unless you have reason to mistrust your bootloader or
+ believe its RNG facilities may be faulty. This may also be configured
+ at boot time with "random.trust_bootloader=on/off".
endmenu
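Both help texts defer the final decision to a boot-time switch. For reference, such an override is typically wired as an early_param() hook over a __initdata flag, which is what random.c does (trimmed sketch of that wiring):

#include <linux/init.h>
#include <linux/kstrtox.h>

static bool trust_cpu __initdata = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);

static int __init parse_trust_cpu(char *arg)
{
        /* Accepts the usual boolean spellings: on/off, y/n, 1/0 */
        return kstrtobool(arg, &trust_cpu);
}
early_param("random.trust_cpu", parse_trust_cpu);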
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index e856df7e285c..a6f3a8a2aca6 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -159,6 +159,8 @@ static int probe_common(struct virtio_device *vdev)
goto err_find;
}
+ virtio_device_ready(vdev);
+
/* we always have a pending entropy request */
request_entropy(vi);
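The virtio-rng hunk is an ordering fix: a probe routine must mark the device ready before queueing its first buffer, since kicking a virtqueue on a device that has not reached DRIVER_OK is invalid. The probe-time shape, sketched with hypothetical helpers:

#include <linux/virtio.h>
#include <linux/virtio_config.h>

static int mydrv_setup_vqs(struct virtio_device *vdev);         /* hypothetical */
static void mydrv_queue_first_buffer(struct virtio_device *vdev);

static int mydrv_probe(struct virtio_device *vdev)
{
        int err = mydrv_setup_vqs(vdev);

        if (err)
                return err;

        virtio_device_ready(vdev);      /* DRIVER_OK: vqs are now live */

        mydrv_queue_first_buffer(vdev); /* only safe after ready */
        return 0;
}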
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
index 0e22e3b0a04e..38aad99ebb61 100644
--- a/drivers/char/lp.c
+++ b/drivers/char/lp.c
@@ -1019,7 +1019,7 @@ static struct parport_driver lp_driver = {
static int __init lp_init(void)
{
- int i, err = 0;
+ int i, err;
if (parport_nr[0] == LP_PARPORT_OFF)
return 0;
diff --git a/drivers/char/random.c b/drivers/char/random.c
index b691b9d59503..a1af90bacc9f 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -87,7 +87,7 @@ static struct fasync_struct *fasync;
/* Control how we warn userspace. */
static struct ratelimit_state urandom_warning =
- RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);
+ RATELIMIT_STATE_INIT_FLAGS("urandom_warning", HZ, 3, RATELIMIT_MSG_ON_RELEASE);
static int ratelimit_disable __read_mostly =
IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM);
module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
@@ -408,7 +408,7 @@ static ssize_t get_random_bytes_user(struct iov_iter *iter)
/*
* Immediately overwrite the ChaCha key at index 4 with random
- * bytes, in case userspace causes copy_to_user() below to sleep
+ * bytes, in case userspace causes copy_to_iter() below to sleep
* forever, so that we still retain forward secrecy in that case.
*/
crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE);
@@ -650,7 +650,8 @@ static void __cold _credit_init_bits(size_t bits)
if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) {
crng_reseed(); /* Sets crng_init to CRNG_READY under base_crng.lock. */
- execute_in_process_context(crng_set_ready, &set_ready);
+ if (static_key_initialized)
+ execute_in_process_context(crng_set_ready, &set_ready);
wake_up_interruptible(&crng_init_wait);
kill_fasync(&fasync, SIGIO, POLL_IN);
pr_notice("crng init done\n");
@@ -724,9 +725,8 @@ static void __cold _credit_init_bits(size_t bits)
*
**********************************************************************/
-static bool used_arch_random;
-static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
-static bool trust_bootloader __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER);
+static bool trust_cpu __initdata = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
+static bool trust_bootloader __initdata = IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER);
static int __init parse_trust_cpu(char *arg)
{
return kstrtobool(arg, &trust_cpu);
@@ -776,7 +776,7 @@ static struct notifier_block pm_notifier = { .notifier_call = random_pm_notifica
int __init random_init(const char *command_line)
{
ktime_t now = ktime_get_real();
- unsigned int i, arch_bytes;
+ unsigned int i, arch_bits;
unsigned long entropy;
#if defined(LATENT_ENTROPY_PLUGIN)
@@ -784,12 +784,12 @@ int __init random_init(const char *command_line)
_mix_pool_bytes(compiletime_seed, sizeof(compiletime_seed));
#endif
- for (i = 0, arch_bytes = BLAKE2S_BLOCK_SIZE;
+ for (i = 0, arch_bits = BLAKE2S_BLOCK_SIZE * 8;
i < BLAKE2S_BLOCK_SIZE; i += sizeof(entropy)) {
if (!arch_get_random_seed_long_early(&entropy) &&
!arch_get_random_long_early(&entropy)) {
entropy = random_get_entropy();
- arch_bytes -= sizeof(entropy);
+ arch_bits -= sizeof(entropy) * 8;
}
_mix_pool_bytes(&entropy, sizeof(entropy));
}
@@ -798,11 +798,18 @@ int __init random_init(const char *command_line)
_mix_pool_bytes(command_line, strlen(command_line));
add_latent_entropy();
+ /*
+ * If we were initialized by the bootloader before jump labels are
+ * initialized, then we should enable the static branch here, where
+ * it's guaranteed that jump labels have been initialized.
+ */
+ if (!static_branch_likely(&crng_is_ready) && crng_init >= CRNG_READY)
+ crng_set_ready(NULL);
+
if (crng_ready())
crng_reseed();
else if (trust_cpu)
- credit_init_bits(arch_bytes * 8);
- used_arch_random = arch_bytes * 8 >= POOL_READY_BITS;
+ _credit_init_bits(arch_bits);
WARN_ON(register_pm_notifier(&pm_notifier));
@@ -812,17 +819,6 @@ int __init random_init(const char *command_line)
}
/*
- * Returns whether arch randomness has been mixed into the initial
- * state of the RNG, regardless of whether or not that randomness
- * was credited. Knowing this is only good for a very limited set
- * of uses, such as early init printk pointer obfuscation.
- */
-bool rng_has_arch_random(void)
-{
- return used_arch_random;
-}
-
-/*
* Add device- or boot-specific data to the input pool to help
* initialize it.
*
@@ -865,13 +861,12 @@ EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
* Handle random seed passed by bootloader, and credit it if
* CONFIG_RANDOM_TRUST_BOOTLOADER is set.
*/
-void __cold add_bootloader_randomness(const void *buf, size_t len)
+void __init add_bootloader_randomness(const void *buf, size_t len)
{
mix_pool_bytes(buf, len);
if (trust_bootloader)
credit_init_bits(len * 8);
}
-EXPORT_SYMBOL_GPL(add_bootloader_randomness);
#if IS_ENABLED(CONFIG_VMGENID)
static BLOCKING_NOTIFIER_HEAD(vmfork_chain);
@@ -1014,7 +1009,7 @@ void add_interrupt_randomness(int irq)
if (new_count & MIX_INFLIGHT)
return;
- if (new_count < 64 && !time_is_before_jiffies(fast_pool->last + HZ))
+ if (new_count < 1024 && !time_is_before_jiffies(fast_pool->last + HZ))
return;
if (unlikely(!fast_pool->mix.func))
@@ -1179,7 +1174,7 @@ static void __cold entropy_timer(struct timer_list *timer)
*/
static void __cold try_to_generate_entropy(void)
{
- enum { NUM_TRIAL_SAMPLES = 8192, MAX_SAMPLES_PER_BIT = 32 };
+ enum { NUM_TRIAL_SAMPLES = 8192, MAX_SAMPLES_PER_BIT = HZ / 30 };
struct entropy_timer_state stack;
unsigned int i, num_different = 0;
unsigned long last = random_get_entropy();
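The random_init() hunk switches the accounting from bytes to bits and credits the pool directly. An annotated restatement of that loop, using the same symbols as the patch above: assume a full block's worth of creditable bits up front, then dock one machine word's worth whenever the CPU's seed instructions cannot service a read (the cycle-counter fallback is still mixed in, just never credited).

unsigned int arch_bits = BLAKE2S_BLOCK_SIZE * 8;
unsigned long entropy;
unsigned int i;

for (i = 0; i < BLAKE2S_BLOCK_SIZE; i += sizeof(entropy)) {
        if (!arch_get_random_seed_long_early(&entropy) &&
            !arch_get_random_long_early(&entropy)) {
                entropy = random_get_entropy();  /* fallback, not credited */
                arch_bits -= sizeof(entropy) * 8;
        }
        _mix_pool_bytes(&entropy, sizeof(entropy));
}
/* later: if (trust_cpu) _credit_init_bits(arch_bits); */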
diff --git a/drivers/clk/clk-lan966x.c b/drivers/clk/clk-lan966x.c
index d1535ac13e89..81cb90955d68 100644
--- a/drivers/clk/clk-lan966x.c
+++ b/drivers/clk/clk-lan966x.c
@@ -213,7 +213,7 @@ static int lan966x_gate_clk_register(struct device *dev,
hw_data->hws[i] =
devm_clk_hw_register_gate(dev, clk_gate_desc[idx].name,
- "lan966x", 0, base,
+ "lan966x", 0, gate_base,
clk_gate_desc[idx].bit_idx,
0, &clk_gate_lock);
diff --git a/drivers/clk/stm32/reset-stm32.c b/drivers/clk/stm32/reset-stm32.c
index 040870130e4b..e89381528af9 100644
--- a/drivers/clk/stm32/reset-stm32.c
+++ b/drivers/clk/stm32/reset-stm32.c
@@ -111,6 +111,7 @@ int stm32_rcc_reset_init(struct device *dev, const struct of_device_id *match,
if (!reset_data)
return -ENOMEM;
+ spin_lock_init(&reset_data->lock);
reset_data->membase = base;
reset_data->rcdev.owner = THIS_MODULE;
reset_data->rcdev.ops = &stm32_reset_ops;
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
index ff188ab68496..bb47610bbd1c 100644
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -565,4 +565,3 @@ void __init hv_init_clocksource(void)
hv_sched_clock_offset = hv_read_reference_counter();
hv_setup_sched_clock(read_hv_sched_clock_msr);
}
-EXPORT_SYMBOL_GPL(hv_init_clocksource);
diff --git a/drivers/comedi/drivers/vmk80xx.c b/drivers/comedi/drivers/vmk80xx.c
index 46023adc5395..4536ed43f65b 100644
--- a/drivers/comedi/drivers/vmk80xx.c
+++ b/drivers/comedi/drivers/vmk80xx.c
@@ -684,7 +684,7 @@ static int vmk80xx_alloc_usb_buffers(struct comedi_device *dev)
if (!devpriv->usb_rx_buf)
return -ENOMEM;
- size = max(usb_endpoint_maxp(devpriv->ep_rx), MIN_BUF_SIZE);
+ size = max(usb_endpoint_maxp(devpriv->ep_tx), MIN_BUF_SIZE);
devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL);
if (!devpriv->usb_tx_buf)
return -ENOMEM;
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 7be38bc6a673..9ac75c1cde9c 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -566,6 +566,28 @@ static int amd_pstate_cpu_exit(struct cpufreq_policy *policy)
return 0;
}
+static int amd_pstate_cpu_resume(struct cpufreq_policy *policy)
+{
+ int ret;
+
+ ret = amd_pstate_enable(true);
+ if (ret)
+ pr_err("failed to enable amd-pstate during resume, return %d\n", ret);
+
+ return ret;
+}
+
+static int amd_pstate_cpu_suspend(struct cpufreq_policy *policy)
+{
+ int ret;
+
+ ret = amd_pstate_enable(false);
+ if (ret)
+ pr_err("failed to disable amd-pstate during suspend, return %d\n", ret);
+
+ return ret;
+}
+
/* Sysfs attributes */
/*
@@ -636,6 +658,8 @@ static struct cpufreq_driver amd_pstate_driver = {
.target = amd_pstate_target,
.init = amd_pstate_cpu_init,
.exit = amd_pstate_cpu_exit,
+ .suspend = amd_pstate_cpu_suspend,
+ .resume = amd_pstate_cpu_resume,
.set_boost = amd_pstate_set_boost,
.name = "amd-pstate",
.attr = amd_pstate_attr,
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index 96de1536e1cb..2c96de3f2d83 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -127,6 +127,7 @@ static const struct of_device_id blocklist[] __initconst = {
{ .compatible = "mediatek,mt8173", },
{ .compatible = "mediatek,mt8176", },
{ .compatible = "mediatek,mt8183", },
+ { .compatible = "mediatek,mt8186", },
{ .compatible = "mediatek,mt8365", },
{ .compatible = "mediatek,mt8516", },
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index 37a1eb20f5ba..76f6b3884e6b 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -439,9 +439,13 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
/* Both presence and absence of sram regulator are valid cases. */
info->sram_reg = regulator_get_optional(cpu_dev, "sram");
- if (IS_ERR(info->sram_reg))
+ if (IS_ERR(info->sram_reg)) {
+ ret = PTR_ERR(info->sram_reg);
+ if (ret == -EPROBE_DEFER)
+ goto out_free_resources;
+
info->sram_reg = NULL;
- else {
+ } else {
ret = regulator_enable(info->sram_reg);
if (ret) {
dev_warn(cpu_dev, "cpu%d: failed to enable vsram\n", cpu);
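The mediatek hunk illustrates the trap with regulator_get_optional(): absence of the supply is a valid configuration, but -EPROBE_DEFER means the supplier may still appear and must be propagated rather than swallowed. Hedged sketch of the pattern:

#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int mydrv_get_sram(struct device *dev, struct regulator **sram)
{
        *sram = regulator_get_optional(dev, "sram");
        if (IS_ERR(*sram)) {
                int err = PTR_ERR(*sram);

                if (err == -EPROBE_DEFER)
                        return err;     /* supplier not ready yet */
                *sram = NULL;           /* genuinely absent: fine */
        }
        return 0;
}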
diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c
index 20f64a8b0a35..4b8ee2014da6 100644
--- a/drivers/cpufreq/pmac32-cpufreq.c
+++ b/drivers/cpufreq/pmac32-cpufreq.c
@@ -470,6 +470,10 @@ static int pmac_cpufreq_init_MacRISC3(struct device_node *cpunode)
if (slew_done_gpio_np)
slew_done_gpio = read_gpio(slew_done_gpio_np);
+ of_node_put(volt_gpio_np);
+ of_node_put(freq_gpio_np);
+ of_node_put(slew_done_gpio_np);
+
/* If we use the frequency GPIOs, calculate the min/max speeds based
* on the bus frequencies
*/
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index 0253731d6d25..36c79580fba2 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -442,6 +442,9 @@ static int qcom_cpufreq_hw_cpu_online(struct cpufreq_policy *policy)
struct platform_device *pdev = cpufreq_get_driver_data();
int ret;
+ if (data->throttle_irq <= 0)
+ return 0;
+
ret = irq_set_affinity_hint(data->throttle_irq, policy->cpus);
if (ret)
dev_err(&pdev->dev, "Failed to set CPU affinity of %s[%d]\n",
@@ -469,6 +472,9 @@ static int qcom_cpufreq_hw_cpu_offline(struct cpufreq_policy *policy)
static void qcom_cpufreq_hw_lmh_exit(struct qcom_cpufreq_data *data)
{
+ if (data->throttle_irq <= 0)
+ return;
+
free_irq(data->throttle_irq, data);
}
diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c
index 6b6b20da2bcf..573b417e1483 100644
--- a/drivers/cpufreq/qoriq-cpufreq.c
+++ b/drivers/cpufreq/qoriq-cpufreq.c
@@ -275,6 +275,7 @@ static int qoriq_cpufreq_probe(struct platform_device *pdev)
np = of_find_matching_node(NULL, qoriq_cpufreq_blacklist);
if (np) {
+ of_node_put(np);
dev_info(&pdev->dev, "Disabling due to erratum A-008083");
return -ENODEV;
}
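The pmac32 and qoriq hunks plug the same leak class: of_find_matching_node(), of_parse_phandle() and friends return a device_node with an elevated refcount, which must be dropped with of_node_put() on every path, including early bail-outs. Minimal sketch:

#include <linux/of.h>

static bool soc_matches_blocklist(const struct of_device_id *blocklist)
{
        struct device_node *np;

        np = of_find_matching_node(NULL, blocklist);
        if (!np)
                return false;

        of_node_put(np);        /* drop the reference the lookup took */
        return true;
}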
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index ee99c02c84e8..3e6aa319920b 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -133,98 +133,6 @@ config CRYPTO_PAES_S390
Select this option if you want to use the paes cipher
for example to use protected key encrypted devices.
-config CRYPTO_SHA1_S390
- tristate "SHA1 digest algorithm"
- depends on S390
- select CRYPTO_HASH
- help
- This is the s390 hardware accelerated implementation of the
- SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
-
- It is available as of z990.
-
-config CRYPTO_SHA256_S390
- tristate "SHA256 digest algorithm"
- depends on S390
- select CRYPTO_HASH
- help
- This is the s390 hardware accelerated implementation of the
- SHA256 secure hash standard (DFIPS 180-2).
-
- It is available as of z9.
-
-config CRYPTO_SHA512_S390
- tristate "SHA384 and SHA512 digest algorithm"
- depends on S390
- select CRYPTO_HASH
- help
- This is the s390 hardware accelerated implementation of the
- SHA512 secure hash standard.
-
- It is available as of z10.
-
-config CRYPTO_SHA3_256_S390
- tristate "SHA3_224 and SHA3_256 digest algorithm"
- depends on S390
- select CRYPTO_HASH
- help
- This is the s390 hardware accelerated implementation of the
- SHA3_256 secure hash standard.
-
- It is available as of z14.
-
-config CRYPTO_SHA3_512_S390
- tristate "SHA3_384 and SHA3_512 digest algorithm"
- depends on S390
- select CRYPTO_HASH
- help
- This is the s390 hardware accelerated implementation of the
- SHA3_512 secure hash standard.
-
- It is available as of z14.
-
-config CRYPTO_DES_S390
- tristate "DES and Triple DES cipher algorithms"
- depends on S390
- select CRYPTO_ALGAPI
- select CRYPTO_SKCIPHER
- select CRYPTO_LIB_DES
- help
- This is the s390 hardware accelerated implementation of the
- DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3).
-
- As of z990 the ECB and CBC mode are hardware accelerated.
- As of z196 the CTR mode is hardware accelerated.
-
-config CRYPTO_AES_S390
- tristate "AES cipher algorithms"
- depends on S390
- select CRYPTO_ALGAPI
- select CRYPTO_SKCIPHER
- help
- This is the s390 hardware accelerated implementation of the
- AES cipher algorithms (FIPS-197).
-
- As of z9 the ECB and CBC modes are hardware accelerated
- for 128 bit keys.
- As of z10 the ECB and CBC modes are hardware accelerated
- for all AES key sizes.
- As of z196 the CTR mode is hardware accelerated for all AES
- key sizes and XTS mode is hardware accelerated for 256 and
- 512 bit keys.
-
-config CRYPTO_CHACHA_S390
- tristate "ChaCha20 stream cipher"
- depends on S390
- select CRYPTO_SKCIPHER
- select CRYPTO_LIB_CHACHA_GENERIC
- select CRYPTO_ARCH_HAVE_LIB_CHACHA
- help
- This is the s390 SIMD implementation of the ChaCha20 stream
- cipher (RFC 7539).
-
- It is available as of z13.
-
config S390_PRNG
tristate "Pseudo random number generator device driver"
depends on S390
@@ -238,29 +146,6 @@ config S390_PRNG
It is available as of z9.
-config CRYPTO_GHASH_S390
- tristate "GHASH hash function"
- depends on S390
- select CRYPTO_HASH
- help
- This is the s390 hardware accelerated implementation of GHASH,
- the hash function used in GCM (Galois/Counter mode).
-
- It is available as of z196.
-
-config CRYPTO_CRC32_S390
- tristate "CRC-32 algorithms"
- depends on S390
- select CRYPTO_HASH
- select CRC32
- help
- Select this option if you want to use hardware accelerated
- implementations of CRC algorithms. With this option, you
- can optimize the computation of CRC-32 (IEEE 802.3 Ethernet)
- and CRC-32C (Castagnoli).
-
- It is available with IBM z13 or later.
-
config CRYPTO_DEV_NIAGARA2
tristate "Niagara2 Stream Processing Unit driver"
select CRYPTO_LIB_DES
diff --git a/drivers/crypto/ccp/sp-platform.c b/drivers/crypto/ccp/sp-platform.c
index 9dba52fbee99..7d79a8744f9a 100644
--- a/drivers/crypto/ccp/sp-platform.c
+++ b/drivers/crypto/ccp/sp-platform.c
@@ -85,17 +85,9 @@ static int sp_get_irqs(struct sp_device *sp)
struct sp_platform *sp_platform = sp->dev_specific;
struct device *dev = sp->dev;
struct platform_device *pdev = to_platform_device(dev);
- unsigned int i, count;
int ret;
- for (i = 0, count = 0; i < pdev->num_resources; i++) {
- struct resource *res = &pdev->resource[i];
-
- if (resource_type(res) == IORESOURCE_IRQ)
- count++;
- }
-
- sp_platform->irq_count = count;
+ sp_platform->irq_count = platform_irq_count(pdev);
ret = platform_get_irq(pdev, 0);
if (ret < 0) {
@@ -104,7 +96,7 @@ static int sp_get_irqs(struct sp_device *sp)
}
sp->psp_irq = ret;
- if (count == 1) {
+ if (sp_platform->irq_count == 1) {
sp->ccp_irq = ret;
} else {
ret = platform_get_irq(pdev, 1);
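The ccp hunk replaces an open-coded walk over pdev->resource with platform_irq_count(), which also covers interrupts described in the device tree or ACPI rather than as pre-populated resources. Usage sketch:

#include <linux/platform_device.h>

static int mydrv_count_irqs(struct platform_device *pdev)
{
        int n = platform_irq_count(pdev);       /* 0, >0 or -EPROBE_DEFER */

        if (n < 0)
                return n;

        dev_dbg(&pdev->dev, "%d interrupt(s) described\n", n);
        return n;
}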
diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
index 0e89a7a932d4..bfc8ee876278 100644
--- a/drivers/cxl/core/hdm.c
+++ b/drivers/cxl/core/hdm.c
@@ -197,7 +197,7 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
else
cxld->target_type = CXL_DECODER_ACCELERATOR;
- if (is_cxl_endpoint(to_cxl_port(cxld->dev.parent)))
+ if (is_endpoint_decoder(&cxld->dev))
return 0;
target_list.value =
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index 54f434733b56..cbf23beebebe 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -355,11 +355,13 @@ static int cxl_to_mem_cmd(struct cxl_mem_command *mem_cmd,
return -EBUSY;
/* Check the input buffer is the expected size */
- if (info->size_in != send_cmd->in.size)
+ if ((info->size_in != CXL_VARIABLE_PAYLOAD) &&
+ (info->size_in != send_cmd->in.size))
return -ENOMEM;
/* Check the output buffer is at least large enough */
- if (send_cmd->out.size < info->size_out)
+ if ((info->size_out != CXL_VARIABLE_PAYLOAD) &&
+ (send_cmd->out.size < info->size_out))
return -ENOMEM;
*mem_cmd = (struct cxl_mem_command) {
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index ea60abda6500..dbce99bdffab 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -272,7 +272,7 @@ static const struct device_type cxl_decoder_root_type = {
.groups = cxl_decoder_root_attribute_groups,
};
-static bool is_endpoint_decoder(struct device *dev)
+bool is_endpoint_decoder(struct device *dev)
{
return dev->type == &cxl_decoder_endpoint_type;
}
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index 140dc3278cde..6799b27c7db2 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -340,6 +340,7 @@ struct cxl_dport *cxl_find_dport_by_dev(struct cxl_port *port,
struct cxl_decoder *to_cxl_decoder(struct device *dev);
bool is_root_decoder(struct device *dev);
+bool is_endpoint_decoder(struct device *dev);
bool is_cxl_decoder(struct device *dev);
struct cxl_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
unsigned int nr_targets);
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index 60d10ee1e7fc..7df0b053373a 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -300,13 +300,13 @@ struct cxl_mbox_identify {
} __packed;
struct cxl_mbox_get_lsa {
- u32 offset;
- u32 length;
+ __le32 offset;
+ __le32 length;
} __packed;
struct cxl_mbox_set_lsa {
- u32 offset;
- u32 reserved;
+ __le32 offset;
+ __le32 reserved;
u8 data[];
} __packed;
diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c
index c310f1fd3db0..a979d0b484d5 100644
--- a/drivers/cxl/mem.c
+++ b/drivers/cxl/mem.c
@@ -29,6 +29,7 @@ static int create_endpoint(struct cxl_memdev *cxlmd,
{
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_port *endpoint;
+ int rc;
endpoint = devm_cxl_add_port(&parent_port->dev, &cxlmd->dev,
cxlds->component_reg_phys, parent_port);
@@ -37,13 +38,17 @@ static int create_endpoint(struct cxl_memdev *cxlmd,
dev_dbg(&cxlmd->dev, "add: %s\n", dev_name(&endpoint->dev));
+ rc = cxl_endpoint_autoremove(cxlmd, endpoint);
+ if (rc)
+ return rc;
+
if (!endpoint->dev.driver) {
dev_err(&cxlmd->dev, "%s failed probe\n",
dev_name(&endpoint->dev));
return -ENXIO;
}
- return cxl_endpoint_autoremove(cxlmd, endpoint);
+ return 0;
}
static void enable_suspend(void *data)
diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c
index bbeef91e637e..0aaa70b4e0f7 100644
--- a/drivers/cxl/pmem.c
+++ b/drivers/cxl/pmem.c
@@ -108,8 +108,8 @@ static int cxl_pmem_get_config_data(struct cxl_dev_state *cxlds,
return -EINVAL;
get_lsa = (struct cxl_mbox_get_lsa) {
- .offset = cmd->in_offset,
- .length = cmd->in_length,
+ .offset = cpu_to_le32(cmd->in_offset),
+ .length = cpu_to_le32(cmd->in_length),
};
rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_LSA, &get_lsa,
@@ -139,7 +139,7 @@ static int cxl_pmem_set_config_data(struct cxl_dev_state *cxlds,
return -ENOMEM;
*set_lsa = (struct cxl_mbox_set_lsa) {
- .offset = cmd->in_offset,
+ .offset = cpu_to_le32(cmd->in_offset),
};
memcpy(set_lsa->data, cmd->in_buf, cmd->in_length);
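The cxlmem.h/pmem.c pair is an endianness fix: CXL mailbox payloads are little-endian on the wire, so the fields are typed __le32 and filled through cpu_to_le32(), a no-op on little-endian hosts and a byte swap on big-endian ones (and sparse can now flag bare stores). Sketch of the convention:

#include <linux/types.h>
#include <asm/byteorder.h>

struct mbox_get_lsa {           /* wire format: little-endian */
        __le32 offset;
        __le32 length;
} __packed;

static void fill_get_lsa(struct mbox_get_lsa *cmd, u32 off, u32 len)
{
        cmd->offset = cpu_to_le32(off);
        cmd->length = cpu_to_le32(len);
}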
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 01474daf4548..9602141bb8ec 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -123,7 +123,7 @@ void devfreq_get_freq_range(struct devfreq *devfreq,
unsigned long *min_freq,
unsigned long *max_freq)
{
- unsigned long *freq_table = devfreq->profile->freq_table;
+ unsigned long *freq_table = devfreq->freq_table;
s32 qos_min_freq, qos_max_freq;
lockdep_assert_held(&devfreq->lock);
@@ -133,11 +133,11 @@ void devfreq_get_freq_range(struct devfreq *devfreq,
* The devfreq drivers can initialize this in either ascending or
* descending order and devfreq core supports both.
*/
- if (freq_table[0] < freq_table[devfreq->profile->max_state - 1]) {
+ if (freq_table[0] < freq_table[devfreq->max_state - 1]) {
*min_freq = freq_table[0];
- *max_freq = freq_table[devfreq->profile->max_state - 1];
+ *max_freq = freq_table[devfreq->max_state - 1];
} else {
- *min_freq = freq_table[devfreq->profile->max_state - 1];
+ *min_freq = freq_table[devfreq->max_state - 1];
*max_freq = freq_table[0];
}
@@ -169,8 +169,8 @@ static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
{
int lev;
- for (lev = 0; lev < devfreq->profile->max_state; lev++)
- if (freq == devfreq->profile->freq_table[lev])
+ for (lev = 0; lev < devfreq->max_state; lev++)
+ if (freq == devfreq->freq_table[lev])
return lev;
return -EINVAL;
@@ -178,7 +178,6 @@ static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
static int set_freq_table(struct devfreq *devfreq)
{
- struct devfreq_dev_profile *profile = devfreq->profile;
struct dev_pm_opp *opp;
unsigned long freq;
int i, count;
@@ -188,25 +187,22 @@ static int set_freq_table(struct devfreq *devfreq)
if (count <= 0)
return -EINVAL;
- profile->max_state = count;
- profile->freq_table = devm_kcalloc(devfreq->dev.parent,
- profile->max_state,
- sizeof(*profile->freq_table),
- GFP_KERNEL);
- if (!profile->freq_table) {
- profile->max_state = 0;
+ devfreq->max_state = count;
+ devfreq->freq_table = devm_kcalloc(devfreq->dev.parent,
+ devfreq->max_state,
+ sizeof(*devfreq->freq_table),
+ GFP_KERNEL);
+ if (!devfreq->freq_table)
return -ENOMEM;
- }
- for (i = 0, freq = 0; i < profile->max_state; i++, freq++) {
+ for (i = 0, freq = 0; i < devfreq->max_state; i++, freq++) {
opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &freq);
if (IS_ERR(opp)) {
- devm_kfree(devfreq->dev.parent, profile->freq_table);
- profile->max_state = 0;
+ devm_kfree(devfreq->dev.parent, devfreq->freq_table);
return PTR_ERR(opp);
}
dev_pm_opp_put(opp);
- profile->freq_table[i] = freq;
+ devfreq->freq_table[i] = freq;
}
return 0;
@@ -246,7 +242,7 @@ int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
if (lev != prev_lev) {
devfreq->stats.trans_table[
- (prev_lev * devfreq->profile->max_state) + lev]++;
+ (prev_lev * devfreq->max_state) + lev]++;
devfreq->stats.total_trans++;
}
@@ -835,6 +831,9 @@ struct devfreq *devfreq_add_device(struct device *dev,
if (err < 0)
goto err_dev;
mutex_lock(&devfreq->lock);
+ } else {
+ devfreq->freq_table = devfreq->profile->freq_table;
+ devfreq->max_state = devfreq->profile->max_state;
}
devfreq->scaling_min_freq = find_available_min_freq(devfreq);
@@ -870,8 +869,8 @@ struct devfreq *devfreq_add_device(struct device *dev,
devfreq->stats.trans_table = devm_kzalloc(&devfreq->dev,
array3_size(sizeof(unsigned int),
- devfreq->profile->max_state,
- devfreq->profile->max_state),
+ devfreq->max_state,
+ devfreq->max_state),
GFP_KERNEL);
if (!devfreq->stats.trans_table) {
mutex_unlock(&devfreq->lock);
@@ -880,7 +879,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
}
devfreq->stats.time_in_state = devm_kcalloc(&devfreq->dev,
- devfreq->profile->max_state,
+ devfreq->max_state,
sizeof(*devfreq->stats.time_in_state),
GFP_KERNEL);
if (!devfreq->stats.time_in_state) {
@@ -932,8 +931,9 @@ struct devfreq *devfreq_add_device(struct device *dev,
err = devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_START,
NULL);
if (err) {
- dev_err(dev, "%s: Unable to start governor for the device\n",
- __func__);
+ dev_err_probe(dev, err,
+ "%s: Unable to start governor for the device\n",
+ __func__);
goto err_init;
}
create_sysfs_files(devfreq, devfreq->governor);
@@ -1665,9 +1665,9 @@ static ssize_t available_frequencies_show(struct device *d,
mutex_lock(&df->lock);
- for (i = 0; i < df->profile->max_state; i++)
+ for (i = 0; i < df->max_state; i++)
count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
- "%lu ", df->profile->freq_table[i]);
+ "%lu ", df->freq_table[i]);
mutex_unlock(&df->lock);
/* Truncate the trailing space */
@@ -1690,7 +1690,7 @@ static ssize_t trans_stat_show(struct device *dev,
if (!df->profile)
return -EINVAL;
- max_state = df->profile->max_state;
+ max_state = df->max_state;
if (max_state == 0)
return sprintf(buf, "Not Supported.\n");
@@ -1707,19 +1707,17 @@ static ssize_t trans_stat_show(struct device *dev,
len += sprintf(buf + len, " :");
for (i = 0; i < max_state; i++)
len += sprintf(buf + len, "%10lu",
- df->profile->freq_table[i]);
+ df->freq_table[i]);
len += sprintf(buf + len, " time(ms)\n");
for (i = 0; i < max_state; i++) {
- if (df->profile->freq_table[i]
- == df->previous_freq) {
+ if (df->freq_table[i] == df->previous_freq)
len += sprintf(buf + len, "*");
- } else {
+ else
len += sprintf(buf + len, " ");
- }
- len += sprintf(buf + len, "%10lu:",
- df->profile->freq_table[i]);
+
+ len += sprintf(buf + len, "%10lu:", df->freq_table[i]);
for (j = 0; j < max_state; j++)
len += sprintf(buf + len, "%10u",
df->stats.trans_table[(i * max_state) + j]);
@@ -1743,7 +1741,7 @@ static ssize_t trans_stat_store(struct device *dev,
if (!df->profile)
return -EINVAL;
- if (df->profile->max_state == 0)
+ if (df->max_state == 0)
return count;
err = kstrtoint(buf, 10, &value);
@@ -1751,11 +1749,11 @@ static ssize_t trans_stat_store(struct device *dev,
return -EINVAL;
mutex_lock(&df->lock);
- memset(df->stats.time_in_state, 0, (df->profile->max_state *
+ memset(df->stats.time_in_state, 0, (df->max_state *
sizeof(*df->stats.time_in_state)));
memset(df->stats.trans_table, 0, array3_size(sizeof(unsigned int),
- df->profile->max_state,
- df->profile->max_state));
+ df->max_state,
+ df->max_state));
df->stats.total_trans = 0;
df->stats.last_update = get_jiffies_64();
mutex_unlock(&df->lock);
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
index 9b849d781116..a443e7c42daf 100644
--- a/drivers/devfreq/event/exynos-ppmu.c
+++ b/drivers/devfreq/event/exynos-ppmu.c
@@ -519,15 +519,19 @@ static int of_get_devfreq_events(struct device_node *np,
count = of_get_child_count(events_np);
desc = devm_kcalloc(dev, count, sizeof(*desc), GFP_KERNEL);
- if (!desc)
+ if (!desc) {
+ of_node_put(events_np);
return -ENOMEM;
+ }
info->num_events = count;
of_id = of_match_device(exynos_ppmu_id_match, dev);
if (of_id)
info->ppmu_type = (enum exynos_ppmu_type)of_id->data;
- else
+ else {
+ of_node_put(events_np);
return -EINVAL;
+ }
j = 0;
for_each_child_of_node(events_np, node) {
diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c
index e689101abc93..f7dcc44f9414 100644
--- a/drivers/devfreq/exynos-bus.c
+++ b/drivers/devfreq/exynos-bus.c
@@ -447,9 +447,9 @@ static int exynos_bus_probe(struct platform_device *pdev)
}
}
- max_state = bus->devfreq->profile->max_state;
- min_freq = (bus->devfreq->profile->freq_table[0] / 1000);
- max_freq = (bus->devfreq->profile->freq_table[max_state - 1] / 1000);
+ max_state = bus->devfreq->max_state;
+ min_freq = (bus->devfreq->freq_table[0] / 1000);
+ max_freq = (bus->devfreq->freq_table[max_state - 1] / 1000);
pr_info("exynos-bus: new bus device registered: %s (%6ld KHz ~ %6ld KHz)\n",
dev_name(dev), min_freq, max_freq);
diff --git a/drivers/devfreq/governor_passive.c b/drivers/devfreq/governor_passive.c
index 72c67979ebe1..953cf9a1e9f7 100644
--- a/drivers/devfreq/governor_passive.c
+++ b/drivers/devfreq/governor_passive.c
@@ -1,4 +1,4 @@
- // SPDX-License-Identifier: GPL-2.0-only
+// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/drivers/devfreq/governor_passive.c
*
@@ -14,10 +14,9 @@
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/devfreq.h>
+#include <linux/units.h>
#include "governor.h"
-#define HZ_PER_KHZ 1000
-
static struct devfreq_cpu_data *
get_parent_cpu_data(struct devfreq_passive_data *p_data,
struct cpufreq_policy *policy)
@@ -34,6 +33,20 @@ get_parent_cpu_data(struct devfreq_passive_data *p_data,
return NULL;
}
+static void delete_parent_cpu_data(struct devfreq_passive_data *p_data)
+{
+ struct devfreq_cpu_data *parent_cpu_data, *tmp;
+
+ list_for_each_entry_safe(parent_cpu_data, tmp, &p_data->cpu_data_list, node) {
+ list_del(&parent_cpu_data->node);
+
+ if (parent_cpu_data->opp_table)
+ dev_pm_opp_put_opp_table(parent_cpu_data->opp_table);
+
+ kfree(parent_cpu_data);
+ }
+}
+
static unsigned long get_target_freq_by_required_opp(struct device *p_dev,
struct opp_table *p_opp_table,
struct opp_table *opp_table,
@@ -131,18 +144,18 @@ static int get_target_freq_with_devfreq(struct devfreq *devfreq,
goto out;
/* Use interpolation if required opps is not available */
- for (i = 0; i < parent_devfreq->profile->max_state; i++)
- if (parent_devfreq->profile->freq_table[i] == *freq)
+ for (i = 0; i < parent_devfreq->max_state; i++)
+ if (parent_devfreq->freq_table[i] == *freq)
break;
- if (i == parent_devfreq->profile->max_state)
+ if (i == parent_devfreq->max_state)
return -EINVAL;
- if (i < devfreq->profile->max_state) {
- child_freq = devfreq->profile->freq_table[i];
+ if (i < devfreq->max_state) {
+ child_freq = devfreq->freq_table[i];
} else {
- count = devfreq->profile->max_state;
- child_freq = devfreq->profile->freq_table[count - 1];
+ count = devfreq->max_state;
+ child_freq = devfreq->freq_table[count - 1];
}
out:
@@ -222,8 +235,7 @@ static int cpufreq_passive_unregister_notifier(struct devfreq *devfreq)
{
struct devfreq_passive_data *p_data
= (struct devfreq_passive_data *)devfreq->data;
- struct devfreq_cpu_data *parent_cpu_data;
- int cpu, ret = 0;
+ int ret;
if (p_data->nb.notifier_call) {
ret = cpufreq_unregister_notifier(&p_data->nb,
@@ -232,27 +244,9 @@ static int cpufreq_passive_unregister_notifier(struct devfreq *devfreq)
return ret;
}
- for_each_possible_cpu(cpu) {
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
- if (!policy) {
- ret = -EINVAL;
- continue;
- }
-
- parent_cpu_data = get_parent_cpu_data(p_data, policy);
- if (!parent_cpu_data) {
- cpufreq_cpu_put(policy);
- continue;
- }
+ delete_parent_cpu_data(p_data);
- list_del(&parent_cpu_data->node);
- if (parent_cpu_data->opp_table)
- dev_pm_opp_put_opp_table(parent_cpu_data->opp_table);
- kfree(parent_cpu_data);
- cpufreq_cpu_put(policy);
- }
-
- return ret;
+ return 0;
}
static int cpufreq_passive_register_notifier(struct devfreq *devfreq)
@@ -336,7 +330,6 @@ err_free_cpu_data:
err_put_policy:
cpufreq_cpu_put(policy);
err:
- WARN_ON(cpufreq_passive_unregister_notifier(devfreq));
return ret;
}
@@ -407,8 +400,7 @@ static int devfreq_passive_event_handler(struct devfreq *devfreq,
if (!p_data)
return -EINVAL;
- if (!p_data->this)
- p_data->this = devfreq;
+ p_data->this = devfreq;
switch (event) {
case DEVFREQ_GOV_START:
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 0cce6e4ec946..205acb2c744d 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -343,7 +343,7 @@ void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
if (old->context != context)
continue;
- dma_resv_list_set(list, i, replacement, usage);
+ dma_resv_list_set(list, i, dma_fence_get(replacement), usage);
dma_fence_put(old);
}
}
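The dma-resv fix applies the basic refcount rule: a container takes its own reference on any pointer it stores; it must not consume the caller's. Sketch of the rule for a single slot (locking/RCU deliberately elided):

#include <linux/dma-fence.h>

static void slot_replace(struct dma_fence **slot, struct dma_fence *fence)
{
        struct dma_fence *old = *slot;

        *slot = dma_fence_get(fence);   /* the slot owns one reference */
        dma_fence_put(old);             /* release the displaced entry */
}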
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index e7330684d3b8..9631f2fd2faf 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -32,8 +32,11 @@ static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct udmabuf *ubuf = vma->vm_private_data;
+ pgoff_t pgoff = vmf->pgoff;
- vmf->page = ubuf->pages[vmf->pgoff];
+ if (pgoff >= ubuf->pagecount)
+ return VM_FAULT_SIGBUS;
+ vmf->page = ubuf->pages[pgoff];
get_page(vmf->page);
return 0;
}
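The udmabuf fix is the standard hardening for a page-array-backed fault handler: the VMA can outgrow the backing array (e.g. via mremap()), so the fault offset must be validated before indexing. Sketch with a hypothetical mybuf type:

#include <linux/mm.h>

struct mybuf {                  /* hypothetical backing store */
        pgoff_t pagecount;
        struct page **pages;
};

static vm_fault_t mybuf_vm_fault(struct vm_fault *vmf)
{
        struct mybuf *buf = vmf->vma->vm_private_data;
        pgoff_t pgoff = vmf->pgoff;

        if (pgoff >= buf->pagecount)    /* beyond the backing array */
                return VM_FAULT_SIGBUS;

        vmf->page = buf->pages[pgoff];
        get_page(vmf->page);            /* fault path hands out a page ref */
        return 0;
}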
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 3e9d726504e2..7b3e6030f7b4 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -1900,6 +1900,11 @@ static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
for (i = 0; i < init_nr_desc_per_channel; i++) {
desc = at_xdmac_alloc_desc(chan, GFP_KERNEL);
if (!desc) {
+ if (i == 0) {
+ dev_warn(chan2dev(chan),
+ "can't allocate any descriptors\n");
+ return -EIO;
+ }
dev_warn(chan2dev(chan),
"only %d descriptors have been allocated\n", i);
break;
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 0a2168a4ccb0..f696246f57fd 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -675,16 +675,10 @@ static int dmatest_func(void *data)
/*
* src and dst buffers are freed by ourselves below
*/
- if (params->polled) {
+ if (params->polled)
flags = DMA_CTRL_ACK;
- } else {
- if (dma_has_cap(DMA_INTERRUPT, dev->cap_mask)) {
- flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
- } else {
- pr_err("Channel does not support interrupt!\n");
- goto err_pq_array;
- }
- }
+ else
+ flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
ktime = ktime_get();
while (!(kthread_should_stop() ||
@@ -912,7 +906,6 @@ error_unmap_continue:
runtime = ktime_to_us(ktime);
ret = 0;
-err_pq_array:
kfree(dma_pq);
err_srcs_array:
kfree(srcs);
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
index e9c9bcb1f5c2..c741da02b67e 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
@@ -1164,8 +1164,9 @@ static int dma_chan_pause(struct dma_chan *dchan)
BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT;
axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
} else {
- val = BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT |
- BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT;
+ val = axi_dma_ioread32(chan->chip, DMAC_CHSUSPREG);
+ val |= BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT |
+ BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT;
axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, val);
}
@@ -1190,12 +1191,13 @@ static inline void axi_chan_resume(struct axi_dma_chan *chan)
{
u32 val;
- val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
if (chan->chip->dw->hdata->reg_map_8_channels) {
+ val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT);
val |= (BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT);
axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
} else {
+ val = axi_dma_ioread32(chan->chip, DMAC_CHSUSPREG);
val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT);
val |= (BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT);
axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, val);
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index ff0ea60051f0..5a8cc52c1abf 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -716,10 +716,7 @@ static void idxd_device_wqs_clear_state(struct idxd_device *idxd)
struct idxd_wq *wq = idxd->wqs[i];
mutex_lock(&wq->wq_lock);
- if (wq->state == IDXD_WQ_ENABLED) {
- idxd_wq_disable_cleanup(wq);
- wq->state = IDXD_WQ_DISABLED;
- }
+ idxd_wq_disable_cleanup(wq);
idxd_wq_device_reset_cleanup(wq);
mutex_unlock(&wq->wq_lock);
}
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 355fb3ef4cbf..aa3478257ddb 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -512,15 +512,16 @@ static int idxd_probe(struct idxd_device *idxd)
dev_dbg(dev, "IDXD reset complete\n");
if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) {
- if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA))
+ if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA)) {
dev_warn(dev, "Unable to turn on user SVA feature.\n");
- else
+ } else {
set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);
- if (idxd_enable_system_pasid(idxd))
- dev_warn(dev, "No in-kernel DMA with PASID.\n");
- else
- set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
+ if (idxd_enable_system_pasid(idxd))
+ dev_warn(dev, "No in-kernel DMA with PASID.\n");
+ else
+ set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
+ }
} else if (!sva) {
dev_warn(dev, "User forced SVA off via module param.\n");
}
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 8535018ee7a2..f37a276f519e 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -891,7 +891,7 @@ static void sdma_update_channel_loop(struct sdma_channel *sdmac)
* SDMA stops cyclic channel when DMA request triggers a channel and no SDMA
* owned buffer is available (i.e. BD_DONE was set too late).
*/
- if (!is_sdma_channel_enabled(sdmac->sdma, sdmac->channel)) {
+ if (sdmac->desc && !is_sdma_channel_enabled(sdmac->sdma, sdmac->channel)) {
dev_warn(sdmac->sdma->dev, "restart cyclic channel %d\n", sdmac->channel);
sdma_enable_channel(sdmac->sdma, sdmac->channel);
}
@@ -2346,7 +2346,7 @@ MODULE_DESCRIPTION("i.MX SDMA driver");
#if IS_ENABLED(CONFIG_SOC_IMX6Q)
MODULE_FIRMWARE("imx/sdma/sdma-imx6q.bin");
#endif
-#if IS_ENABLED(CONFIG_SOC_IMX7D)
+#if IS_ENABLED(CONFIG_SOC_IMX7D) || IS_ENABLED(CONFIG_SOC_IMX8M)
MODULE_FIRMWARE("imx/sdma/sdma-imx7d.bin");
#endif
MODULE_LICENSE("GPL");
diff --git a/drivers/dma/lgm/lgm-dma.c b/drivers/dma/lgm/lgm-dma.c
index efe8bd3a0e2a..9b9184f964be 100644
--- a/drivers/dma/lgm/lgm-dma.c
+++ b/drivers/dma/lgm/lgm-dma.c
@@ -1593,11 +1593,12 @@ static int intel_ldma_probe(struct platform_device *pdev)
d->core_clk = devm_clk_get_optional(dev, NULL);
if (IS_ERR(d->core_clk))
return PTR_ERR(d->core_clk);
- clk_prepare_enable(d->core_clk);
d->rst = devm_reset_control_get_optional(dev, NULL);
if (IS_ERR(d->rst))
return PTR_ERR(d->rst);
+
+ clk_prepare_enable(d->core_clk);
reset_control_deassert(d->rst);
ret = devm_add_action_or_reset(dev, ldma_clk_disable, d);
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 858400e42ec0..09915a5cba3e 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2589,7 +2589,7 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
/* If the DMAC pool is empty, alloc new */
if (!desc) {
- DEFINE_SPINLOCK(lock);
+ static DEFINE_SPINLOCK(lock);
LIST_HEAD(pool);
if (!add_desc(&pool, &lock, GFP_ATOMIC, 1))
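The pl330 one-liner deserves a note: DEFINE_SPINLOCK inside a function declares an automatic (stack) lock, a fresh instance per call that serializes nothing across callers and vanishes while descriptors still point at it. Adding static gives one shared instance with program lifetime:

#include <linux/spinlock.h>

static void fill_pool(void)
{
        /* One lock shared by all invocations; without "static" each
         * call would get its own short-lived stack copy. */
        static DEFINE_SPINLOCK(lock);

        spin_lock(&lock);
        /* ... manipulate the shared descriptor pool ... */
        spin_unlock(&lock);
}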
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index 87f6ca1541cf..2ff787df513e 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -558,14 +558,6 @@ static int bam_alloc_chan(struct dma_chan *chan)
return 0;
}
-static int bam_pm_runtime_get_sync(struct device *dev)
-{
- if (pm_runtime_enabled(dev))
- return pm_runtime_get_sync(dev);
-
- return 0;
-}
-
/**
* bam_free_chan - Frees dma resources associated with specific channel
* @chan: specified channel
@@ -581,7 +573,7 @@ static void bam_free_chan(struct dma_chan *chan)
unsigned long flags;
int ret;
- ret = bam_pm_runtime_get_sync(bdev->dev);
+ ret = pm_runtime_get_sync(bdev->dev);
if (ret < 0)
return;
@@ -784,7 +776,7 @@ static int bam_pause(struct dma_chan *chan)
unsigned long flag;
int ret;
- ret = bam_pm_runtime_get_sync(bdev->dev);
+ ret = pm_runtime_get_sync(bdev->dev);
if (ret < 0)
return ret;
@@ -810,7 +802,7 @@ static int bam_resume(struct dma_chan *chan)
unsigned long flag;
int ret;
- ret = bam_pm_runtime_get_sync(bdev->dev);
+ ret = pm_runtime_get_sync(bdev->dev);
if (ret < 0)
return ret;
@@ -919,7 +911,7 @@ static irqreturn_t bam_dma_irq(int irq, void *data)
if (srcs & P_IRQ)
tasklet_schedule(&bdev->task);
- ret = bam_pm_runtime_get_sync(bdev->dev);
+ ret = pm_runtime_get_sync(bdev->dev);
if (ret < 0)
return IRQ_NONE;
@@ -1037,7 +1029,7 @@ static void bam_start_dma(struct bam_chan *bchan)
if (!vd)
return;
- ret = bam_pm_runtime_get_sync(bdev->dev);
+ ret = pm_runtime_get_sync(bdev->dev);
if (ret < 0)
return;
@@ -1374,11 +1366,6 @@ static int bam_dma_probe(struct platform_device *pdev)
if (ret)
goto err_unregister_dma;
- if (!bdev->bamclk) {
- pm_runtime_disable(&pdev->dev);
- return 0;
- }
-
pm_runtime_irq_safe(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, BAM_DMA_AUTOSUSPEND_DELAY);
pm_runtime_use_autosuspend(&pdev->dev);
@@ -1462,10 +1449,8 @@ static int __maybe_unused bam_dma_suspend(struct device *dev)
{
struct bam_device *bdev = dev_get_drvdata(dev);
- if (bdev->bamclk) {
- pm_runtime_force_suspend(dev);
- clk_unprepare(bdev->bamclk);
- }
+ pm_runtime_force_suspend(dev);
+ clk_unprepare(bdev->bamclk);
return 0;
}
@@ -1475,13 +1460,11 @@ static int __maybe_unused bam_dma_resume(struct device *dev)
struct bam_device *bdev = dev_get_drvdata(dev);
int ret;
- if (bdev->bamclk) {
- ret = clk_prepare(bdev->bamclk);
- if (ret)
- return ret;
+ ret = clk_prepare(bdev->bamclk);
+ if (ret)
+ return ret;
- pm_runtime_force_resume(dev);
- }
+ pm_runtime_force_resume(dev);
return 0;
}
diff --git a/drivers/dma/ti/dma-crossbar.c b/drivers/dma/ti/dma-crossbar.c
index 71d24fc07c00..f744ddbbbad7 100644
--- a/drivers/dma/ti/dma-crossbar.c
+++ b/drivers/dma/ti/dma-crossbar.c
@@ -245,6 +245,7 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
if (dma_spec->args[0] >= xbar->xbar_requests) {
dev_err(&pdev->dev, "Invalid XBAR request number: %d\n",
dma_spec->args[0]);
+ put_device(&pdev->dev);
return ERR_PTR(-EINVAL);
}
@@ -252,12 +253,14 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
if (!dma_spec->np) {
dev_err(&pdev->dev, "Can't get DMA master\n");
+ put_device(&pdev->dev);
return ERR_PTR(-EINVAL);
}
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (!map) {
of_node_put(dma_spec->np);
+ put_device(&pdev->dev);
return ERR_PTR(-ENOMEM);
}
@@ -268,6 +271,8 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
mutex_unlock(&xbar->mutex);
dev_err(&pdev->dev, "Run out of free DMA requests\n");
kfree(map);
+ of_node_put(dma_spec->np);
+ put_device(&pdev->dev);
return ERR_PTR(-ENOMEM);
}
set_bit(map->xbar_out, xbar->dma_inuse);
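The crossbar hunks add the missing put_device() (plus a second of_node_put()) so every error exit releases the references taken earlier in the function. A goto ladder keeps such unwinds symmetric; hypothetical sketch:

#include <linux/err.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

static void *route_allocate(struct platform_device *pdev,
                            struct device_node *master)
{
        void *map;

        if (!master) {                  /* device ref from lookup is held */
                map = ERR_PTR(-EINVAL);
                goto err_put_dev;
        }

        map = kzalloc(64, GFP_KERNEL);  /* placeholder payload */
        if (map)
                return map;

        map = ERR_PTR(-ENOMEM);
        of_node_put(master);            /* unwind in reverse order */
err_put_dev:
        put_device(&pdev->dev);
        return map;
}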
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index c9fe5903725a..9c89f7d53e99 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -1211,7 +1211,7 @@ static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2;
struct fw_card *card = client->device->card;
struct timespec64 ts = {0, 0};
- u32 cycle_time;
+ u32 cycle_time = 0;
int ret = 0;
local_irq_disable();
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index 90ed8fdaba75..adddd8c45d0c 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -372,8 +372,7 @@ static ssize_t rom_index_show(struct device *dev,
struct fw_device *device = fw_device(dev->parent);
struct fw_unit *unit = fw_unit(dev);
- return snprintf(buf, PAGE_SIZE, "%d\n",
- (int)(unit->directory - device->config_rom));
+ return sysfs_emit(buf, "%td\n", unit->directory - device->config_rom);
}
static struct device_attribute fw_unit_attributes[] = {
@@ -403,8 +402,7 @@ static ssize_t guid_show(struct device *dev,
int ret;
down_read(&fw_device_rwsem);
- ret = snprintf(buf, PAGE_SIZE, "0x%08x%08x\n",
- device->config_rom[3], device->config_rom[4]);
+ ret = sysfs_emit(buf, "0x%08x%08x\n", device->config_rom[3], device->config_rom[4]);
up_read(&fw_device_rwsem);
return ret;
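The firewire conversion swaps snprintf(buf, PAGE_SIZE, ...) for sysfs_emit(), which encodes the sysfs contract (one page, page-aligned buffer) and warns on misuse. Typical show() shape:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t guid_show(struct device *dev,
                         struct device_attribute *attr, char *buf)
{
        u64 guid = 0x0011223344556677ULL;       /* placeholder value */

        /* PAGE_SIZE bounding is handled inside sysfs_emit() */
        return sysfs_emit(buf, "0x%016llx\n", guid);
}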
diff --git a/drivers/firmware/arm_scmi/base.c b/drivers/firmware/arm_scmi/base.c
index 20fba7370f4e..a52f084a6a87 100644
--- a/drivers/firmware/arm_scmi/base.c
+++ b/drivers/firmware/arm_scmi/base.c
@@ -36,7 +36,7 @@ struct scmi_msg_resp_base_attributes {
struct scmi_msg_resp_base_discover_agent {
__le32 agent_id;
- u8 name[SCMI_MAX_STR_SIZE];
+ u8 name[SCMI_SHORT_NAME_MAX_SIZE];
};
@@ -119,7 +119,7 @@ scmi_base_vendor_id_get(const struct scmi_protocol_handle *ph, bool sub_vendor)
ret = ph->xops->do_xfer(ph, t);
if (!ret)
- memcpy(vendor_id, t->rx.buf, size);
+ strscpy(vendor_id, t->rx.buf, size);
ph->xops->xfer_put(ph, t);
@@ -221,11 +221,17 @@ scmi_base_implementation_list_get(const struct scmi_protocol_handle *ph,
calc_list_sz = (1 + (loop_num_ret - 1) / sizeof(u32)) *
sizeof(u32);
if (calc_list_sz != real_list_sz) {
- dev_err(dev,
- "Malformed reply - real_sz:%zd calc_sz:%u\n",
- real_list_sz, calc_list_sz);
- ret = -EPROTO;
- break;
+ dev_warn(dev,
+ "Malformed reply - real_sz:%zd calc_sz:%u (loop_num_ret:%d)\n",
+ real_list_sz, calc_list_sz, loop_num_ret);
+ /*
+ * Bail out if the expected list size is bigger than the
+ * total payload size of the received reply.
+ */
+ if (calc_list_sz > real_list_sz) {
+ ret = -EPROTO;
+ break;
+ }
}
for (loop = 0; loop < loop_num_ret; loop++)
@@ -270,7 +276,7 @@ static int scmi_base_discover_agent_get(const struct scmi_protocol_handle *ph,
ret = ph->xops->do_xfer(ph, t);
if (!ret) {
agent_info = t->rx.buf;
- strlcpy(name, agent_info->name, SCMI_MAX_STR_SIZE);
+ strscpy(name, agent_info->name, SCMI_SHORT_NAME_MAX_SIZE);
}
ph->xops->xfer_put(ph, t);
@@ -369,7 +375,7 @@ static int scmi_base_protocol_init(const struct scmi_protocol_handle *ph)
int id, ret;
u8 *prot_imp;
u32 version;
- char name[SCMI_MAX_STR_SIZE];
+ char name[SCMI_SHORT_NAME_MAX_SIZE];
struct device *dev = ph->dev;
struct scmi_revision_info *rev = scmi_revision_area_get(ph);
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
index f6fe723ab869..d4e23101448a 100644
--- a/drivers/firmware/arm_scmi/bus.c
+++ b/drivers/firmware/arm_scmi/bus.c
@@ -181,7 +181,7 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol,
return NULL;
}
- id = ida_simple_get(&scmi_bus_id, 1, 0, GFP_KERNEL);
+ id = ida_alloc_min(&scmi_bus_id, 1, GFP_KERNEL);
if (id < 0) {
kfree_const(scmi_dev->name);
kfree(scmi_dev);
@@ -204,7 +204,7 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol,
put_dev:
kfree_const(scmi_dev->name);
put_device(&scmi_dev->dev);
- ida_simple_remove(&scmi_bus_id, id);
+ ida_free(&scmi_bus_id, id);
return NULL;
}
@@ -212,7 +212,7 @@ void scmi_device_destroy(struct scmi_device *scmi_dev)
{
kfree_const(scmi_dev->name);
scmi_handle_put(scmi_dev->handle);
- ida_simple_remove(&scmi_bus_id, scmi_dev->id);
+ ida_free(&scmi_bus_id, scmi_dev->id);
device_unregister(&scmi_dev->dev);
}
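The bus.c hunk is part of the tree-wide move away from the deprecated ida_simple_*() wrappers: ida_alloc_min(ida, 1, gfp) returns the smallest free ID >= 1 (bounded by INT_MAX) or a negative errno, and ida_free() releases it. A sketch of the pattern in isolation, assuming kernel context; the example_* names are hypothetical:

#include <linux/idr.h>

static DEFINE_IDA(example_ida);

static int example_id_get(void)
{
	/* Smallest free ID >= 1; negative errno on failure. */
	return ida_alloc_min(&example_ida, 1, GFP_KERNEL);
}

static void example_id_put(int id)
{
	ida_free(&example_ida, id);
}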
diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
index 4d36a9a133d1..3ed7ae0d6781 100644
--- a/drivers/firmware/arm_scmi/clock.c
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -153,7 +153,7 @@ static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph,
if (!ret) {
u32 latency = 0;
attributes = le32_to_cpu(attr->attributes);
- strlcpy(clk->name, attr->name, SCMI_MAX_STR_SIZE);
+ strscpy(clk->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE);
/* clock_enable_latency field is present only since SCMI v3.1 */
if (PROTOCOL_REV_MAJOR(version) >= 0x2)
latency = le32_to_cpu(attr->clock_enable_latency);
@@ -194,6 +194,7 @@ static int rate_cmp_func(const void *_r1, const void *_r2)
}
struct scmi_clk_ipriv {
+ struct device *dev;
u32 clk_id;
struct scmi_clock_info *clk;
};
@@ -223,6 +224,29 @@ iter_clk_describe_update_state(struct scmi_iterator_state *st,
st->num_returned = NUM_RETURNED(flags);
p->clk->rate_discrete = RATE_DISCRETE(flags);
+ /* Warn about out-of-spec replies ... */
+ if (!p->clk->rate_discrete &&
+ (st->num_returned != 3 || st->num_remaining != 0)) {
+ dev_warn(p->dev,
+ "Out-of-spec CLOCK_DESCRIBE_RATES reply for %s - returned:%d remaining:%d rx_len:%zd\n",
+ p->clk->name, st->num_returned, st->num_remaining,
+ st->rx_len);
+
+ /*
+ * A known quirk: a triplet is returned but num_returned != 3.
+ * Check for a safe payload size and fix.
+ */
+ if (st->num_returned != 3 && st->num_remaining == 0 &&
+ st->rx_len == sizeof(*r) + sizeof(__le32) * 2 * 3) {
+ st->num_returned = 3;
+ st->num_remaining = 0;
+ } else {
+ dev_err(p->dev,
+ "Cannot fix out-of-spec reply !\n");
+ return -EPROTO;
+ }
+ }
+
return 0;
}
@@ -255,7 +279,6 @@ iter_clk_describe_process_response(const struct scmi_protocol_handle *ph,
*rate = RATE_TO_U64(r->rate[st->loop_idx]);
p->clk->list.num_rates++;
- //XXX dev_dbg(ph->dev, "Rate %llu Hz\n", *rate);
}
return ret;
@@ -266,9 +289,7 @@ scmi_clock_describe_rates_get(const struct scmi_protocol_handle *ph, u32 clk_id,
struct scmi_clock_info *clk)
{
int ret;
-
void *iter;
- struct scmi_msg_clock_describe_rates *msg;
struct scmi_iterator_ops ops = {
.prepare_message = iter_clk_describe_prepare_message,
.update_state = iter_clk_describe_update_state,
@@ -277,11 +298,13 @@ scmi_clock_describe_rates_get(const struct scmi_protocol_handle *ph, u32 clk_id,
struct scmi_clk_ipriv cpriv = {
.clk_id = clk_id,
.clk = clk,
+ .dev = ph->dev,
};
iter = ph->hops->iter_response_init(ph, &ops, SCMI_MAX_NUM_RATES,
CLOCK_DESCRIBE_RATES,
- sizeof(*msg), &cpriv);
+ sizeof(struct scmi_msg_clock_describe_rates),
+ &cpriv);
if (IS_ERR(iter))
return PTR_ERR(iter);
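The quirk detection above hinges on an exact payload-size match. Assuming the reply layout is a single __le32 flags word followed by (low, high) 32-bit rate pairs, a min/max/step triplet reply measures 4 + 3 * 8 = 28 bytes; a hypothetical mirror of that size check:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical mirror of the SCMI describe-rates reply layout. */
struct rates_resp {
	uint32_t num_rates_flags;
	struct { uint32_t lo, hi; } rate[];
};

/* Exact size of a range triplet reply: header + 3 (lo, hi) pairs. */
#define TRIPLET_RX_LEN (sizeof(struct rates_resp) + 2 * sizeof(uint32_t) * 3)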
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index c1922bd650ae..8b7ac6663d57 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -1223,6 +1223,7 @@ static int scmi_iterator_run(void *iter)
if (ret)
break;
+ st->rx_len = i->t->rx.len;
ret = iops->update_state(st, i->resp, i->priv);
if (ret)
break;
diff --git a/drivers/firmware/arm_scmi/optee.c b/drivers/firmware/arm_scmi/optee.c
index b503c22cfd32..8abace56b958 100644
--- a/drivers/firmware/arm_scmi/optee.c
+++ b/drivers/firmware/arm_scmi/optee.c
@@ -117,6 +117,7 @@ struct scmi_optee_channel {
u32 channel_id;
u32 tee_session;
u32 caps;
+ u32 rx_len;
struct mutex mu;
struct scmi_chan_info *cinfo;
union {
@@ -302,6 +303,9 @@ static int invoke_process_msg_channel(struct scmi_optee_channel *channel, size_t
return -EIO;
}
+ /* Save response size */
+ channel->rx_len = param[2].u.memref.size;
+
return 0;
}
@@ -353,6 +357,7 @@ static int setup_dynamic_shmem(struct device *dev, struct scmi_optee_channel *ch
shbuf = tee_shm_get_va(channel->tee_shm, 0);
memset(shbuf, 0, msg_size);
channel->req.msg = shbuf;
+ channel->rx_len = msg_size;
return 0;
}
@@ -508,7 +513,7 @@ static void scmi_optee_fetch_response(struct scmi_chan_info *cinfo,
struct scmi_optee_channel *channel = cinfo->transport_info;
if (channel->tee_shm)
- msg_fetch_response(channel->req.msg, SCMI_OPTEE_MAX_MSG_SIZE, xfer);
+ msg_fetch_response(channel->req.msg, channel->rx_len, xfer);
else
shmem_fetch_response(channel->req.shmem, xfer);
}
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
index 8f4051aca220..bbb0331801ff 100644
--- a/drivers/firmware/arm_scmi/perf.c
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -252,7 +252,7 @@ scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph,
dom_info->mult_factor =
(dom_info->sustained_freq_khz * 1000) /
dom_info->sustained_perf_level;
- strlcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
+ strscpy(dom_info->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE);
}
ph->xops->xfer_put(ph, t);
@@ -332,7 +332,6 @@ scmi_perf_describe_levels_get(const struct scmi_protocol_handle *ph, u32 domain,
{
int ret;
void *iter;
- struct scmi_msg_perf_describe_levels *msg;
struct scmi_iterator_ops ops = {
.prepare_message = iter_perf_levels_prepare_message,
.update_state = iter_perf_levels_update_state,
@@ -345,7 +344,8 @@ scmi_perf_describe_levels_get(const struct scmi_protocol_handle *ph, u32 domain,
iter = ph->hops->iter_response_init(ph, &ops, MAX_OPPS,
PERF_DESCRIBE_LEVELS,
- sizeof(*msg), &ppriv);
+ sizeof(struct scmi_msg_perf_describe_levels),
+ &ppriv);
if (IS_ERR(iter))
return PTR_ERR(iter);
diff --git a/drivers/firmware/arm_scmi/power.c b/drivers/firmware/arm_scmi/power.c
index 964882cc8747..356e83631664 100644
--- a/drivers/firmware/arm_scmi/power.c
+++ b/drivers/firmware/arm_scmi/power.c
@@ -122,7 +122,7 @@ scmi_power_domain_attributes_get(const struct scmi_protocol_handle *ph,
dom_info->state_set_notify = SUPPORTS_STATE_SET_NOTIFY(flags);
dom_info->state_set_async = SUPPORTS_STATE_SET_ASYNC(flags);
dom_info->state_set_sync = SUPPORTS_STATE_SET_SYNC(flags);
- strlcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
+ strscpy(dom_info->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE);
}
ph->xops->xfer_put(ph, t);
diff --git a/drivers/firmware/arm_scmi/protocols.h b/drivers/firmware/arm_scmi/protocols.h
index 73304af5ec4a..51c31379f9b3 100644
--- a/drivers/firmware/arm_scmi/protocols.h
+++ b/drivers/firmware/arm_scmi/protocols.h
@@ -24,8 +24,6 @@
#include <asm/unaligned.h>
-#define SCMI_SHORT_NAME_MAX_SIZE 16
-
#define PROTOCOL_REV_MINOR_MASK GENMASK(15, 0)
#define PROTOCOL_REV_MAJOR_MASK GENMASK(31, 16)
#define PROTOCOL_REV_MAJOR(x) ((u16)(FIELD_GET(PROTOCOL_REV_MAJOR_MASK, (x))))
@@ -181,6 +179,8 @@ struct scmi_protocol_handle {
* @max_resources: Maximum acceptable number of items, configured by the caller
* depending on the underlying resources that it is querying.
* @loop_idx: The iterator loop index in the current multi-part reply.
+ * @rx_len: Size in bytes of the currently processed message; it can be used by
+ * the user of the iterator to verify a reply size.
* @priv: Optional pointer to some additional state-related private data setup
* by the caller during the iterations.
*/
@@ -190,6 +190,7 @@ struct scmi_iterator_state {
unsigned int num_remaining;
unsigned int max_resources;
unsigned int loop_idx;
+ size_t rx_len;
void *priv;
};
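With @rx_len recorded by the core (see the driver.c hunk above), an update_state callback can cross-check the advertised item count against the bytes that actually arrived. A minimal sketch assuming kernel context; example_resp, EXAMPLE_ITEM_SZ and the callback name are hypothetical:

struct example_resp {
	__le32 num_returned;
	__le32 num_remaining;
	u8 items[];
};

#define EXAMPLE_ITEM_SZ 8	/* assumed per-item payload size */

static int iter_example_update_state(struct scmi_iterator_state *st,
				     const void *response, void *priv)
{
	const struct example_resp *r = response;

	st->num_returned = le32_to_cpu(r->num_returned);
	st->num_remaining = le32_to_cpu(r->num_remaining);

	/* Reject replies that claim more items than the payload holds. */
	if (sizeof(*r) + (size_t)st->num_returned * EXAMPLE_ITEM_SZ > st->rx_len)
		return -EPROTO;

	return 0;
}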
diff --git a/drivers/firmware/arm_scmi/reset.c b/drivers/firmware/arm_scmi/reset.c
index a420a9102094..673f3eb498f4 100644
--- a/drivers/firmware/arm_scmi/reset.c
+++ b/drivers/firmware/arm_scmi/reset.c
@@ -116,7 +116,7 @@ scmi_reset_domain_attributes_get(const struct scmi_protocol_handle *ph,
dom_info->latency_us = le32_to_cpu(attr->latency);
if (dom_info->latency_us == U32_MAX)
dom_info->latency_us = 0;
- strlcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
+ strscpy(dom_info->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE);
}
ph->xops->xfer_put(ph, t);
diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c
index 21e0ce89b153..7288c6117838 100644
--- a/drivers/firmware/arm_scmi/sensors.c
+++ b/drivers/firmware/arm_scmi/sensors.c
@@ -338,7 +338,6 @@ static int scmi_sensor_update_intervals(const struct scmi_protocol_handle *ph,
struct scmi_sensor_info *s)
{
void *iter;
- struct scmi_msg_sensor_list_update_intervals *msg;
struct scmi_iterator_ops ops = {
.prepare_message = iter_intervals_prepare_message,
.update_state = iter_intervals_update_state,
@@ -351,22 +350,28 @@ static int scmi_sensor_update_intervals(const struct scmi_protocol_handle *ph,
iter = ph->hops->iter_response_init(ph, &ops, s->intervals.count,
SENSOR_LIST_UPDATE_INTERVALS,
- sizeof(*msg), &upriv);
+ sizeof(struct scmi_msg_sensor_list_update_intervals),
+ &upriv);
if (IS_ERR(iter))
return PTR_ERR(iter);
return ph->hops->iter_response_run(iter);
}
+struct scmi_apriv {
+ bool any_axes_support_extended_names;
+ struct scmi_sensor_info *s;
+};
+
static void iter_axes_desc_prepare_message(void *message,
const unsigned int desc_index,
const void *priv)
{
struct scmi_msg_sensor_axis_description_get *msg = message;
- const struct scmi_sensor_info *s = priv;
+ const struct scmi_apriv *apriv = priv;
/* Set the number of sensors to be skipped/already read */
- msg->id = cpu_to_le32(s->id);
+ msg->id = cpu_to_le32(apriv->s->id);
msg->axis_desc_index = cpu_to_le32(desc_index);
}
@@ -393,19 +398,21 @@ iter_axes_desc_process_response(const struct scmi_protocol_handle *ph,
u32 attrh, attrl;
struct scmi_sensor_axis_info *a;
size_t dsize = SCMI_MSG_RESP_AXIS_DESCR_BASE_SZ;
- struct scmi_sensor_info *s = priv;
+ struct scmi_apriv *apriv = priv;
const struct scmi_axis_descriptor *adesc = st->priv;
attrl = le32_to_cpu(adesc->attributes_low);
+ if (SUPPORTS_EXTENDED_AXIS_NAMES(attrl))
+ apriv->any_axes_support_extended_names = true;
- a = &s->axis[st->desc_index + st->loop_idx];
+ a = &apriv->s->axis[st->desc_index + st->loop_idx];
a->id = le32_to_cpu(adesc->id);
a->extended_attrs = SUPPORTS_EXTEND_ATTRS(attrl);
attrh = le32_to_cpu(adesc->attributes_high);
a->scale = S32_EXT(SENSOR_SCALE(attrh));
a->type = SENSOR_TYPE(attrh);
- strscpy(a->name, adesc->name, SCMI_MAX_STR_SIZE);
+ strscpy(a->name, adesc->name, SCMI_SHORT_NAME_MAX_SIZE);
if (a->extended_attrs) {
unsigned int ares = le32_to_cpu(adesc->resolution);
@@ -444,10 +451,19 @@ iter_axes_extended_name_process_response(const struct scmi_protocol_handle *ph,
void *priv)
{
struct scmi_sensor_axis_info *a;
- const struct scmi_sensor_info *s = priv;
+ const struct scmi_apriv *apriv = priv;
struct scmi_sensor_axis_name_descriptor *adesc = st->priv;
+ u32 axis_id = le32_to_cpu(adesc->axis_id);
- a = &s->axis[st->desc_index + st->loop_idx];
+ if (axis_id >= st->max_resources)
+ return -EPROTO;
+
+ /*
+ * Pick the corresponding descriptor based on the axis_id embedded
+ * in the reply since the list of axes supporting extended names
+ * can be a subset of all the axes.
+ */
+ a = &apriv->s->axis[axis_id];
strscpy(a->name, adesc->name, SCMI_MAX_STR_SIZE);
st->priv = ++adesc;
@@ -458,21 +474,36 @@ static int
scmi_sensor_axis_extended_names_get(const struct scmi_protocol_handle *ph,
struct scmi_sensor_info *s)
{
+ int ret;
void *iter;
- struct scmi_msg_sensor_axis_description_get *msg;
struct scmi_iterator_ops ops = {
.prepare_message = iter_axes_desc_prepare_message,
.update_state = iter_axes_extended_name_update_state,
.process_response = iter_axes_extended_name_process_response,
};
+ struct scmi_apriv apriv = {
+ .any_axes_support_extended_names = false,
+ .s = s,
+ };
iter = ph->hops->iter_response_init(ph, &ops, s->num_axis,
SENSOR_AXIS_NAME_GET,
- sizeof(*msg), s);
+ sizeof(struct scmi_msg_sensor_axis_description_get),
+ &apriv);
if (IS_ERR(iter))
return PTR_ERR(iter);
- return ph->hops->iter_response_run(iter);
+ /*
+ * Do not fail the whole protocol initialization when the extended
+ * names for the axes cannot be retrieved.
+ */
+ ret = ph->hops->iter_response_run(iter);
+ if (ret)
+ dev_warn(ph->dev,
+ "Failed to get axes extended names for %s (ret:%d).\n",
+ s->name, ret);
+
+ return 0;
}
static int scmi_sensor_axis_description(const struct scmi_protocol_handle *ph,
@@ -481,12 +512,15 @@ static int scmi_sensor_axis_description(const struct scmi_protocol_handle *ph,
{
int ret;
void *iter;
- struct scmi_msg_sensor_axis_description_get *msg;
struct scmi_iterator_ops ops = {
.prepare_message = iter_axes_desc_prepare_message,
.update_state = iter_axes_desc_update_state,
.process_response = iter_axes_desc_process_response,
};
+ struct scmi_apriv apriv = {
+ .any_axes_support_extended_names = false,
+ .s = s,
+ };
s->axis = devm_kcalloc(ph->dev, s->num_axis,
sizeof(*s->axis), GFP_KERNEL);
@@ -495,7 +529,8 @@ static int scmi_sensor_axis_description(const struct scmi_protocol_handle *ph,
iter = ph->hops->iter_response_init(ph, &ops, s->num_axis,
SENSOR_AXIS_DESCRIPTION_GET,
- sizeof(*msg), s);
+ sizeof(struct scmi_msg_sensor_axis_description_get),
+ &apriv);
if (IS_ERR(iter))
return PTR_ERR(iter);
@@ -503,7 +538,8 @@ static int scmi_sensor_axis_description(const struct scmi_protocol_handle *ph,
if (ret)
return ret;
- if (PROTOCOL_REV_MAJOR(version) >= 0x3)
+ if (PROTOCOL_REV_MAJOR(version) >= 0x3 &&
+ apriv.any_axes_support_extended_names)
ret = scmi_sensor_axis_extended_names_get(ph, s);
return ret;
@@ -598,7 +634,7 @@ iter_sens_descr_process_response(const struct scmi_protocol_handle *ph,
SUPPORTS_AXIS(attrh) ?
SENSOR_AXIS_NUMBER(attrh) : 0,
SCMI_MAX_NUM_SENSOR_AXIS);
- strscpy(s->name, sdesc->name, SCMI_MAX_STR_SIZE);
+ strscpy(s->name, sdesc->name, SCMI_SHORT_NAME_MAX_SIZE);
/*
* If supported overwrite short name with the extended
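The extended-names changes above also adopt a best-effort policy: an optional discovery step may fail without failing protocol initialization. In miniature, assuming kernel context; example_discover and example_get_extended_names are hypothetical:

static int example_discover(const struct scmi_protocol_handle *ph,
			    struct scmi_sensor_info *s)
{
	int ret = example_get_extended_names(ph, s);	/* optional step */

	if (ret)
		dev_warn(ph->dev, "no extended names for %s (ret:%d)\n",
			 s->name, ret);

	return 0;	/* never propagate failure of an optional feature */
}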
diff --git a/drivers/firmware/arm_scmi/voltage.c b/drivers/firmware/arm_scmi/voltage.c
index 9d195d8719ab..eaa8d944926a 100644
--- a/drivers/firmware/arm_scmi/voltage.c
+++ b/drivers/firmware/arm_scmi/voltage.c
@@ -180,7 +180,6 @@ static int scmi_voltage_levels_get(const struct scmi_protocol_handle *ph,
{
int ret;
void *iter;
- struct scmi_msg_cmd_describe_levels *msg;
struct scmi_iterator_ops ops = {
.prepare_message = iter_volt_levels_prepare_message,
.update_state = iter_volt_levels_update_state,
@@ -193,7 +192,8 @@ static int scmi_voltage_levels_get(const struct scmi_protocol_handle *ph,
iter = ph->hops->iter_response_init(ph, &ops, v->num_levels,
VOLTAGE_DESCRIBE_LEVELS,
- sizeof(*msg), &vpriv);
+ sizeof(struct scmi_msg_cmd_describe_levels),
+ &vpriv);
if (IS_ERR(iter))
return PTR_ERR(iter);
@@ -225,15 +225,14 @@ static int scmi_voltage_descriptors_get(const struct scmi_protocol_handle *ph,
/* Retrieve domain attributes at first ... */
put_unaligned_le32(dom, td->tx.buf);
- ret = ph->xops->do_xfer(ph, td);
/* Skip domain on comms error */
- if (ret)
+ if (ph->xops->do_xfer(ph, td))
continue;
v = vinfo->domains + dom;
v->id = dom;
attributes = le32_to_cpu(resp_dom->attr);
- strlcpy(v->name, resp_dom->name, SCMI_MAX_STR_SIZE);
+ strscpy(v->name, resp_dom->name, SCMI_SHORT_NAME_MAX_SIZE);
/*
* If supported overwrite short name with the extended one;
@@ -249,12 +248,8 @@ static int scmi_voltage_descriptors_get(const struct scmi_protocol_handle *ph,
v->async_level_set = true;
}
- ret = scmi_voltage_levels_get(ph, v);
/* Skip invalid voltage descriptors */
- if (ret)
- continue;
-
- ph->xops->reset_rx_to_maxsz(ph, td);
+ scmi_voltage_levels_get(ph, v);
}
ph->xops->xfer_put(ph, td);
diff --git a/drivers/firmware/efi/reboot.c b/drivers/firmware/efi/reboot.c
index 73089a24f04b..ceae84c19d22 100644
--- a/drivers/firmware/efi/reboot.c
+++ b/drivers/firmware/efi/reboot.c
@@ -6,7 +6,7 @@
#include <linux/efi.h>
#include <linux/reboot.h>
-static void (*orig_pm_power_off)(void);
+static struct sys_off_handler *efi_sys_off_handler;
int efi_reboot_quirk_mode = -1;
@@ -51,15 +51,11 @@ bool __weak efi_poweroff_required(void)
return false;
}
-static void efi_power_off(void)
+static int efi_power_off(struct sys_off_data *data)
{
efi.reset_system(EFI_RESET_SHUTDOWN, EFI_SUCCESS, 0, NULL);
- /*
- * The above call should not return, if it does fall back to
- * the original power off method (typically ACPI poweroff).
- */
- if (orig_pm_power_off)
- orig_pm_power_off();
+
+ return NOTIFY_DONE;
}
static int __init efi_shutdown_init(void)
@@ -68,8 +64,13 @@ static int __init efi_shutdown_init(void)
return -ENODEV;
if (efi_poweroff_required()) {
- orig_pm_power_off = pm_power_off;
- pm_power_off = efi_power_off;
+ /* SYS_OFF_PRIO_FIRMWARE + 1 so that it runs before acpi_power_off */
+ efi_sys_off_handler =
+ register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
+ SYS_OFF_PRIO_FIRMWARE + 1,
+ efi_power_off, NULL);
+ if (IS_ERR(efi_sys_off_handler))
+ return PTR_ERR(efi_sys_off_handler);
}
return 0;
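The reboot.c hunk replaces the old pattern of saving and chaining pm_power_off with the sys-off handler API, where a priority decides ordering (here SYS_OFF_PRIO_FIRMWARE + 1, so the EFI handler runs before ACPI). A registration sketch assuming kernel context; the my_* names are hypothetical:

#include <linux/reboot.h>

static int my_power_off(struct sys_off_data *data)
{
	/* Attempt power-off; NOTIFY_DONE lets lower-priority handlers run. */
	return NOTIFY_DONE;
}

static struct sys_off_handler *my_handler;

static int __init my_poweroff_init(void)
{
	my_handler = register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
					      SYS_OFF_PRIO_FIRMWARE + 1,
					      my_power_off, NULL);
	return PTR_ERR_OR_ZERO(my_handler);
}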
diff --git a/drivers/firmware/efi/sysfb_efi.c b/drivers/firmware/efi/sysfb_efi.c
index 4c7c9dd7733f..7882d4b3f2be 100644
--- a/drivers/firmware/efi/sysfb_efi.c
+++ b/drivers/firmware/efi/sysfb_efi.c
@@ -26,8 +26,6 @@
#include <linux/sysfb.h>
#include <video/vga.h>
-#include <asm/efi.h>
-
enum {
OVERRIDE_NONE = 0x0,
OVERRIDE_BASE = 0x1,
diff --git a/drivers/firmware/sysfb.c b/drivers/firmware/sysfb.c
index 2bfbb05f7d89..1f276f108cc9 100644
--- a/drivers/firmware/sysfb.c
+++ b/drivers/firmware/sysfb.c
@@ -34,21 +34,59 @@
#include <linux/screen_info.h>
#include <linux/sysfb.h>
+static struct platform_device *pd;
+static DEFINE_MUTEX(disable_lock);
+static bool disabled;
+
+static bool sysfb_unregister(void)
+{
+ if (IS_ERR_OR_NULL(pd))
+ return false;
+
+ platform_device_unregister(pd);
+ pd = NULL;
+
+ return true;
+}
+
+/**
+ * sysfb_disable() - disable the Generic System Framebuffers support
+ *
+ * This disables the registration of system framebuffer devices that match the
+ * generic drivers making use of the system framebuffer set up by firmware.
+ *
+ * It also unregisters a device if this was already registered by sysfb_init().
+ *
+ * Context: The function can sleep. A @disable_lock mutex is acquired to serialize
+ * against sysfb_init(), which registers a system framebuffer device.
+ */
+void sysfb_disable(void)
+{
+ mutex_lock(&disable_lock);
+ sysfb_unregister();
+ disabled = true;
+ mutex_unlock(&disable_lock);
+}
+EXPORT_SYMBOL_GPL(sysfb_disable);
+
static __init int sysfb_init(void)
{
struct screen_info *si = &screen_info;
struct simplefb_platform_data mode;
- struct platform_device *pd;
const char *name;
bool compatible;
- int ret;
+ int ret = 0;
+
+ mutex_lock(&disable_lock);
+ if (disabled)
+ goto unlock_mutex;
/* try to create a simple-framebuffer device */
compatible = sysfb_parse_mode(si, &mode);
if (compatible) {
- ret = sysfb_create_simplefb(si, &mode);
- if (!ret)
- return 0;
+ pd = sysfb_create_simplefb(si, &mode);
+ if (!IS_ERR(pd))
+ goto unlock_mutex;
}
/* if the FB is incompatible, create a legacy framebuffer device */
@@ -60,8 +98,10 @@ static __init int sysfb_init(void)
name = "platform-framebuffer";
pd = platform_device_alloc(name, 0);
- if (!pd)
- return -ENOMEM;
+ if (!pd) {
+ ret = -ENOMEM;
+ goto unlock_mutex;
+ }
sysfb_apply_efi_quirks(pd);
@@ -73,9 +113,11 @@ static __init int sysfb_init(void)
if (ret)
goto err;
- return 0;
+ goto unlock_mutex;
err:
platform_device_put(pd);
+unlock_mutex:
+ mutex_unlock(&disable_lock);
return ret;
}
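The structure introduced above is a classic one-shot guard: the late disable path and the initcall take the same mutex, so a registration can never race with, or follow, a disable. Reduced to its skeleton, assuming kernel context; the feature_* names are hypothetical:

static DEFINE_MUTEX(guard);
static bool off;

void feature_disable(void)
{
	mutex_lock(&guard);
	feature_unregister();	/* undo init if it already ran */
	off = true;		/* and block any later init */
	mutex_unlock(&guard);
}

static int __init feature_init(void)
{
	int ret = 0;

	mutex_lock(&guard);
	if (!off)
		ret = feature_register();
	mutex_unlock(&guard);
	return ret;
}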
diff --git a/drivers/firmware/sysfb_simplefb.c b/drivers/firmware/sysfb_simplefb.c
index bda8712bfd8c..a353e27f83f5 100644
--- a/drivers/firmware/sysfb_simplefb.c
+++ b/drivers/firmware/sysfb_simplefb.c
@@ -57,8 +57,8 @@ __init bool sysfb_parse_mode(const struct screen_info *si,
return false;
}
-__init int sysfb_create_simplefb(const struct screen_info *si,
- const struct simplefb_platform_data *mode)
+__init struct platform_device *sysfb_create_simplefb(const struct screen_info *si,
+ const struct simplefb_platform_data *mode)
{
struct platform_device *pd;
struct resource res;
@@ -76,7 +76,7 @@ __init int sysfb_create_simplefb(const struct screen_info *si,
base |= (u64)si->ext_lfb_base << 32;
if (!base || (u64)(resource_size_t)base != base) {
printk(KERN_DEBUG "sysfb: inaccessible VRAM base\n");
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
/*
@@ -93,7 +93,7 @@ __init int sysfb_create_simplefb(const struct screen_info *si,
length = mode->height * mode->stride;
if (length > size) {
printk(KERN_WARNING "sysfb: VRAM smaller than advertised\n");
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
length = PAGE_ALIGN(length);
@@ -104,11 +104,11 @@ __init int sysfb_create_simplefb(const struct screen_info *si,
res.start = base;
res.end = res.start + length - 1;
if (res.end <= res.start)
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
pd = platform_device_alloc("simple-framebuffer", 0);
if (!pd)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
sysfb_apply_efi_quirks(pd);
@@ -124,10 +124,10 @@ __init int sysfb_create_simplefb(const struct screen_info *si,
if (ret)
goto err_put_device;
- return 0;
+ return pd;
err_put_device:
platform_device_put(pd);
- return ret;
+ return ERR_PTR(ret);
}
diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c
index b55c74a5e064..1ee62cd58582 100644
--- a/drivers/gpio/gpio-crystalcove.c
+++ b/drivers/gpio/gpio-crystalcove.c
@@ -15,6 +15,7 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/seq_file.h>
+#include <linux/types.h>
#define CRYSTALCOVE_GPIO_NUM 16
#define CRYSTALCOVE_VGPIO_NUM 95
@@ -110,8 +111,7 @@ static inline int to_reg(int gpio, enum ctrl_register reg_type)
return reg + gpio % 8;
}
-static void crystalcove_update_irq_mask(struct crystalcove_gpio *cg,
- int gpio)
+static void crystalcove_update_irq_mask(struct crystalcove_gpio *cg, int gpio)
{
u8 mirqs0 = gpio < 8 ? MGPIO0IRQS0 : MGPIO1IRQS0;
int mask = BIT(gpio % 8);
@@ -140,8 +140,7 @@ static int crystalcove_gpio_dir_in(struct gpio_chip *chip, unsigned int gpio)
return regmap_write(cg->regmap, reg, CTLO_INPUT_SET);
}
-static int crystalcove_gpio_dir_out(struct gpio_chip *chip, unsigned int gpio,
- int value)
+static int crystalcove_gpio_dir_out(struct gpio_chip *chip, unsigned int gpio, int value)
{
struct crystalcove_gpio *cg = gpiochip_get_data(chip);
int reg = to_reg(gpio, CTRL_OUT);
@@ -168,8 +167,7 @@ static int crystalcove_gpio_get(struct gpio_chip *chip, unsigned int gpio)
return val & 0x1;
}
-static void crystalcove_gpio_set(struct gpio_chip *chip,
- unsigned int gpio, int value)
+static void crystalcove_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
{
struct crystalcove_gpio *cg = gpiochip_get_data(chip);
int reg = to_reg(gpio, CTRL_OUT);
@@ -185,10 +183,10 @@ static void crystalcove_gpio_set(struct gpio_chip *chip,
static int crystalcove_irq_type(struct irq_data *data, unsigned int type)
{
- struct crystalcove_gpio *cg =
- gpiochip_get_data(irq_data_get_irq_chip_data(data));
+ struct crystalcove_gpio *cg = gpiochip_get_data(irq_data_get_irq_chip_data(data));
+ irq_hw_number_t hwirq = irqd_to_hwirq(data);
- if (data->hwirq >= CRYSTALCOVE_GPIO_NUM)
+ if (hwirq >= CRYSTALCOVE_GPIO_NUM)
return 0;
switch (type) {
@@ -215,22 +213,20 @@ static int crystalcove_irq_type(struct irq_data *data, unsigned int type)
static void crystalcove_bus_lock(struct irq_data *data)
{
- struct crystalcove_gpio *cg =
- gpiochip_get_data(irq_data_get_irq_chip_data(data));
+ struct crystalcove_gpio *cg = gpiochip_get_data(irq_data_get_irq_chip_data(data));
mutex_lock(&cg->buslock);
}
static void crystalcove_bus_sync_unlock(struct irq_data *data)
{
- struct crystalcove_gpio *cg =
- gpiochip_get_data(irq_data_get_irq_chip_data(data));
- int gpio = data->hwirq;
+ struct crystalcove_gpio *cg = gpiochip_get_data(irq_data_get_irq_chip_data(data));
+ irq_hw_number_t hwirq = irqd_to_hwirq(data);
if (cg->update & UPDATE_IRQ_TYPE)
- crystalcove_update_irq_ctrl(cg, gpio);
+ crystalcove_update_irq_ctrl(cg, hwirq);
if (cg->update & UPDATE_IRQ_MASK)
- crystalcove_update_irq_mask(cg, gpio);
+ crystalcove_update_irq_mask(cg, hwirq);
cg->update = 0;
mutex_unlock(&cg->buslock);
@@ -238,34 +234,43 @@ static void crystalcove_bus_sync_unlock(struct irq_data *data)
static void crystalcove_irq_unmask(struct irq_data *data)
{
- struct crystalcove_gpio *cg =
- gpiochip_get_data(irq_data_get_irq_chip_data(data));
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+ struct crystalcove_gpio *cg = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(data);
- if (data->hwirq < CRYSTALCOVE_GPIO_NUM) {
- cg->set_irq_mask = false;
- cg->update |= UPDATE_IRQ_MASK;
- }
+ if (hwirq >= CRYSTALCOVE_GPIO_NUM)
+ return;
+
+ gpiochip_enable_irq(gc, hwirq);
+
+ cg->set_irq_mask = false;
+ cg->update |= UPDATE_IRQ_MASK;
}
static void crystalcove_irq_mask(struct irq_data *data)
{
- struct crystalcove_gpio *cg =
- gpiochip_get_data(irq_data_get_irq_chip_data(data));
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+ struct crystalcove_gpio *cg = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(data);
- if (data->hwirq < CRYSTALCOVE_GPIO_NUM) {
- cg->set_irq_mask = true;
- cg->update |= UPDATE_IRQ_MASK;
- }
+ if (hwirq >= CRYSTALCOVE_GPIO_NUM)
+ return;
+
+ cg->set_irq_mask = true;
+ cg->update |= UPDATE_IRQ_MASK;
+
+ gpiochip_disable_irq(gc, hwirq);
}
-static struct irq_chip crystalcove_irqchip = {
+static const struct irq_chip crystalcove_irqchip = {
.name = "Crystal Cove",
.irq_mask = crystalcove_irq_mask,
.irq_unmask = crystalcove_irq_unmask,
.irq_set_type = crystalcove_irq_type,
.irq_bus_lock = crystalcove_bus_lock,
.irq_bus_sync_unlock = crystalcove_bus_sync_unlock,
- .flags = IRQCHIP_SKIP_SET_WAKE,
+ .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static irqreturn_t crystalcove_gpio_irq_handler(int irq, void *data)
@@ -293,8 +298,7 @@ static irqreturn_t crystalcove_gpio_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static void crystalcove_gpio_dbg_show(struct seq_file *s,
- struct gpio_chip *chip)
+static void crystalcove_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
{
struct crystalcove_gpio *cg = gpiochip_get_data(chip);
int gpio, offset;
@@ -353,7 +357,7 @@ static int crystalcove_gpio_probe(struct platform_device *pdev)
cg->regmap = pmic->regmap;
girq = &cg->chip.irq;
- girq->chip = &crystalcove_irqchip;
+ gpio_irq_chip_set_chip(girq, &crystalcove_irqchip);
/* This will let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
girq->num_parents = 0;
diff --git a/drivers/gpio/gpio-dln2.c b/drivers/gpio/gpio-dln2.c
index 08b9e2cf4f2d..71fa437b491f 100644
--- a/drivers/gpio/gpio-dln2.c
+++ b/drivers/gpio/gpio-dln2.c
@@ -46,7 +46,6 @@
struct dln2_gpio {
struct platform_device *pdev;
struct gpio_chip gpio;
- struct irq_chip irqchip;
/*
* Cache pin direction to save us one transfer, since the hardware has
@@ -306,6 +305,7 @@ static void dln2_irq_unmask(struct irq_data *irqd)
struct dln2_gpio *dln2 = gpiochip_get_data(gc);
int pin = irqd_to_hwirq(irqd);
+ gpiochip_enable_irq(gc, pin);
set_bit(pin, dln2->unmasked_irqs);
}
@@ -316,6 +316,7 @@ static void dln2_irq_mask(struct irq_data *irqd)
int pin = irqd_to_hwirq(irqd);
clear_bit(pin, dln2->unmasked_irqs);
+ gpiochip_disable_irq(gc, pin);
}
static int dln2_irq_set_type(struct irq_data *irqd, unsigned type)
@@ -384,6 +385,17 @@ static void dln2_irq_bus_unlock(struct irq_data *irqd)
mutex_unlock(&dln2->irq_lock);
}
+static const struct irq_chip dln2_irqchip = {
+ .name = "dln2-irq",
+ .irq_mask = dln2_irq_mask,
+ .irq_unmask = dln2_irq_unmask,
+ .irq_set_type = dln2_irq_set_type,
+ .irq_bus_lock = dln2_irq_bus_lock,
+ .irq_bus_sync_unlock = dln2_irq_bus_unlock,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
static void dln2_gpio_event(struct platform_device *pdev, u16 echo,
const void *data, int len)
{
@@ -465,15 +477,8 @@ static int dln2_gpio_probe(struct platform_device *pdev)
dln2->gpio.direction_output = dln2_gpio_direction_output;
dln2->gpio.set_config = dln2_gpio_set_config;
- dln2->irqchip.name = "dln2-irq",
- dln2->irqchip.irq_mask = dln2_irq_mask,
- dln2->irqchip.irq_unmask = dln2_irq_unmask,
- dln2->irqchip.irq_set_type = dln2_irq_set_type,
- dln2->irqchip.irq_bus_lock = dln2_irq_bus_lock,
- dln2->irqchip.irq_bus_sync_unlock = dln2_irq_bus_unlock,
-
girq = &dln2->gpio.irq;
- girq->chip = &dln2->irqchip;
+ gpio_irq_chip_set_chip(girq, &dln2_irqchip);
/* The event comes from the outside so no parent handler */
girq->parent_handler = NULL;
girq->num_parents = 0;
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index 04afe728e187..c22fcaa44a61 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -662,10 +662,9 @@ static int dwapb_get_clks(struct dwapb_gpio *gpio)
gpio->clks[1].id = "db";
err = devm_clk_bulk_get_optional(gpio->dev, DWAPB_NR_CLOCKS,
gpio->clks);
- if (err) {
- dev_err(gpio->dev, "Cannot get APB/Debounce clocks\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(gpio->dev, err,
+ "Cannot get APB/Debounce clocks\n");
err = clk_bulk_prepare_enable(DWAPB_NR_CLOCKS, gpio->clks);
if (err) {
diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c
index df563616f943..bea0e32c195d 100644
--- a/drivers/gpio/gpio-grgpio.c
+++ b/drivers/gpio/gpio-grgpio.c
@@ -434,25 +434,13 @@ static int grgpio_probe(struct platform_device *ofdev)
static int grgpio_remove(struct platform_device *ofdev)
{
struct grgpio_priv *priv = platform_get_drvdata(ofdev);
- int i;
- int ret = 0;
-
- if (priv->domain) {
- for (i = 0; i < GRGPIO_MAX_NGPIO; i++) {
- if (priv->uirqs[i].refcnt != 0) {
- ret = -EBUSY;
- goto out;
- }
- }
- }
gpiochip_remove(&priv->gc);
if (priv->domain)
irq_domain_remove(priv->domain);
-out:
- return ret;
+ return 0;
}
static const struct of_device_id grgpio_match[] = {
diff --git a/drivers/gpio/gpio-merrifield.c b/drivers/gpio/gpio-merrifield.c
index f3d1baeacbe9..72ac09a59702 100644
--- a/drivers/gpio/gpio-merrifield.c
+++ b/drivers/gpio/gpio-merrifield.c
@@ -220,10 +220,8 @@ static void mrfld_irq_ack(struct irq_data *d)
raw_spin_unlock_irqrestore(&priv->lock, flags);
}
-static void mrfld_irq_unmask_mask(struct irq_data *d, bool unmask)
+static void mrfld_irq_unmask_mask(struct mrfld_gpio *priv, u32 gpio, bool unmask)
{
- struct mrfld_gpio *priv = irq_data_get_irq_chip_data(d);
- u32 gpio = irqd_to_hwirq(d);
void __iomem *gimr = gpio_reg(&priv->chip, gpio, GIMR);
unsigned long flags;
u32 value;
@@ -241,12 +239,20 @@ static void mrfld_irq_unmask_mask(struct irq_data *d, bool unmask)
static void mrfld_irq_mask(struct irq_data *d)
{
- mrfld_irq_unmask_mask(d, false);
+ struct mrfld_gpio *priv = irq_data_get_irq_chip_data(d);
+ u32 gpio = irqd_to_hwirq(d);
+
+ mrfld_irq_unmask_mask(priv, gpio, false);
+ gpiochip_disable_irq(&priv->chip, gpio);
}
static void mrfld_irq_unmask(struct irq_data *d)
{
- mrfld_irq_unmask_mask(d, true);
+ struct mrfld_gpio *priv = irq_data_get_irq_chip_data(d);
+ u32 gpio = irqd_to_hwirq(d);
+
+ gpiochip_enable_irq(&priv->chip, gpio);
+ mrfld_irq_unmask_mask(priv, gpio, true);
}
static int mrfld_irq_set_type(struct irq_data *d, unsigned int type)
@@ -329,13 +335,15 @@ static int mrfld_irq_set_wake(struct irq_data *d, unsigned int on)
return 0;
}
-static struct irq_chip mrfld_irqchip = {
+static const struct irq_chip mrfld_irqchip = {
.name = "gpio-merrifield",
.irq_ack = mrfld_irq_ack,
.irq_mask = mrfld_irq_mask,
.irq_unmask = mrfld_irq_unmask,
.irq_set_type = mrfld_irq_set_type,
.irq_set_wake = mrfld_irq_set_wake,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static void mrfld_irq_handler(struct irq_desc *desc)
@@ -482,7 +490,7 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
return retval;
girq = &priv->chip.irq;
- girq->chip = &mrfld_irqchip;
+ gpio_irq_chip_set_chip(girq, &mrfld_irqchip);
girq->init_hw = mrfld_irq_init_hw;
girq->parent_handler = mrfld_irq_handler;
girq->num_parents = 1;
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index c5166cd47c9c..7f59e5d936c2 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+
//
-// MXC GPIO support. (c) 2008 Daniel Mack <daniel@caiaq.de>
+// MXS GPIO support. (c) 2008 Daniel Mack <daniel@caiaq.de>
// Copyright 2008 Juergen Beisert, kernel@pengutronix.de
//
// Based on code from Freescale,
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 08bc52c3cdcb..ecd7d169470b 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -351,6 +351,9 @@ static const struct regmap_config pca953x_i2c_regmap = {
.reg_bits = 8,
.val_bits = 8,
+ .use_single_read = true,
+ .use_single_write = true,
+
.readable_reg = pca953x_readable_register,
.writeable_reg = pca953x_writeable_register,
.volatile_reg = pca953x_volatile_register,
@@ -906,15 +909,18 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
static int device_pca95xx_init(struct pca953x_chip *chip, u32 invert)
{
DECLARE_BITMAP(val, MAX_LINE);
+ u8 regaddr;
int ret;
- ret = regcache_sync_region(chip->regmap, chip->regs->output,
- chip->regs->output + NBANK(chip));
+ regaddr = pca953x_recalc_addr(chip, chip->regs->output, 0);
+ ret = regcache_sync_region(chip->regmap, regaddr,
+ regaddr + NBANK(chip) - 1);
if (ret)
goto out;
- ret = regcache_sync_region(chip->regmap, chip->regs->direction,
- chip->regs->direction + NBANK(chip));
+ regaddr = pca953x_recalc_addr(chip, chip->regs->direction, 0);
+ ret = regcache_sync_region(chip->regmap, regaddr,
+ regaddr + NBANK(chip) - 1);
if (ret)
goto out;
@@ -1127,14 +1133,14 @@ static int pca953x_regcache_sync(struct device *dev)
* sync these registers first and only then sync the rest.
*/
regaddr = pca953x_recalc_addr(chip, chip->regs->direction, 0);
- ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip));
+ ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip) - 1);
if (ret) {
dev_err(dev, "Failed to sync GPIO dir registers: %d\n", ret);
return ret;
}
regaddr = pca953x_recalc_addr(chip, chip->regs->output, 0);
- ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip));
+ ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip) - 1);
if (ret) {
dev_err(dev, "Failed to sync GPIO out registers: %d\n", ret);
return ret;
@@ -1144,7 +1150,7 @@ static int pca953x_regcache_sync(struct device *dev)
if (chip->driver_data & PCA_PCAL) {
regaddr = pca953x_recalc_addr(chip, PCAL953X_IN_LATCH, 0);
ret = regcache_sync_region(chip->regmap, regaddr,
- regaddr + NBANK(chip));
+ regaddr + NBANK(chip) - 1);
if (ret) {
dev_err(dev, "Failed to sync INT latch registers: %d\n",
ret);
@@ -1153,7 +1159,7 @@ static int pca953x_regcache_sync(struct device *dev)
regaddr = pca953x_recalc_addr(chip, PCAL953X_INT_MASK, 0);
ret = regcache_sync_region(chip->regmap, regaddr,
- regaddr + NBANK(chip));
+ regaddr + NBANK(chip) - 1);
if (ret) {
dev_err(dev, "Failed to sync INT mask registers: %d\n",
ret);
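All four pca953x fixes above correct the same off-by-one: regcache_sync_region() takes an inclusive [min, max] register range, so syncing NBANK(chip) consecutive registers must end at base + NBANK(chip) - 1, not one past it. In isolation:

#include <linux/regmap.h>

/* Sync nregs consecutive cached registers starting at base (inclusive). */
static int sync_bank(struct regmap *map, unsigned int base, unsigned int nregs)
{
	return regcache_sync_region(map, base, base + nregs - 1);
}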
diff --git a/drivers/gpio/gpio-realtek-otto.c b/drivers/gpio/gpio-realtek-otto.c
index c52b2cb1acae..63dcf42f7c20 100644
--- a/drivers/gpio/gpio-realtek-otto.c
+++ b/drivers/gpio/gpio-realtek-otto.c
@@ -172,6 +172,8 @@ static void realtek_gpio_irq_unmask(struct irq_data *data)
unsigned long flags;
u16 m;
+ gpiochip_enable_irq(&ctrl->gc, line);
+
raw_spin_lock_irqsave(&ctrl->lock, flags);
m = ctrl->intr_mask[port];
m |= realtek_gpio_imr_bits(port_pin, REALTEK_GPIO_IMR_LINE_MASK);
@@ -195,6 +197,8 @@ static void realtek_gpio_irq_mask(struct irq_data *data)
ctrl->intr_mask[port] = m;
realtek_gpio_write_imr(ctrl, port, ctrl->intr_type[port], m);
raw_spin_unlock_irqrestore(&ctrl->lock, flags);
+
+ gpiochip_disable_irq(&ctrl->gc, line);
}
static int realtek_gpio_irq_set_type(struct irq_data *data, unsigned int flow_type)
@@ -315,13 +319,15 @@ static int realtek_gpio_irq_init(struct gpio_chip *gc)
return 0;
}
-static struct irq_chip realtek_gpio_irq_chip = {
+static const struct irq_chip realtek_gpio_irq_chip = {
.name = "realtek-otto-gpio",
.irq_ack = realtek_gpio_irq_ack,
.irq_mask = realtek_gpio_irq_mask,
.irq_unmask = realtek_gpio_irq_unmask,
.irq_set_type = realtek_gpio_irq_set_type,
.irq_set_affinity = realtek_gpio_irq_set_affinity,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static const struct of_device_id realtek_gpio_of_match[] = {
@@ -404,7 +410,7 @@ static int realtek_gpio_probe(struct platform_device *pdev)
irq = platform_get_irq_optional(pdev, 0);
if (!(dev_flags & GPIO_INTERRUPTS_DISABLED) && irq > 0) {
girq = &ctrl->gc.irq;
- girq->chip = &realtek_gpio_irq_chip;
+ gpio_irq_chip_set_chip(girq, &realtek_gpio_irq_chip);
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_bad_irq;
girq->parent_handler = realtek_gpio_irq_handler;
diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
index acda4c5052d3..8a83f7bf4382 100644
--- a/drivers/gpio/gpio-sch.c
+++ b/drivers/gpio/gpio-sch.c
@@ -38,7 +38,6 @@
struct sch_gpio {
struct gpio_chip chip;
- struct irq_chip irqchip;
spinlock_t lock;
unsigned short iobase;
unsigned short resume_base;
@@ -218,11 +217,9 @@ static void sch_irq_ack(struct irq_data *d)
spin_unlock_irqrestore(&sch->lock, flags);
}
-static void sch_irq_mask_unmask(struct irq_data *d, int val)
+static void sch_irq_mask_unmask(struct gpio_chip *gc, irq_hw_number_t gpio_num, int val)
{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct sch_gpio *sch = gpiochip_get_data(gc);
- irq_hw_number_t gpio_num = irqd_to_hwirq(d);
unsigned long flags;
spin_lock_irqsave(&sch->lock, flags);
@@ -232,14 +229,32 @@ static void sch_irq_mask_unmask(struct irq_data *d, int val)
static void sch_irq_mask(struct irq_data *d)
{
- sch_irq_mask_unmask(d, 0);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ irq_hw_number_t gpio_num = irqd_to_hwirq(d);
+
+ sch_irq_mask_unmask(gc, gpio_num, 0);
+ gpiochip_disable_irq(gc, gpio_num);
}
static void sch_irq_unmask(struct irq_data *d)
{
- sch_irq_mask_unmask(d, 1);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ irq_hw_number_t gpio_num = irqd_to_hwirq(d);
+
+ gpiochip_enable_irq(gc, gpio_num);
+ sch_irq_mask_unmask(gc, gpio_num, 1);
}
+static const struct irq_chip sch_irqchip = {
+ .name = "sch_gpio",
+ .irq_ack = sch_irq_ack,
+ .irq_mask = sch_irq_mask,
+ .irq_unmask = sch_irq_unmask,
+ .irq_set_type = sch_irq_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
static u32 sch_gpio_gpe_handler(acpi_handle gpe_device, u32 gpe, void *context)
{
struct sch_gpio *sch = context;
@@ -367,14 +382,8 @@ static int sch_gpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, sch);
- sch->irqchip.name = "sch_gpio";
- sch->irqchip.irq_ack = sch_irq_ack;
- sch->irqchip.irq_mask = sch_irq_mask;
- sch->irqchip.irq_unmask = sch_irq_unmask;
- sch->irqchip.irq_set_type = sch_irq_type;
-
girq = &sch->chip.irq;
- girq->chip = &sch->irqchip;
+ gpio_irq_chip_set_chip(girq, &sch_irqchip);
girq->num_parents = 0;
girq->parents = NULL;
girq->parent_handler = NULL;
diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c
index 98109839102f..1020c2feb249 100644
--- a/drivers/gpio/gpio-sim.c
+++ b/drivers/gpio/gpio-sim.c
@@ -991,28 +991,22 @@ static struct configfs_attribute *gpio_sim_device_config_attrs[] = {
};
struct gpio_sim_chip_name_ctx {
- struct gpio_sim_device *dev;
+ struct fwnode_handle *swnode;
char *page;
};
static int gpio_sim_emit_chip_name(struct device *dev, void *data)
{
struct gpio_sim_chip_name_ctx *ctx = data;
- struct fwnode_handle *swnode;
- struct gpio_sim_bank *bank;
/* This would be the sysfs device exported in /sys/class/gpio. */
if (dev->class)
return 0;
- swnode = dev_fwnode(dev);
+ if (device_match_fwnode(dev, ctx->swnode))
+ return sprintf(ctx->page, "%s\n", dev_name(dev));
- list_for_each_entry(bank, &ctx->dev->bank_list, siblings) {
- if (bank->swnode == swnode)
- return sprintf(ctx->page, "%s\n", dev_name(dev));
- }
-
- return -ENODATA;
+ return 0;
}
static ssize_t gpio_sim_bank_config_chip_name_show(struct config_item *item,
@@ -1020,7 +1014,7 @@ static ssize_t gpio_sim_bank_config_chip_name_show(struct config_item *item,
{
struct gpio_sim_bank *bank = to_gpio_sim_bank(item);
struct gpio_sim_device *dev = gpio_sim_bank_get_device(bank);
- struct gpio_sim_chip_name_ctx ctx = { dev, page };
+ struct gpio_sim_chip_name_ctx ctx = { bank->swnode, page };
int ret;
mutex_lock(&dev->lock);
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index 23cddb265a0d..9db42f6a2043 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -19,6 +19,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
+#include <linux/pinctrl/consumer.h>
#define VF610_GPIO_PER_PORT 32
diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
index 98cd715ccc33..8d09b619c166 100644
--- a/drivers/gpio/gpio-vr41xx.c
+++ b/drivers/gpio/gpio-vr41xx.c
@@ -217,8 +217,6 @@ static int giu_get_irq(unsigned int irq)
printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
maskl, pendl, maskh, pendh);
- atomic_inc(&irq_err_count);
-
return -EINVAL;
}
diff --git a/drivers/gpio/gpio-wcove.c b/drivers/gpio/gpio-wcove.c
index 16a0fae1e32e..c18b6b47384f 100644
--- a/drivers/gpio/gpio-wcove.c
+++ b/drivers/gpio/gpio-wcove.c
@@ -299,6 +299,8 @@ static void wcove_irq_unmask(struct irq_data *data)
if (gpio >= WCOVE_GPIO_NUM)
return;
+ gpiochip_enable_irq(chip, gpio);
+
wg->set_irq_mask = false;
wg->update |= UPDATE_IRQ_MASK;
}
@@ -314,15 +316,19 @@ static void wcove_irq_mask(struct irq_data *data)
wg->set_irq_mask = true;
wg->update |= UPDATE_IRQ_MASK;
+
+ gpiochip_disable_irq(chip, gpio);
}
-static struct irq_chip wcove_irqchip = {
+static const struct irq_chip wcove_irqchip = {
.name = "Whiskey Cove",
.irq_mask = wcove_irq_mask,
.irq_unmask = wcove_irq_unmask,
.irq_set_type = wcove_irq_type,
.irq_bus_lock = wcove_bus_lock,
.irq_bus_sync_unlock = wcove_bus_sync_unlock,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static irqreturn_t wcove_gpio_irq_handler(int irq, void *data)
@@ -452,7 +458,7 @@ static int wcove_gpio_probe(struct platform_device *pdev)
}
girq = &wg->chip.irq;
- girq->chip = &wcove_irqchip;
+ gpio_irq_chip_set_chip(girq, &wcove_irqchip);
/* This will let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
girq->num_parents = 0;
diff --git a/drivers/gpio/gpio-winbond.c b/drivers/gpio/gpio-winbond.c
index 7f8f5b02e31d..4b61d975cc0e 100644
--- a/drivers/gpio/gpio-winbond.c
+++ b/drivers/gpio/gpio-winbond.c
@@ -385,12 +385,13 @@ static int winbond_gpio_get(struct gpio_chip *gc, unsigned int offset)
unsigned long *base = gpiochip_get_data(gc);
const struct winbond_gpio_info *info;
bool val;
+ int ret;
winbond_gpio_get_info(&offset, &info);
- val = winbond_sio_enter(*base);
- if (val)
- return val;
+ ret = winbond_sio_enter(*base);
+ if (ret)
+ return ret;
winbond_sio_select_logical(*base, info->dev);
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index b6d3a57e27ed..7f8e2fed2988 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -99,7 +99,7 @@ static inline void xgpio_set_value32(unsigned long *map, int bit, u32 v)
const unsigned long offset = (bit % BITS_PER_LONG) & BIT(5);
map[index] &= ~(0xFFFFFFFFul << offset);
- map[index] |= v << offset;
+ map[index] |= (unsigned long)v << offset;
}
static inline int xgpio_regoffset(struct xgpio_instance *chip, int ch)
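The one-line xilinx fix avoids an undefined 32-bit shift: offset is either 0 or 32 on a 64-bit kernel, and shifting a u32 by 32 is undefined behaviour (and would lose the value in the upper word even where it happens to "work"). Widening before shifting, in miniature:

static inline unsigned long set_u32_field(unsigned long word,
					  u32 v, unsigned int offset)
{
	word &= ~(0xFFFFFFFFul << offset);	/* clear the 32-bit slot */
	word |= (unsigned long)v << offset;	/* widen v, then shift */
	return word;
}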
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index f5aa5f93342a..b26e64338376 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -421,6 +421,10 @@ out_free_lh:
* @work: the worker that implements software debouncing
* @sw_debounced: flag indicating if the software debouncer is active
* @level: the current debounced physical level of the line
+ * @hdesc: the Hardware Timestamp Engine (HTE) descriptor
+ * @raw_level: the line level at the time of the event
+ * @total_discard_seq: the running counter of the discarded events
+ * @last_seqno: the last sequence number before the debounce period expires
*/
struct line {
struct gpio_desc *desc;
@@ -1460,11 +1464,12 @@ static ssize_t linereq_read(struct file *file,
static void linereq_free(struct linereq *lr)
{
unsigned int i;
- bool hte;
+ bool hte = false;
for (i = 0; i < lr->num_lines; i++) {
- hte = !!test_bit(FLAG_EVENT_CLOCK_HTE,
- &lr->lines[i].desc->flags);
+ if (lr->lines[i].desc)
+ hte = !!test_bit(FLAG_EVENT_CLOCK_HTE,
+ &lr->lines[i].desc->flags);
edge_detector_stop(&lr->lines[i], hte);
if (lr->lines[i].desc)
gpiod_free(lr->lines[i].desc);
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index e88c497fa010..f65656df3619 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -256,7 +256,6 @@ config DRM_AMDGPU
select HWMON
select BACKLIGHT_CLASS_DEVICE
select INTERVAL_TREE
- select DRM_BUDDY
help
Choose this option if you have a recent AMD Radeon graphics card.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 1f8161cd507f..3b1c675aba34 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -714,7 +714,8 @@ int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
{
bool all_hub = false;
- if (adev->family == AMDGPU_FAMILY_AI)
+ if (adev->family == AMDGPU_FAMILY_AI ||
+ adev->family == AMDGPU_FAMILY_RV)
all_hub = true;
return amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, flush_type, all_hub);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 67abf8dcd30a..4608599ba6bb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1364,16 +1364,10 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
struct amdgpu_vm *vm)
{
struct amdkfd_process_info *process_info = vm->process_info;
- struct amdgpu_bo *pd = vm->root.bo;
if (!process_info)
return;
- /* Release eviction fence from PD */
- amdgpu_bo_reserve(pd, false);
- amdgpu_bo_fence(pd, NULL, false);
- amdgpu_bo_unreserve(pd);
-
/* Update process info */
mutex_lock(&process_info->lock);
process_info->n_vms--;
@@ -1918,9 +1912,6 @@ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct amdgpu_device *adev,
return -EINVAL;
}
- /* delete kgd_mem from kfd_bo_list to avoid re-validating
- * this BO in BO's restoring after eviction.
- */
mutex_lock(&mem->process_info->lock);
ret = amdgpu_bo_reserve(bo, true);
@@ -1943,7 +1934,6 @@ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct amdgpu_device *adev,
amdgpu_amdkfd_remove_eviction_fence(
bo, mem->process_info->eviction_fence);
- list_del_init(&mem->validate_list.head);
if (size)
*size = amdgpu_bo_size(bo);
@@ -2512,12 +2502,15 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
process_info->eviction_fence = new_fence;
*ef = dma_fence_get(&new_fence->base);
- /* Attach new eviction fence to all BOs */
+ /* Attach new eviction fence to all BOs except pinned ones */
list_for_each_entry(mem, &process_info->kfd_bo_list,
- validate_list.head)
+ validate_list.head) {
+ if (mem->bo->tbo.pin_count)
+ continue;
+
amdgpu_bo_fence(mem->bo,
&process_info->eviction_fence->base, true);
-
+ }
/* Attach eviction fence to PD / PT BOs */
list_for_each_entry(peer_vm, &process_info->vm_list_head,
vm_list_node) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 714178f1b6c6..2168163aad2d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -40,7 +40,7 @@ static void amdgpu_bo_list_free_rcu(struct rcu_head *rcu)
{
struct amdgpu_bo_list *list = container_of(rcu, struct amdgpu_bo_list,
rhead);
-
+ mutex_destroy(&list->bo_list_mutex);
kvfree(list);
}
@@ -136,6 +136,7 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
trace_amdgpu_cs_bo_status(list->num_entries, total_size);
+ mutex_init(&list->bo_list_mutex);
*result = list;
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
index 529d52a204cf..9caea1688fc3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
@@ -47,6 +47,10 @@ struct amdgpu_bo_list {
struct amdgpu_bo *oa_obj;
unsigned first_userptr;
unsigned num_entries;
+
+ /* Protect access during command submission.
+ */
+ struct mutex bo_list_mutex;
};
int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index b28af04b0c3e..d8f1335bc68f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -519,6 +519,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
return r;
}
+ mutex_lock(&p->bo_list->bo_list_mutex);
+
/* One for TTM and one for the CS job */
amdgpu_bo_list_for_each_entry(e, p->bo_list)
e->tv.num_shared = 2;
@@ -651,6 +653,7 @@ out_free_user_pages:
kvfree(e->user_pages);
e->user_pages = NULL;
}
+ mutex_unlock(&p->bo_list->bo_list_mutex);
}
return r;
}
@@ -690,9 +693,11 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
{
unsigned i;
- if (error && backoff)
+ if (error && backoff) {
ttm_eu_backoff_reservation(&parser->ticket,
&parser->validated);
+ mutex_unlock(&parser->bo_list->bo_list_mutex);
+ }
for (i = 0; i < parser->num_post_deps; i++) {
drm_syncobj_put(parser->post_deps[i].syncobj);
@@ -832,12 +837,16 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
continue;
r = amdgpu_vm_bo_update(adev, bo_va, false);
- if (r)
+ if (r) {
+ mutex_unlock(&p->bo_list->bo_list_mutex);
return r;
+ }
r = amdgpu_sync_fence(&p->job->sync, bo_va->last_pt_update);
- if (r)
+ if (r) {
+ mutex_unlock(&p->bo_list->bo_list_mutex);
return r;
+ }
}
r = amdgpu_vm_handle_moved(adev, vm);
@@ -1278,6 +1287,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
mutex_unlock(&p->adev->notifier_lock);
+ mutex_unlock(&p->bo_list->bo_list_mutex);
return 0;
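Taken together, the bo_list hunks add a mutex that is initialized at list creation, destroyed in the RCU free callback, and held across the whole command-submission window, with an unlock on every error exit. The locking shape, reduced to a sketch; the example_* names are hypothetical:

static int example_submit(struct example_list *list)
{
	int r;

	mutex_lock(&list->lock);	/* taken when buffers are gathered */

	r = example_validate(list);
	if (r)
		goto unlock;		/* every error path drops the lock */

	r = example_commit(list);
unlock:
	mutex_unlock(&list->lock);
	return r;
}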
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 625424f3082b..58df107e3beb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -5164,7 +5164,7 @@ int amdgpu_device_gpu_recover_imp(struct amdgpu_device *adev,
*/
amdgpu_unregister_gpu_instance(tmp_adev);
- drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
+ drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true);
/* disable ras on ALL IPs */
if (!need_emergency_restart &&
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 17c9bbe0cbc5..4dfd6724b3ca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -1528,6 +1528,21 @@ bool amdgpu_crtc_get_scanout_position(struct drm_crtc *crtc,
stime, etime, mode);
}
+static bool
+amdgpu_display_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
+{
+ struct drm_device *dev = adev_to_drm(adev);
+ struct drm_fb_helper *fb_helper = dev->fb_helper;
+
+ if (!fb_helper || !fb_helper->buffer)
+ return false;
+
+ if (gem_to_amdgpu_bo(fb_helper->buffer->gem) != robj)
+ return false;
+
+ return true;
+}
+
int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
{
struct drm_device *dev = adev_to_drm(adev);
@@ -1563,10 +1578,12 @@ int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
continue;
}
robj = gem_to_amdgpu_bo(fb->obj[0]);
- r = amdgpu_bo_reserve(robj, true);
- if (r == 0) {
- amdgpu_bo_unpin(robj);
- amdgpu_bo_unreserve(robj);
+ if (!amdgpu_display_robj_is_fb(adev, robj)) {
+ r = amdgpu_bo_reserve(robj, true);
+ if (r == 0) {
+ amdgpu_bo_unpin(robj);
+ amdgpu_bo_unreserve(robj);
+ }
}
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index ede2fa56f6c9..16699158e00d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -594,17 +594,20 @@ int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value)
int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
int r;
- r = amdgpu_ras_block_late_init(adev, ras_block);
- if (r)
- return r;
if (amdgpu_ras_is_supported(adev, ras_block->block)) {
if (!amdgpu_persistent_edc_harvesting_supported(adev))
amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX);
+ r = amdgpu_ras_block_late_init(adev, ras_block);
+ if (r)
+ return r;
+
r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
if (r)
goto late_fini;
+ } else {
+ amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 798c56214a23..aebc384531ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -518,6 +518,8 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
case IP_VERSION(9, 1, 0):
/* RENOIR looks like RAVEN */
case IP_VERSION(9, 3, 0):
+ /* GC 10.3.7 */
+ case IP_VERSION(10, 3, 7):
if (amdgpu_tmz == 0) {
adev->gmc.tmz_enabled = false;
dev_info(adev->dev,
@@ -540,8 +542,6 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
case IP_VERSION(10, 3, 1):
/* YELLOW_CARP*/
case IP_VERSION(10, 3, 3):
- /* GC 10.3.7 */
- case IP_VERSION(10, 3, 7):
/* Don't enable it by default yet.
*/
if (amdgpu_tmz < 1) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index b4cf8717f554..89011bae7588 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -320,6 +320,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
if (!amdgpu_device_has_dc_support(adev)) {
if (!adev->enable_virtual_display)
/* Disable vblank IRQs aggressively for power-saving */
+ /* XXX: can this be enabled for DC? */
adev_to_drm(adev)->vblank_disable_immediate = true;
r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 801f6fa692e9..6de63ea6687e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -642,7 +642,6 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
atomic64_read(&adev->visible_pin_size),
vram_gtt.vram_size);
vram_gtt.gtt_size = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT)->size;
- vram_gtt.gtt_size *= PAGE_SIZE;
vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size);
return copy_to_user(out, &vram_gtt,
min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
@@ -675,7 +674,6 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
mem.cpu_accessible_vram.usable_heap_size * 3 / 4;
mem.gtt.total_heap_size = gtt_man->size;
- mem.gtt.total_heap_size *= PAGE_SIZE;
mem.gtt.usable_heap_size = mem.gtt.total_heap_size -
atomic64_read(&adev->gart_pin_size);
mem.gtt.heap_usage = ttm_resource_manager_usage(gtt_man);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 2de9309a4193..dac202ae864d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -197,6 +197,13 @@ static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
if (amdgpu_ras_query_error_status(obj->adev, &info))
return -EINVAL;
+ /* Hardware counters are reset automatically after the query on Vega20 and Arcturus */
+ if (obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
+ obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
+ if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
+ dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
+ }
+
s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
"ue", info.ue_count,
"ce", info.ce_count);
@@ -550,9 +557,10 @@ static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
if (amdgpu_ras_query_error_status(obj->adev, &info))
return -EINVAL;
- if (obj->adev->asic_type == CHIP_ALDEBARAN) {
+ if (obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
+ obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
- DRM_WARN("Failed to reset error counter and error status");
+ dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
}
return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
@@ -1027,9 +1035,6 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
}
}
- if (!amdgpu_persistent_edc_harvesting_supported(adev))
- amdgpu_ras_reset_error_status(adev, info->head.block);
-
return 0;
}
@@ -1149,6 +1154,12 @@ int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
if (res)
return res;
+ if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
+ adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
+ if (amdgpu_ras_reset_error_status(adev, info.head.block))
+ dev_warn(adev->dev, "Failed to reset error counter and error status");
+ }
+
ce += info.ce_count;
ue += info.ue_count;
}
@@ -1792,6 +1803,12 @@ static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
continue;
amdgpu_ras_query_error_status(adev, &info);
+
+ if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
+ adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
+ if (amdgpu_ras_reset_error_status(adev, info.head.block))
+ dev_warn(adev->dev, "Failed to reset error counter and error status");
+ }
}
}
@@ -2278,8 +2295,9 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
!amdgpu_ras_asic_supported(adev))
return;
- if (!(amdgpu_sriov_vf(adev) &&
- (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2))))
+ /* If the driver runs on the SRIOV guest side, only enable RAS for Aldebaran */
+ if (amdgpu_sriov_vf(adev) &&
+ adev->ip_versions[MP1_HWIP][0] != IP_VERSION(13, 0, 2))
return;
if (!adev->gmc.xgmi.connected_to_cpu) {
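
The predicate rewrite in the last hunk is a behaviour fix, not a cleanup: !(sriov && aldebaran) is also true on bare metal, so the old form returned early everywhere except SRIOV Aldebaran guests, while the new form bails out only for SRIOV guests that are not Aldebaran (MP1 13.0.2). A stand-alone truth table of the two conditions, with names invented for illustration:

#include <stdbool.h>
#include <stdio.h>

static bool old_bails(bool sriov, bool aldebaran)
{
	return !(sriov && aldebaran);	/* the removed condition */
}

static bool new_bails(bool sriov, bool aldebaran)
{
	return sriov && !aldebaran;	/* the added condition */
}

int main(void)
{
	for (int sriov = 0; sriov <= 1; sriov++)
		for (int ald = 0; ald <= 1; ald++)
			printf("sriov=%d aldebaran=%d old_bails=%d new_bails=%d\n",
			       sriov, ald,
			       old_bails(sriov, ald), new_bails(sriov, ald));
	return 0;
}

Bare metal (sriov=0) bailed out under the old condition and proceeds under the new one; that is the actual fix.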
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
index 6546552e596c..acfa207cf970 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
@@ -30,15 +30,12 @@
#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_range_manager.h>
-#include "amdgpu_vram_mgr.h"
-
/* state back for walking over vram_mgr and gtt_mgr allocations */
struct amdgpu_res_cursor {
uint64_t start;
uint64_t size;
uint64_t remaining;
- void *node;
- uint32_t mem_type;
+ struct drm_mm_node *node;
};
/**
@@ -55,63 +52,27 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
uint64_t start, uint64_t size,
struct amdgpu_res_cursor *cur)
{
- struct drm_buddy_block *block;
- struct list_head *head, *next;
struct drm_mm_node *node;
- if (!res)
- goto fallback;
-
- BUG_ON(start + size > res->num_pages << PAGE_SHIFT);
-
- cur->mem_type = res->mem_type;
-
- switch (cur->mem_type) {
- case TTM_PL_VRAM:
- head = &to_amdgpu_vram_mgr_resource(res)->blocks;
-
- block = list_first_entry_or_null(head,
- struct drm_buddy_block,
- link);
- if (!block)
- goto fallback;
-
- while (start >= amdgpu_vram_mgr_block_size(block)) {
- start -= amdgpu_vram_mgr_block_size(block);
-
- next = block->link.next;
- if (next != head)
- block = list_entry(next, struct drm_buddy_block, link);
- }
-
- cur->start = amdgpu_vram_mgr_block_start(block) + start;
- cur->size = min(amdgpu_vram_mgr_block_size(block) - start, size);
- cur->remaining = size;
- cur->node = block;
- break;
- case TTM_PL_TT:
- node = to_ttm_range_mgr_node(res)->mm_nodes;
- while (start >= node->size << PAGE_SHIFT)
- start -= node++->size << PAGE_SHIFT;
-
- cur->start = (node->start << PAGE_SHIFT) + start;
- cur->size = min((node->size << PAGE_SHIFT) - start, size);
+ if (!res || res->mem_type == TTM_PL_SYSTEM) {
+ cur->start = start;
+ cur->size = size;
cur->remaining = size;
- cur->node = node;
- break;
- default:
- goto fallback;
+ cur->node = NULL;
+ WARN_ON(res && start + size > res->num_pages << PAGE_SHIFT);
+ return;
}
- return;
+ BUG_ON(start + size > res->num_pages << PAGE_SHIFT);
-fallback:
- cur->start = start;
- cur->size = size;
+ node = to_ttm_range_mgr_node(res)->mm_nodes;
+ while (start >= node->size << PAGE_SHIFT)
+ start -= node++->size << PAGE_SHIFT;
+
+ cur->start = (node->start << PAGE_SHIFT) + start;
+ cur->size = min((node->size << PAGE_SHIFT) - start, size);
cur->remaining = size;
- cur->node = NULL;
- WARN_ON(res && start + size > res->num_pages << PAGE_SHIFT);
- return;
+ cur->node = node;
}
/**
@@ -124,9 +85,7 @@ fallback:
*/
static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size)
{
- struct drm_buddy_block *block;
- struct drm_mm_node *node;
- struct list_head *next;
+ struct drm_mm_node *node = cur->node;
BUG_ON(size > cur->remaining);
@@ -140,27 +99,9 @@ static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size)
return;
}
- switch (cur->mem_type) {
- case TTM_PL_VRAM:
- block = cur->node;
-
- next = block->link.next;
- block = list_entry(next, struct drm_buddy_block, link);
-
- cur->node = block;
- cur->start = amdgpu_vram_mgr_block_start(block);
- cur->size = min(amdgpu_vram_mgr_block_size(block), cur->remaining);
- break;
- case TTM_PL_TT:
- node = cur->node;
-
- cur->node = ++node;
- cur->start = node->start << PAGE_SHIFT;
- cur->size = min(node->size << PAGE_SHIFT, cur->remaining);
- break;
- default:
- return;
- }
+ cur->node = ++node;
+ cur->start = node->start << PAGE_SHIFT;
+ cur->size = min(node->size << PAGE_SHIFT, cur->remaining);
}
#endif
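
The revert restores the plain drm_mm_node walk: the cursor seeks to a byte offset inside a list of page-granular extents, then steps through them chunk by chunk. A stand-alone sketch of the same walk over an array of (start, size) extents; the types and names here are illustrative, not the kernel's:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

struct node { uint64_t start, size; };	/* both in pages */

struct cursor {
	const struct node *node;
	uint64_t start, size, remaining;	/* bytes */
};

static void res_first(const struct node *n, uint64_t start, uint64_t size,
		      struct cursor *cur)
{
	/* skip whole extents until the offset falls inside one */
	while (start >= n->size << PAGE_SHIFT)
		start -= n++->size << PAGE_SHIFT;

	cur->node = n;
	cur->start = (n->start << PAGE_SHIFT) + start;
	cur->size = (n->size << PAGE_SHIFT) - start;
	if (cur->size > size)
		cur->size = size;
	cur->remaining = size;
}

static void res_next(struct cursor *cur, uint64_t size)
{
	const struct node *n = cur->node;

	cur->remaining -= size;
	if (!cur->remaining)
		return;

	cur->size -= size;
	if (cur->size) {		/* still inside the current extent */
		cur->start += size;
		return;
	}

	cur->node = ++n;		/* advance to the next extent */
	cur->start = n->start << PAGE_SHIFT;
	cur->size = n->size << PAGE_SHIFT;
	if (cur->size > cur->remaining)
		cur->size = cur->remaining;
}

int main(void)
{
	struct node nodes[] = { { 0, 2 }, { 8, 4 } };	/* 2 + 4 pages */
	struct cursor cur;

	res_first(nodes, 4096, 4 << PAGE_SHIFT, &cur);
	while (cur.remaining) {
		printf("chunk at 0x%llx, %llu bytes\n",
		       (unsigned long long)cur.start,
		       (unsigned long long)cur.size);
		res_next(&cur, cur.size);
	}
	return 0;
}

The clamp against remaining in both helpers is what lets callers consume cur.size bytes at a time without overrunning the requested window.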
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index be6f76a30ac6..3b4c19412625 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1798,18 +1798,26 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
(unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
- /* Compute GTT size, either bsaed on 3/4th the size of RAM size
+ /* Compute GTT size, either based on 1/2 the size of system RAM
* or whatever the user passed on module init */
if (amdgpu_gtt_size == -1) {
struct sysinfo si;
si_meminfo(&si);
- gtt_size = min(max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
- adev->gmc.mc_vram_size),
- ((uint64_t)si.totalram * si.mem_unit * 3/4));
- }
- else
+ /* Certain GL unit tests for large textures can cause problems
+ * with the OOM killer since there is no way to link this memory
+ * to a process. This was originally mitigated (but not necessarily
+ * eliminated) by limiting the GTT size. The problem is this limit
+ * is often too low for many modern games, so just make the limit 1/2
+ * of system memory, which aligns with TTM. The OOM accounting needs
+ * to be addressed, but we shouldn't prevent common 3D applications
+ * from being usable just to potentially mitigate that corner case.
+ */
+ gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
+ (u64)si.totalram * si.mem_unit / 2);
+ } else {
gtt_size = (uint64_t)amdgpu_gtt_size << 20;
+ }
/* Initialize GTT memory pool */
r = amdgpu_gtt_mgr_init(adev, gtt_size);
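
The replacement formula drops both the VRAM-size floor and the 3/4-of-RAM cap: the pool is now simply the larger of the driver default and half of system memory. A stand-alone rendering of the new arithmetic, assuming a 16 GiB host and using the 3 GiB default as a stand-in for AMDGPU_DEFAULT_GTT_SIZE_MB:

#include <stdint.h>
#include <stdio.h>

#define DEFAULT_GTT_SIZE_MB 3072ULL	/* stand-in for the driver default */

int main(void)
{
	uint64_t totalram = 16ULL << 30;	/* assumed 16 GiB of RAM */
	uint64_t def = DEFAULT_GTT_SIZE_MB << 20;

	/* new policy: at least the driver default, otherwise half of RAM */
	uint64_t gtt_size = totalram / 2 > def ? totalram / 2 : def;

	printf("GTT pool: %llu MiB\n", (unsigned long long)(gtt_size >> 20));
	return 0;
}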
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 6a70818039dd..9120ae80ef52 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -26,7 +26,6 @@
#include <linux/dma-direction.h>
#include <drm/gpu_scheduler.h>
-#include "amdgpu_vram_mgr.h"
#include "amdgpu.h"
#define AMDGPU_PL_GDS (TTM_PL_PRIV + 0)
@@ -39,6 +38,15 @@
#define AMDGPU_POISON 0xd0bed0be
+struct amdgpu_vram_mgr {
+ struct ttm_resource_manager manager;
+ struct drm_mm mm;
+ spinlock_t lock;
+ struct list_head reservations_pending;
+ struct list_head reserved_pages;
+ atomic64_t vis_usage;
+};
+
struct amdgpu_gtt_mgr {
struct ttm_resource_manager manager;
struct drm_mm mm;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
index 576849e95296..108e8e8a1a36 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
@@ -496,7 +496,8 @@ static int amdgpu_vkms_sw_init(void *handle)
adev_to_drm(adev)->mode_config.max_height = YRES_MAX;
adev_to_drm(adev)->mode_config.preferred_depth = 24;
- adev_to_drm(adev)->mode_config.prefer_shadow = 1;
+ /* disable prefer shadow for now due to hibernation issues */
+ adev_to_drm(adev)->mode_config.prefer_shadow = 0;
adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 2ceeaa4c793a..dc76d2b3ce52 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -679,6 +679,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
{
struct amdgpu_vm_update_params params;
struct amdgpu_vm_bo_base *entry;
+ bool flush_tlb_needed = false;
int r, idx;
if (list_empty(&vm->relocated))
@@ -697,6 +698,9 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
goto error;
list_for_each_entry(entry, &vm->relocated, vm_status) {
+ /* vm_flush_needed after updating moved PDEs */
+ flush_tlb_needed |= entry->moved;
+
r = amdgpu_vm_pde_update(&params, entry);
if (r)
goto error;
@@ -706,8 +710,8 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
if (r)
goto error;
- /* vm_flush_needed after updating PDEs */
- atomic64_inc(&vm->tlb_seq);
+ if (flush_tlb_needed)
+ atomic64_inc(&vm->tlb_seq);
while (!list_empty(&vm->relocated)) {
entry = list_first_entry(&vm->relocated,
@@ -789,6 +793,11 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
flush_tlb |= adev->gmc.xgmi.num_physical_nodes &&
adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0);
+ /*
+ * On GFX8 and older any 8 PTE block with a valid bit set enters the TLB
+ */
+ flush_tlb |= adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 0, 0);
+
memset(&params, 0, sizeof(params));
params.adev = adev;
params.vm = vm;
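
Two independent TLB-flush fixes land here: tlb_seq is now bumped only when a moved PDE was actually rewritten, and pre-GFX9 parts always flush because the hardware pulls any valid 8-PTE block into the TLB. The version gate works because IP_VERSION packs major/minor/revision into one comparable integer; a stand-alone sketch of that encoding (the macro body mirrors the kernel's, the sample versions are arbitrary):

#include <stdio.h>

#define IP_VERSION(maj, min, rev) (((maj) << 16) | ((min) << 8) | (rev))

int main(void)
{
	unsigned int gfx8 = IP_VERSION(8, 0, 1);
	unsigned int gfx9 = IP_VERSION(9, 4, 0);

	/* GFX8 and older must always flush; GFX9+ can skip it */
	int flush_gfx8 = gfx8 < IP_VERSION(9, 0, 0);
	int flush_gfx9 = gfx9 < IP_VERSION(9, 0, 0);

	printf("gfx8 forced flush: %d, gfx9 forced flush: %d\n",
	       flush_gfx8, flush_gfx9);
	return 0;
}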
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 49e4092f447f..0a7611648573 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -32,10 +32,8 @@
#include "atom.h"
struct amdgpu_vram_reservation {
- u64 start;
- u64 size;
- struct list_head allocated;
- struct list_head blocks;
+ struct list_head node;
+ struct drm_mm_node mm_node;
};
static inline struct amdgpu_vram_mgr *
@@ -188,18 +186,18 @@ const struct attribute_group amdgpu_vram_mgr_attr_group = {
};
/**
- * amdgpu_vram_mgr_vis_size - Calculate visible block size
+ * amdgpu_vram_mgr_vis_size - Calculate visible node size
*
* @adev: amdgpu_device pointer
- * @block: DRM BUDDY block structure
+ * @node: MM node structure
*
- * Calculate how many bytes of the DRM BUDDY block are inside visible VRAM
+ * Calculate how many bytes of the MM node are inside visible VRAM
*/
static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
- struct drm_buddy_block *block)
+ struct drm_mm_node *node)
{
- u64 start = amdgpu_vram_mgr_block_start(block);
- u64 end = start + amdgpu_vram_mgr_block_size(block);
+ uint64_t start = node->start << PAGE_SHIFT;
+ uint64_t end = (node->size + node->start) << PAGE_SHIFT;
if (start >= adev->gmc.visible_vram_size)
return 0;
@@ -220,9 +218,9 @@ u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct ttm_resource *res = bo->tbo.resource;
- struct amdgpu_vram_mgr_resource *vres = to_amdgpu_vram_mgr_resource(res);
- struct drm_buddy_block *block;
- u64 usage = 0;
+ unsigned pages = res->num_pages;
+ struct drm_mm_node *mm;
+ u64 usage;
if (amdgpu_gmc_vram_full_visible(&adev->gmc))
return amdgpu_bo_size(bo);
@@ -230,8 +228,9 @@ u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
if (res->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
return 0;
- list_for_each_entry(block, &vres->blocks, link)
- usage += amdgpu_vram_mgr_vis_size(adev, block);
+ mm = &container_of(res, struct ttm_range_mgr_node, base)->mm_nodes[0];
+ for (usage = 0; pages; pages -= mm->size, mm++)
+ usage += amdgpu_vram_mgr_vis_size(adev, mm);
return usage;
}
@@ -241,30 +240,23 @@ static void amdgpu_vram_mgr_do_reserve(struct ttm_resource_manager *man)
{
struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
struct amdgpu_device *adev = to_amdgpu_device(mgr);
- struct drm_buddy *mm = &mgr->mm;
+ struct drm_mm *mm = &mgr->mm;
struct amdgpu_vram_reservation *rsv, *temp;
- struct drm_buddy_block *block;
uint64_t vis_usage;
- list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, blocks) {
- if (drm_buddy_alloc_blocks(mm, rsv->start, rsv->start + rsv->size,
- rsv->size, mm->chunk_size, &rsv->allocated,
- DRM_BUDDY_RANGE_ALLOCATION))
- continue;
-
- block = amdgpu_vram_mgr_first_block(&rsv->allocated);
- if (!block)
+ list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, node) {
+ if (drm_mm_reserve_node(mm, &rsv->mm_node))
continue;
dev_dbg(adev->dev, "Reservation 0x%llx - %lld, Succeeded\n",
- rsv->start, rsv->size);
+ rsv->mm_node.start, rsv->mm_node.size);
- vis_usage = amdgpu_vram_mgr_vis_size(adev, block);
+ vis_usage = amdgpu_vram_mgr_vis_size(adev, &rsv->mm_node);
atomic64_add(vis_usage, &mgr->vis_usage);
spin_lock(&man->bdev->lru_lock);
- man->usage += rsv->size;
+ man->usage += rsv->mm_node.size << PAGE_SHIFT;
spin_unlock(&man->bdev->lru_lock);
- list_move(&rsv->blocks, &mgr->reserved_pages);
+ list_move(&rsv->node, &mgr->reserved_pages);
}
}
@@ -286,16 +278,14 @@ int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
if (!rsv)
return -ENOMEM;
- INIT_LIST_HEAD(&rsv->allocated);
- INIT_LIST_HEAD(&rsv->blocks);
+ INIT_LIST_HEAD(&rsv->node);
+ rsv->mm_node.start = start >> PAGE_SHIFT;
+ rsv->mm_node.size = size >> PAGE_SHIFT;
- rsv->start = start;
- rsv->size = size;
-
- mutex_lock(&mgr->lock);
- list_add_tail(&rsv->blocks, &mgr->reservations_pending);
+ spin_lock(&mgr->lock);
+ list_add_tail(&rsv->node, &mgr->reservations_pending);
amdgpu_vram_mgr_do_reserve(&mgr->manager);
- mutex_unlock(&mgr->lock);
+ spin_unlock(&mgr->lock);
return 0;
}
@@ -317,19 +307,19 @@ int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
struct amdgpu_vram_reservation *rsv;
int ret;
- mutex_lock(&mgr->lock);
+ spin_lock(&mgr->lock);
- list_for_each_entry(rsv, &mgr->reservations_pending, blocks) {
- if (rsv->start <= start &&
- (start < (rsv->start + rsv->size))) {
+ list_for_each_entry(rsv, &mgr->reservations_pending, node) {
+ if ((rsv->mm_node.start <= start) &&
+ (start < (rsv->mm_node.start + rsv->mm_node.size))) {
ret = -EBUSY;
goto out;
}
}
- list_for_each_entry(rsv, &mgr->reserved_pages, blocks) {
- if (rsv->start <= start &&
- (start < (rsv->start + rsv->size))) {
+ list_for_each_entry(rsv, &mgr->reserved_pages, node) {
+ if ((rsv->mm_node.start <= start) &&
+ (start < (rsv->mm_node.start + rsv->mm_node.size))) {
ret = 0;
goto out;
}
@@ -337,11 +327,33 @@ int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
ret = -ENOENT;
out:
- mutex_unlock(&mgr->lock);
+ spin_unlock(&mgr->lock);
return ret;
}
/**
+ * amdgpu_vram_mgr_virt_start - update virtual start address
+ *
+ * @mem: ttm_resource to update
+ * @node: just allocated node
+ *
+ * Calculate a virtual BO start address to easily check if everything is CPU
+ * accessible.
+ */
+static void amdgpu_vram_mgr_virt_start(struct ttm_resource *mem,
+ struct drm_mm_node *node)
+{
+ unsigned long start;
+
+ start = node->start + node->size;
+ if (start > mem->num_pages)
+ start -= mem->num_pages;
+ else
+ start = 0;
+ mem->start = max(mem->start, start);
+}
+
+/**
* amdgpu_vram_mgr_new - allocate new ranges
*
* @man: TTM memory type manager
@@ -356,44 +368,46 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
const struct ttm_place *place,
struct ttm_resource **res)
{
- u64 vis_usage = 0, max_bytes, cur_size, min_block_size;
+ unsigned long lpfn, num_nodes, pages_per_node, pages_left, pages;
struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
struct amdgpu_device *adev = to_amdgpu_device(mgr);
- struct amdgpu_vram_mgr_resource *vres;
- u64 size, remaining_size, lpfn, fpfn;
- struct drm_buddy *mm = &mgr->mm;
- struct drm_buddy_block *block;
- unsigned long pages_per_block;
+ uint64_t vis_usage = 0, mem_bytes, max_bytes;
+ struct ttm_range_mgr_node *node;
+ struct drm_mm *mm = &mgr->mm;
+ enum drm_mm_insert_mode mode;
+ unsigned i;
int r;
- lpfn = place->lpfn << PAGE_SHIFT;
+ lpfn = place->lpfn;
if (!lpfn)
- lpfn = man->size;
-
- fpfn = place->fpfn << PAGE_SHIFT;
+ lpfn = man->size >> PAGE_SHIFT;
max_bytes = adev->gmc.mc_vram_size;
if (tbo->type != ttm_bo_type_kernel)
max_bytes -= AMDGPU_VM_RESERVED_VRAM;
+ mem_bytes = tbo->base.size;
if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
- pages_per_block = ~0ul;
+ pages_per_node = ~0ul;
+ num_nodes = 1;
} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- pages_per_block = HPAGE_PMD_NR;
+ pages_per_node = HPAGE_PMD_NR;
#else
/* default to 2MB */
- pages_per_block = 2UL << (20UL - PAGE_SHIFT);
+ pages_per_node = 2UL << (20UL - PAGE_SHIFT);
#endif
- pages_per_block = max_t(uint32_t, pages_per_block,
- tbo->page_alignment);
+ pages_per_node = max_t(uint32_t, pages_per_node,
+ tbo->page_alignment);
+ num_nodes = DIV_ROUND_UP_ULL(PFN_UP(mem_bytes), pages_per_node);
}
- vres = kzalloc(sizeof(*vres), GFP_KERNEL);
- if (!vres)
+ node = kvmalloc(struct_size(node, mm_nodes, num_nodes),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!node)
return -ENOMEM;
- ttm_resource_init(tbo, place, &vres->base);
+ ttm_resource_init(tbo, place, &node->base);
/* bail out quickly if there's likely not enough VRAM for this BO */
if (ttm_resource_manager_usage(man) > max_bytes) {
@@ -401,130 +415,66 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
goto error_fini;
}
- INIT_LIST_HEAD(&vres->blocks);
-
+ mode = DRM_MM_INSERT_BEST;
if (place->flags & TTM_PL_FLAG_TOPDOWN)
- vres->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION;
-
- if (fpfn || lpfn != man->size)
- /* Allocate blocks in desired range */
- vres->flags |= DRM_BUDDY_RANGE_ALLOCATION;
-
- remaining_size = vres->base.num_pages << PAGE_SHIFT;
-
- mutex_lock(&mgr->lock);
- while (remaining_size) {
- if (tbo->page_alignment)
- min_block_size = tbo->page_alignment << PAGE_SHIFT;
- else
- min_block_size = mgr->default_page_size;
-
- BUG_ON(min_block_size < mm->chunk_size);
-
- /* Limit maximum size to 2GiB due to SG table limitations */
- size = min(remaining_size, 2ULL << 30);
-
- if (size >= pages_per_block << PAGE_SHIFT)
- min_block_size = pages_per_block << PAGE_SHIFT;
-
- cur_size = size;
-
- if (fpfn + size != place->lpfn << PAGE_SHIFT) {
- /*
- * Except for actual range allocation, modify the size and
- * min_block_size conforming to continuous flag enablement
- */
- if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
- size = roundup_pow_of_two(size);
- min_block_size = size;
- /*
- * Modify the size value if size is not
- * aligned with min_block_size
- */
- } else if (!IS_ALIGNED(size, min_block_size)) {
- size = round_up(size, min_block_size);
+ mode = DRM_MM_INSERT_HIGH;
+
+ pages_left = node->base.num_pages;
+
+ /* Limit maximum size to 2GB due to SG table limitations */
+ pages = min(pages_left, 2UL << (30 - PAGE_SHIFT));
+
+ i = 0;
+ spin_lock(&mgr->lock);
+ while (pages_left) {
+ uint32_t alignment = tbo->page_alignment;
+
+ if (pages >= pages_per_node)
+ alignment = pages_per_node;
+
+ r = drm_mm_insert_node_in_range(mm, &node->mm_nodes[i], pages,
+ alignment, 0, place->fpfn,
+ lpfn, mode);
+ if (unlikely(r)) {
+ if (pages > pages_per_node) {
+ if (is_power_of_2(pages))
+ pages = pages / 2;
+ else
+ pages = rounddown_pow_of_two(pages);
+ continue;
}
+ goto error_free;
}
- r = drm_buddy_alloc_blocks(mm, fpfn,
- lpfn,
- size,
- min_block_size,
- &vres->blocks,
- vres->flags);
- if (unlikely(r))
- goto error_free_blocks;
-
- if (size > remaining_size)
- remaining_size = 0;
- else
- remaining_size -= size;
- }
- mutex_unlock(&mgr->lock);
-
- if (cur_size != size) {
- struct drm_buddy_block *block;
- struct list_head *trim_list;
- u64 original_size;
- LIST_HEAD(temp);
-
- trim_list = &vres->blocks;
- original_size = vres->base.num_pages << PAGE_SHIFT;
-
- /*
- * If size value is rounded up to min_block_size, trim the last
- * block to the required size
- */
- if (!list_is_singular(&vres->blocks)) {
- block = list_last_entry(&vres->blocks, typeof(*block), link);
- list_move_tail(&block->link, &temp);
- trim_list = &temp;
- /*
- * Compute the original_size value by subtracting the
- * last block size with (aligned size - original size)
- */
- original_size = amdgpu_vram_mgr_block_size(block) - (size - cur_size);
- }
+ vis_usage += amdgpu_vram_mgr_vis_size(adev, &node->mm_nodes[i]);
+ amdgpu_vram_mgr_virt_start(&node->base, &node->mm_nodes[i]);
+ pages_left -= pages;
+ ++i;
- mutex_lock(&mgr->lock);
- drm_buddy_block_trim(mm,
- original_size,
- trim_list);
- mutex_unlock(&mgr->lock);
-
- if (!list_empty(&temp))
- list_splice_tail(trim_list, &vres->blocks);
- }
-
- list_for_each_entry(block, &vres->blocks, link)
- vis_usage += amdgpu_vram_mgr_vis_size(adev, block);
-
- block = amdgpu_vram_mgr_first_block(&vres->blocks);
- if (!block) {
- r = -EINVAL;
- goto error_fini;
+ if (pages > pages_left)
+ pages = pages_left;
}
+ spin_unlock(&mgr->lock);
- vres->base.start = amdgpu_vram_mgr_block_start(block) >> PAGE_SHIFT;
-
- if (amdgpu_is_vram_mgr_blocks_contiguous(&vres->blocks))
- vres->base.placement |= TTM_PL_FLAG_CONTIGUOUS;
+ if (i == 1)
+ node->base.placement |= TTM_PL_FLAG_CONTIGUOUS;
if (adev->gmc.xgmi.connected_to_cpu)
- vres->base.bus.caching = ttm_cached;
+ node->base.bus.caching = ttm_cached;
else
- vres->base.bus.caching = ttm_write_combined;
+ node->base.bus.caching = ttm_write_combined;
atomic64_add(vis_usage, &mgr->vis_usage);
- *res = &vres->base;
+ *res = &node->base;
return 0;
-error_free_blocks:
- drm_buddy_free_list(mm, &vres->blocks);
- mutex_unlock(&mgr->lock);
+error_free:
+ while (i--)
+ drm_mm_remove_node(&node->mm_nodes[i]);
+ spin_unlock(&mgr->lock);
error_fini:
- ttm_resource_fini(man, &vres->base);
- kfree(vres);
+ ttm_resource_fini(man, &node->base);
+ kvfree(node);
return r;
}
@@ -540,26 +490,27 @@ error_fini:
static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
struct ttm_resource *res)
{
- struct amdgpu_vram_mgr_resource *vres = to_amdgpu_vram_mgr_resource(res);
+ struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
struct amdgpu_device *adev = to_amdgpu_device(mgr);
- struct drm_buddy *mm = &mgr->mm;
- struct drm_buddy_block *block;
uint64_t vis_usage = 0;
+ unsigned i, pages;
- mutex_lock(&mgr->lock);
- list_for_each_entry(block, &vres->blocks, link)
- vis_usage += amdgpu_vram_mgr_vis_size(adev, block);
+ spin_lock(&mgr->lock);
+ for (i = 0, pages = res->num_pages; pages;
+ pages -= node->mm_nodes[i].size, ++i) {
+ struct drm_mm_node *mm = &node->mm_nodes[i];
+ drm_mm_remove_node(mm);
+ vis_usage += amdgpu_vram_mgr_vis_size(adev, mm);
+ }
amdgpu_vram_mgr_do_reserve(man);
-
- drm_buddy_free_list(mm, &vres->blocks);
- mutex_unlock(&mgr->lock);
+ spin_unlock(&mgr->lock);
atomic64_sub(vis_usage, &mgr->vis_usage);
ttm_resource_fini(man, res);
- kfree(vres);
+ kvfree(node);
}
/**
@@ -591,7 +542,7 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
if (!*sgt)
return -ENOMEM;
- /* Determine the number of DRM_BUDDY blocks to export */
+ /* Determine the number of DRM_MM nodes to export */
amdgpu_res_first(res, offset, length, &cursor);
while (cursor.remaining) {
num_entries++;
@@ -607,10 +558,10 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
sg->length = 0;
/*
- * Walk down DRM_BUDDY blocks to populate scatterlist nodes
- * @note: Use iterator api to get first the DRM_BUDDY block
+ * Walk down DRM_MM nodes to populate scatterlist nodes
+ * @note: Use the iterator API to get the first DRM_MM node
* and the number of bytes from it. Access the following
- * DRM_BUDDY block(s) if more buffer needs to exported
+ * DRM_MM node(s) if more of the buffer needs to be exported
*/
amdgpu_res_first(res, offset, length, &cursor);
for_each_sgtable_sg((*sgt), sg, i) {
@@ -697,22 +648,13 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
struct drm_printer *printer)
{
struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
- struct drm_buddy *mm = &mgr->mm;
- struct drm_buddy_block *block;
drm_printf(printer, " vis usage:%llu\n",
amdgpu_vram_mgr_vis_usage(mgr));
- mutex_lock(&mgr->lock);
- drm_printf(printer, "default_page_size: %lluKiB\n",
- mgr->default_page_size >> 10);
-
- drm_buddy_print(mm, printer);
-
- drm_printf(printer, "reserved:\n");
- list_for_each_entry(block, &mgr->reserved_pages, link)
- drm_buddy_block_print(mm, block, printer);
- mutex_unlock(&mgr->lock);
+ spin_lock(&mgr->lock);
+ drm_mm_print(&mgr->mm, printer);
+ spin_unlock(&mgr->lock);
}
static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = {
@@ -732,21 +674,16 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
{
struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
struct ttm_resource_manager *man = &mgr->manager;
- int err;
ttm_resource_manager_init(man, &adev->mman.bdev,
adev->gmc.real_vram_size);
man->func = &amdgpu_vram_mgr_func;
- err = drm_buddy_init(&mgr->mm, man->size, PAGE_SIZE);
- if (err)
- return err;
-
- mutex_init(&mgr->lock);
+ drm_mm_init(&mgr->mm, 0, man->size >> PAGE_SHIFT);
+ spin_lock_init(&mgr->lock);
INIT_LIST_HEAD(&mgr->reservations_pending);
INIT_LIST_HEAD(&mgr->reserved_pages);
- mgr->default_page_size = PAGE_SIZE;
ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, &mgr->manager);
ttm_resource_manager_set_used(man, true);
@@ -774,16 +711,16 @@ void amdgpu_vram_mgr_fini(struct amdgpu_device *adev)
if (ret)
return;
- mutex_lock(&mgr->lock);
- list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, blocks)
+ spin_lock(&mgr->lock);
+ list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, node)
kfree(rsv);
- list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, blocks) {
- drm_buddy_free_list(&mgr->mm, &rsv->blocks);
+ list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, node) {
+ drm_mm_remove_node(&rsv->mm_node);
kfree(rsv);
}
- drm_buddy_fini(&mgr->mm);
- mutex_unlock(&mgr->lock);
+ drm_mm_takedown(&mgr->mm);
+ spin_unlock(&mgr->lock);
ttm_resource_manager_cleanup(man);
ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, NULL);
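
This file is the core of the series: the VRAM manager goes back from the drm_buddy allocator to drm_mm, including the first-fit loop that asks for large contiguous chunks and halves the request when the address space is too fragmented. A stand-alone sketch of that backoff strategy; try_alloc() is a hypothetical stand-in for drm_mm_insert_node_in_range():

#include <stdbool.h>
#include <stdio.h>

static unsigned long rounddown_pow_of_two(unsigned long v)
{
	unsigned long r = 1;

	while (r <= v / 2)
		r <<= 1;
	return r;
}

/* pretend the address space is fragmented: only <= 512-page holes */
static bool try_alloc(unsigned long pages)
{
	return pages <= 512;
}

int main(void)
{
	unsigned long pages_per_node = 512;	/* 2 MiB in 4 KiB pages */
	unsigned long pages_left = 3000;
	unsigned long pages = 2048;		/* first, largest attempt */

	while (pages_left) {
		if (!try_alloc(pages)) {
			if (pages > pages_per_node) {
				/* halve and retry, as in the hunk above */
				if (rounddown_pow_of_two(pages) == pages)
					pages /= 2;
				else
					pages = rounddown_pow_of_two(pages);
				continue;
			}
			printf("out of space\n");
			return 1;
		}
		printf("allocated %lu pages\n", pages);
		pages_left -= pages;
		if (pages > pages_left)
			pages = pages_left;
	}
	return 0;
}

With 3000 pages requested against 512-page holes this produces five 512-page nodes plus one 440-page tail, mirroring how pages is clamped to pages_left at the end of each pass.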
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
deleted file mode 100644
index 9a2db87186c7..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/* SPDX-License-Identifier: MIT
- * Copyright 2021 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef __AMDGPU_VRAM_MGR_H__
-#define __AMDGPU_VRAM_MGR_H__
-
-#include <drm/drm_buddy.h>
-
-struct amdgpu_vram_mgr {
- struct ttm_resource_manager manager;
- struct drm_buddy mm;
- /* protects access to buffer objects */
- struct mutex lock;
- struct list_head reservations_pending;
- struct list_head reserved_pages;
- atomic64_t vis_usage;
- u64 default_page_size;
-};
-
-struct amdgpu_vram_mgr_resource {
- struct ttm_resource base;
- struct list_head blocks;
- unsigned long flags;
-};
-
-static inline u64 amdgpu_vram_mgr_block_start(struct drm_buddy_block *block)
-{
- return drm_buddy_block_offset(block);
-}
-
-static inline u64 amdgpu_vram_mgr_block_size(struct drm_buddy_block *block)
-{
- return PAGE_SIZE << drm_buddy_block_order(block);
-}
-
-static inline struct drm_buddy_block *
-amdgpu_vram_mgr_first_block(struct list_head *list)
-{
- return list_first_entry_or_null(list, struct drm_buddy_block, link);
-}
-
-static inline bool amdgpu_is_vram_mgr_blocks_contiguous(struct list_head *head)
-{
- struct drm_buddy_block *block;
- u64 start, size;
-
- block = amdgpu_vram_mgr_first_block(head);
- if (!block)
- return false;
-
- while (head != block->link.next) {
- start = amdgpu_vram_mgr_block_start(block);
- size = amdgpu_vram_mgr_block_size(block);
-
- block = list_entry(block->link.next, struct drm_buddy_block, link);
- if (start + size != amdgpu_vram_mgr_block_start(block))
- return false;
- }
-
- return true;
-}
-
-static inline struct amdgpu_vram_mgr_resource *
-to_amdgpu_vram_mgr_resource(struct ttm_resource *res)
-{
- return container_of(res, struct amdgpu_vram_mgr_resource, base);
-}
-
-#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 288fce7dc0ed..9c964cd3b5d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2796,7 +2796,8 @@ static int dce_v10_0_sw_init(void *handle)
adev_to_drm(adev)->mode_config.max_height = 16384;
adev_to_drm(adev)->mode_config.preferred_depth = 24;
- adev_to_drm(adev)->mode_config.prefer_shadow = 1;
+ /* disable prefer shadow for now due to hibernation issues */
+ adev_to_drm(adev)->mode_config.prefer_shadow = 0;
adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index cbe5250b31cb..e0ad9f27dc3f 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -2914,7 +2914,8 @@ static int dce_v11_0_sw_init(void *handle)
adev_to_drm(adev)->mode_config.max_height = 16384;
adev_to_drm(adev)->mode_config.preferred_depth = 24;
- adev_to_drm(adev)->mode_config.prefer_shadow = 1;
+ /* disable prefer shadow for now due to hibernation issues */
+ adev_to_drm(adev)->mode_config.prefer_shadow = 0;
adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 982855e6cf52..3caf6f386042 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -2673,7 +2673,8 @@ static int dce_v6_0_sw_init(void *handle)
adev_to_drm(adev)->mode_config.max_width = 16384;
adev_to_drm(adev)->mode_config.max_height = 16384;
adev_to_drm(adev)->mode_config.preferred_depth = 24;
- adev_to_drm(adev)->mode_config.prefer_shadow = 1;
+ /* disable prefer shadow for now due to hibernation issues */
+ adev_to_drm(adev)->mode_config.prefer_shadow = 0;
adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 84440741c60b..7c75df5bffed 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2693,7 +2693,8 @@ static int dce_v8_0_sw_init(void *handle)
adev_to_drm(adev)->mode_config.max_height = 16384;
adev_to_drm(adev)->mode_config.preferred_depth = 24;
- adev_to_drm(adev)->mode_config.prefer_shadow = 1;
+ /* disable prefer shadow for now due to hibernation issues */
+ adev_to_drm(adev)->mode_config.prefer_shadow = 0;
adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index 8c0a3fc7aaa6..a4a6751b1e44 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -1096,6 +1096,7 @@ static void gfx_v11_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd,
dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS2);
dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_DBG1);
dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_M0);
+ dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_MODE);
}
static void gfx_v11_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
@@ -1316,7 +1317,7 @@ static void gfx_v11_0_rlc_backdoor_autoload_copy_ucode(struct amdgpu_device *ade
memset(ptr + toc_offset + fw_size, 0, toc_fw_size - fw_size);
if ((id != SOC21_FIRMWARE_ID_RS64_PFP) && (id != SOC21_FIRMWARE_ID_RS64_ME))
- *(uint64_t *)fw_autoload_mask |= 1 << id;
+ *(uint64_t *)fw_autoload_mask |= 1ULL << id;
}
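
fw_autoload_mask is a 64-bit bitmap, and the SOC21 firmware ID space presumably reaches past bit 31 (otherwise the fix would be moot), so the plain int constant in 1 << id is the bug: the shift is undefined once id reaches the width of int. A stand-alone demo with an arbitrary id of 33:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t mask64 = 0, mask32 = 0;
	unsigned int id = 33;

	mask64 |= 1ULL << id;		/* well-defined 64-bit shift */
	/* mask32 |= 1 << id; */	/* UB: shift count >= width of int */
	mask32 |= (uint64_t)(1u << (id % 32));	/* the wrap many CPUs produce */

	printf("1ULL << 33 = %#llx\n", (unsigned long long)mask64);
	printf("wrapped    = %#llx\n", (unsigned long long)mask32);
	return 0;
}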
static void gfx_v11_0_rlc_backdoor_autoload_copy_toc_ucode(struct amdgpu_device *adev,
@@ -1983,7 +1984,7 @@ static int gfx_v11_0_init_csb(struct amdgpu_device *adev)
return 0;
}
-void gfx_v11_0_rlc_stop(struct amdgpu_device *adev)
+static void gfx_v11_0_rlc_stop(struct amdgpu_device *adev)
{
u32 tmp = RREG32_SOC15(GC, 0, regRLC_CNTL);
@@ -6028,6 +6029,7 @@ static void gfx_v11_0_handle_priv_fault(struct amdgpu_device *adev,
break;
default:
BUG();
+ break;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index a0c0b7d9f444..7f4b480ae66e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -638,6 +638,12 @@ static int gmc_v11_0_mc_init(struct amdgpu_device *adev)
adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
+#ifdef CONFIG_X86_64
+ if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
+ adev->gmc.aper_base = adev->mmhub.funcs->get_mc_fb_offset(adev);
+ adev->gmc.aper_size = adev->gmc.real_vram_size;
+ }
+#endif
/* In case the PCI BAR is larger than the actual amount of vram */
adev->gmc.visible_vram_size = adev->gmc.aper_size;
if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
index 5d2dfeff8fe5..d63d3f2b8a16 100644
--- a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
@@ -299,7 +299,7 @@ static const struct imu_rlc_ram_golden imu_rlc_ram_golden_11_0_2[] =
IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCPG_PSP_DEBUG, CPG_PSP_DEBUG__GPA_OVERRIDE_MASK, 0)
};
-void program_imu_rlc_ram(struct amdgpu_device *adev,
+static void program_imu_rlc_ram(struct amdgpu_device *adev,
const struct imu_rlc_ram_golden *regs,
const u32 array_size)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
index d2722adabd1b..f3c1af5130ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
@@ -535,6 +535,10 @@ void jpeg_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
{
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_IH_CTRL_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, (vmid << JPEG_IH_CTRL__IH_VMID__SHIFT));
+
amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, (vmid | (vmid << 4)));
@@ -768,7 +772,7 @@ static const struct amdgpu_ring_funcs jpeg_v2_0_dec_ring_vm_funcs = {
8 + /* jpeg_v2_0_dec_ring_emit_vm_flush */
18 + 18 + /* jpeg_v2_0_dec_ring_emit_fence x2 vm fence */
8 + 16,
- .emit_ib_size = 22, /* jpeg_v2_0_dec_ring_emit_ib */
+ .emit_ib_size = 24, /* jpeg_v2_0_dec_ring_emit_ib */
.emit_ib = jpeg_v2_0_dec_ring_emit_ib,
.emit_fence = jpeg_v2_0_dec_ring_emit_fence,
.emit_vm_flush = jpeg_v2_0_dec_ring_emit_vm_flush,
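
The emit_ib_size bump is plain dword accounting for the hunk above: each amdgpu_ring_write() emits one dword, the new PACKETJ header plus the JPEG_IH_CTRL payload add exactly two, and 22 + 2 = 24 is the new worst-case IB emission.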
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h
index 1a03baa59755..654e43e83e2c 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h
@@ -41,6 +41,7 @@
#define mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET 0x4084
#define mmUVD_JRBC_STATUS_INTERNAL_OFFSET 0x4089
#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f
+#define mmUVD_JPEG_IH_CTRL_INTERNAL_OFFSET 0x4149
#define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR 0x18000
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index fcf51947bb18..7eee004cf3ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -541,7 +541,7 @@ static void mes_v11_0_enable(struct amdgpu_device *adev, bool enable)
/* This function is for backdoor MES firmware */
static int mes_v11_0_load_microcode(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe)
+ enum admgpu_mes_pipe pipe, bool prime_icache)
{
int r;
uint32_t data;
@@ -593,16 +593,18 @@ static int mes_v11_0_load_microcode(struct amdgpu_device *adev,
/* Set 0x3FFFF (256K-1) to CP_MES_MDBOUND_LO */
WREG32_SOC15(GC, 0, regCP_MES_MDBOUND_LO, 0x3FFFF);
- /* invalidate ICACHE */
- data = RREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL);
- data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 0);
- data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, INVALIDATE_CACHE, 1);
- WREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL, data);
-
- /* prime the ICACHE. */
- data = RREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL);
- data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 1);
- WREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL, data);
+ if (prime_icache) {
+ /* invalidate ICACHE */
+ data = RREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL);
+ data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 0);
+ data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, INVALIDATE_CACHE, 1);
+ WREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL, data);
+
+ /* prime the ICACHE. */
+ data = RREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL);
+ data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 1);
+ WREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL, data);
+ }
soc21_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
@@ -1044,17 +1046,19 @@ static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev)
int r = 0;
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
- r = mes_v11_0_load_microcode(adev, AMDGPU_MES_KIQ_PIPE);
+
+ r = mes_v11_0_load_microcode(adev, AMDGPU_MES_SCHED_PIPE, false);
if (r) {
- DRM_ERROR("failed to load MES kiq fw, r=%d\n", r);
+ DRM_ERROR("failed to load MES fw, r=%d\n", r);
return r;
}
- r = mes_v11_0_load_microcode(adev, AMDGPU_MES_SCHED_PIPE);
+ r = mes_v11_0_load_microcode(adev, AMDGPU_MES_KIQ_PIPE, true);
if (r) {
- DRM_ERROR("failed to load MES fw, r=%d\n", r);
+ DRM_ERROR("failed to load MES kiq fw, r=%d\n", r);
return r;
}
+
}
mes_v11_0_enable(adev, true);
@@ -1086,7 +1090,7 @@ static int mes_v11_0_hw_init(void *handle)
if (!adev->enable_mes_kiq) {
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
r = mes_v11_0_load_microcode(adev,
- AMDGPU_MES_SCHED_PIPE);
+ AMDGPU_MES_SCHED_PIPE, true);
if (r) {
DRM_ERROR("failed to MES fw, r=%d\n", r);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index d016e3c3e221..b3fba8dea63c 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -170,6 +170,7 @@ static const struct amdgpu_video_codec_info yc_video_codecs_decode_array[] = {
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};
static const struct amdgpu_video_codecs yc_video_codecs_decode = {
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 06b2635b142a..83c6ccaaa9e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -469,6 +469,7 @@ static void sdma_v5_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
}
}
+
/**
* sdma_v5_2_gfx_stop - stop the gfx async dma engines
*
@@ -514,21 +515,17 @@ static void sdma_v5_2_rlc_stop(struct amdgpu_device *adev)
}
/**
- * sdma_v5_2_ctx_switch_enable_for_instance - start the async dma engines
- * context switch for an instance
+ * sdma_v5_2_ctx_switch_enable - enable/disable the async dma engines context switch
*
* @adev: amdgpu_device pointer
- * @instance_idx: the index of the SDMA instance
+ * @enable: enable/disable the DMA MEs context switch.
*
- * Unhalt the async dma engines context switch.
+ * Halt or unhalt the async dma engines context switch.
*/
-static void sdma_v5_2_ctx_switch_enable_for_instance(struct amdgpu_device *adev, int instance_idx)
+static void sdma_v5_2_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
{
u32 f32_cntl, phase_quantum = 0;
-
- if (WARN_ON(instance_idx >= adev->sdma.num_instances)) {
- return;
- }
+ int i;
if (amdgpu_sdma_phase_quantum) {
unsigned value = amdgpu_sdma_phase_quantum;
@@ -552,68 +549,50 @@ static void sdma_v5_2_ctx_switch_enable_for_instance(struct amdgpu_device *adev,
phase_quantum =
value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT |
unit << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT;
-
- WREG32_SOC15_IP(GC,
- sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_PHASE0_QUANTUM),
- phase_quantum);
- WREG32_SOC15_IP(GC,
- sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_PHASE1_QUANTUM),
- phase_quantum);
- WREG32_SOC15_IP(GC,
- sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_PHASE2_QUANTUM),
- phase_quantum);
}
- if (!amdgpu_sriov_vf(adev)) {
- f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_CNTL));
- f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
- AUTO_CTXSW_ENABLE, 1);
- WREG32(sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_CNTL), f32_cntl);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (enable && amdgpu_sdma_phase_quantum) {
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM),
+ phase_quantum);
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM),
+ phase_quantum);
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
+ phase_quantum);
+ }
+
+ if (!amdgpu_sriov_vf(adev)) {
+ f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL));
+ f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
+ AUTO_CTXSW_ENABLE, enable ? 1 : 0);
+ WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
+ }
}
+
}
/**
- * sdma_v5_2_ctx_switch_disable_all - stop the async dma engines context switch
+ * sdma_v5_2_enable - halt or unhalt the async dma engines
*
* @adev: amdgpu_device pointer
+ * @enable: enable/disable the DMA MEs.
*
- * Halt the async dma engines context switch.
+ * Halt or unhalt the async dma engines.
*/
-static void sdma_v5_2_ctx_switch_disable_all(struct amdgpu_device *adev)
+static void sdma_v5_2_enable(struct amdgpu_device *adev, bool enable)
{
u32 f32_cntl;
int i;
- if (amdgpu_sriov_vf(adev))
- return;
-
- for (i = 0; i < adev->sdma.num_instances; i++) {
- f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL));
- f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
- AUTO_CTXSW_ENABLE, 0);
- WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
+ if (!enable) {
+ sdma_v5_2_gfx_stop(adev);
+ sdma_v5_2_rlc_stop(adev);
}
-}
-
-/**
- * sdma_v5_2_halt - stop the async dma engines
- *
- * @adev: amdgpu_device pointer
- *
- * Halt the async dma engines.
- */
-static void sdma_v5_2_halt(struct amdgpu_device *adev)
-{
- int i;
- u32 f32_cntl;
-
- sdma_v5_2_gfx_stop(adev);
- sdma_v5_2_rlc_stop(adev);
if (!amdgpu_sriov_vf(adev)) {
for (i = 0; i < adev->sdma.num_instances; i++) {
f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
- f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1);
+ f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
}
}
@@ -625,9 +604,6 @@ static void sdma_v5_2_halt(struct amdgpu_device *adev)
* @adev: amdgpu_device pointer
*
* Set up the gfx DMA ring buffers and enable them.
- * It assumes that the dma engine is stopped for each instance.
- * The function enables the engine and preemptions sequentially for each instance.
- *
* Returns 0 for success, error for failure.
*/
static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
@@ -769,7 +745,10 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
ring->sched.ready = true;
- sdma_v5_2_ctx_switch_enable_for_instance(adev, i);
+ if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need the two lines below */
+ sdma_v5_2_ctx_switch_enable(adev, true);
+ sdma_v5_2_enable(adev, true);
+ }
r = amdgpu_ring_test_ring(ring);
if (r) {
@@ -813,7 +792,7 @@ static int sdma_v5_2_load_microcode(struct amdgpu_device *adev)
int i, j;
/* halt the MEs */
- sdma_v5_2_halt(adev);
+ sdma_v5_2_enable(adev, false);
for (i = 0; i < adev->sdma.num_instances; i++) {
if (!adev->sdma.instance[i].fw)
@@ -885,8 +864,8 @@ static int sdma_v5_2_start(struct amdgpu_device *adev)
int r = 0;
if (amdgpu_sriov_vf(adev)) {
- sdma_v5_2_ctx_switch_disable_all(adev);
- sdma_v5_2_halt(adev);
+ sdma_v5_2_ctx_switch_enable(adev, false);
+ sdma_v5_2_enable(adev, false);
/* set RB registers */
r = sdma_v5_2_gfx_resume(adev);
@@ -910,10 +889,12 @@ static int sdma_v5_2_start(struct amdgpu_device *adev)
amdgpu_gfx_off_ctrl(adev, false);
sdma_v5_2_soft_reset(adev);
+ /* unhalt the MEs */
+ sdma_v5_2_enable(adev, true);
+ /* enable sdma ring preemption */
+ sdma_v5_2_ctx_switch_enable(adev, true);
- /* Soft reset supposes to disable the dma engine and preemption.
- * Now start the gfx rings and rlc compute queues.
- */
+ /* start the gfx rings and rlc compute queues */
r = sdma_v5_2_gfx_resume(adev);
if (adev->in_s0ix)
amdgpu_gfx_off_ctrl(adev, true);
@@ -1447,8 +1428,8 @@ static int sdma_v5_2_hw_fini(void *handle)
if (amdgpu_sriov_vf(adev))
return 0;
- sdma_v5_2_ctx_switch_disable_all(adev);
- sdma_v5_2_halt(adev);
+ sdma_v5_2_ctx_switch_enable(adev, false);
+ sdma_v5_2_enable(adev, false);
return 0;
}
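
The refactor collapses the per-instance halt/unhalt helpers back into two bool-taking functions, all built on the same read-modify-write idiom: read a control register, rewrite one named field, write it back. A stand-alone sketch of that idiom; the HALT mask and shift are invented here, the kernel derives them from generated register headers:

#include <stdint.h>
#include <stdio.h>

#define HALT_MASK  0x00000001u
#define HALT_SHIFT 0

static uint32_t reg_set_field(uint32_t reg, uint32_t mask,
			      uint32_t shift, uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t f32_cntl = 0xdeadbee1u;	/* pretend RREG32() result */
	int enable = 1;

	/* enable == unhalt, mirroring sdma_v5_2_enable() */
	f32_cntl = reg_set_field(f32_cntl, HALT_MASK, HALT_SHIFT,
				 enable ? 0 : 1);

	printf("F32_CNTL after update: %#x\n", f32_cntl);
	return 0;
}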
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 3cabceee5f57..39405f0db824 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -1761,23 +1761,21 @@ static const struct amdgpu_ring_funcs vcn_v3_0_dec_sw_ring_vm_funcs = {
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
-static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p,
- struct amdgpu_job *job)
+static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p)
{
struct drm_gpu_scheduler **scheds;
/* The create msg must be in the first IB submitted */
- if (atomic_read(&job->base.entity->fence_seq))
+ if (atomic_read(&p->entity->fence_seq))
return -EINVAL;
scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_DEC]
[AMDGPU_RING_PRIO_DEFAULT].sched;
- drm_sched_entity_modify_sched(job->base.entity, scheds, 1);
+ drm_sched_entity_modify_sched(p->entity, scheds, 1);
return 0;
}
-static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job,
- uint64_t addr)
+static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, uint64_t addr)
{
struct ttm_operation_ctx ctx = { false, false };
struct amdgpu_bo_va_mapping *map;
@@ -1848,7 +1846,7 @@ static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job,
if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11)
continue;
- r = vcn_v3_0_limit_sched(p, job);
+ r = vcn_v3_0_limit_sched(p);
if (r)
goto out;
}
@@ -1862,7 +1860,7 @@ static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
struct amdgpu_job *job,
struct amdgpu_ib *ib)
{
- struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
+ struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
uint32_t msg_lo = 0, msg_hi = 0;
unsigned i;
int r;
@@ -1881,8 +1879,7 @@ static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
msg_hi = val;
} else if (reg == PACKET0(p->adev->vcn.internal.cmd, 0) &&
val == 0) {
- r = vcn_v3_0_dec_msg(p, job,
- ((u64)msg_hi) << 32 | msg_lo);
+ r = vcn_v3_0_dec_msg(p, ((u64)msg_hi) << 32 | msg_lo);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 5e9adbc71bbd..cbfb32b3d235 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -1516,6 +1516,8 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
num_of_cache_types = ARRAY_SIZE(beige_goby_cache_info);
break;
case IP_VERSION(10, 3, 3):
+ case IP_VERSION(10, 3, 6): /* TODO: Double check these on production silicon */
+ case IP_VERSION(10, 3, 7): /* TODO: Double check these on production silicon */
pcache_info = yellow_carp_cache_info;
num_of_cache_types = ARRAY_SIZE(yellow_carp_cache_info);
break;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 8667e3df2d0b..a08769c5e94b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -73,6 +73,8 @@ static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
case IP_VERSION(4, 1, 2):/* RENOIR */
case IP_VERSION(5, 2, 1):/* VANGOGH */
case IP_VERSION(5, 2, 3):/* YELLOW_CARP */
+ case IP_VERSION(5, 2, 6):/* GC 10.3.6 */
+ case IP_VERSION(5, 2, 7):/* GC 10.3.7 */
case IP_VERSION(6, 0, 1):
kfd->device_info.num_sdma_queues_per_engine = 2;
break;
@@ -127,6 +129,8 @@ static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd)
case IP_VERSION(9, 4, 2): /* ALDEBARAN */
case IP_VERSION(10, 3, 1): /* VANGOGH */
case IP_VERSION(10, 3, 3): /* YELLOW_CARP */
+ case IP_VERSION(10, 3, 6): /* GC 10.3.6 */
+ case IP_VERSION(10, 3, 7): /* GC 10.3.7 */
case IP_VERSION(10, 1, 3): /* CYAN_SKILLFISH */
case IP_VERSION(10, 1, 4):
case IP_VERSION(10, 1, 10): /* NAVI10 */
@@ -178,7 +182,11 @@ static void kfd_device_info_init(struct kfd_dev *kfd,
if (gc_version < IP_VERSION(11, 0, 0)) {
/* Navi2x+, Navi1x+ */
- if (gc_version >= IP_VERSION(10, 3, 0))
+ if (gc_version == IP_VERSION(10, 3, 6))
+ kfd->device_info.no_atomic_fw_version = 14;
+ else if (gc_version == IP_VERSION(10, 3, 7))
+ kfd->device_info.no_atomic_fw_version = 3;
+ else if (gc_version >= IP_VERSION(10, 3, 0))
kfd->device_info.no_atomic_fw_version = 92;
else if (gc_version >= IP_VERSION(10, 1, 1))
kfd->device_info.no_atomic_fw_version = 145;
@@ -368,6 +376,16 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
if (!vf)
f2g = &gfx_v10_3_kfd2kgd;
break;
+ case IP_VERSION(10, 3, 6):
+ gfx_target_version = 100306;
+ if (!vf)
+ f2g = &gfx_v10_3_kfd2kgd;
+ break;
+ case IP_VERSION(10, 3, 7):
+ gfx_target_version = 100307;
+ if (!vf)
+ f2g = &gfx_v10_3_kfd2kgd;
+ break;
case IP_VERSION(11, 0, 0):
gfx_target_version = 110000;
f2g = &gfx_v11_kfd2kgd;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 997650d597ec..e44376c2ecdc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -296,7 +296,7 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
struct migrate_vma *migrate, struct dma_fence **mfence,
dma_addr_t *scratch)
{
- uint64_t npages = migrate->cpages;
+ uint64_t npages = migrate->npages;
struct device *dev = adev->dev;
struct amdgpu_res_cursor cursor;
dma_addr_t *src;
@@ -344,7 +344,7 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
mfence);
if (r)
goto out_free_vram_pages;
- amdgpu_res_next(&cursor, j << PAGE_SHIFT);
+ amdgpu_res_next(&cursor, (j + 1) << PAGE_SHIFT);
j = 0;
} else {
amdgpu_res_next(&cursor, PAGE_SIZE);
@@ -590,7 +590,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
continue;
}
src[i] = svm_migrate_addr(adev, spage);
- if (i > 0 && src[i] != src[i - 1] + PAGE_SIZE) {
+ if (j > 0 && src[i] != src[i - 1] + PAGE_SIZE) {
r = svm_migrate_copy_memory_gart(adev, dst + i - j,
src + i - j, j,
FROM_VRAM_TO_RAM,
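
Three indexing fixes land in this file, and they interact. Assuming the usual structure of the svm_migrate copy loops: the scratch src/dst arrays are indexed by a counter that walks all migrate->npages entries, so sizing them from cpages (only the pages actually collected for migration) under-allocates whenever pages are skipped. When j pages have been batched and the current page must be skipped, the VRAM cursor has to step over the j copied pages plus the skipped slot, hence (j + 1) << PAGE_SHIFT; with the old j << PAGE_SHIFT, a batch of three pages followed by a skip left the cursor at page 3 instead of page 4, shifting every later destination by one page. And the contiguity test against src[i - 1] only says something while a batch is open (j > 0), because right after a flush src[i - 1] belongs to the previous batch.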
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 2ebf0132c25b..7b332246eda3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1295,7 +1295,7 @@ svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, NULL,
last_start, prange->start + i,
pte_flags,
- last_start - prange->start,
+ (last_start - prange->start) << PAGE_SHIFT,
bo_adev ? bo_adev->vm_manager.vram_base_offset : 0,
NULL, dma_addr, &vm->last_update);
@@ -2307,6 +2307,8 @@ svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
if (range->event == MMU_NOTIFY_RELEASE)
return true;
+ if (!mmget_not_zero(mni->mm))
+ return true;
start = mni->interval_tree.start;
last = mni->interval_tree.last;
@@ -2333,6 +2335,7 @@ svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
}
svm_range_unlock(prange);
+ mmput(mni->mm);
return true;
}
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index b4029c0d5d8c..0ba0598eba20 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -6,7 +6,7 @@ config DRM_AMD_DC
bool "AMD DC - Enable new display engine"
default y
select SND_HDA_COMPONENT if SND_HDA_CORE
- select DRM_AMD_DC_DCN if (X86 || PPC64) && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS)
+ select DRM_AMD_DC_DCN if X86 && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS)
help
Choose this option if you want to use the new display engine
support for AMDGPU. This adds required support for Vega and
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 70be67a56673..3087dd1a1856 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -72,6 +72,7 @@
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>
+#include <linux/dmi.h>
#include <drm/display/drm_dp_mst_helper.h>
#include <drm/display/drm_hdmi_helper.h>
@@ -462,6 +463,26 @@ static void dm_pflip_high_irq(void *interrupt_params)
vrr_active, (int) !e);
}
+static void dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc)
+{
+ struct drm_crtc *crtc = &acrtc->base;
+ struct drm_device *dev = crtc->dev;
+ unsigned long flags;
+
+ drm_crtc_handle_vblank(crtc);
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+
+ /* Send completion event for cursor-only commits */
+ if (acrtc->event && acrtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
+ drm_crtc_send_vblank_event(crtc, acrtc->event);
+ drm_crtc_vblank_put(crtc);
+ acrtc->event = NULL;
+ }
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
static void dm_vupdate_high_irq(void *interrupt_params)
{
struct common_irq_params *irq_params = interrupt_params;
@@ -500,7 +521,7 @@ static void dm_vupdate_high_irq(void *interrupt_params)
* if a pageflip happened inside front-porch.
*/
if (vrr_active) {
- drm_crtc_handle_vblank(&acrtc->base);
+ dm_crtc_handle_vblank(acrtc);
/* BTR processing for pre-DCE12 ASICs */
if (acrtc->dm_irq_params.stream &&
@@ -552,7 +573,7 @@ static void dm_crtc_high_irq(void *interrupt_params)
* to dm_vupdate_high_irq after end of front-porch.
*/
if (!vrr_active)
- drm_crtc_handle_vblank(&acrtc->base);
+ dm_crtc_handle_vblank(acrtc);
/**
* Following stuff must happen at start of vblank, for crc
@@ -1382,6 +1403,41 @@ static bool dm_should_disable_stutter(struct pci_dev *pdev)
return false;
}
+static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
+ },
+ },
+ {}
+};
+
+static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
+{
+ const struct dmi_system_id *dmi_id;
+
+ dm->aux_hpd_discon_quirk = false;
+
+ dmi_id = dmi_first_match(hpd_disconnect_quirk_table);
+ if (dmi_id) {
+ dm->aux_hpd_discon_quirk = true;
+ DRM_INFO("aux_hpd_discon_quirk attached\n");
+ }
+}
+
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
struct dc_init_data init_data;
@@ -1508,6 +1564,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
}
INIT_LIST_HEAD(&adev->dm.da_list);
+
+ retrieve_dmi_info(&adev->dm);
+
/* Display Core create. */
adev->dm.dc = dc_create(&init_data);
@@ -1594,7 +1653,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
#endif
- if (dc_enable_dmub_notifications(adev->dm.dc)) {
+ if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
init_completion(&adev->dm.dmub_aux_transfer_done);
adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
if (!adev->dm.dmub_notify) {
@@ -1630,6 +1689,13 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
goto error;
}
+ /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
+ * It is expected that DMUB will resend any pending notifications at this point, for
+ * example HPD from DPIA.
+ */
+ if (dc_is_dmub_outbox_supported(adev->dm.dc))
+ dc_enable_dmub_outbox(adev->dm.dc);
+
/* create fake encoders for MST */
dm_dp_create_fake_mst_encoders(adev);
@@ -2619,9 +2685,6 @@ static int dm_resume(void *handle)
*/
link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
- if (dc_enable_dmub_notifications(adev->dm.dc))
- amdgpu_dm_outbox_init(adev);
-
r = dm_dmub_hw_init(adev);
if (r)
DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
@@ -2639,6 +2702,11 @@ static int dm_resume(void *handle)
}
}
+ if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
+ amdgpu_dm_outbox_init(adev);
+ dc_enable_dmub_outbox(adev->dm.dc);
+ }
+
WARN_ON(!dc_commit_state(dm->dc, dc_state));
dm_gpureset_commit_state(dm->cached_dc_state, dm);
@@ -2660,13 +2728,15 @@ static int dm_resume(void *handle)
/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
dc_resource_state_construct(dm->dc, dm_state->context);
- /* Re-enable outbox interrupts for DPIA. */
- if (dc_enable_dmub_notifications(adev->dm.dc))
- amdgpu_dm_outbox_init(adev);
-
/* Before powering on DC we need to re-initialize DMUB. */
dm_dmub_hw_resume(adev);
+ /* Re-enable outbox interrupts for DPIA. */
+ if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
+ amdgpu_dm_outbox_init(adev);
+ dc_enable_dmub_outbox(adev->dm.dc);
+ }
+
/* power on hardware */
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
@@ -2812,7 +2882,7 @@ static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
{
- u32 max_cll, min_cll, max, min, q, r;
+ u32 max_avg, min_cll, max, min, q, r;
struct amdgpu_dm_backlight_caps *caps;
struct amdgpu_display_manager *dm;
struct drm_connector *conn_base;
@@ -2842,7 +2912,7 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
caps = &dm->backlight_caps[i];
caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
caps->aux_support = false;
- max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
+ max_avg = conn_base->hdr_sink_metadata.hdmi_type1.max_fall;
min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
if (caps->ext_caps->bits.oled == 1 /*||
@@ -2870,8 +2940,8 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
* The results of the above expressions can be verified at
* pre_computed_values.
*/
- q = max_cll >> 5;
- r = max_cll % 32;
+ q = max_avg >> 5;
+ r = max_avg % 32;
max = (1 << q) * pre_computed_values[r];
// min luminance: maxLum * (CV/255)^2 / 100
@@ -3822,7 +3892,8 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
adev_to_drm(adev)->mode_config.max_height = 16384;
adev_to_drm(adev)->mode_config.preferred_depth = 24;
- adev_to_drm(adev)->mode_config.prefer_shadow = 1;
+ /* disable prefer shadow for now due to hibernation issues */
+ adev_to_drm(adev)->mode_config.prefer_shadow = 0;
/* indicates support for immediate flip */
adev_to_drm(adev)->mode_config.async_page_flip = true;
@@ -4259,9 +4330,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
}
}
- /* Disable vblank IRQs aggressively for power-saving. */
- adev_to_drm(adev)->vblank_disable_immediate = true;
-
/* loops over all connectors on the board */
for (i = 0; i < link_cnt; i++) {
struct dc_link *link = NULL;
@@ -5409,7 +5477,7 @@ fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
}
}
- if (per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE)
+ if (*per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE)
*pre_multiplied_alpha = false;
}
@@ -9137,6 +9205,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
struct amdgpu_bo *abo;
uint32_t target_vblank, last_flip_vblank;
bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
+ bool cursor_update = false;
bool pflip_present = false;
struct {
struct dc_surface_update surface_updates[MAX_SURFACES];
@@ -9172,8 +9241,13 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
/* Cursor plane is handled after stream updates */
- if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+ if ((fb && crtc == pcrtc) ||
+ (old_plane_state->fb && old_plane_state->crtc == pcrtc))
+ cursor_update = true;
+
continue;
+ }
if (!fb || !crtc || pcrtc != crtc)
continue;
@@ -9336,6 +9410,17 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
bundle->stream_update.vrr_infopacket =
&acrtc_state->stream->vrr_infopacket;
}
+ } else if (cursor_update && acrtc_state->active_planes > 0 &&
+ !acrtc_state->force_dpms_off &&
+ acrtc_attach->base.state->event) {
+ drm_crtc_vblank_get(pcrtc);
+
+ spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
+
+ acrtc_attach->event = acrtc_attach->base.state->event;
+ acrtc_attach->base.state->event = NULL;
+
+ spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
}
/* Update the planes if changed or disable if we don't have any. */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index aa34c0068f41..e80ef93f6550 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -540,6 +540,14 @@ struct amdgpu_display_manager {
* last successfully applied backlight values.
*/
u32 actual_brightness[AMDGPU_DM_MAX_NUM_EDP];
+
+ /**
+ * @aux_hpd_discon_quirk:
+ *
+ * Quirk for HPD disconnects while an AUX transaction is ongoing;
+ * observed on certain Intel platforms.
+ */
+ bool aux_hpd_discon_quirk;
};
enum dsc_clock_force_state {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 9221b6690a4a..2b9b095e5f03 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -56,6 +56,8 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
ssize_t result = 0;
struct aux_payload payload;
enum aux_return_code_type operation_result;
+ struct amdgpu_device *adev;
+ struct ddc_service *ddc;
if (WARN_ON(msg->size > 16))
return -E2BIG;
@@ -74,6 +76,21 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload,
&operation_result);
+ /*
+ * Workaround for certain Intel platforms where HPD unexpectedly pulls low
+ * during the first sideband message transaction and AUX_RET_ERROR_HPD_DISCON
+ * is returned. The AUX transaction actually succeeds in this case, so
+ * bypass the error.
+ */
+ ddc = TO_DM_AUX(aux)->ddc_service;
+ adev = ddc->ctx->driver_context;
+ if (adev->dm.aux_hpd_discon_quirk) {
+ if (msg->address == DP_SIDEBAND_MSG_DOWN_REQ_BASE &&
+ operation_result == AUX_RET_ERROR_HPD_DISCON) {
+ result = 0;
+ operation_result = AUX_RET_SUCCESS;
+ }
+ }
+
if (payload.write && result >= 0)
result = msg->size;
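
The new code in dm_dp_aux_transfer() rewrites exactly one failure: an HPD-disconnect error reported for a transaction to the sideband-message DOWN_REQ address is treated as success. A self-contained sketch of that filter, assuming the DPCD DOWN_REQ base of 0x1000 and with the return-code enum reduced to two local values:

#include <stdio.h>

#define DP_SIDEBAND_MSG_DOWN_REQ_BASE 0x1000 /* DPCD address, per DP spec */

enum aux_ret { AUX_RET_SUCCESS = 0, AUX_RET_ERROR_HPD_DISCON = -5 };

/* Returns the (possibly rewritten) result when the quirk is active. */
static enum aux_ret filter_aux_result(int quirk_active, unsigned int address,
                                      enum aux_ret result)
{
    if (quirk_active &&
        address == DP_SIDEBAND_MSG_DOWN_REQ_BASE &&
        result == AUX_RET_ERROR_HPD_DISCON)
        return AUX_RET_SUCCESS; /* the transfer actually succeeded */
    return result;
}

int main(void)
{
    printf("%d\n", filter_aux_result(1, DP_SIDEBAND_MSG_DOWN_REQ_BASE,
                                     AUX_RET_ERROR_HPD_DISCON));
    return 0;
}
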
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
index ceb34376decb..bca5f01da763 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
@@ -287,8 +287,11 @@ static void dcn31_enable_pme_wa(struct clk_mgr *clk_mgr_base)
void dcn31_init_clocks(struct clk_mgr *clk_mgr)
{
+ uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz;
+
memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
// Assumption is that boot state always supports pstate
+ clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk
clk_mgr->clks.p_state_change_support = true;
clk_mgr->clks.prev_p_state_change_support = true;
clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
@@ -638,8 +641,14 @@ static void dcn31_set_low_power_state(struct clk_mgr *clk_mgr_base)
}
}
+int dcn31_get_dtb_ref_freq_khz(struct clk_mgr *clk_mgr_base)
+{
+ return clk_mgr_base->clks.ref_dtbclk_khz;
+}
+
static struct clk_mgr_funcs dcn31_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+ .get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
.update_clocks = dcn31_update_clocks,
.init_clocks = dcn31_init_clocks,
.enable_pme_wa = dcn31_enable_pme_wa,
@@ -719,7 +728,7 @@ void dcn31_clk_mgr_construct(
}
clk_mgr->base.base.dprefclk_khz = 600000;
- clk_mgr->base.dccg->ref_dtbclk_khz = 600000;
+ clk_mgr->base.base.clks.ref_dtbclk_khz = 600000;
dce_clock_read_ss_info(&clk_mgr->base);
/*if bios enabled SS, driver needs to adjust dtb clock, only enable with correct bios*/
//clk_mgr->base.dccg->ref_dtbclk_khz = dce_adjust_dp_ref_freq_for_ss(clk_mgr_internal, clk_mgr->base.base.dprefclk_khz);
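
dcn31_init_clocks() wipes the whole clocks struct with memset(), so the fix snapshots ref_dtbclk_khz beforehand and writes it back afterwards. The pattern in isolation, with a pared-down stand-in for struct dc_clocks:

#include <stdio.h>
#include <string.h>

struct dc_clocks_sketch {
    int ref_dtbclk_khz;
    int p_state_change_support;
    /* ...other fields wiped by the reset... */
};

static void init_clocks(struct dc_clocks_sketch *clks)
{
    int ref_dtbclk = clks->ref_dtbclk_khz;  /* save before the reset */

    memset(clks, 0, sizeof(*clks));
    clks->ref_dtbclk_khz = ref_dtbclk;      /* restore */
    clks->p_state_change_support = 1;
}

int main(void)
{
    struct dc_clocks_sketch clks = { .ref_dtbclk_khz = 600000 };

    init_clocks(&clks);
    printf("ref_dtbclk_khz=%d\n", clks.ref_dtbclk_khz); /* still 600000 */
    return 0;
}
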
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h
index 961b10a49486..be06fdbd0c22 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h
@@ -51,6 +51,8 @@ void dcn31_clk_mgr_construct(struct dc_context *ctx,
struct pp_smu_funcs *pp_smu,
struct dccg *dccg);
+int dcn31_get_dtb_ref_freq_khz(struct clk_mgr *clk_mgr_base);
+
void dcn31_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int);
#endif //__DCN31_CLK_MGR_H__
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
index a2ade6e93f5e..f4381725b210 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
@@ -41,9 +41,7 @@
#include "dc_dmub_srv.h"
-#if defined (CONFIG_DRM_AMD_DC_DP2_0)
#include "dc_link_dp.h"
-#endif
#define TO_CLK_MGR_DCN315(clk_mgr)\
container_of(clk_mgr, struct clk_mgr_dcn315, base)
@@ -552,7 +550,7 @@ static void dcn315_clk_mgr_helper_populate_bw_params(
if (!bw_params->clk_table.entries[i].dtbclk_mhz)
bw_params->clk_table.entries[i].dtbclk_mhz = def_max.dtbclk_mhz;
}
- ASSERT(bw_params->clk_table.entries[i].dcfclk_mhz);
+ ASSERT(bw_params->clk_table.entries[i-1].dcfclk_mhz);
bw_params->vram_type = bios_info->memory_type;
bw_params->num_channels = bios_info->ma_channel_number;
if (!bw_params->num_channels)
@@ -580,6 +578,7 @@ static void dcn315_enable_pme_wa(struct clk_mgr *clk_mgr_base)
static struct clk_mgr_funcs dcn315_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+ .get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
.update_clocks = dcn315_update_clocks,
.init_clocks = dcn31_init_clocks,
.enable_pme_wa = dcn315_enable_pme_wa,
@@ -656,9 +655,9 @@ void dcn315_clk_mgr_construct(
clk_mgr->base.base.dprefclk_khz = 600000;
clk_mgr->base.base.dprefclk_khz = dcn315_smu_get_dpref_clk(&clk_mgr->base);
- clk_mgr->base.dccg->ref_dtbclk_khz = clk_mgr->base.base.dprefclk_khz;
+ clk_mgr->base.base.clks.ref_dtbclk_khz = clk_mgr->base.base.dprefclk_khz;
dce_clock_read_ss_info(&clk_mgr->base);
- clk_mgr->base.dccg->ref_dtbclk_khz = dce_adjust_dp_ref_freq_for_ss(&clk_mgr->base, clk_mgr->base.base.dprefclk_khz);
+ clk_mgr->base.base.clks.ref_dtbclk_khz = dce_adjust_dp_ref_freq_for_ss(&clk_mgr->base, clk_mgr->base.base.dprefclk_khz);
clk_mgr->base.base.bw_params = &dcn315_bw_params;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
index fc3af81ed6c6..e4bb9c6193b5 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
@@ -571,6 +571,7 @@ static void dcn316_clk_mgr_helper_populate_bw_params(
static struct clk_mgr_funcs dcn316_funcs = {
.enable_pme_wa = dcn316_enable_pme_wa,
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+ .get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
.update_clocks = dcn316_update_clocks,
.init_clocks = dcn31_init_clocks,
.are_clock_states_equal = dcn31_are_clock_states_equal,
@@ -685,7 +686,7 @@ void dcn316_clk_mgr_construct(
clk_mgr->base.base.dprefclk_khz = 600000;
clk_mgr->base.base.dprefclk_khz = dcn316_smu_get_dpref_clk(&clk_mgr->base);
- clk_mgr->base.dccg->ref_dtbclk_khz = clk_mgr->base.base.dprefclk_khz;
+ clk_mgr->base.base.clks.ref_dtbclk_khz = clk_mgr->base.base.dprefclk_khz;
dce_clock_read_ss_info(&clk_mgr->base);
/*clk_mgr->base.dccg->ref_dtbclk_khz =
dce_adjust_dp_ref_freq_for_ss(&clk_mgr->base, clk_mgr->base.base.dprefclk_khz);*/
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index dc30ac366a50..d8eee89e63ce 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -114,8 +114,8 @@ static const struct dc_link_settings fail_safe_link_settings = {
static bool decide_fallback_link_setting(
struct dc_link *link,
- struct dc_link_settings initial_link_settings,
- struct dc_link_settings *current_link_setting,
+ struct dc_link_settings *max,
+ struct dc_link_settings *cur,
enum link_training_result training_result);
static void maximize_lane_settings(const struct link_training_settings *lt_settings,
struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]);
@@ -944,7 +944,7 @@ static void override_lane_settings(const struct link_training_settings *lt_setti
return;
- for (lane = 1; lane < LANE_COUNT_DP_MAX; lane++) {
+ for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
if (lt_settings->voltage_swing)
lane_settings[lane].VOLTAGE_SWING = *lt_settings->voltage_swing;
if (lt_settings->pre_emphasis)
@@ -2784,6 +2784,7 @@ bool perform_link_training_with_retries(
enum dp_panel_mode panel_mode = dp_get_panel_mode(link);
enum link_training_result status = LINK_TRAINING_CR_FAIL_LANE0;
struct dc_link_settings cur_link_settings = *link_setting;
+ struct dc_link_settings max_link_settings = *link_setting;
const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
int fail_count = 0;
bool is_link_bw_low = false; /* link bandwidth < stream bandwidth */
@@ -2793,7 +2794,6 @@ bool perform_link_training_with_retries(
dp_trace_commit_lt_init(link);
-
if (dp_get_link_encoding_format(&cur_link_settings) == DP_8b_10b_ENCODING)
/* We need to do this before the link training to ensure the idle
* pattern in SST mode will be sent right after the link training
@@ -2909,19 +2909,15 @@ bool perform_link_training_with_retries(
uint32_t req_bw;
uint32_t link_bw;
- decide_fallback_link_setting(link, *link_setting, &cur_link_settings, status);
- /* Flag if reduced link bandwidth no longer meets stream requirements or fallen back to
- * minimum link bandwidth.
+ decide_fallback_link_setting(link, &max_link_settings,
+ &cur_link_settings, status);
+ /* Fail link training if reduced link bandwidth no longer meets
+ * stream requirements.
*/
req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing);
link_bw = dc_link_bandwidth_kbps(link, &cur_link_settings);
- is_link_bw_low = (req_bw > link_bw);
- is_link_bw_min = ((cur_link_settings.link_rate <= LINK_RATE_LOW) &&
- (cur_link_settings.lane_count <= LANE_COUNT_ONE));
-
- if (is_link_bw_low)
- DC_LOG_WARNING("%s: Link bandwidth too low after fallback req_bw(%d) > link_bw(%d)\n",
- __func__, req_bw, link_bw);
+ if (req_bw > link_bw)
+ break;
}
msleep(delay_between_attempts);
@@ -3309,7 +3305,7 @@ static bool dp_verify_link_cap(
int *fail_count)
{
struct dc_link_settings cur_link_settings = {0};
- struct dc_link_settings initial_link_settings = *known_limit_link_setting;
+ struct dc_link_settings max_link_settings = *known_limit_link_setting;
bool success = false;
bool skip_video_pattern;
enum clock_source_id dp_cs_id = get_clock_source_id(link);
@@ -3318,7 +3314,7 @@ static bool dp_verify_link_cap(
struct link_resource link_res;
memset(&irq_data, 0, sizeof(irq_data));
- cur_link_settings = initial_link_settings;
+ cur_link_settings = max_link_settings;
/* Grant extended timeout request */
if ((link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && (link->dpcd_caps.lttpr_caps.max_ext_timeout > 0)) {
@@ -3361,7 +3357,7 @@ static bool dp_verify_link_cap(
dp_trace_lt_result_update(link, status, true);
dp_disable_link_phy(link, &link_res, link->connector_signal);
} while (!success && decide_fallback_link_setting(link,
- initial_link_settings, &cur_link_settings, status));
+ &max_link_settings, &cur_link_settings, status));
link->verified_link_cap = success ?
cur_link_settings : fail_safe_link_settings;
@@ -3596,16 +3592,19 @@ static bool decide_fallback_link_setting_max_bw_policy(
*/
static bool decide_fallback_link_setting(
struct dc_link *link,
- struct dc_link_settings initial_link_settings,
- struct dc_link_settings *current_link_setting,
+ struct dc_link_settings *max,
+ struct dc_link_settings *cur,
enum link_training_result training_result)
{
- if (!current_link_setting)
+ if (!cur)
+ return false;
+ if (!max)
return false;
- if (dp_get_link_encoding_format(&initial_link_settings) == DP_128b_132b_ENCODING ||
+
+ if (dp_get_link_encoding_format(max) == DP_128b_132b_ENCODING ||
link->dc->debug.force_dp2_lt_fallback_method)
- return decide_fallback_link_setting_max_bw_policy(link, &initial_link_settings,
- current_link_setting, training_result);
+ return decide_fallback_link_setting_max_bw_policy(link, max, cur,
+ training_result);
switch (training_result) {
case LINK_TRAINING_CR_FAIL_LANE0:
@@ -3613,28 +3612,18 @@ static bool decide_fallback_link_setting(
case LINK_TRAINING_CR_FAIL_LANE23:
case LINK_TRAINING_LQA_FAIL:
{
- if (!reached_minimum_link_rate
- (current_link_setting->link_rate)) {
- current_link_setting->link_rate =
- reduce_link_rate(
- current_link_setting->link_rate);
- } else if (!reached_minimum_lane_count
- (current_link_setting->lane_count)) {
- current_link_setting->link_rate =
- initial_link_settings.link_rate;
+ if (!reached_minimum_link_rate(cur->link_rate)) {
+ cur->link_rate = reduce_link_rate(cur->link_rate);
+ } else if (!reached_minimum_lane_count(cur->lane_count)) {
+ cur->link_rate = max->link_rate;
if (training_result == LINK_TRAINING_CR_FAIL_LANE0)
return false;
else if (training_result == LINK_TRAINING_CR_FAIL_LANE1)
- current_link_setting->lane_count =
- LANE_COUNT_ONE;
- else if (training_result ==
- LINK_TRAINING_CR_FAIL_LANE23)
- current_link_setting->lane_count =
- LANE_COUNT_TWO;
+ cur->lane_count = LANE_COUNT_ONE;
+ else if (training_result == LINK_TRAINING_CR_FAIL_LANE23)
+ cur->lane_count = LANE_COUNT_TWO;
else
- current_link_setting->lane_count =
- reduce_lane_count(
- current_link_setting->lane_count);
+ cur->lane_count = reduce_lane_count(cur->lane_count);
} else {
return false;
}
@@ -3642,17 +3631,17 @@ static bool decide_fallback_link_setting(
}
case LINK_TRAINING_EQ_FAIL_EQ:
{
- if (!reached_minimum_lane_count
- (current_link_setting->lane_count)) {
- current_link_setting->lane_count =
- reduce_lane_count(
- current_link_setting->lane_count);
- } else if (!reached_minimum_link_rate
- (current_link_setting->link_rate)) {
- current_link_setting->link_rate =
- reduce_link_rate(
- current_link_setting->link_rate);
- current_link_setting->lane_count = initial_link_settings.lane_count;
+ if (!reached_minimum_lane_count(cur->lane_count)) {
+ cur->lane_count = reduce_lane_count(cur->lane_count);
+ } else if (!reached_minimum_link_rate(cur->link_rate)) {
+ cur->link_rate = reduce_link_rate(cur->link_rate);
+ /* Reduce max link rate to avoid potential infinite loop.
+ * Needed so that any subsequent CR_FAIL fallback can't
+ * re-set the link rate higher than the link rate from
+ * the latest EQ_FAIL fallback.
+ */
+ max->link_rate = cur->link_rate;
+ cur->lane_count = max->lane_count;
} else {
return false;
}
@@ -3660,12 +3649,15 @@ static bool decide_fallback_link_setting(
}
case LINK_TRAINING_EQ_FAIL_CR:
{
- if (!reached_minimum_link_rate
- (current_link_setting->link_rate)) {
- current_link_setting->link_rate =
- reduce_link_rate(
- current_link_setting->link_rate);
- current_link_setting->lane_count = initial_link_settings.lane_count;
+ if (!reached_minimum_link_rate(cur->link_rate)) {
+ cur->link_rate = reduce_link_rate(cur->link_rate);
+ /* Reduce max link rate to avoid potential infinite loop.
+ * Needed so that any subsequent CR_FAIL fallback can't
+ * re-set the link rate higher than the link rate from
+ * the latest EQ_FAIL fallback.
+ */
+ max->link_rate = cur->link_rate;
+ cur->lane_count = max->lane_count;
} else {
return false;
}
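
The key behavioural change is that fallback now mutates the caller's maximum too: when an EQ failure lowers the link rate, max->link_rate is clamped to the new value, so a later CR failure that restarts from max can no longer climb back above the latest EQ fallback and oscillate forever. A compact model of that interaction, using simplified integer rate/lane scales rather than the real DP link-rate enums:

#include <stdbool.h>
#include <stdio.h>

struct link_settings { int link_rate; int lane_count; };

static int reduce_link_rate(int rate)   { return rate > 1 ? rate - 1 : 1; }
static int reduce_lane_count(int lanes) { return lanes > 1 ? lanes / 2 : 1; }

/* EQ failure: drop lanes first, then the rate; clamp max when the rate drops. */
static bool eq_fail_fallback(struct link_settings *max, struct link_settings *cur)
{
    if (cur->lane_count > 1) {
        cur->lane_count = reduce_lane_count(cur->lane_count);
    } else if (cur->link_rate > 1) {
        cur->link_rate = reduce_link_rate(cur->link_rate);
        max->link_rate = cur->link_rate;  /* prevents re-climbing */
        cur->lane_count = max->lane_count;
    } else {
        return false; /* nothing left to try */
    }
    return true;
}

/* CR failure: drop the rate first; once minimal, retry at max rate with
 * fewer lanes. Because max->link_rate was clamped above, this can never
 * exceed the last EQ fallback. */
static bool cr_fail_fallback(struct link_settings *max, struct link_settings *cur)
{
    if (cur->link_rate > 1) {
        cur->link_rate = reduce_link_rate(cur->link_rate);
    } else if (cur->lane_count > 1) {
        cur->link_rate = max->link_rate;
        cur->lane_count = reduce_lane_count(cur->lane_count);
    } else {
        return false;
    }
    return true;
}

int main(void)
{
    struct link_settings max = { 4, 4 }, cur = max;

    cur.lane_count = 1;            /* pretend lanes are already minimal */
    eq_fail_fallback(&max, &cur);  /* rate 4 -> 3, max clamped to 3 */
    cr_fail_fallback(&max, &cur);  /* stays <= 3 from here on */
    printf("cur: rate=%d lanes=%d, max rate=%d\n",
           cur.link_rate, cur.lane_count, max.link_rate);
    return 0;
}
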
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 6774dd8bb53e..3fe3fbac1e63 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1117,12 +1117,13 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
* on certain displays, such as the Sharp 4k. 36bpp is needed
* to support SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 and
* SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616 with actual > 10 bpc
- * precision on at least DCN display engines. However, at least
- * Carrizo with DCE_VERSION_11_0 does not like 36 bpp lb depth,
- * so use only 30 bpp on DCE_VERSION_11_0. Testing with DCE 11.2 and 8.3
- * did not show such problems, so this seems to be the exception.
+ * precision on DCN display engines, but apparently not for DCE, as
+ * far as testing on DCE-11.2 and DCE-8 showed. Various DCE parts have
+ * problems: Carrizo with DCE_VERSION_11_0 does not like 36 bpp lb depth,
+ * nor does DCE-8 at 4k resolution, nor DCE-11.2 (broken identity pixel
+ * passthrough). Therefore only use 36 bpp on DCN where it is actually needed.
*/
- if (plane_state->ctx->dce_version > DCE_VERSION_11_0)
+ if (plane_state->ctx->dce_version > DCE_VERSION_MAX)
pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP;
else
pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 3960c74482be..817028d3c4a0 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -47,7 +47,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.186"
+#define DC_VER "3.2.187"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -416,6 +416,7 @@ struct dc_clocks {
bool p_state_change_support;
enum dcn_zstate_support_state zstate_support;
bool dtbclk_en;
+ int ref_dtbclk_khz;
enum dcn_pwr_state pwr_state;
/*
* Elements below are not compared for the purposes of
@@ -719,6 +720,8 @@ struct dc_debug_options {
bool apply_vendor_specific_lttpr_wa;
bool extended_blank_optimization;
union aux_wake_wa_options aux_wake_wa;
+ /* use the value read at boot and disable run-time DTB ref clock switching */
+ bool disable_dtb_ref_clk_switch;
uint8_t psr_power_use_phy_fsm;
enum dml_hostvm_override_opts dml_hostvm_override;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 7eff7811769d..5f2afa5b4814 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -1766,29 +1766,9 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
break;
}
}
-
- /*
- * TO-DO: So far the code logic below only addresses single eDP case.
- * For dual eDP case, there are a few things that need to be
- * implemented first:
- *
- * 1. Change the fastboot logic above, so eDP link[0 or 1]'s
- * stream[0 or 1] will all be checked.
- *
- * 2. Change keep_edp_vdd_on to an array, and maintain keep_edp_vdd_on
- * for each eDP.
- *
- * Once above 2 things are completed, we can then change the logic below
- * correspondingly, so dual eDP case will be fully covered.
- */
-
- // We are trying to enable eDP, don't power down VDD if eDP stream is existing
- if ((edp_stream_num == 1 && edp_streams[0] != NULL) || can_apply_edp_fast_boot) {
+ // We are trying to enable eDP; don't power down VDD
+ if (can_apply_edp_fast_boot)
keep_edp_vdd_on = true;
- DC_LOG_EVENT_LINK_TRAINING("Keep eDP Vdd on\n");
- } else {
- DC_LOG_EVENT_LINK_TRAINING("No eDP stream enabled, turn eDP Vdd off\n");
- }
}
// Check seamless boot support
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
index 970b65efeac1..eaa7032f0f1a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
@@ -212,6 +212,9 @@ static void dpp2_cnv_setup (
break;
}
+ /* Set default color space based on format if none is given. */
+ color_space = input_color_space ? input_color_space : color_space;
+
if (is_2bit == 1 && alpha_2bit_lut != NULL) {
REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT0, alpha_2bit_lut->lut0);
REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT1, alpha_2bit_lut->lut1);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c
index 8b6505b7dca8..f50ab961bc17 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c
@@ -153,6 +153,9 @@ static void dpp201_cnv_setup(
break;
}
+ /* Set default color space based on format if none is given. */
+ color_space = input_color_space ? input_color_space : color_space;
+
if (is_2bit == 1 && alpha_2bit_lut != NULL) {
REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT0, alpha_2bit_lut->lut0);
REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT1, alpha_2bit_lut->lut1);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
index ab3918c0a15b..0dcc07531643 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
@@ -294,6 +294,9 @@ static void dpp3_cnv_setup (
break;
}
+ /* Set default color space based on format if none is given. */
+ color_space = input_color_space ? input_color_space : color_space;
+
if (is_2bit == 1 && alpha_2bit_lut != NULL) {
REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT0, alpha_2bit_lut->lut0);
REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT1, alpha_2bit_lut->lut1);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
index 287a1066b547..bbc58d167c63 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
@@ -513,12 +513,10 @@ void dccg31_set_physymclk(
/* Controls the generation of pixel valid for OTG in (OTG -> HPO case) */
static void dccg31_set_dtbclk_dto(
struct dccg *dccg,
- int dtbclk_inst,
- int req_dtbclk_khz,
- int num_odm_segments,
- const struct dc_crtc_timing *timing)
+ struct dtbclk_dto_params *params)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+ int req_dtbclk_khz = params->pixclk_khz;
uint32_t dtbdto_div;
/* Mode DTBDTO Rate DTBCLK_DTO<x>_DIV Register
@@ -529,57 +527,53 @@ static void dccg31_set_dtbclk_dto(
* DSC native 4:2:2 pixel rate/2 4
* Other modes pixel rate 8
*/
- if (num_odm_segments == 4) {
+ if (params->num_odm_segments == 4) {
dtbdto_div = 2;
- req_dtbclk_khz = req_dtbclk_khz / 4;
- } else if ((num_odm_segments == 2) ||
- (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) ||
- (timing->flags.DSC && timing->pixel_encoding == PIXEL_ENCODING_YCBCR422
- && !timing->dsc_cfg.ycbcr422_simple)) {
+ req_dtbclk_khz = params->pixclk_khz / 4;
+ } else if ((params->num_odm_segments == 2) ||
+ (params->timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) ||
+ (params->timing->flags.DSC && params->timing->pixel_encoding == PIXEL_ENCODING_YCBCR422
+ && !params->timing->dsc_cfg.ycbcr422_simple)) {
dtbdto_div = 4;
- req_dtbclk_khz = req_dtbclk_khz / 2;
+ req_dtbclk_khz = params->pixclk_khz / 2;
} else
dtbdto_div = 8;
- if (dccg->ref_dtbclk_khz && req_dtbclk_khz) {
+ if (params->ref_dtbclk_khz && req_dtbclk_khz) {
uint32_t modulo, phase;
// phase / modulo = dtbclk / dtbclk ref
- modulo = dccg->ref_dtbclk_khz * 1000;
- phase = div_u64((((unsigned long long)modulo * req_dtbclk_khz) + dccg->ref_dtbclk_khz - 1),
- dccg->ref_dtbclk_khz);
+ modulo = params->ref_dtbclk_khz * 1000;
+ phase = div_u64((((unsigned long long)modulo * req_dtbclk_khz) + params->ref_dtbclk_khz - 1),
+ params->ref_dtbclk_khz);
- REG_UPDATE(OTG_PIXEL_RATE_CNTL[dtbclk_inst],
- DTBCLK_DTO_DIV[dtbclk_inst], dtbdto_div);
+ REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst],
+ DTBCLK_DTO_DIV[params->otg_inst], dtbdto_div);
- REG_WRITE(DTBCLK_DTO_MODULO[dtbclk_inst], modulo);
- REG_WRITE(DTBCLK_DTO_PHASE[dtbclk_inst], phase);
+ REG_WRITE(DTBCLK_DTO_MODULO[params->otg_inst], modulo);
+ REG_WRITE(DTBCLK_DTO_PHASE[params->otg_inst], phase);
- REG_UPDATE(OTG_PIXEL_RATE_CNTL[dtbclk_inst],
- DTBCLK_DTO_ENABLE[dtbclk_inst], 1);
+ REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst],
+ DTBCLK_DTO_ENABLE[params->otg_inst], 1);
- REG_WAIT(OTG_PIXEL_RATE_CNTL[dtbclk_inst],
- DTBCLKDTO_ENABLE_STATUS[dtbclk_inst], 1,
+ REG_WAIT(OTG_PIXEL_RATE_CNTL[params->otg_inst],
+ DTBCLKDTO_ENABLE_STATUS[params->otg_inst], 1,
1, 100);
/* The recommended programming sequence to enable the DTBCLK DTO to generate
* a valid pixel rate for the HPO DP stream encoder specifies that the DTO
* source select should be set only after the DTO is enabled
*/
- REG_UPDATE(OTG_PIXEL_RATE_CNTL[dtbclk_inst],
- PIPE_DTO_SRC_SEL[dtbclk_inst], 1);
-
- dccg->dtbclk_khz[dtbclk_inst] = req_dtbclk_khz;
+ REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst],
+ PIPE_DTO_SRC_SEL[params->otg_inst], 1);
} else {
- REG_UPDATE_3(OTG_PIXEL_RATE_CNTL[dtbclk_inst],
- DTBCLK_DTO_ENABLE[dtbclk_inst], 0,
- PIPE_DTO_SRC_SEL[dtbclk_inst], 0,
- DTBCLK_DTO_DIV[dtbclk_inst], dtbdto_div);
+ REG_UPDATE_3(OTG_PIXEL_RATE_CNTL[params->otg_inst],
+ DTBCLK_DTO_ENABLE[params->otg_inst], 0,
+ PIPE_DTO_SRC_SEL[params->otg_inst], 0,
+ DTBCLK_DTO_DIV[params->otg_inst], dtbdto_div);
- REG_WRITE(DTBCLK_DTO_MODULO[dtbclk_inst], 0);
- REG_WRITE(DTBCLK_DTO_PHASE[dtbclk_inst], 0);
-
- dccg->dtbclk_khz[dtbclk_inst] = 0;
+ REG_WRITE(DTBCLK_DTO_MODULO[params->otg_inst], 0);
+ REG_WRITE(DTBCLK_DTO_PHASE[params->otg_inst], 0);
}
}
@@ -606,16 +600,12 @@ void dccg31_set_audio_dtbclk_dto(
REG_UPDATE(DCCG_AUDIO_DTO_SOURCE,
DCCG_AUDIO_DTO_SEL, 4); // 04 - DCCG_AUDIO_DTO_SEL_AUDIO_DTO_DTBCLK
-
- dccg->audio_dtbclk_khz = req_audio_dtbclk_khz;
} else {
REG_WRITE(DCCG_AUDIO_DTBCLK_DTO_PHASE, 0);
REG_WRITE(DCCG_AUDIO_DTBCLK_DTO_MODULO, 0);
REG_UPDATE(DCCG_AUDIO_DTO_SOURCE,
DCCG_AUDIO_DTO_SEL, 3); // 03 - DCCG_AUDIO_DTO_SEL_NO_AUDIO_DTO
-
- dccg->audio_dtbclk_khz = 0;
}
}
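
The DTO programming boils down to a ratio, phase / modulo = dtbclk / ref_dtbclk, with modulo fixed at ref_dtbclk_khz * 1000 and phase rounded up, which is exactly the div_u64() expression above. The arithmetic on its own, as a small sketch:

#include <stdint.h>
#include <stdio.h>

/* phase / modulo = req_dtbclk / ref_dtbclk, with phase rounded up */
static void compute_dto(uint32_t ref_dtbclk_khz, uint32_t req_dtbclk_khz,
                        uint32_t *modulo, uint32_t *phase)
{
    uint64_t m = (uint64_t)ref_dtbclk_khz * 1000;

    *modulo = (uint32_t)m;
    *phase = (uint32_t)((m * req_dtbclk_khz + ref_dtbclk_khz - 1) /
                        ref_dtbclk_khz);
}

int main(void)
{
    uint32_t modulo, phase;

    compute_dto(600000, 148500, &modulo, &phase); /* e.g. a 148.5 MHz pixel clock */
    printf("modulo=%u phase=%u\n", modulo, phase);
    return 0;
}
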
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
index d94fd1010deb..8b12b4111c88 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
@@ -230,9 +230,7 @@ static void enc31_hw_init(struct link_encoder *enc)
AUX_RX_PHASE_DETECT_LEN, [21,20] = 0x3 default is 3
AUX_RX_DETECTION_THRESHOLD [30:28] = 1
*/
- AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0, 0x103d1110);
-
- AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c7a);
+ // DMUB reads AUX_DPHY_RX_CONTROL0/AUX_DPHY_TX_CONTROL from the VBIOS table in dp_aux_init
//AUX_DPHY_TX_REF_CONTROL'AUX_TX_REF_DIV HW default is 0x32;
// Set AUX_TX_REF_DIV Divider to generate 2 MHz reference from refclk
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c
index 789f7562cdc7..d2273674e872 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c
@@ -1284,10 +1284,8 @@ static bool is_dtbclk_required(struct dc *dc, struct dc_state *context)
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
-#if defined (CONFIG_DRM_AMD_DC_DP2_0)
if (is_dp_128b_132b_signal(&context->res_ctx.pipe_ctx[i]))
return true;
-#endif
}
return false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index 46ce5a0ee4ec..b5570aa8e39d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -237,6 +237,7 @@ struct clk_mgr_funcs {
bool safe_to_lower);
int (*get_dp_ref_clk_frequency)(struct clk_mgr *clk_mgr);
+ int (*get_dtb_ref_clk_frequency)(struct clk_mgr *clk_mgr);
void (*set_low_power_state)(struct clk_mgr *clk_mgr);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
index b2fa4de47734..c7021915bac8 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
@@ -60,8 +60,17 @@ struct dccg {
const struct dccg_funcs *funcs;
int pipe_dppclk_khz[MAX_PIPES];
int ref_dppclk;
- int dtbclk_khz[MAX_PIPES];
- int audio_dtbclk_khz;
+ //int dtbclk_khz[MAX_PIPES];/* TODO needs to be removed */
+ //int audio_dtbclk_khz;/* TODO needs to be removed */
+ int ref_dtbclk_khz;/* TODO needs to be removed */
+};
+
+struct dtbclk_dto_params {
+ const struct dc_crtc_timing *timing;
+ int otg_inst;
+ int pixclk_khz;
+ int req_audio_dtbclk_khz;
+ int num_odm_segments;
int ref_dtbclk_khz;
};
@@ -111,10 +120,7 @@ struct dccg_funcs {
void (*set_dtbclk_dto)(
struct dccg *dccg,
- int dtbclk_inst,
- int req_dtbclk_khz,
- int num_odm_segments,
- const struct dc_crtc_timing *timing);
+ struct dtbclk_dto_params *dto_params);
void (*set_audio_dtbclk_dto)(
struct dccg *dccg,
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c
index 87972dc8443d..ea6cf8bfce30 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c
@@ -27,6 +27,7 @@
#include "core_types.h"
#include "dccg.h"
#include "dc_link_dp.h"
+#include "clk_mgr.h"
static enum phyd32clk_clock_source get_phyd32clk_src(struct dc_link *link)
{
@@ -106,14 +107,18 @@ static void setup_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx)
struct hpo_dp_link_encoder *link_enc = pipe_ctx->link_res.hpo_dp_link_enc;
struct dccg *dccg = dc->res_pool->dccg;
struct timing_generator *tg = pipe_ctx->stream_res.tg;
- int odm_segment_count = get_odm_segment_count(pipe_ctx);
+ struct dtbclk_dto_params dto_params = {0};
enum phyd32clk_clock_source phyd32clk = get_phyd32clk_src(pipe_ctx->stream->link);
+ dto_params.otg_inst = tg->inst;
+ dto_params.pixclk_khz = pipe_ctx->stream->phy_pix_clk;
+ dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx);
+ dto_params.timing = &pipe_ctx->stream->timing;
+ dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr);
+
dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst);
dccg->funcs->enable_symclk32_se(dccg, stream_enc->inst, phyd32clk);
- dccg->funcs->set_dtbclk_dto(dccg, tg->inst, pipe_ctx->stream->phy_pix_clk,
- odm_segment_count,
- &pipe_ctx->stream->timing);
+ dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
stream_enc->funcs->enable_stream(stream_enc);
stream_enc->funcs->map_stream_to_link(stream_enc, stream_enc->inst, link_enc->inst);
}
@@ -124,9 +129,13 @@ static void reset_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx)
struct hpo_dp_stream_encoder *stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc;
struct dccg *dccg = dc->res_pool->dccg;
struct timing_generator *tg = pipe_ctx->stream_res.tg;
+ struct dtbclk_dto_params dto_params = {0};
+
+ dto_params.otg_inst = tg->inst;
+ dto_params.timing = &pipe_ctx->stream->timing;
stream_enc->funcs->disable(stream_enc);
- dccg->funcs->set_dtbclk_dto(dccg, tg->inst, 0, 0, &pipe_ctx->stream->timing);
+ dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
dccg->funcs->disable_symclk32_se(dccg, stream_enc->inst);
dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst);
}
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
index 7c9330a61ac1..c7bd7e216710 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
@@ -84,7 +84,7 @@ void dmub_dcn31_reset(struct dmub_srv *dmub)
{
union dmub_gpint_data_register cmd;
const uint32_t timeout = 100;
- uint32_t in_reset, scratch, i;
+ uint32_t in_reset, scratch, i, pwait_mode;
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset);
@@ -115,6 +115,13 @@ void dmub_dcn31_reset(struct dmub_srv *dmub)
udelay(1);
}
+ for (i = 0; i < timeout; ++i) {
+ REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &pwait_mode);
+ if (pwait_mode & (1 << 0))
+ break;
+
+ udelay(1);
+ }
/* Force reset in case we timed out, DMCUB is likely hung. */
}
@@ -125,6 +132,8 @@ void dmub_dcn31_reset(struct dmub_srv *dmub)
REG_WRITE(DMCUB_INBOX1_WPTR, 0);
REG_WRITE(DMCUB_OUTBOX1_RPTR, 0);
REG_WRITE(DMCUB_OUTBOX1_WPTR, 0);
+ REG_WRITE(DMCUB_OUTBOX0_RPTR, 0);
+ REG_WRITE(DMCUB_OUTBOX0_WPTR, 0);
REG_WRITE(DMCUB_SCRATCH0, 0);
/* Clear the GPINT command manually so we don't send anything during boot. */
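
The added wait is a bounded busy-poll: read the status bit, exit as soon as it sets, and fall through after the timeout so a hung DMCUB cannot stall the reset path. A generic sketch of the loop shape, with a stubbed register read in place of REG_GET():

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Stub for a hardware status read; bit 0 = processor in wait mode. */
static unsigned int read_pwait_status(void)
{
    static int calls;
    return ++calls >= 3; /* pretend the bit sets on the third poll */
}

static bool wait_for_pwait_mode(unsigned int timeout_us)
{
    for (unsigned int i = 0; i < timeout_us; ++i) {
        if (read_pwait_status() & 1u)
            return true;
        usleep(1); /* the kernel code uses udelay(1) */
    }
    return false; /* timed out; the caller forces reset anyway */
}

int main(void)
{
    printf("pwait=%d\n", wait_for_pwait_mode(100));
    return 0;
}
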
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
index 59ddc81b5a0e..f6db6f89d45d 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
@@ -151,7 +151,8 @@ struct dmub_srv;
DMUB_SF(DCN_VM_FB_OFFSET, FB_OFFSET) \
DMUB_SF(DMCUB_INBOX0_WPTR, DMCUB_INBOX0_WPTR) \
DMUB_SF(DMCUB_INTERRUPT_ENABLE, DMCUB_GPINT_IH_INT_EN) \
- DMUB_SF(DMCUB_INTERRUPT_ACK, DMCUB_GPINT_IH_INT_ACK)
+ DMUB_SF(DMCUB_INTERRUPT_ACK, DMCUB_GPINT_IH_INT_ACK) \
+ DMUB_SF(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS)
struct dmub_srv_dcn31_reg_offset {
#define DMUB_SR(reg) uint32_t reg;
diff --git a/drivers/gpu/drm/amd/display/include/ddc_service_types.h b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
index 73b9e0a87e54..20a3d4e23f66 100644
--- a/drivers/gpu/drm/amd/display/include/ddc_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
@@ -127,6 +127,8 @@ struct av_sync_data {
static const uint8_t DP_SINK_DEVICE_STR_ID_1[] = {7, 1, 8, 7, 3, 0};
static const uint8_t DP_SINK_DEVICE_STR_ID_2[] = {7, 1, 8, 7, 5, 0};
+static const u8 DP_SINK_BRANCH_DEV_NAME_7580[] = "7580\x80u";
+
/*MST Dock*/
static const uint8_t SYNAPTICS_DEVICE_ID[] = "SYNA";
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_7_pptable.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_7_pptable.h
index 247c6e9632ba..1cb399dbc7cc 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_7_pptable.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_7_pptable.h
@@ -22,6 +22,7 @@
#ifndef SMU_11_0_7_PPTABLE_H
#define SMU_11_0_7_PPTABLE_H
+#pragma pack(push, 1)
#define SMU_11_0_7_TABLE_FORMAT_REVISION 15
@@ -139,7 +140,7 @@ struct smu_11_0_7_overdrive_table
uint32_t max[SMU_11_0_7_MAX_ODSETTING]; //default maximum settings
uint32_t min[SMU_11_0_7_MAX_ODSETTING]; //default minimum settings
int16_t pm_setting[SMU_11_0_7_MAX_PMSETTING]; //Optimized power mode feature settings
-} __attribute__((packed));
+};
enum SMU_11_0_7_PPCLOCK_ID {
SMU_11_0_7_PPCLOCK_GFXCLK = 0,
@@ -166,7 +167,7 @@ struct smu_11_0_7_power_saving_clock_table
uint32_t count; //power_saving_clock_count = SMU_11_0_7_PPCLOCK_COUNT
uint32_t max[SMU_11_0_7_MAX_PPCLOCK]; //PowerSavingClock Mode Clock Maximum array In MHz
uint32_t min[SMU_11_0_7_MAX_PPCLOCK]; //PowerSavingClock Mode Clock Minimum array In MHz
-} __attribute__((packed));
+};
struct smu_11_0_7_powerplay_table
{
@@ -191,6 +192,8 @@ struct smu_11_0_7_powerplay_table
struct smu_11_0_7_overdrive_table overdrive_table;
PPTable_t smc_pptable; //PPTable_t in smu11_driver_if.h
-} __attribute__((packed));
+};
+
+#pragma pack(pop)
#endif
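
Swapping per-struct __attribute__((packed)) for a header-wide #pragma pack(push, 1) / pack(pop) pair keeps the byte-exact table layout while guaranteeing every struct between the pragmas is covered. A sketch showing the two spellings produce identical layouts (the field names here are illustrative only):

#include <stdint.h>
#include <stdio.h>

struct attr_packed {
    uint8_t  revision;
    uint32_t count;
} __attribute__((packed));

#pragma pack(push, 1)
struct pragma_packed {
    uint8_t  revision;
    uint32_t count;
};
#pragma pack(pop)

_Static_assert(sizeof(struct attr_packed) == 5, "attribute packs to 5 bytes");
_Static_assert(sizeof(struct attr_packed) == sizeof(struct pragma_packed),
               "both forms produce the same layout");

int main(void)
{
    printf("%zu %zu\n", sizeof(struct attr_packed), sizeof(struct pragma_packed));
    return 0;
}
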
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_pptable.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_pptable.h
index 7a63cf8e85ed..0116e3d04fad 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_pptable.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_pptable.h
@@ -22,6 +22,7 @@
#ifndef SMU_11_0_PPTABLE_H
#define SMU_11_0_PPTABLE_H
+#pragma pack(push, 1)
#define SMU_11_0_TABLE_FORMAT_REVISION 12
@@ -109,7 +110,7 @@ struct smu_11_0_overdrive_table
uint8_t cap[SMU_11_0_MAX_ODFEATURE]; //OD feature support flags
uint32_t max[SMU_11_0_MAX_ODSETTING]; //default maximum settings
uint32_t min[SMU_11_0_MAX_ODSETTING]; //default minimum settings
-} __attribute__((packed));
+};
enum SMU_11_0_PPCLOCK_ID {
SMU_11_0_PPCLOCK_GFXCLK = 0,
@@ -133,7 +134,7 @@ struct smu_11_0_power_saving_clock_table
uint32_t count; //power_saving_clock_count = SMU_11_0_PPCLOCK_COUNT
uint32_t max[SMU_11_0_MAX_PPCLOCK]; //PowerSavingClock Mode Clock Maximum array In MHz
uint32_t min[SMU_11_0_MAX_PPCLOCK]; //PowerSavingClock Mode Clock Minimum array In MHz
-} __attribute__((packed));
+};
struct smu_11_0_powerplay_table
{
@@ -162,6 +163,8 @@ struct smu_11_0_powerplay_table
#ifndef SMU_11_0_PARTIAL_PPTABLE
PPTable_t smc_pptable; //PPTable_t in smu11_driver_if.h
#endif
-} __attribute__((packed));
+};
+
+#pragma pack(pop)
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_7_pptable.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_7_pptable.h
index 3f29f4327378..478862ded0bd 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_7_pptable.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_7_pptable.h
@@ -22,6 +22,8 @@
#ifndef SMU_13_0_7_PPTABLE_H
#define SMU_13_0_7_PPTABLE_H
+#pragma pack(push, 1)
+
#define SMU_13_0_7_TABLE_FORMAT_REVISION 15
//// POWERPLAYTABLE::ulPlatformCaps
@@ -194,7 +196,8 @@ struct smu_13_0_7_powerplay_table
struct smu_13_0_7_overdrive_table overdrive_table;
uint8_t padding1;
PPTable_t smc_pptable; //PPTable_t in driver_if.h
-} __attribute__((packed));
+};
+#pragma pack(pop)
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_pptable.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_pptable.h
index 1f311396b706..043307485528 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_pptable.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_pptable.h
@@ -22,6 +22,8 @@
#ifndef SMU_13_0_PPTABLE_H
#define SMU_13_0_PPTABLE_H
+#pragma pack(push, 1)
+
#define SMU_13_0_TABLE_FORMAT_REVISION 1
//// POWERPLAYTABLE::ulPlatformCaps
@@ -109,7 +111,7 @@ struct smu_13_0_overdrive_table {
uint8_t cap[SMU_13_0_MAX_ODFEATURE]; //OD feature support flags
uint32_t max[SMU_13_0_MAX_ODSETTING]; //default maximum settings
uint32_t min[SMU_13_0_MAX_ODSETTING]; //default minimum settings
-} __attribute__((packed));
+};
enum SMU_13_0_PPCLOCK_ID {
SMU_13_0_PPCLOCK_GFXCLK = 0,
@@ -132,7 +134,7 @@ struct smu_13_0_power_saving_clock_table {
uint32_t count; //power_saving_clock_count = SMU_11_0_PPCLOCK_COUNT
uint32_t max[SMU_13_0_MAX_PPCLOCK]; //PowerSavingClock Mode Clock Maximum array In MHz
uint32_t min[SMU_13_0_MAX_PPCLOCK]; //PowerSavingClock Mode Clock Minimum array In MHz
-} __attribute__((packed));
+};
struct smu_13_0_powerplay_table {
struct atom_common_table_header header;
@@ -160,6 +162,8 @@ struct smu_13_0_powerplay_table {
#ifndef SMU_13_0_PARTIAL_PPTABLE
PPTable_t smc_pptable; //PPTable_t in driver_if.h
#endif
-} __attribute__((packed));
+};
+
+#pragma pack(pop)
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index 5f8809f6990d..2fbd2926a531 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -1228,6 +1228,8 @@ int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
uint32_t crystal_clock_freq = 2500;
uint32_t tach_period;
+ if (speed == 0)
+ return -EINVAL;
/*
* To prevent from possible overheat, some ASICs may have requirement
* for minimum fan speed:
diff --git a/drivers/gpu/drm/ast/ast_dp.c b/drivers/gpu/drm/ast/ast_dp.c
index 4551bc8a3ecf..f573d582407e 100644
--- a/drivers/gpu/drm/ast/ast_dp.c
+++ b/drivers/gpu/drm/ast/ast_dp.c
@@ -160,13 +160,12 @@ void ast_dp_launch(struct drm_device *dev, u8 bPower)
}
if (bDPExecute)
- ast->tx_chip_type = AST_TX_ASTDP;
+ ast->tx_chip_types |= BIT(AST_TX_ASTDP);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE5,
(u8) ~ASTDP_HOST_EDID_READ_DONE_MASK,
ASTDP_HOST_EDID_READ_DONE);
- } else
- ast->tx_chip_type = AST_TX_NONE;
+ }
}
diff --git a/drivers/gpu/drm/ast/ast_dp501.c b/drivers/gpu/drm/ast/ast_dp501.c
index 204c926a18ea..4f75a9efb610 100644
--- a/drivers/gpu/drm/ast/ast_dp501.c
+++ b/drivers/gpu/drm/ast/ast_dp501.c
@@ -450,7 +450,7 @@ void ast_init_3rdtx(struct drm_device *dev)
ast_init_dvo(dev);
break;
default:
- if (ast->tx_chip_type == AST_TX_SIL164)
+ if (ast->tx_chip_types & BIT(AST_TX_SIL164))
ast_init_dvo(dev);
else
ast_init_analog(dev);
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index afebe35f205e..a34db4380f68 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -73,6 +73,11 @@ enum ast_tx_chip {
AST_TX_ASTDP,
};
+#define AST_TX_NONE_BIT BIT(AST_TX_NONE)
+#define AST_TX_SIL164_BIT BIT(AST_TX_SIL164)
+#define AST_TX_DP501_BIT BIT(AST_TX_DP501)
+#define AST_TX_ASTDP_BIT BIT(AST_TX_ASTDP)
+
#define AST_DRAM_512Mx16 0
#define AST_DRAM_1Gx16 1
#define AST_DRAM_512Mx32 2
@@ -173,7 +178,7 @@ struct ast_private {
struct drm_plane primary_plane;
struct ast_cursor_plane cursor_plane;
struct drm_crtc crtc;
- union {
+ struct {
struct {
struct drm_encoder encoder;
struct ast_vga_connector vga_connector;
@@ -199,7 +204,7 @@ struct ast_private {
ast_use_defaults
} config_mode;
- enum ast_tx_chip tx_chip_type;
+ unsigned long tx_chip_types; /* bitmask of enum ast_tx_chip */
u8 *dp501_fw_addr;
const struct firmware *dp501_fw; /* dp501 fw */
};
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index d770d5a23c1a..067453266897 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -216,7 +216,7 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
}
/* Check 3rd Tx option (digital output afaik) */
- ast->tx_chip_type = AST_TX_NONE;
+ ast->tx_chip_types |= AST_TX_NONE_BIT;
/*
* VGACRA3 Enhanced Color Mode Register, check if DVO is already
@@ -229,7 +229,7 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
if (!*need_post) {
jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xff);
if (jreg & 0x80)
- ast->tx_chip_type = AST_TX_SIL164;
+ ast->tx_chip_types = AST_TX_SIL164_BIT;
}
if ((ast->chip == AST2300) || (ast->chip == AST2400) || (ast->chip == AST2500)) {
@@ -241,7 +241,7 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff);
switch (jreg) {
case 0x04:
- ast->tx_chip_type = AST_TX_SIL164;
+ ast->tx_chip_types = AST_TX_SIL164_BIT;
break;
case 0x08:
ast->dp501_fw_addr = drmm_kzalloc(dev, 32*1024, GFP_KERNEL);
@@ -254,22 +254,19 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
}
fallthrough;
case 0x0c:
- ast->tx_chip_type = AST_TX_DP501;
+ ast->tx_chip_types = AST_TX_DP501_BIT;
}
} else if (ast->chip == AST2600)
ast_dp_launch(&ast->base, 0);
/* Print stuff for diagnostic purposes */
- switch(ast->tx_chip_type) {
- case AST_TX_SIL164:
+ if (ast->tx_chip_types & AST_TX_NONE_BIT)
+ drm_info(dev, "Using analog VGA\n");
+ if (ast->tx_chip_types & AST_TX_SIL164_BIT)
drm_info(dev, "Using Sil164 TMDS transmitter\n");
- break;
- case AST_TX_DP501:
+ if (ast->tx_chip_types & AST_TX_DP501_BIT)
drm_info(dev, "Using DP501 DisplayPort transmitter\n");
- break;
- default:
- drm_info(dev, "Analog VGA only\n");
- }
+
return 0;
}
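
With tx_chip_type widened into a tx_chip_types bitmask, a board can report several transmitters at once (analog VGA plus ASTDP, say), and each output-init path is gated by its own bit instead of a single switch case. The enum-to-bitmask idiom in miniature:

#include <stdio.h>

#define BIT(n) (1UL << (n))

enum tx_chip { TX_NONE, TX_SIL164, TX_DP501, TX_ASTDP };

int main(void)
{
    unsigned long tx_chip_types = 0;

    tx_chip_types |= BIT(TX_NONE);  /* analog VGA always present */
    tx_chip_types |= BIT(TX_ASTDP); /* plus a DisplayPort transmitter */

    if (tx_chip_types & BIT(TX_NONE))
        printf("init VGA output\n");
    if (tx_chip_types & BIT(TX_SIL164))
        printf("init SIL164 output\n");
    if (tx_chip_types & BIT(TX_ASTDP))
        printf("init ASTDP output\n");
    return 0;
}
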
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 323af2746aa9..db2010a55674 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -997,10 +997,10 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_ON:
ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x01, 0xdf, 0);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xfc, 0);
- if (ast->tx_chip_type == AST_TX_DP501)
+ if (ast->tx_chip_types & AST_TX_DP501_BIT)
ast_set_dp501_video_output(crtc->dev, 1);
- if (ast->tx_chip_type == AST_TX_ASTDP) {
+ if (ast->tx_chip_types & AST_TX_ASTDP_BIT) {
ast_dp_power_on_off(crtc->dev, AST_DP_POWER_ON);
ast_wait_for_vretrace(ast);
ast_dp_set_on_off(crtc->dev, 1);
@@ -1012,17 +1012,17 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
ch = mode;
- if (ast->tx_chip_type == AST_TX_DP501)
+ if (ast->tx_chip_types & AST_TX_DP501_BIT)
ast_set_dp501_video_output(crtc->dev, 0);
- break;
- if (ast->tx_chip_type == AST_TX_ASTDP) {
+ if (ast->tx_chip_types & AST_TX_ASTDP_BIT) {
ast_dp_set_on_off(crtc->dev, 0);
ast_dp_power_on_off(crtc->dev, AST_DP_POWER_OFF);
}
ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x01, 0xdf, 0x20);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xfc, ch);
+ break;
}
}
@@ -1155,7 +1155,7 @@ ast_crtc_helper_atomic_flush(struct drm_crtc *crtc,
ast_crtc_load_lut(ast, crtc);
//Set Aspeed Display-Port
- if (ast->tx_chip_type == AST_TX_ASTDP)
+ if (ast->tx_chip_types & AST_TX_ASTDP_BIT)
ast_dp_set_mode(crtc, vbios_mode_info);
mutex_unlock(&ast->ioregs_lock);
@@ -1739,22 +1739,26 @@ int ast_mode_config_init(struct ast_private *ast)
ast_crtc_init(dev);
- switch (ast->tx_chip_type) {
- case AST_TX_NONE:
+ if (ast->tx_chip_types & AST_TX_NONE_BIT) {
ret = ast_vga_output_init(ast);
- break;
- case AST_TX_SIL164:
+ if (ret)
+ return ret;
+ }
+ if (ast->tx_chip_types & AST_TX_SIL164_BIT) {
ret = ast_sil164_output_init(ast);
- break;
- case AST_TX_DP501:
+ if (ret)
+ return ret;
+ }
+ if (ast->tx_chip_types & AST_TX_DP501_BIT) {
ret = ast_dp501_output_init(ast);
- break;
- case AST_TX_ASTDP:
+ if (ret)
+ return ret;
+ }
+ if (ast->tx_chip_types & AST_TX_ASTDP_BIT) {
ret = ast_astdp_output_init(ast);
- break;
+ if (ret)
+ return ret;
}
- if (ret)
- return ret;
drm_mode_config_reset(dev);
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index 0aa9cf0fb5c3..82fd3c8adee1 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -391,7 +391,7 @@ void ast_post_gpu(struct drm_device *dev)
ast_init_3rdtx(dev);
} else {
- if (ast->tx_chip_type != AST_TX_NONE)
+ if (ast->tx_chip_types & AST_TX_SIL164_BIT)
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80); /* Enable DVO */
}
}
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index b97f6e8f0f6b..01c8b80e34ec 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1267,6 +1267,25 @@ static int analogix_dp_bridge_attach(struct drm_bridge *bridge,
}
static
+struct drm_crtc *analogix_dp_get_old_crtc(struct analogix_dp_device *dp,
+ struct drm_atomic_state *state)
+{
+ struct drm_encoder *encoder = dp->encoder;
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+
+ connector = drm_atomic_get_old_connector_for_encoder(state, encoder);
+ if (!connector)
+ return NULL;
+
+ conn_state = drm_atomic_get_old_connector_state(state, connector);
+ if (!conn_state)
+ return NULL;
+
+ return conn_state->crtc;
+}
+
+static
struct drm_crtc *analogix_dp_get_new_crtc(struct analogix_dp_device *dp,
struct drm_atomic_state *state)
{
@@ -1446,14 +1465,16 @@ analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge,
{
struct drm_atomic_state *old_state = old_bridge_state->base.state;
struct analogix_dp_device *dp = bridge->driver_private;
- struct drm_crtc *crtc;
+ struct drm_crtc *old_crtc, *new_crtc;
+ struct drm_crtc_state *old_crtc_state = NULL;
struct drm_crtc_state *new_crtc_state = NULL;
+ int ret;
- crtc = analogix_dp_get_new_crtc(dp, old_state);
- if (!crtc)
+ new_crtc = analogix_dp_get_new_crtc(dp, old_state);
+ if (!new_crtc)
goto out;
- new_crtc_state = drm_atomic_get_new_crtc_state(old_state, crtc);
+ new_crtc_state = drm_atomic_get_new_crtc_state(old_state, new_crtc);
if (!new_crtc_state)
goto out;
@@ -1462,6 +1483,19 @@ analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge,
return;
out:
+ old_crtc = analogix_dp_get_old_crtc(dp, old_state);
+ if (old_crtc) {
+ old_crtc_state = drm_atomic_get_old_crtc_state(old_state,
+ old_crtc);
+
+ /* When moving from PSR to fully disabled, exit PSR first. */
+ if (old_crtc_state && old_crtc_state->self_refresh_active) {
+ ret = analogix_dp_disable_psr(dp);
+ if (ret)
+ DRM_ERROR("Failed to disable psr (%d)\n", ret);
+ }
+ }
+
analogix_dp_bridge_disable(bridge);
}
diff --git a/drivers/gpu/drm/bridge/fsl-ldb.c b/drivers/gpu/drm/bridge/fsl-ldb.c
index b2675c769a55..4b503c544256 100644
--- a/drivers/gpu/drm/bridge/fsl-ldb.c
+++ b/drivers/gpu/drm/bridge/fsl-ldb.c
@@ -74,22 +74,6 @@ static int fsl_ldb_attach(struct drm_bridge *bridge,
bridge, flags);
}
-static int fsl_ldb_atomic_check(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
-{
- /* Invert DE signal polarity. */
- bridge_state->input_bus_cfg.flags &= ~(DRM_BUS_FLAG_DE_LOW |
- DRM_BUS_FLAG_DE_HIGH);
- if (bridge_state->output_bus_cfg.flags & DRM_BUS_FLAG_DE_LOW)
- bridge_state->input_bus_cfg.flags |= DRM_BUS_FLAG_DE_HIGH;
- else if (bridge_state->output_bus_cfg.flags & DRM_BUS_FLAG_DE_HIGH)
- bridge_state->input_bus_cfg.flags |= DRM_BUS_FLAG_DE_LOW;
-
- return 0;
-}
-
static void fsl_ldb_atomic_enable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
@@ -153,7 +137,7 @@ static void fsl_ldb_atomic_enable(struct drm_bridge *bridge,
reg = LDB_CTRL_CH0_ENABLE;
if (fsl_ldb->lvds_dual_link)
- reg |= LDB_CTRL_CH1_ENABLE;
+ reg |= LDB_CTRL_CH1_ENABLE | LDB_CTRL_SPLIT_MODE;
if (lvds_format_24bpp) {
reg |= LDB_CTRL_CH0_DATA_WIDTH;
@@ -233,7 +217,7 @@ fsl_ldb_mode_valid(struct drm_bridge *bridge,
{
struct fsl_ldb *fsl_ldb = to_fsl_ldb(bridge);
- if (mode->clock > (fsl_ldb->lvds_dual_link ? 80000 : 160000))
+ if (mode->clock > (fsl_ldb->lvds_dual_link ? 160000 : 80000))
return MODE_CLOCK_HIGH;
return MODE_OK;
@@ -241,7 +225,6 @@ fsl_ldb_mode_valid(struct drm_bridge *bridge,
static const struct drm_bridge_funcs funcs = {
.attach = fsl_ldb_attach,
- .atomic_check = fsl_ldb_atomic_check,
.atomic_enable = fsl_ldb_atomic_enable,
.atomic_disable = fsl_ldb_atomic_disable,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
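
The original bound in fsl_ldb_mode_valid() was inverted: dual-link LVDS doubles the usable pixel clock, so the limit should be 160 MHz for dual link and 80 MHz for single link. The corrected predicate in isolation (clock in kHz, as in struct drm_display_mode):

#include <stdbool.h>
#include <stdio.h>

/* Returns true when the mode's pixel clock fits the LVDS link. */
static bool ldb_mode_clock_ok(int clock_khz, bool dual_link)
{
    return clock_khz <= (dual_link ? 160000 : 80000);
}

int main(void)
{
    printf("%d\n", ldb_mode_clock_ok(148500, false)); /* 0: too fast for single link */
    printf("%d\n", ldb_mode_clock_ok(148500, true));  /* 1: fine on dual link */
    return 0;
}
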
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
index 2831f0813c3a..ac66f408b40c 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
@@ -577,7 +577,7 @@ static int sn65dsi83_parse_dt(struct sn65dsi83 *ctx, enum sn65dsi83_model model)
ctx->host_node = of_graph_get_remote_port_parent(endpoint);
of_node_put(endpoint);
- if (ctx->dsi_lanes < 0 || ctx->dsi_lanes > 4) {
+ if (ctx->dsi_lanes <= 0 || ctx->dsi_lanes > 4) {
ret = -EINVAL;
goto err_put_node;
}
diff --git a/drivers/gpu/drm/drm_aperture.c b/drivers/gpu/drm/drm_aperture.c
index 74bd4a76b253..059fd71424f6 100644
--- a/drivers/gpu/drm/drm_aperture.c
+++ b/drivers/gpu/drm/drm_aperture.c
@@ -329,7 +329,20 @@ int drm_aperture_remove_conflicting_pci_framebuffers(struct pci_dev *pdev,
const struct drm_driver *req_driver)
{
resource_size_t base, size;
- int bar, ret = 0;
+ int bar, ret;
+
+ /*
+ * WARNING: Apparently we must kick fbdev drivers before vgacon,
+ * otherwise the vga fbdev driver falls over.
+ */
+#if IS_REACHABLE(CONFIG_FB)
+ ret = remove_conflicting_pci_framebuffers(pdev, req_driver->name);
+ if (ret)
+ return ret;
+#endif
+ ret = vga_remove_vgacon(pdev);
+ if (ret)
+ return ret;
for (bar = 0; bar < PCI_STD_NUM_BARS; ++bar) {
if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
@@ -339,15 +352,6 @@ int drm_aperture_remove_conflicting_pci_framebuffers(struct pci_dev *pdev,
drm_aperture_detach_drivers(base, size);
}
- /*
- * WARNING: Apparently we must kick fbdev drivers before vgacon,
- * otherwise the vga fbdev driver falls over.
- */
-#if IS_REACHABLE(CONFIG_FB)
- ret = remove_conflicting_pci_framebuffers(pdev, req_driver->name);
-#endif
- if (ret == 0)
- ret = vga_remove_vgacon(pdev);
- return ret;
+ return 0;
}
EXPORT_SYMBOL(drm_aperture_remove_conflicting_pci_framebuffers);
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 9603193d2fa1..987e4b212e9f 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1011,9 +1011,19 @@ crtc_needs_disable(struct drm_crtc_state *old_state,
return drm_atomic_crtc_effectively_active(old_state);
/*
- * We need to run through the crtc_funcs->disable() function if the CRTC
- * is currently on, if it's transitioning to self refresh mode, or if
- * it's in self refresh mode and needs to be fully disabled.
+ * We need to disable bridge(s) and CRTC if we're transitioning out of
+ * self-refresh and changing CRTCs at the same time, because the
+ * bridge tracks self-refresh status via CRTC state.
+ */
+ if (old_state->self_refresh_active &&
+ old_state->crtc != new_state->crtc)
+ return true;
+
+ /*
+ * We also need to run through the crtc_funcs->disable() function if
+ * the CRTC is currently on, if it's transitioning to self refresh
+ * mode, or if it's in self refresh mode and needs to be fully
+ * disabled.
*/
return old_state->active ||
(old_state->self_refresh_active && !new_state->active) ||
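
The helper gains one more reason to disable: leaving self-refresh while simultaneously moving to a different CRTC, since the bridge tracks self-refresh status through the old CRTC's state. A standalone sketch of the decision function; the final clause mirrors the existing helper, and an integer id stands in for the CRTC pointer comparison:

#include <stdbool.h>
#include <stdio.h>

struct crtc_state_sketch {
    bool active;
    bool self_refresh_active;
    int crtc_id; /* stands in for the drm_crtc pointer */
};

static bool crtc_needs_disable(const struct crtc_state_sketch *old_s,
                               const struct crtc_state_sketch *new_s)
{
    /* No new state: shutting down, disable if effectively active. */
    if (!new_s)
        return old_s->active || old_s->self_refresh_active;

    /* New case: leaving self-refresh while changing CRTCs. */
    if (old_s->self_refresh_active && old_s->crtc_id != new_s->crtc_id)
        return true;

    return old_s->active ||
           (old_s->self_refresh_active && !new_s->active) ||
           new_s->self_refresh_active;
}

int main(void)
{
    struct crtc_state_sketch old_s = { .self_refresh_active = true, .crtc_id = 0 };
    struct crtc_state_sketch new_s = { .active = true, .crtc_id = 1 };

    printf("%d\n", crtc_needs_disable(&old_s, &new_s)); /* 1 */
    return 0;
}
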
diff --git a/drivers/gpu/drm/drm_gem_ttm_helper.c b/drivers/gpu/drm/drm_gem_ttm_helper.c
index d5962a34c01d..e5fc875990c4 100644
--- a/drivers/gpu/drm/drm_gem_ttm_helper.c
+++ b/drivers/gpu/drm/drm_gem_ttm_helper.c
@@ -64,8 +64,13 @@ int drm_gem_ttm_vmap(struct drm_gem_object *gem,
struct iosys_map *map)
{
struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
+ int ret;
+
+ dma_resv_lock(gem->resv, NULL);
+ ret = ttm_bo_vmap(bo, map);
+ dma_resv_unlock(gem->resv);
- return ttm_bo_vmap(bo, map);
+ return ret;
}
EXPORT_SYMBOL(drm_gem_ttm_vmap);
@@ -82,7 +87,9 @@ void drm_gem_ttm_vunmap(struct drm_gem_object *gem,
{
struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
+ dma_resv_lock(gem->resv, NULL);
ttm_bo_vunmap(bo, map);
+ dma_resv_unlock(gem->resv);
}
EXPORT_SYMBOL(drm_gem_ttm_vunmap);
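
Both hunks above take the GEM object's reservation lock around the TTM calls, since ttm_bo_vmap() and ttm_bo_vunmap() expect it to be held. A minimal caller-side sketch, assuming a hypothetical driver function and a system-memory object (not part of this patch):

	struct iosys_map map;
	int err;

	err = drm_gem_ttm_vmap(gem, &map);	/* takes gem->resv internally now */
	if (err)
		return err;
	iosys_map_memset(&map, 0, 0, gem->size);	/* CPU access through the mapping */
	drm_gem_ttm_vunmap(gem, &map);	/* takes and drops gem->resv internally */
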
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index 4e853acfd1e8..d4e0f2e85548 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -152,6 +152,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "AYA NEO 2021"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* AYA NEO NEXT */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
+ DMI_MATCH(DMI_BOARD_NAME, "NEXT"),
+ },
+ .driver_data = (void *)&lcd800x1280_rightside_up,
}, { /* Chuwi HiBook (CWI514) */
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
@@ -280,6 +286,21 @@ static const struct dmi_system_id orientation_data[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X9"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
+ }, { /* Lenovo Yoga Tablet 2 830F / 830L */
+ .matches = {
+ /*
+ * Note this also matches the Lenovo Yoga Tablet 2 1050F/L
+ * since that uses the same mainboard. The resolution match
+ * will limit this to only matching on the 830F/L. Neither has
+ * any external video outputs so those are not a concern.
+ */
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corp."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VALLEYVIEW C0 PLATFORM"),
+ DMI_MATCH(DMI_BOARD_NAME, "BYT-T FFD8"),
+ /* Partial match on beginning of BIOS version */
+ DMI_MATCH(DMI_BIOS_VERSION, "BLADE_21"),
+ },
+ .driver_data = (void *)&lcd1200x1920_rightside_up,
}, { /* OneGX1 Pro */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SYSTEM_MANUFACTURER"),
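
For context, each entry above is matched against the platform's DMI strings at runtime. A sketch of how such a table is typically consumed (dmi_first_match() is the standard kernel helper; the function below is a simplified illustration, not this file's actual lookup):

	#include <linux/dmi.h>

	static const struct drm_dmi_panel_orientation_data *
	find_orientation_quirk(void)
	{
		const struct dmi_system_id *match;

		match = dmi_first_match(orientation_data);
		/* driver_data carries the per-machine orientation info */
		return match ? match->driver_data : NULL;
	}
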
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 424ea23eec32..16c539657f73 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -177,15 +177,15 @@ static struct exynos_drm_driver_info exynos_drm_drivers[] = {
DRV_PTR(mixer_driver, CONFIG_DRM_EXYNOS_MIXER),
DRM_COMPONENT_DRIVER
}, {
- DRV_PTR(mic_driver, CONFIG_DRM_EXYNOS_MIC),
- DRM_COMPONENT_DRIVER
- }, {
DRV_PTR(dp_driver, CONFIG_DRM_EXYNOS_DP),
DRM_COMPONENT_DRIVER
}, {
DRV_PTR(dsi_driver, CONFIG_DRM_EXYNOS_DSI),
DRM_COMPONENT_DRIVER
}, {
+ DRV_PTR(mic_driver, CONFIG_DRM_EXYNOS_MIC),
+ DRM_COMPONENT_DRIVER
+ }, {
DRV_PTR(hdmi_driver, CONFIG_DRM_EXYNOS_HDMI),
DRM_COMPONENT_DRIVER
}, {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index 9e06f8e2a863..09ce28ee08d9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -26,6 +26,7 @@
#include <drm/drm_print.h>
#include "exynos_drm_drv.h"
+#include "exynos_drm_crtc.h"
/* Sysreg registers for MIC */
#define DSD_CFG_MUX 0x1004
@@ -100,9 +101,7 @@ struct exynos_mic {
bool i80_mode;
struct videomode vm;
- struct drm_encoder *encoder;
struct drm_bridge bridge;
- struct drm_bridge *next_bridge;
bool enabled;
};
@@ -229,8 +228,6 @@ static void mic_set_reg_on(struct exynos_mic *mic, bool enable)
writel(reg, mic->reg + MIC_OP);
}
-static void mic_disable(struct drm_bridge *bridge) { }
-
static void mic_post_disable(struct drm_bridge *bridge)
{
struct exynos_mic *mic = bridge->driver_private;
@@ -297,34 +294,30 @@ unlock:
mutex_unlock(&mic_mutex);
}
-static void mic_enable(struct drm_bridge *bridge) { }
-
-static int mic_attach(struct drm_bridge *bridge,
- enum drm_bridge_attach_flags flags)
-{
- struct exynos_mic *mic = bridge->driver_private;
-
- return drm_bridge_attach(bridge->encoder, mic->next_bridge,
- &mic->bridge, flags);
-}
-
static const struct drm_bridge_funcs mic_bridge_funcs = {
- .disable = mic_disable,
.post_disable = mic_post_disable,
.mode_set = mic_mode_set,
.pre_enable = mic_pre_enable,
- .enable = mic_enable,
- .attach = mic_attach,
};
static int exynos_mic_bind(struct device *dev, struct device *master,
void *data)
{
struct exynos_mic *mic = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+ struct exynos_drm_crtc *crtc = exynos_drm_crtc_get_by_type(drm_dev,
+ EXYNOS_DISPLAY_TYPE_LCD);
+ struct drm_encoder *e, *encoder = NULL;
+
+ drm_for_each_encoder(e, drm_dev)
+ if (e->possible_crtcs == drm_crtc_mask(&crtc->base))
+ encoder = e;
+ if (!encoder)
+ return -ENODEV;
mic->bridge.driver_private = mic;
- return 0;
+ return drm_bridge_attach(encoder, &mic->bridge, NULL, 0);
}
static void exynos_mic_unbind(struct device *dev, struct device *master,
@@ -388,7 +381,6 @@ static int exynos_mic_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct exynos_mic *mic;
- struct device_node *remote;
struct resource res;
int ret, i;
@@ -432,16 +424,6 @@ static int exynos_mic_probe(struct platform_device *pdev)
}
}
- remote = of_graph_get_remote_node(dev->of_node, 1, 0);
- mic->next_bridge = of_drm_find_bridge(remote);
- if (IS_ERR(mic->next_bridge)) {
- DRM_DEV_ERROR(dev, "mic: Failed to find next bridge\n");
- ret = PTR_ERR(mic->next_bridge);
- goto err;
- }
-
- of_node_put(remote);
-
platform_set_drvdata(pdev, mic);
mic->bridge.funcs = &mic_bridge_funcs;
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index e4a79c11fd25..ff67899522cf 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -388,13 +388,23 @@ static int dg2_max_source_rate(struct intel_dp *intel_dp)
return intel_dp_is_edp(intel_dp) ? 810000 : 1350000;
}
+static bool is_low_voltage_sku(struct drm_i915_private *i915, enum phy phy)
+{
+ u32 voltage;
+
+ voltage = intel_de_read(i915, ICL_PORT_COMP_DW3(phy)) & VOLTAGE_INFO_MASK;
+
+ return voltage == VOLTAGE_INFO_0_85V;
+}
+
static int icl_max_source_rate(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
- if (intel_phy_is_combo(dev_priv, phy) && !intel_dp_is_edp(intel_dp))
+ if (intel_phy_is_combo(dev_priv, phy) &&
+ (is_low_voltage_sku(dev_priv, phy) || !intel_dp_is_edp(intel_dp)))
return 540000;
return 810000;
@@ -402,7 +412,23 @@ static int icl_max_source_rate(struct intel_dp *intel_dp)
static int ehl_max_source_rate(struct intel_dp *intel_dp)
{
- if (intel_dp_is_edp(intel_dp))
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
+
+ if (intel_dp_is_edp(intel_dp) || is_low_voltage_sku(dev_priv, phy))
+ return 540000;
+
+ return 810000;
+}
+
+static int dg1_max_source_rate(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
+
+ if (intel_phy_is_combo(i915, phy) && is_low_voltage_sku(i915, phy))
return 540000;
return 810000;
@@ -445,7 +471,7 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
max_rate = dg2_max_source_rate(intel_dp);
else if (IS_ALDERLAKE_P(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
- max_rate = 810000;
+ max_rate = dg1_max_source_rate(intel_dp);
else if (IS_JSL_EHL(dev_priv))
max_rate = ehl_max_source_rate(intel_dp);
else
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 061b277e5ce7..14d2a64193b2 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -839,6 +839,7 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs,
DRM_MODE_CONNECTOR_DisplayPort);
if (ret) {
+ drm_dp_mst_put_port_malloc(port);
intel_connector_free(intel_connector);
return NULL;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index 22f55574a35c..88c2f38aa870 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -2396,7 +2396,7 @@ static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
}
/*
- * Display WA #22010492432: ehl, tgl, adl-p
+ * Display WA #22010492432: ehl, tgl, adl-s, adl-p
* Program half of the nominal DCO divider fraction value.
*/
static bool
@@ -2404,7 +2404,7 @@ ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
{
return ((IS_PLATFORM(i915, INTEL_ELKHARTLAKE) &&
IS_JSL_EHL_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
- IS_TIGERLAKE(i915) || IS_ALDERLAKE_P(i915)) &&
+ IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) &&
i915->dpll.ref_clks.nssc == 38400;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index ab4c5ab28e4d..321af109d484 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -933,8 +933,9 @@ static int set_proto_ctx_param(struct drm_i915_file_private *fpriv,
case I915_CONTEXT_PARAM_PERSISTENCE:
if (args->size)
ret = -EINVAL;
- ret = proto_context_set_persistence(fpriv->dev_priv, pc,
- args->value);
+ else
+ ret = proto_context_set_persistence(fpriv->dev_priv, pc,
+ args->value);
break;
case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index 3e5d6057b3ef..1674b0c5802b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -35,12 +35,12 @@ bool i915_gem_cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
if (obj->cache_dirty)
return false;
- if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
- return true;
-
if (IS_DGFX(i915))
return false;
+ if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
+ return true;
+
/* Currently in use by HW (display engine)? Keep flushed. */
return i915_gem_object_is_framebuffer(obj);
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index c326bd2b444f..30fe847c6664 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -999,7 +999,8 @@ static int eb_validate_vmas(struct i915_execbuffer *eb)
}
}
- err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
+ /* Reserve enough slots to accommodate composite fences */
+ err = dma_resv_reserve_fences(vma->obj->base.resv, eb->num_batches);
if (err)
return err;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c
index f46ee16a323a..a4fb577eceb4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -60,6 +60,8 @@ __i915_gem_object_create_region(struct intel_memory_region *mem,
if (page_size)
default_page_size = page_size;
+ /* We should be able to fit a page within an sg entry */
+ GEM_BUG_ON(overflows_type(default_page_size, u32));
GEM_BUG_ON(!is_power_of_2_u64(default_page_size));
GEM_BUG_ON(default_page_size < PAGE_SIZE);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 4c25d9b2f138..8f1bb6a4b7d1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -620,10 +620,15 @@ i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
struct ttm_resource *res)
{
struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
+ u32 page_alignment;
if (!i915_ttm_gtt_binds_lmem(res))
return i915_ttm_tt_get_st(bo->ttm);
+ page_alignment = bo->page_alignment << PAGE_SHIFT;
+ if (!page_alignment)
+ page_alignment = obj->mm.region->min_page_size;
+
/*
* If CPU mapping differs, we need to add the ttm_tt pages to
* the resulting st. Might make sense for GGTT.
@@ -634,7 +639,8 @@ i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
struct i915_refct_sgt *rsgt;
rsgt = intel_region_ttm_resource_to_rsgt(obj->mm.region,
- res);
+ res,
+ page_alignment);
if (IS_ERR(rsgt))
return rsgt;
@@ -643,7 +649,8 @@ i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
return i915_refct_sgt_get(obj->ttm.cached_io_rsgt);
}
- return intel_region_ttm_resource_to_rsgt(obj->mm.region, res);
+ return intel_region_ttm_resource_to_rsgt(obj->mm.region, res,
+ page_alignment);
}
static int i915_ttm_truncate(struct drm_i915_gem_object *obj)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index 319936f91ac5..e6e01c2a74a6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -9,6 +9,7 @@
#include <linux/jiffies.h>
#include "gt/intel_engine.h"
+#include "gt/intel_rps.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
@@ -31,6 +32,37 @@ i915_gem_object_wait_fence(struct dma_fence *fence,
timeout);
}
+static void
+i915_gem_object_boost(struct dma_resv *resv, unsigned int flags)
+{
+ struct dma_resv_iter cursor;
+ struct dma_fence *fence;
+
+ /*
+ * Prescan all fences for potential boosting before we begin waiting.
+ *
+ * When we wait, we wait on outstanding fences serially. If the
+ * dma-resv contains a sequence such as 1:1, 1:2 instead of a reduced
+ * form 1:2, then as we look at each wait in turn we see that each
+ * request is currently executing and not worthy of boosting. But if
+ * we only happen to look at the final fence in the sequence (because
+ * of request coalescing or splitting between read/write arrays by
+ * the iterator), then we would boost. As such our decision to boost
+ * or not is delicately balanced on the order we wait on fences.
+ *
+ * So instead of looking for boosts sequentially, look for all boosts
+ * upfront and then wait on the outstanding fences.
+ */
+
+ dma_resv_iter_begin(&cursor, resv,
+ dma_resv_usage_rw(flags & I915_WAIT_ALL));
+ dma_resv_for_each_fence_unlocked(&cursor, fence)
+ if (dma_fence_is_i915(fence) &&
+ !i915_request_started(to_request(fence)))
+ intel_rps_boost(to_request(fence));
+ dma_resv_iter_end(&cursor);
+}
+
static long
i915_gem_object_wait_reservation(struct dma_resv *resv,
unsigned int flags,
@@ -40,6 +72,8 @@ i915_gem_object_wait_reservation(struct dma_resv *resv,
struct dma_fence *fence;
long ret = timeout ?: 1;
+ i915_gem_object_boost(resv, flags);
+
dma_resv_iter_begin(&cursor, resv,
dma_resv_usage_rw(flags & I915_WAIT_ALL));
dma_resv_for_each_fence_unlocked(&cursor, fence) {
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 09f82545789f..44e7339e7a4a 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -273,10 +273,17 @@ struct intel_context {
u8 child_index;
/** @guc: GuC specific members for parallel submission */
struct {
- /** @wqi_head: head pointer in work queue */
+ /** @wqi_head: cached head pointer in work queue */
u16 wqi_head;
- /** @wqi_tail: tail pointer in work queue */
+ /** @wqi_tail: cached tail pointer in work queue */
u16 wqi_tail;
+ /** @wq_head: pointer to the actual head in work queue */
+ u32 *wq_head;
+ /** @wq_tail: pointer to the actual tail in work queue */

+ u32 *wq_tail;
+ /** @wq_status: pointer to the status in work queue */
+ u32 *wq_status;
+
/**
* @parent_page: page in context state (ce->state) used
* by parent for work queue, process descriptor
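
The split is deliberate: wqi_head/wqi_tail remain the driver's cached copies, while the new pointers reference whichever shared-page layout (the v69 process descriptor or the v70 wq descriptor, both introduced later in this series) the firmware actually uses. A hedged sketch of the intended access pattern, mirroring code further down:

	/* Refresh the cached head from the shared page only when needed. */
	ce->parallel.guc.wqi_head = READ_ONCE(*ce->parallel.guc.wq_head);

	/* Publish a new tail after work-queue items have been written. */
	WRITE_ONCE(*ce->parallel.guc.wq_tail, ce->parallel.guc.wqi_tail);
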
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 86f7a9ac1c39..2b0266cab66b 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -661,6 +661,16 @@ static inline void execlists_schedule_out(struct i915_request *rq)
i915_request_put(rq);
}
+static u32 map_i915_prio_to_lrc_desc_prio(int prio)
+{
+ if (prio > I915_PRIORITY_NORMAL)
+ return GEN12_CTX_PRIORITY_HIGH;
+ else if (prio < I915_PRIORITY_NORMAL)
+ return GEN12_CTX_PRIORITY_LOW;
+ else
+ return GEN12_CTX_PRIORITY_NORMAL;
+}
+
static u64 execlists_update_context(struct i915_request *rq)
{
struct intel_context *ce = rq->context;
@@ -669,7 +679,7 @@ static u64 execlists_update_context(struct i915_request *rq)
desc = ce->lrc.desc;
if (rq->engine->flags & I915_ENGINE_HAS_EU_PRIORITY)
- desc |= lrc_desc_priority(rq_prio(rq));
+ desc |= map_i915_prio_to_lrc_desc_prio(rq_prio(rq));
/*
* WaIdleLiteRestore:bdw,skl
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index 53307ca0eed0..531af6ad7007 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -785,6 +785,7 @@ void intel_gt_driver_unregister(struct intel_gt *gt)
{
intel_wakeref_t wakeref;
+ intel_gt_sysfs_unregister(gt);
intel_rps_driver_unregister(&gt->rps);
intel_gsc_fini(&gt->gsc);
@@ -1208,6 +1209,20 @@ void intel_gt_invalidate_tlbs(struct intel_gt *gt)
mutex_lock(&gt->tlb_invalidate_lock);
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+ spin_lock_irq(&uncore->lock); /* serialise invalidate with GT reset */
+
+ for_each_engine(engine, gt, id) {
+ struct reg_and_bit rb;
+
+ rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
+ if (!i915_mmio_reg_offset(rb.reg))
+ continue;
+
+ intel_uncore_write_fw(uncore, rb.reg, rb.bit);
+ }
+
+ spin_unlock_irq(&uncore->lock);
+
for_each_engine(engine, gt, id) {
/*
* HW architecture suggest typical invalidation time at 40us,
@@ -1222,7 +1237,6 @@ void intel_gt_invalidate_tlbs(struct intel_gt *gt)
if (!i915_mmio_reg_offset(rb.reg))
continue;
- intel_uncore_write_fw(uncore, rb.reg, rb.bit);
if (__intel_wait_for_register_fw(uncore,
rb.reg, rb.bit, 0,
timeout_us, timeout_ms,
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c
index 8ec8bc660c8c..9e4ebf53379b 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c
@@ -24,7 +24,7 @@ bool is_object_gt(struct kobject *kobj)
static struct intel_gt *kobj_to_gt(struct kobject *kobj)
{
- return container_of(kobj, struct kobj_gt, base)->gt;
+ return container_of(kobj, struct intel_gt, sysfs_gt);
}
struct intel_gt *intel_gt_sysfs_get_drvdata(struct device *dev,
@@ -72,9 +72,9 @@ static struct attribute *id_attrs[] = {
};
ATTRIBUTE_GROUPS(id);
+/* A kobject needs a release() method even if it does nothing */
static void kobj_gt_release(struct kobject *kobj)
{
- kfree(kobj);
}
static struct kobj_type kobj_gt_type = {
@@ -85,8 +85,6 @@ static struct kobj_type kobj_gt_type = {
void intel_gt_sysfs_register(struct intel_gt *gt)
{
- struct kobj_gt *kg;
-
/*
* We need to make things right with the
* ABI compatibility. The files were originally
@@ -98,25 +96,22 @@ void intel_gt_sysfs_register(struct intel_gt *gt)
if (gt_is_root(gt))
intel_gt_sysfs_pm_init(gt, gt_get_parent_obj(gt));
- kg = kzalloc(sizeof(*kg), GFP_KERNEL);
- if (!kg)
+ /* init and xfer ownership to sysfs tree */
+ if (kobject_init_and_add(&gt->sysfs_gt, &kobj_gt_type,
+ gt->i915->sysfs_gt, "gt%d", gt->info.id))
goto exit_fail;
- kobject_init(&kg->base, &kobj_gt_type);
- kg->gt = gt;
-
- /* xfer ownership to sysfs tree */
- if (kobject_add(&kg->base, gt->i915->sysfs_gt, "gt%d", gt->info.id))
- goto exit_kobj_put;
-
- intel_gt_sysfs_pm_init(gt, &kg->base);
+ intel_gt_sysfs_pm_init(gt, &gt->sysfs_gt);
return;
-exit_kobj_put:
- kobject_put(&kg->base);
-
exit_fail:
+ kobject_put(&gt->sysfs_gt);
drm_warn(&gt->i915->drm,
"failed to initialize gt%d sysfs root\n", gt->info.id);
}
+
+void intel_gt_sysfs_unregister(struct intel_gt *gt)
+{
+ kobject_put(&gt->sysfs_gt);
+}
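
Embedding the kobject in struct intel_gt changes the lifetime rules: kobject_init_and_add() takes a reference that must be dropped even when it fails, so both the error path and the new unregister hook balance it with kobject_put(), and release() is empty because the memory is owned by the enclosing intel_gt. A condensed sketch of the pattern, using names from this file:

	if (kobject_init_and_add(&gt->sysfs_gt, &kobj_gt_type,
				 gt->i915->sysfs_gt, "gt%d", gt->info.id)) {
		kobject_put(&gt->sysfs_gt);	/* drop the ref init took */
		return;
	}

	/* ... and on teardown, release the sysfs reference: */
	kobject_put(&gt->sysfs_gt);
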
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.h b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.h
index 9471b26752cf..a99aa7e8b01a 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.h
@@ -13,11 +13,6 @@
struct intel_gt;
-struct kobj_gt {
- struct kobject base;
- struct intel_gt *gt;
-};
-
bool is_object_gt(struct kobject *kobj);
struct drm_i915_private *kobj_to_i915(struct kobject *kobj);
@@ -28,6 +23,7 @@ intel_gt_create_kobj(struct intel_gt *gt,
const char *name);
void intel_gt_sysfs_register(struct intel_gt *gt);
+void intel_gt_sysfs_unregister(struct intel_gt *gt);
struct intel_gt *intel_gt_sysfs_get_drvdata(struct device *dev,
const char *name);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index b06611c1d4ad..edd7a3cf5f5f 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -224,6 +224,9 @@ struct intel_gt {
} mocs;
struct intel_pxp pxp;
+
+ /* gt/gtN sysfs */
+ struct kobject sysfs_gt;
};
enum intel_gt_scratch_field {
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.h b/drivers/gpu/drm/i915/gt/intel_lrc.h
index 31be734010db..a390f0813c8b 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.h
@@ -111,16 +111,6 @@ enum {
#define XEHP_SW_COUNTER_SHIFT 58
#define XEHP_SW_COUNTER_WIDTH 6
-static inline u32 lrc_desc_priority(int prio)
-{
- if (prio > I915_PRIORITY_NORMAL)
- return GEN12_CTX_PRIORITY_HIGH;
- else if (prio < I915_PRIORITY_NORMAL)
- return GEN12_CTX_PRIORITY_LOW;
- else
- return GEN12_CTX_PRIORITY_NORMAL;
-}
-
static inline void lrc_runtime_start(struct intel_context *ce)
{
struct intel_context_stats *stats = &ce->stats;
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index a5338c3fde7a..c68d36fb5bbd 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -300,9 +300,9 @@ static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)
return err;
}
-static int gen6_reset_engines(struct intel_gt *gt,
- intel_engine_mask_t engine_mask,
- unsigned int retry)
+static int __gen6_reset_engines(struct intel_gt *gt,
+ intel_engine_mask_t engine_mask,
+ unsigned int retry)
{
struct intel_engine_cs *engine;
u32 hw_mask;
@@ -321,6 +321,20 @@ static int gen6_reset_engines(struct intel_gt *gt,
return gen6_hw_domain_reset(gt, hw_mask);
}
+static int gen6_reset_engines(struct intel_gt *gt,
+ intel_engine_mask_t engine_mask,
+ unsigned int retry)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&gt->uncore->lock, flags);
+ ret = __gen6_reset_engines(gt, engine_mask, retry);
+ spin_unlock_irqrestore(&gt->uncore->lock, flags);
+
+ return ret;
+}
+
static struct intel_engine_cs *find_sfc_paired_vecs_engine(struct intel_engine_cs *engine)
{
int vecs_id;
@@ -487,9 +501,9 @@ static void gen11_unlock_sfc(struct intel_engine_cs *engine)
rmw_clear_fw(uncore, sfc_lock.lock_reg, sfc_lock.lock_bit);
}
-static int gen11_reset_engines(struct intel_gt *gt,
- intel_engine_mask_t engine_mask,
- unsigned int retry)
+static int __gen11_reset_engines(struct intel_gt *gt,
+ intel_engine_mask_t engine_mask,
+ unsigned int retry)
{
struct intel_engine_cs *engine;
intel_engine_mask_t tmp;
@@ -583,8 +597,11 @@ static int gen8_reset_engines(struct intel_gt *gt,
struct intel_engine_cs *engine;
const bool reset_non_ready = retry >= 1;
intel_engine_mask_t tmp;
+ unsigned long flags;
int ret;
+ spin_lock_irqsave(&gt->uncore->lock, flags);
+
for_each_engine_masked(engine, gt, engine_mask, tmp) {
ret = gen8_engine_reset_prepare(engine);
if (ret && !reset_non_ready)
@@ -612,17 +629,19 @@ static int gen8_reset_engines(struct intel_gt *gt,
* This is best effort, so ignore any error from the initial reset.
*/
if (IS_DG2(gt->i915) && engine_mask == ALL_ENGINES)
- gen11_reset_engines(gt, gt->info.engine_mask, 0);
+ __gen11_reset_engines(gt, gt->info.engine_mask, 0);
if (GRAPHICS_VER(gt->i915) >= 11)
- ret = gen11_reset_engines(gt, engine_mask, retry);
+ ret = __gen11_reset_engines(gt, engine_mask, retry);
else
- ret = gen6_reset_engines(gt, engine_mask, retry);
+ ret = __gen6_reset_engines(gt, engine_mask, retry);
skip_reset:
for_each_engine_masked(engine, gt, engine_mask, tmp)
gen8_engine_reset_cancel(engine);
+ spin_unlock_irqrestore(&gt->uncore->lock, flags);
+
return ret;
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 8b2c11dbe354..1109088fe8f6 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -176,8 +176,8 @@ static int live_lrc_layout(void *arg)
continue;
hw = shmem_pin_map(engine->default_state);
- if (IS_ERR(hw)) {
- err = PTR_ERR(hw);
+ if (!hw) {
+ err = -ENOMEM;
break;
}
hw += LRC_STATE_OFFSET / sizeof(*hw);
@@ -365,8 +365,8 @@ static int live_lrc_fixed(void *arg)
continue;
hw = shmem_pin_map(engine->default_state);
- if (IS_ERR(hw)) {
- err = PTR_ERR(hw);
+ if (!hw) {
+ err = -ENOMEM;
break;
}
hw += LRC_STATE_OFFSET / sizeof(*hw);
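
Both hunks fix the same calling-convention bug: i915's shmem_pin_map() returns NULL on failure rather than an ERR_PTR, so the old PTR_ERR() handling could never report a real error. The corrected pattern, as a short sketch:

	u32 *hw = shmem_pin_map(engine->default_state);

	if (!hw)	/* NULL, not an ERR_PTR, signals failure */
		return -ENOMEM;
	/* ... use the mapping ... */
	shmem_unpin_map(engine->default_state, hw);
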
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
index 4ef9990ed7f8..29ef8afc8c2e 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
@@ -122,6 +122,9 @@ enum intel_guc_action {
INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_DONE = 0x1002,
INTEL_GUC_ACTION_SCHED_ENGINE_MODE_SET = 0x1003,
INTEL_GUC_ACTION_SCHED_ENGINE_MODE_DONE = 0x1004,
+ INTEL_GUC_ACTION_V69_SET_CONTEXT_PRIORITY = 0x1005,
+ INTEL_GUC_ACTION_V69_SET_CONTEXT_EXECUTION_QUANTUM = 0x1006,
+ INTEL_GUC_ACTION_V69_SET_CONTEXT_PREEMPTION_TIMEOUT = 0x1007,
INTEL_GUC_ACTION_CONTEXT_RESET_NOTIFICATION = 0x1008,
INTEL_GUC_ACTION_ENGINE_FAILURE_NOTIFICATION = 0x1009,
INTEL_GUC_ACTION_HOST2GUC_UPDATE_CONTEXT_POLICIES = 0x100B,
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index 966e69a8b1c1..9feda105f913 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -170,6 +170,11 @@ struct intel_guc {
/** @ads_engine_usage_size: size of engine usage in the ADS */
u32 ads_engine_usage_size;
+ /** @lrc_desc_pool_v69: object allocated to hold the GuC LRC descriptor pool */
+ struct i915_vma *lrc_desc_pool_v69;
+ /** @lrc_desc_pool_vaddr_v69: contents of the GuC LRC descriptor pool */
+ void *lrc_desc_pool_vaddr_v69;
+
/**
* @context_lookup: used to resolve intel_context from guc_id, if a
* context is present in this structure it is registered with the GuC
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
index 42cb7a9a6199..89a7e5ec0614 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
@@ -203,6 +203,20 @@ struct guc_wq_item {
u32 fence_id;
} __packed;
+struct guc_process_desc_v69 {
+ u32 stage_id;
+ u64 db_base_addr;
+ u32 head;
+ u32 tail;
+ u32 error_offset;
+ u64 wq_base_addr;
+ u32 wq_size_bytes;
+ u32 wq_status;
+ u32 engine_presence;
+ u32 priority;
+ u32 reserved[36];
+} __packed;
+
struct guc_sched_wq_desc {
u32 head;
u32 tail;
@@ -227,6 +241,37 @@ struct guc_ctxt_registration_info {
};
#define CONTEXT_REGISTRATION_FLAG_KMD BIT(0)
+/* Preempt to idle on quantum expiry */
+#define CONTEXT_POLICY_FLAG_PREEMPT_TO_IDLE_V69 BIT(0)
+
+/*
+ * GuC Context registration descriptor.
+ * FIXME: This is only required to exist during context registration.
+ * The current 1:1 mapping between guc_lrc_desc and LRCs for the lifetime of the LRC
+ * is not required.
+ */
+struct guc_lrc_desc_v69 {
+ u32 hw_context_desc;
+ u32 slpm_perf_mode_hint; /* SLPC v1 only */
+ u32 slpm_freq_hint;
+ u32 engine_submit_mask; /* In logical space */
+ u8 engine_class;
+ u8 reserved0[3];
+ u32 priority;
+ u32 process_desc;
+ u32 wq_addr;
+ u32 wq_size;
+ u32 context_flags; /* CONTEXT_REGISTRATION_* */
+ /* Time for one workload to execute. (in microseconds) */
+ u32 execution_quantum;
+ /* Time to wait for a preemption request to complete before issuing a
+ * reset. (in microseconds).
+ */
+ u32 preemption_timeout;
+ u32 policy_flags; /* CONTEXT_POLICY_* */
+ u32 reserved1[19];
+} __packed;
+
/* 32-bit KLV structure as used by policy updates and others */
struct guc_klv_generic_dw_t {
u32 kl;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 1726f0f19901..9ffb343d0f79 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -414,12 +414,15 @@ struct sync_semaphore {
};
struct parent_scratch {
- struct guc_sched_wq_desc wq_desc;
+ union guc_descs {
+ struct guc_sched_wq_desc wq_desc;
+ struct guc_process_desc_v69 pdesc;
+ } descs;
struct sync_semaphore go;
struct sync_semaphore join[MAX_ENGINE_INSTANCE + 1];
- u8 unused[WQ_OFFSET - sizeof(struct guc_sched_wq_desc) -
+ u8 unused[WQ_OFFSET - sizeof(union guc_descs) -
sizeof(struct sync_semaphore) * (MAX_ENGINE_INSTANCE + 2)];
u32 wq[WQ_SIZE / sizeof(u32)];
@@ -456,17 +459,23 @@ __get_parent_scratch(struct intel_context *ce)
LRC_STATE_OFFSET) / sizeof(u32)));
}
+static struct guc_process_desc_v69 *
+__get_process_desc_v69(struct intel_context *ce)
+{
+ struct parent_scratch *ps = __get_parent_scratch(ce);
+
+ return &ps->descs.pdesc;
+}
+
static struct guc_sched_wq_desc *
-__get_wq_desc(struct intel_context *ce)
+__get_wq_desc_v70(struct intel_context *ce)
{
struct parent_scratch *ps = __get_parent_scratch(ce);
- return &ps->wq_desc;
+ return &ps->descs.wq_desc;
}
-static u32 *get_wq_pointer(struct guc_sched_wq_desc *wq_desc,
- struct intel_context *ce,
- u32 wqi_size)
+static u32 *get_wq_pointer(struct intel_context *ce, u32 wqi_size)
{
/*
* Check for space in work queue. Caching a value of head pointer in
@@ -476,7 +485,7 @@ static u32 *get_wq_pointer(struct guc_sched_wq_desc *wq_desc,
#define AVAILABLE_SPACE \
CIRC_SPACE(ce->parallel.guc.wqi_tail, ce->parallel.guc.wqi_head, WQ_SIZE)
if (wqi_size > AVAILABLE_SPACE) {
- ce->parallel.guc.wqi_head = READ_ONCE(wq_desc->head);
+ ce->parallel.guc.wqi_head = READ_ONCE(*ce->parallel.guc.wq_head);
if (wqi_size > AVAILABLE_SPACE)
return NULL;
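
The cached-head scheme above means most submissions never touch the shared page: the driver only re-reads the firmware-updated head when its cached copy suggests the ring is full. Note the argument order, too: the GuC work queue calls its producer index "tail", so wqi_tail is passed as CIRC_SPACE()'s first (producer) parameter. A self-contained sketch of the space computation, mirroring the macro from include/linux/circ_buf.h (the ring size must be a power of two; WQ_SIZE below is an example value, not necessarily this driver's):

	#define WQ_SIZE 0x1000	/* example: 4 KiB ring */
	#define CIRC_SPACE(head, tail, size) \
		(((tail) - ((head) + 1)) & ((size) - 1))

	/* Bytes the producer may write before overrunning the consumer. */
	static unsigned int wq_space(unsigned int write_idx, unsigned int read_idx)
	{
		return CIRC_SPACE(write_idx, read_idx, WQ_SIZE);
	}
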
@@ -495,11 +504,55 @@ static inline struct intel_context *__get_context(struct intel_guc *guc, u32 id)
return ce;
}
+static struct guc_lrc_desc_v69 *__get_lrc_desc_v69(struct intel_guc *guc, u32 index)
+{
+ struct guc_lrc_desc_v69 *base = guc->lrc_desc_pool_vaddr_v69;
+
+ if (!base)
+ return NULL;
+
+ GEM_BUG_ON(index >= GUC_MAX_CONTEXT_ID);
+
+ return &base[index];
+}
+
+static int guc_lrc_desc_pool_create_v69(struct intel_guc *guc)
+{
+ u32 size;
+ int ret;
+
+ size = PAGE_ALIGN(sizeof(struct guc_lrc_desc_v69) *
+ GUC_MAX_CONTEXT_ID);
+ ret = intel_guc_allocate_and_map_vma(guc, size, &guc->lrc_desc_pool_v69,
+ (void **)&guc->lrc_desc_pool_vaddr_v69);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void guc_lrc_desc_pool_destroy_v69(struct intel_guc *guc)
+{
+ if (!guc->lrc_desc_pool_vaddr_v69)
+ return;
+
+ guc->lrc_desc_pool_vaddr_v69 = NULL;
+ i915_vma_unpin_and_release(&guc->lrc_desc_pool_v69, I915_VMA_RELEASE_MAP);
+}
+
static inline bool guc_submission_initialized(struct intel_guc *guc)
{
return guc->submission_initialized;
}
+static inline void _reset_lrc_desc_v69(struct intel_guc *guc, u32 id)
+{
+ struct guc_lrc_desc_v69 *desc = __get_lrc_desc_v69(guc, id);
+
+ if (desc)
+ memset(desc, 0, sizeof(*desc));
+}
+
static inline bool ctx_id_mapped(struct intel_guc *guc, u32 id)
{
return __get_context(guc, id);
@@ -526,6 +579,8 @@ static inline void clr_ctx_id_mapping(struct intel_guc *guc, u32 id)
if (unlikely(!guc_submission_initialized(guc)))
return;
+ _reset_lrc_desc_v69(guc, id);
+
/*
* xarray API doesn't have xa_erase_irqsave wrapper, so calling
* the lower level functions directly.
@@ -611,7 +666,7 @@ int intel_guc_wait_for_idle(struct intel_guc *guc, long timeout)
true, timeout);
}
-static int guc_context_policy_init(struct intel_context *ce, bool loop);
+static int guc_context_policy_init_v70(struct intel_context *ce, bool loop);
static int try_context_registration(struct intel_context *ce, bool loop);
static int __guc_add_request(struct intel_guc *guc, struct i915_request *rq)
@@ -639,7 +694,7 @@ static int __guc_add_request(struct intel_guc *guc, struct i915_request *rq)
GEM_BUG_ON(context_guc_id_invalid(ce));
if (context_policy_required(ce)) {
- err = guc_context_policy_init(ce, false);
+ err = guc_context_policy_init_v70(ce, false);
if (err)
return err;
}
@@ -737,9 +792,7 @@ static u32 wq_space_until_wrap(struct intel_context *ce)
return (WQ_SIZE - ce->parallel.guc.wqi_tail);
}
-static void write_wqi(struct guc_sched_wq_desc *wq_desc,
- struct intel_context *ce,
- u32 wqi_size)
+static void write_wqi(struct intel_context *ce, u32 wqi_size)
{
BUILD_BUG_ON(!is_power_of_2(WQ_SIZE));
@@ -750,13 +803,12 @@ static void write_wqi(struct guc_sched_wq_desc *wq_desc,
ce->parallel.guc.wqi_tail = (ce->parallel.guc.wqi_tail + wqi_size) &
(WQ_SIZE - 1);
- WRITE_ONCE(wq_desc->tail, ce->parallel.guc.wqi_tail);
+ WRITE_ONCE(*ce->parallel.guc.wq_tail, ce->parallel.guc.wqi_tail);
}
static int guc_wq_noop_append(struct intel_context *ce)
{
- struct guc_sched_wq_desc *wq_desc = __get_wq_desc(ce);
- u32 *wqi = get_wq_pointer(wq_desc, ce, wq_space_until_wrap(ce));
+ u32 *wqi = get_wq_pointer(ce, wq_space_until_wrap(ce));
u32 len_dw = wq_space_until_wrap(ce) / sizeof(u32) - 1;
if (!wqi)
@@ -775,7 +827,6 @@ static int __guc_wq_item_append(struct i915_request *rq)
{
struct intel_context *ce = request_to_scheduling_context(rq);
struct intel_context *child;
- struct guc_sched_wq_desc *wq_desc = __get_wq_desc(ce);
unsigned int wqi_size = (ce->parallel.number_children + 4) *
sizeof(u32);
u32 *wqi;
@@ -795,7 +846,7 @@ static int __guc_wq_item_append(struct i915_request *rq)
return ret;
}
- wqi = get_wq_pointer(wq_desc, ce, wqi_size);
+ wqi = get_wq_pointer(ce, wqi_size);
if (!wqi)
return -EBUSY;
@@ -810,7 +861,7 @@ static int __guc_wq_item_append(struct i915_request *rq)
for_each_child(ce, child)
*wqi++ = child->ring->tail / sizeof(u64);
- write_wqi(wq_desc, ce, wqi_size);
+ write_wqi(ce, wqi_size);
return 0;
}
@@ -1868,20 +1919,34 @@ static void reset_fail_worker_func(struct work_struct *w);
int intel_guc_submission_init(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
+ int ret;
if (guc->submission_initialized)
return 0;
+ if (guc->fw.major_ver_found < 70) {
+ ret = guc_lrc_desc_pool_create_v69(guc);
+ if (ret)
+ return ret;
+ }
+
guc->submission_state.guc_ids_bitmap =
bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
- if (!guc->submission_state.guc_ids_bitmap)
- return -ENOMEM;
+ if (!guc->submission_state.guc_ids_bitmap) {
+ ret = -ENOMEM;
+ goto destroy_pool;
+ }
guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ;
guc->timestamp.shift = gpm_timestamp_shift(gt);
guc->submission_initialized = true;
return 0;
+
+destroy_pool:
+ guc_lrc_desc_pool_destroy_v69(guc);
+
+ return ret;
}
void intel_guc_submission_fini(struct intel_guc *guc)
@@ -1890,6 +1955,7 @@ void intel_guc_submission_fini(struct intel_guc *guc)
return;
guc_flush_destroyed_contexts(guc);
+ guc_lrc_desc_pool_destroy_v69(guc);
i915_sched_engine_put(guc->sched_engine);
bitmap_free(guc->submission_state.guc_ids_bitmap);
guc->submission_initialized = false;
@@ -2147,10 +2213,34 @@ static void unpin_guc_id(struct intel_guc *guc, struct intel_context *ce)
spin_unlock_irqrestore(&guc->submission_state.lock, flags);
}
-static int __guc_action_register_multi_lrc(struct intel_guc *guc,
- struct intel_context *ce,
- struct guc_ctxt_registration_info *info,
- bool loop)
+static int __guc_action_register_multi_lrc_v69(struct intel_guc *guc,
+ struct intel_context *ce,
+ u32 guc_id,
+ u32 offset,
+ bool loop)
+{
+ struct intel_context *child;
+ u32 action[4 + MAX_ENGINE_INSTANCE];
+ int len = 0;
+
+ GEM_BUG_ON(ce->parallel.number_children > MAX_ENGINE_INSTANCE);
+
+ action[len++] = INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC;
+ action[len++] = guc_id;
+ action[len++] = ce->parallel.number_children + 1;
+ action[len++] = offset;
+ for_each_child(ce, child) {
+ offset += sizeof(struct guc_lrc_desc_v69);
+ action[len++] = offset;
+ }
+
+ return guc_submission_send_busy_loop(guc, action, len, 0, loop);
+}
+
+static int __guc_action_register_multi_lrc_v70(struct intel_guc *guc,
+ struct intel_context *ce,
+ struct guc_ctxt_registration_info *info,
+ bool loop)
{
struct intel_context *child;
u32 action[13 + (MAX_ENGINE_INSTANCE * 2)];
@@ -2190,9 +2280,24 @@ static int __guc_action_register_multi_lrc(struct intel_guc *guc,
return guc_submission_send_busy_loop(guc, action, len, 0, loop);
}
-static int __guc_action_register_context(struct intel_guc *guc,
- struct guc_ctxt_registration_info *info,
- bool loop)
+static int __guc_action_register_context_v69(struct intel_guc *guc,
+ u32 guc_id,
+ u32 offset,
+ bool loop)
+{
+ u32 action[] = {
+ INTEL_GUC_ACTION_REGISTER_CONTEXT,
+ guc_id,
+ offset,
+ };
+
+ return guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
+ 0, loop);
+}
+
+static int __guc_action_register_context_v70(struct intel_guc *guc,
+ struct guc_ctxt_registration_info *info,
+ bool loop)
{
u32 action[] = {
INTEL_GUC_ACTION_REGISTER_CONTEXT,
@@ -2213,24 +2318,52 @@ static int __guc_action_register_context(struct intel_guc *guc,
0, loop);
}
-static void prepare_context_registration_info(struct intel_context *ce,
- struct guc_ctxt_registration_info *info);
+static void prepare_context_registration_info_v69(struct intel_context *ce);
+static void prepare_context_registration_info_v70(struct intel_context *ce,
+ struct guc_ctxt_registration_info *info);
-static int register_context(struct intel_context *ce, bool loop)
+static int
+register_context_v69(struct intel_guc *guc, struct intel_context *ce, bool loop)
+{
+ u32 offset = intel_guc_ggtt_offset(guc, guc->lrc_desc_pool_v69) +
+ ce->guc_id.id * sizeof(struct guc_lrc_desc_v69);
+
+ prepare_context_registration_info_v69(ce);
+
+ if (intel_context_is_parent(ce))
+ return __guc_action_register_multi_lrc_v69(guc, ce, ce->guc_id.id,
+ offset, loop);
+ else
+ return __guc_action_register_context_v69(guc, ce->guc_id.id,
+ offset, loop);
+}
+
+static int
+register_context_v70(struct intel_guc *guc, struct intel_context *ce, bool loop)
{
struct guc_ctxt_registration_info info;
+
+ prepare_context_registration_info_v70(ce, &info);
+
+ if (intel_context_is_parent(ce))
+ return __guc_action_register_multi_lrc_v70(guc, ce, &info, loop);
+ else
+ return __guc_action_register_context_v70(guc, &info, loop);
+}
+
+static int register_context(struct intel_context *ce, bool loop)
+{
struct intel_guc *guc = ce_to_guc(ce);
int ret;
GEM_BUG_ON(intel_context_is_child(ce));
trace_intel_context_register(ce);
- prepare_context_registration_info(ce, &info);
-
- if (intel_context_is_parent(ce))
- ret = __guc_action_register_multi_lrc(guc, ce, &info, loop);
+ if (guc->fw.major_ver_found >= 70)
+ ret = register_context_v70(guc, ce, loop);
else
- ret = __guc_action_register_context(guc, &info, loop);
+ ret = register_context_v69(guc, ce, loop);
+
if (likely(!ret)) {
unsigned long flags;
@@ -2238,7 +2371,8 @@ static int register_context(struct intel_context *ce, bool loop)
set_context_registered(ce);
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
- guc_context_policy_init(ce, loop);
+ if (guc->fw.major_ver_found >= 70)
+ guc_context_policy_init_v70(ce, loop);
}
return ret;
@@ -2335,7 +2469,7 @@ static int __guc_context_set_context_policies(struct intel_guc *guc,
0, loop);
}
-static int guc_context_policy_init(struct intel_context *ce, bool loop)
+static int guc_context_policy_init_v70(struct intel_context *ce, bool loop)
{
struct intel_engine_cs *engine = ce->engine;
struct intel_guc *guc = &engine->gt->uc.guc;
@@ -2394,8 +2528,108 @@ static int guc_context_policy_init(struct intel_context *ce, bool loop)
return ret;
}
-static void prepare_context_registration_info(struct intel_context *ce,
- struct guc_ctxt_registration_info *info)
+static void guc_context_policy_init_v69(struct intel_engine_cs *engine,
+ struct guc_lrc_desc_v69 *desc)
+{
+ desc->policy_flags = 0;
+
+ if (engine->flags & I915_ENGINE_WANT_FORCED_PREEMPTION)
+ desc->policy_flags |= CONTEXT_POLICY_FLAG_PREEMPT_TO_IDLE_V69;
+
+ /* NB: For both of these, zero means disabled. */
+ desc->execution_quantum = engine->props.timeslice_duration_ms * 1000;
+ desc->preemption_timeout = engine->props.preempt_timeout_ms * 1000;
+}
+
+static u32 map_guc_prio_to_lrc_desc_prio(u8 prio)
+{
+ /*
+ * this matches the mapping we do in map_i915_prio_to_guc_prio()
+ * (e.g. prio < I915_PRIORITY_NORMAL maps to GUC_CLIENT_PRIORITY_NORMAL)
+ */
+ switch (prio) {
+ default:
+ MISSING_CASE(prio);
+ fallthrough;
+ case GUC_CLIENT_PRIORITY_KMD_NORMAL:
+ return GEN12_CTX_PRIORITY_NORMAL;
+ case GUC_CLIENT_PRIORITY_NORMAL:
+ return GEN12_CTX_PRIORITY_LOW;
+ case GUC_CLIENT_PRIORITY_HIGH:
+ case GUC_CLIENT_PRIORITY_KMD_HIGH:
+ return GEN12_CTX_PRIORITY_HIGH;
+ }
+}
+
+static void prepare_context_registration_info_v69(struct intel_context *ce)
+{
+ struct intel_engine_cs *engine = ce->engine;
+ struct intel_guc *guc = &engine->gt->uc.guc;
+ u32 ctx_id = ce->guc_id.id;
+ struct guc_lrc_desc_v69 *desc;
+ struct intel_context *child;
+
+ GEM_BUG_ON(!engine->mask);
+
+ /*
+ * Ensure the LRC and CT vmas are in the same region, as the write
+ * barrier is done based on the CT vma region.
+ */
+ GEM_BUG_ON(i915_gem_object_is_lmem(guc->ct.vma->obj) !=
+ i915_gem_object_is_lmem(ce->ring->vma->obj));
+
+ desc = __get_lrc_desc_v69(guc, ctx_id);
+ desc->engine_class = engine_class_to_guc_class(engine->class);
+ desc->engine_submit_mask = engine->logical_mask;
+ desc->hw_context_desc = ce->lrc.lrca;
+ desc->priority = ce->guc_state.prio;
+ desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
+ guc_context_policy_init_v69(engine, desc);
+
+ /*
+ * If context is a parent, we need to register a process descriptor
+ * describing a work queue and register all child contexts.
+ */
+ if (intel_context_is_parent(ce)) {
+ struct guc_process_desc_v69 *pdesc;
+
+ ce->parallel.guc.wqi_tail = 0;
+ ce->parallel.guc.wqi_head = 0;
+
+ desc->process_desc = i915_ggtt_offset(ce->state) +
+ __get_parent_scratch_offset(ce);
+ desc->wq_addr = i915_ggtt_offset(ce->state) +
+ __get_wq_offset(ce);
+ desc->wq_size = WQ_SIZE;
+
+ pdesc = __get_process_desc_v69(ce);
+ memset(pdesc, 0, sizeof(*pdesc));
+ pdesc->stage_id = ce->guc_id.id;
+ pdesc->wq_base_addr = desc->wq_addr;
+ pdesc->wq_size_bytes = desc->wq_size;
+ pdesc->wq_status = WQ_STATUS_ACTIVE;
+
+ ce->parallel.guc.wq_head = &pdesc->head;
+ ce->parallel.guc.wq_tail = &pdesc->tail;
+ ce->parallel.guc.wq_status = &pdesc->wq_status;
+
+ for_each_child(ce, child) {
+ desc = __get_lrc_desc_v69(guc, child->guc_id.id);
+
+ desc->engine_class =
+ engine_class_to_guc_class(engine->class);
+ desc->hw_context_desc = child->lrc.lrca;
+ desc->priority = ce->guc_state.prio;
+ desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
+ guc_context_policy_init_v69(engine, desc);
+ }
+
+ clear_children_join_go_memory(ce);
+ }
+}
+
+static void prepare_context_registration_info_v70(struct intel_context *ce,
+ struct guc_ctxt_registration_info *info)
{
struct intel_engine_cs *engine = ce->engine;
struct intel_guc *guc = &engine->gt->uc.guc;
@@ -2420,6 +2654,8 @@ static void prepare_context_registration_info(struct intel_context *ce,
*/
info->hwlrca_lo = lower_32_bits(ce->lrc.lrca);
info->hwlrca_hi = upper_32_bits(ce->lrc.lrca);
+ if (engine->flags & I915_ENGINE_HAS_EU_PRIORITY)
+ info->hwlrca_lo |= map_guc_prio_to_lrc_desc_prio(ce->guc_state.prio);
info->flags = CONTEXT_REGISTRATION_FLAG_KMD;
/*
@@ -2443,10 +2679,14 @@ static void prepare_context_registration_info(struct intel_context *ce,
info->wq_base_hi = upper_32_bits(wq_base_offset);
info->wq_size = WQ_SIZE;
- wq_desc = __get_wq_desc(ce);
+ wq_desc = __get_wq_desc_v70(ce);
memset(wq_desc, 0, sizeof(*wq_desc));
wq_desc->wq_status = WQ_STATUS_ACTIVE;
+ ce->parallel.guc.wq_head = &wq_desc->head;
+ ce->parallel.guc.wq_tail = &wq_desc->tail;
+ ce->parallel.guc.wq_status = &wq_desc->wq_status;
+
clear_children_join_go_memory(ce);
}
}
@@ -2761,11 +3001,21 @@ static void __guc_context_set_preemption_timeout(struct intel_guc *guc,
u16 guc_id,
u32 preemption_timeout)
{
- struct context_policy policy;
+ if (guc->fw.major_ver_found >= 70) {
+ struct context_policy policy;
- __guc_context_policy_start_klv(&policy, guc_id);
- __guc_context_policy_add_preemption_timeout(&policy, preemption_timeout);
- __guc_context_set_context_policies(guc, &policy, true);
+ __guc_context_policy_start_klv(&policy, guc_id);
+ __guc_context_policy_add_preemption_timeout(&policy, preemption_timeout);
+ __guc_context_set_context_policies(guc, &policy, true);
+ } else {
+ u32 action[] = {
+ INTEL_GUC_ACTION_V69_SET_CONTEXT_PREEMPTION_TIMEOUT,
+ guc_id,
+ preemption_timeout
+ };
+
+ intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
+ }
}
static void guc_context_ban(struct intel_context *ce, struct i915_request *rq)
@@ -3013,11 +3263,21 @@ static int guc_context_alloc(struct intel_context *ce)
static void __guc_context_set_prio(struct intel_guc *guc,
struct intel_context *ce)
{
- struct context_policy policy;
+ if (guc->fw.major_ver_found >= 70) {
+ struct context_policy policy;
- __guc_context_policy_start_klv(&policy, ce->guc_id.id);
- __guc_context_policy_add_priority(&policy, ce->guc_state.prio);
- __guc_context_set_context_policies(guc, &policy, true);
+ __guc_context_policy_start_klv(&policy, ce->guc_id.id);
+ __guc_context_policy_add_priority(&policy, ce->guc_state.prio);
+ __guc_context_set_context_policies(guc, &policy, true);
+ } else {
+ u32 action[] = {
+ INTEL_GUC_ACTION_V69_SET_CONTEXT_PRIORITY,
+ ce->guc_id.id,
+ ce->guc_state.prio,
+ };
+
+ guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
+ }
}
static void guc_context_set_prio(struct intel_guc *guc,
@@ -4527,17 +4787,19 @@ void intel_guc_submission_print_context_info(struct intel_guc *guc,
guc_log_context_priority(p, ce);
if (intel_context_is_parent(ce)) {
- struct guc_sched_wq_desc *wq_desc = __get_wq_desc(ce);
struct intel_context *child;
drm_printf(p, "\t\tNumber children: %u\n",
ce->parallel.number_children);
- drm_printf(p, "\t\tWQI Head: %u\n",
- READ_ONCE(wq_desc->head));
- drm_printf(p, "\t\tWQI Tail: %u\n",
- READ_ONCE(wq_desc->tail));
- drm_printf(p, "\t\tWQI Status: %u\n\n",
- READ_ONCE(wq_desc->wq_status));
+
+ if (ce->parallel.guc.wq_status) {
+ drm_printf(p, "\t\tWQI Head: %u\n",
+ READ_ONCE(*ce->parallel.guc.wq_head));
+ drm_printf(p, "\t\tWQI Tail: %u\n",
+ READ_ONCE(*ce->parallel.guc.wq_tail));
+ drm_printf(p, "\t\tWQI Status: %u\n\n",
+ READ_ONCE(*ce->parallel.guc.wq_status));
+ }
if (ce->engine->emit_bb_start ==
emit_bb_start_parent_no_preempt_mid_batch) {
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index d078f884b5e3..703f42ba5ddd 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -70,6 +70,10 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
fw_def(BROXTON, 0, guc_def(bxt, 70, 1, 1)) \
fw_def(SKYLAKE, 0, guc_def(skl, 70, 1, 1))
+#define INTEL_GUC_FIRMWARE_DEFS_FALLBACK(fw_def, guc_def) \
+ fw_def(ALDERLAKE_P, 0, guc_def(adlp, 69, 0, 3)) \
+ fw_def(ALDERLAKE_S, 0, guc_def(tgl, 69, 0, 3))
+
#define INTEL_HUC_FIRMWARE_DEFS(fw_def, huc_def) \
fw_def(ALDERLAKE_P, 0, huc_def(tgl, 7, 9, 3)) \
fw_def(ALDERLAKE_S, 0, huc_def(tgl, 7, 9, 3)) \
@@ -105,6 +109,7 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
MODULE_FIRMWARE(uc_);
INTEL_GUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH)
+INTEL_GUC_FIRMWARE_DEFS_FALLBACK(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH)
INTEL_HUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_HUC_FW_PATH)
/* The below structs and macros are used to iterate across the list of blobs */
@@ -149,6 +154,9 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
static const struct uc_fw_platform_requirement blobs_guc[] = {
INTEL_GUC_FIRMWARE_DEFS(MAKE_FW_LIST, GUC_FW_BLOB)
};
+ static const struct uc_fw_platform_requirement blobs_guc_fallback[] = {
+ INTEL_GUC_FIRMWARE_DEFS_FALLBACK(MAKE_FW_LIST, GUC_FW_BLOB)
+ };
static const struct uc_fw_platform_requirement blobs_huc[] = {
INTEL_HUC_FIRMWARE_DEFS(MAKE_FW_LIST, HUC_FW_BLOB)
};
@@ -156,12 +164,21 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
[INTEL_UC_FW_TYPE_GUC] = { blobs_guc, ARRAY_SIZE(blobs_guc) },
[INTEL_UC_FW_TYPE_HUC] = { blobs_huc, ARRAY_SIZE(blobs_huc) },
};
- static const struct uc_fw_platform_requirement *fw_blobs;
+ const struct uc_fw_platform_requirement *fw_blobs;
enum intel_platform p = INTEL_INFO(i915)->platform;
u32 fw_count;
u8 rev = INTEL_REVID(i915);
int i;
+ /*
+ * The only difference between the ADL GuC FWs is the HWConfig support.
+ * ADL-N does not support HWConfig, so we should use the same binary as
+ * ADL-S, otherwise the GuC might attempt to fetch a config table that
+ * does not exist.
+ */
+ if (IS_ADLP_N(i915))
+ p = INTEL_ALDERLAKE_S;
+
GEM_BUG_ON(uc_fw->type >= ARRAY_SIZE(blobs_all));
fw_blobs = blobs_all[uc_fw->type].blobs;
fw_count = blobs_all[uc_fw->type].count;
@@ -170,12 +187,29 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
if (p == fw_blobs[i].p && rev >= fw_blobs[i].rev) {
const struct uc_fw_blob *blob = &fw_blobs[i].blob;
uc_fw->path = blob->path;
+ uc_fw->wanted_path = blob->path;
uc_fw->major_ver_wanted = blob->major;
uc_fw->minor_ver_wanted = blob->minor;
break;
}
}
+ if (uc_fw->type == INTEL_UC_FW_TYPE_GUC) {
+ const struct uc_fw_platform_requirement *blobs = blobs_guc_fallback;
+ u32 count = ARRAY_SIZE(blobs_guc_fallback);
+
+ for (i = 0; i < count && p <= blobs[i].p; i++) {
+ if (p == blobs[i].p && rev >= blobs[i].rev) {
+ const struct uc_fw_blob *blob = &blobs[i].blob;
+
+ uc_fw->fallback.path = blob->path;
+ uc_fw->fallback.major_ver = blob->major;
+ uc_fw->fallback.minor_ver = blob->minor;
+ break;
+ }
+ }
+ }
+
/* make sure the list is ordered as expected */
if (IS_ENABLED(CONFIG_DRM_I915_SELFTEST)) {
for (i = 1; i < fw_count; i++) {
@@ -329,7 +363,24 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
__force_fw_fetch_failures(uc_fw, -EINVAL);
__force_fw_fetch_failures(uc_fw, -ESTALE);
- err = request_firmware(&fw, uc_fw->path, dev);
+ err = firmware_request_nowarn(&fw, uc_fw->path, dev);
+ if (err && !intel_uc_fw_is_overridden(uc_fw) && uc_fw->fallback.path) {
+ err = firmware_request_nowarn(&fw, uc_fw->fallback.path, dev);
+ if (!err) {
+ drm_notice(&i915->drm,
+ "%s firmware %s is recommended, but only %s was found\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ uc_fw->wanted_path,
+ uc_fw->fallback.path);
+ drm_info(&i915->drm,
+ "Consider updating your linux-firmware pkg or downloading from %s\n",
+ INTEL_UC_FIRMWARE_URL);
+
+ uc_fw->path = uc_fw->fallback.path;
+ uc_fw->major_ver_wanted = uc_fw->fallback.major_ver;
+ uc_fw->minor_ver_wanted = uc_fw->fallback.minor_ver;
+ }
+ }
if (err)
goto fail;
@@ -428,8 +479,8 @@ fail:
INTEL_UC_FIRMWARE_MISSING :
INTEL_UC_FIRMWARE_ERROR);
- drm_notice(&i915->drm, "%s firmware %s: fetch failed with error %d\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
+ i915_probe_error(i915, "%s firmware %s: fetch failed with error %d\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
drm_info(&i915->drm, "%s firmware(s) can be downloaded from %s\n",
intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL);
@@ -787,7 +838,13 @@ size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len)
void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p)
{
drm_printf(p, "%s firmware: %s\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->wanted_path);
+ if (uc_fw->fallback.path) {
+ drm_printf(p, "%s firmware fallback: %s\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->fallback.path);
+ drm_printf(p, "fallback selected: %s\n",
+ str_yes_no(uc_fw->path == uc_fw->fallback.path));
+ }
drm_printf(p, "\tstatus: %s\n",
intel_uc_fw_status_repr(uc_fw->status));
drm_printf(p, "\tversion: wanted %u.%u, found %u.%u\n",
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
index 3229018877d3..562acdf88adb 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
@@ -74,6 +74,7 @@ struct intel_uc_fw {
const enum intel_uc_fw_status status;
enum intel_uc_fw_status __status; /* no accidental overwrites */
};
+ const char *wanted_path;
const char *path;
bool user_overridden;
size_t size;
@@ -98,6 +99,12 @@ struct intel_uc_fw {
u16 major_ver_found;
u16 minor_ver_found;
+ struct {
+ const char *path;
+ u16 major_ver;
+ u16 minor_ver;
+ } fallback;
+
u32 rsa_size;
u32 ucode_size;
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index b9eb75a2b400..1c35a41620ae 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -3117,9 +3117,9 @@ void intel_gvt_update_reg_whitelist(struct intel_vgpu *vgpu)
continue;
vaddr = shmem_pin_map(engine->default_state);
- if (IS_ERR(vaddr)) {
- gvt_err("failed to map %s->default state, err:%zd\n",
- engine->name, PTR_ERR(vaddr));
+ if (!vaddr) {
+ gvt_err("failed to map %s->default state\n",
+ engine->name);
return;
}
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index 90b0ce5051af..1041b5340465 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -530,6 +530,7 @@ mask_err:
static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ struct pci_dev *root_pdev;
int ret;
if (i915_inject_probe_failure(dev_priv))
@@ -641,6 +642,15 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
intel_bw_init_hw(dev_priv);
+ /*
+ * FIXME: Temporary hammer to avoid freezing the machine on our DGFX.
+ * This should be totally removed when we handle the pci states properly
+ * on runtime PM and on s2idle cases.
+ */
+ root_pdev = pcie_find_root_port(pdev);
+ if (root_pdev)
+ pci_d3cold_disable(root_pdev);
+
return 0;
err_msi:
@@ -664,11 +674,16 @@ err_perf:
static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ struct pci_dev *root_pdev;
i915_perf_fini(dev_priv);
if (pdev->msi_enabled)
pci_disable_msi(pdev);
+
+ root_pdev = pcie_find_root_port(pdev);
+ if (root_pdev)
+ pci_d3cold_enable(root_pdev);
}
/**
@@ -1193,14 +1208,6 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
goto out;
}
- /*
- * FIXME: Temporary hammer to avoid freezing the machine on our DGFX
- * This should be totally removed when we handle the pci states properly
- * on runtime PM and on s2idle cases.
- */
- if (suspend_to_idle(dev_priv))
- pci_d3cold_disable(pdev);
-
pci_disable_device(pdev);
/*
* During hibernation on some platforms the BIOS may try to access
@@ -1365,8 +1372,6 @@ static int i915_drm_resume_early(struct drm_device *dev)
pci_set_master(pdev);
- pci_d3cold_enable(pdev);
-
disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
ret = vlv_resume_prepare(dev_priv, false);
@@ -1543,7 +1548,6 @@ static int intel_runtime_suspend(struct device *kdev)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
- struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
int ret;
if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
@@ -1589,12 +1593,6 @@ static int intel_runtime_suspend(struct device *kdev)
drm_err(&dev_priv->drm,
"Unclaimed access detected prior to suspending\n");
- /*
- * FIXME: Temporary hammer to avoid freezing the machine on our DGFX
- * This should be totally removed when we handle the pci states properly
- * on runtime PM and on s2idle cases.
- */
- pci_d3cold_disable(pdev);
rpm->suspended = true;
/*
@@ -1633,7 +1631,6 @@ static int intel_runtime_resume(struct device *kdev)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
- struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
int ret;
if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
@@ -1646,7 +1643,6 @@ static int intel_runtime_resume(struct device *kdev)
intel_opregion_notify_adapter(dev_priv, PCI_D0);
rpm->suspended = false;
- pci_d3cold_enable(pdev);
if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
drm_dbg(&dev_priv->drm,
"Unclaimed access during suspend, bios?\n");
diff --git a/drivers/gpu/drm/i915/i915_drm_client.c b/drivers/gpu/drm/i915/i915_drm_client.c
index 18d38cb59923..b09d1d386574 100644
--- a/drivers/gpu/drm/i915/i915_drm_client.c
+++ b/drivers/gpu/drm/i915/i915_drm_client.c
@@ -116,8 +116,9 @@ show_client_class(struct seq_file *m,
total += busy_add(ctx, class);
rcu_read_unlock();
- seq_printf(m, "drm-engine-%s:\t%llu ns\n",
- uabi_class_names[class], total);
+ if (capacity)
+ seq_printf(m, "drm-engine-%s:\t%llu ns\n",
+ uabi_class_names[class], total);
if (capacity > 1)
seq_printf(m, "drm-engine-capacity-%s:\t%u\n",
diff --git a/drivers/gpu/drm/i915/i915_scatterlist.c b/drivers/gpu/drm/i915/i915_scatterlist.c
index 159571b9bd24..dcc081874ec8 100644
--- a/drivers/gpu/drm/i915/i915_scatterlist.c
+++ b/drivers/gpu/drm/i915/i915_scatterlist.c
@@ -68,6 +68,7 @@ void i915_refct_sgt_init(struct i915_refct_sgt *rsgt, size_t size)
* drm_mm_node
* @node: The drm_mm_node.
* @region_start: An offset to add to the dma addresses of the sg list.
+ * @page_alignment: Required page alignment for each sg entry. Power of two.
*
* Create a struct sg_table, initializing it from a struct drm_mm_node,
* taking a maximum segment length into account, splitting into segments
@@ -77,22 +78,25 @@ void i915_refct_sgt_init(struct i915_refct_sgt *rsgt, size_t size)
* error code cast to an error pointer on failure.
*/
struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
- u64 region_start)
+ u64 region_start,
+ u32 page_alignment)
{
- const u64 max_segment = SZ_1G; /* Do we have a limit on this? */
- u64 segment_pages = max_segment >> PAGE_SHIFT;
+ const u32 max_segment = round_down(UINT_MAX, page_alignment);
+ const u32 segment_pages = max_segment >> PAGE_SHIFT;
u64 block_size, offset, prev_end;
struct i915_refct_sgt *rsgt;
struct sg_table *st;
struct scatterlist *sg;
+ GEM_BUG_ON(!max_segment);
+
rsgt = kmalloc(sizeof(*rsgt), GFP_KERNEL);
if (!rsgt)
return ERR_PTR(-ENOMEM);
i915_refct_sgt_init(rsgt, node->size << PAGE_SHIFT);
st = &rsgt->table;
- if (sg_alloc_table(st, DIV_ROUND_UP(node->size, segment_pages),
+ if (sg_alloc_table(st, DIV_ROUND_UP_ULL(node->size, segment_pages),
GFP_KERNEL)) {
i915_refct_sgt_put(rsgt);
return ERR_PTR(-ENOMEM);
@@ -112,12 +116,14 @@ struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
sg = __sg_next(sg);
sg_dma_address(sg) = region_start + offset;
+ GEM_BUG_ON(!IS_ALIGNED(sg_dma_address(sg),
+ page_alignment));
sg_dma_len(sg) = 0;
sg->length = 0;
st->nents++;
}
- len = min(block_size, max_segment - sg->length);
+ len = min_t(u64, block_size, max_segment - sg->length);
sg->length += len;
sg_dma_len(sg) += len;
@@ -138,6 +144,7 @@ struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
* i915_buddy_block list
* @res: The struct i915_ttm_buddy_resource.
* @region_start: An offset to add to the dma addresses of the sg list.
+ * @page_alignment: Required page alignment for each sg entry. Power of two.
*
* Create a struct sg_table, initializing it from struct i915_buddy_block list,
* taking a maximum segment length into account, splitting into segments
@@ -147,11 +154,12 @@ struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
* error code cast to an error pointer on failure.
*/
struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
- u64 region_start)
+ u64 region_start,
+ u32 page_alignment)
{
struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
const u64 size = res->num_pages << PAGE_SHIFT;
- const u64 max_segment = rounddown(UINT_MAX, PAGE_SIZE);
+ const u32 max_segment = round_down(UINT_MAX, page_alignment);
struct drm_buddy *mm = bman_res->mm;
struct list_head *blocks = &bman_res->blocks;
struct drm_buddy_block *block;
@@ -161,6 +169,7 @@ struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
resource_size_t prev_end;
GEM_BUG_ON(list_empty(blocks));
+ GEM_BUG_ON(!max_segment);
rsgt = kmalloc(sizeof(*rsgt), GFP_KERNEL);
if (!rsgt)
@@ -191,12 +200,14 @@ struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
sg = __sg_next(sg);
sg_dma_address(sg) = region_start + offset;
+ GEM_BUG_ON(!IS_ALIGNED(sg_dma_address(sg),
+ page_alignment));
sg_dma_len(sg) = 0;
sg->length = 0;
st->nents++;
}
- len = min(block_size, max_segment - sg->length);
+ len = min_t(u64, block_size, max_segment - sg->length);
sg->length += len;
sg_dma_len(sg) += len;
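Both sg-table builders now derive the maximum segment size from the caller's page_alignment instead of a hard-coded constant, so every split point stays a multiple of the region's minimum page size; the new GEM_BUG_ON asserts that the result is non-zero. A worked example of the arithmetic, assuming a 64 KiB minimum page size as on DG2:

    #include <linux/sizes.h>

    static u32 max_seg_for(u32 page_alignment)
    {
            /* Largest u32 multiple of the alignment; for SZ_64K this is
             * 0xffff0000, so chunks capped at max_segment keep every sg
             * entry aligned in both address and length. */
            return round_down(UINT_MAX, page_alignment);
    }

    /* Appending then uses a u64-safe clamp, as in the hunks above:
     *   len = min_t(u64, block_size, max_segment - sg->length); */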
diff --git a/drivers/gpu/drm/i915/i915_scatterlist.h b/drivers/gpu/drm/i915/i915_scatterlist.h
index 12c6a1684081..9ddb3e743a3e 100644
--- a/drivers/gpu/drm/i915/i915_scatterlist.h
+++ b/drivers/gpu/drm/i915/i915_scatterlist.h
@@ -213,9 +213,11 @@ static inline void __i915_refct_sgt_init(struct i915_refct_sgt *rsgt,
void i915_refct_sgt_init(struct i915_refct_sgt *rsgt, size_t size);
struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
- u64 region_start);
+ u64 region_start,
+ u32 page_alignment);
struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
- u64 region_start);
+ u64 region_start,
+ u32 page_alignment);
#endif
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 8521daba212a..1e2750210831 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -166,7 +166,14 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
struct device *kdev = kobj_to_dev(kobj);
struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
struct i915_gpu_coredump *gpu;
- ssize_t ret;
+ ssize_t ret = 0;
+
+ /*
+ * FIXME: Concurrent clients triggering resets and reading + clearing
+ * dumps can cause inconsistent sysfs reads: a user may call in with a
+ * non-zero offset to complete a prior partial read after the
+ * gpu_coredump has been cleared or replaced.
+ */
gpu = i915_first_error_state(i915);
if (IS_ERR(gpu)) {
@@ -178,8 +185,10 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
const char *str = "No error state collected\n";
size_t len = strlen(str);
- ret = min_t(size_t, count, len - off);
- memcpy(buf, str + off, ret);
+ if (off < len) {
+ ret = min_t(size_t, count, len - off);
+ memcpy(buf, str + off, ret);
+ }
}
return ret;
@@ -259,4 +268,6 @@ void i915_teardown_sysfs(struct drm_i915_private *dev_priv)
device_remove_bin_file(kdev, &dpf_attrs_1);
device_remove_bin_file(kdev, &dpf_attrs);
+
+ kobject_put(dev_priv->sysfs_gt);
}
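With ret pre-initialised to 0, a read whose offset lands at or past the end of the fallback string now returns 0 (EOF) instead of computing len - off with an underflowed size_t and copying garbage. The shape of the fix, as a self-contained sketch of a sysfs bin-attribute read:

    #include <linux/minmax.h>
    #include <linux/string.h>

    static ssize_t read_fixed_string(char *buf, loff_t off, size_t count)
    {
            static const char str[] = "No error state collected\n";
            size_t len = strlen(str);
            ssize_t ret = 0;

            if (off < len) {        /* past the end: return 0 == EOF */
                    ret = min_t(size_t, count, len - off);
                    memcpy(buf, str + off, ret);
            }
            return ret;
    }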
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 4f6db539571a..04d12f278f57 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -23,6 +23,7 @@
*/
#include <linux/sched/mm.h>
+#include <linux/dma-fence-array.h>
#include <drm/drm_gem.h>
#include "display/intel_frontbuffer.h"
@@ -1636,10 +1637,10 @@ static void force_unbind(struct i915_vma *vma)
GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
}
-static void release_references(struct i915_vma *vma, bool vm_ddestroy)
+static void release_references(struct i915_vma *vma, struct intel_gt *gt,
+ bool vm_ddestroy)
{
struct drm_i915_gem_object *obj = vma->obj;
- struct intel_gt *gt = vma->vm->gt;
GEM_BUG_ON(i915_vma_is_active(vma));
@@ -1694,11 +1695,12 @@ void i915_vma_destroy_locked(struct i915_vma *vma)
force_unbind(vma);
list_del_init(&vma->vm_link);
- release_references(vma, false);
+ release_references(vma, vma->vm->gt, false);
}
void i915_vma_destroy(struct i915_vma *vma)
{
+ struct intel_gt *gt;
bool vm_ddestroy;
mutex_lock(&vma->vm->mutex);
@@ -1706,8 +1708,11 @@ void i915_vma_destroy(struct i915_vma *vma)
list_del_init(&vma->vm_link);
vm_ddestroy = vma->vm_ddestroy;
vma->vm_ddestroy = false;
+
+ /* vma->vm may be freed when releasing vma->vm->mutex. */
+ gt = vma->vm->gt;
mutex_unlock(&vma->vm->mutex);
- release_references(vma, vm_ddestroy);
+ release_references(vma, gt, vm_ddestroy);
}
void i915_vma_parked(struct intel_gt *gt)
@@ -1823,6 +1828,21 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
if (unlikely(err))
return err;
+ /*
+ * Reserve fence slots early so that no allocation happens after the
+ * workload has been prepared and fences are being associated with
+ * the dma_resv.
+ */
+ if (fence && !(flags & __EXEC_OBJECT_NO_RESERVE)) {
+ struct dma_fence *curr;
+ int idx;
+
+ dma_fence_array_for_each(curr, idx, fence)
+ ;
+ err = dma_resv_reserve_fences(vma->obj->base.resv, idx);
+ if (unlikely(err))
+ return err;
+ }
+
if (flags & EXEC_OBJECT_WRITE) {
struct intel_frontbuffer *front;
@@ -1832,31 +1852,23 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
i915_active_add_request(&front->write, rq);
intel_frontbuffer_put(front);
}
+ }
- if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
- err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
- if (unlikely(err))
- return err;
- }
+ if (fence) {
+ struct dma_fence *curr;
+ enum dma_resv_usage usage;
+ int idx;
- if (fence) {
- dma_resv_add_fence(vma->obj->base.resv, fence,
- DMA_RESV_USAGE_WRITE);
+ obj->read_domains = 0;
+ if (flags & EXEC_OBJECT_WRITE) {
+ usage = DMA_RESV_USAGE_WRITE;
obj->write_domain = I915_GEM_DOMAIN_RENDER;
- obj->read_domains = 0;
- }
- } else {
- if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
- err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
- if (unlikely(err))
- return err;
+ } else {
+ usage = DMA_RESV_USAGE_READ;
}
- if (fence) {
- dma_resv_add_fence(vma->obj->base.resv, fence,
- DMA_RESV_USAGE_READ);
- obj->write_domain = 0;
- }
+ dma_fence_array_for_each(curr, idx, fence)
+ dma_resv_add_fence(vma->obj->base.resv, curr, usage);
}
if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
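The rework walks a possibly composite fence once with dma_fence_array_for_each() purely to count its components (the iterator leaves idx equal to the count), reserves that many dma_resv slots before any state is published, and later adds each component with a usage derived from the write flag. A sketch of the reserve-then-add idiom, assuming the resv is already locked as it is in this path:

    #include <linux/dma-fence-array.h>
    #include <linux/dma-resv.h>

    static int add_composite_fence(struct dma_resv *resv,
                                   struct dma_fence *fence,
                                   enum dma_resv_usage usage)
    {
            struct dma_fence *curr;
            int idx, err;

            dma_fence_array_for_each(curr, idx, fence)
                    ;       /* count only; a plain fence counts as one */

            err = dma_resv_reserve_fences(resv, idx);
            if (err)
                    return err;

            dma_fence_array_for_each(curr, idx, fence)
                    dma_resv_add_fence(resv, curr, usage);
            return 0;
    }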
diff --git a/drivers/gpu/drm/i915/intel_region_ttm.c b/drivers/gpu/drm/i915/intel_region_ttm.c
index 62ff77445b01..575d67bc6ffe 100644
--- a/drivers/gpu/drm/i915/intel_region_ttm.c
+++ b/drivers/gpu/drm/i915/intel_region_ttm.c
@@ -152,6 +152,7 @@ int intel_region_ttm_fini(struct intel_memory_region *mem)
* Convert an opaque TTM resource manager resource to a refcounted sg_table.
* @mem: The memory region.
* @res: The resource manager resource obtained from the TTM resource manager.
+ * @page_alignment: Required page alignment for each sg entry. Power of two.
*
* The gem backends typically use sg-tables for operations on the underlying
* io_memory. So provide a way for the backends to translate the
@@ -161,16 +162,19 @@ int intel_region_ttm_fini(struct intel_memory_region *mem)
*/
struct i915_refct_sgt *
intel_region_ttm_resource_to_rsgt(struct intel_memory_region *mem,
- struct ttm_resource *res)
+ struct ttm_resource *res,
+ u32 page_alignment)
{
if (mem->is_range_manager) {
struct ttm_range_mgr_node *range_node =
to_ttm_range_mgr_node(res);
return i915_rsgt_from_mm_node(&range_node->mm_nodes[0],
- mem->region.start);
+ mem->region.start,
+ page_alignment);
} else {
- return i915_rsgt_from_buddy_resource(res, mem->region.start);
+ return i915_rsgt_from_buddy_resource(res, mem->region.start,
+ page_alignment);
}
}
diff --git a/drivers/gpu/drm/i915/intel_region_ttm.h b/drivers/gpu/drm/i915/intel_region_ttm.h
index cf9d86dcf409..5bb8d8b582ae 100644
--- a/drivers/gpu/drm/i915/intel_region_ttm.h
+++ b/drivers/gpu/drm/i915/intel_region_ttm.h
@@ -24,7 +24,8 @@ int intel_region_ttm_fini(struct intel_memory_region *mem);
struct i915_refct_sgt *
intel_region_ttm_resource_to_rsgt(struct intel_memory_region *mem,
- struct ttm_resource *res);
+ struct ttm_resource *res,
+ u32 page_alignment);
void intel_region_ttm_resource_free(struct intel_memory_region *mem,
struct ttm_resource *res);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 8633bec18fa7..ab9f17fc85bc 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -742,7 +742,7 @@ static int pot_hole(struct i915_address_space *vm,
u64 addr;
for (addr = round_up(hole_start + min_alignment, step) - min_alignment;
- addr <= round_down(hole_end - (2 * min_alignment), step) - min_alignment;
+ hole_end > addr && hole_end - addr >= 2 * min_alignment;
addr += step) {
err = i915_vma_pin(vma, 0, 0, addr | flags);
if (err) {
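pot_hole()'s old upper bound subtracted from hole_end before comparing; for a hole smaller than twice the alignment the u64 subtraction wraps and the bound becomes enormous, so the loop marches far outside the hole. Comparing the remaining room directly never subtracts past zero:

    /* Unsafe: wraps when hole_end < 2 * min_alignment (after rounding):
     *   addr <= round_down(hole_end - 2 * min_alignment, step) - min_alignment
     * Safe: prove hole_end > addr before subtracting. */
    static bool hole_has_room(u64 addr, u64 hole_end, u64 min_alignment)
    {
            return hole_end > addr && hole_end - addr >= 2 * min_alignment;
    }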
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 73eb53edb8de..3b18e5905c86 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -451,7 +451,6 @@ out_put:
static int igt_mock_max_segment(void *arg)
{
- const unsigned int max_segment = rounddown(UINT_MAX, PAGE_SIZE);
struct intel_memory_region *mem = arg;
struct drm_i915_private *i915 = mem->i915;
struct i915_ttm_buddy_resource *res;
@@ -460,7 +459,10 @@ static int igt_mock_max_segment(void *arg)
struct drm_buddy *mm;
struct list_head *blocks;
struct scatterlist *sg;
+ I915_RND_STATE(prng);
LIST_HEAD(objects);
+ unsigned int max_segment;
+ unsigned int ps;
u64 size;
int err = 0;
@@ -472,7 +474,13 @@ static int igt_mock_max_segment(void *arg)
*/
size = SZ_8G;
- mem = mock_region_create(i915, 0, size, PAGE_SIZE, 0, 0);
+ ps = PAGE_SIZE;
+ if (i915_prandom_u64_state(&prng) & 1)
+ ps = SZ_64K; /* For something like DG2 */
+
+ max_segment = round_down(UINT_MAX, ps);
+
+ mem = mock_region_create(i915, 0, size, ps, 0, 0);
if (IS_ERR(mem))
return PTR_ERR(mem);
@@ -498,12 +506,21 @@ static int igt_mock_max_segment(void *arg)
}
for (sg = obj->mm.pages->sgl; sg; sg = sg_next(sg)) {
+ dma_addr_t daddr = sg_dma_address(sg);
+
if (sg->length > max_segment) {
pr_err("%s: Created an oversized scatterlist entry, %u > %u\n",
__func__, sg->length, max_segment);
err = -EINVAL;
goto out_close;
}
+
+ if (!IS_ALIGNED(daddr, ps)) {
+ pr_err("%s: Created an unaligned scatterlist entry, addr=%pa, ps=%u\n",
+ __func__, &daddr, ps);
+ err = -EINVAL;
+ goto out_close;
+ }
}
out_close:
diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c b/drivers/gpu/drm/i915/selftests/mock_region.c
index 670557ce1024..bac21fe84ca5 100644
--- a/drivers/gpu/drm/i915/selftests/mock_region.c
+++ b/drivers/gpu/drm/i915/selftests/mock_region.c
@@ -33,7 +33,8 @@ static int mock_region_get_pages(struct drm_i915_gem_object *obj)
return PTR_ERR(obj->mm.res);
obj->mm.rsgt = intel_region_ttm_resource_to_rsgt(obj->mm.region,
- obj->mm.res);
+ obj->mm.res,
+ obj->mm.region->min_page_size);
if (IS_ERR(obj->mm.rsgt)) {
err = PTR_ERR(obj->mm.rsgt);
goto err_free_resource;
diff --git a/drivers/gpu/drm/imx/dcss/dcss-dev.c b/drivers/gpu/drm/imx/dcss/dcss-dev.c
index c849533ca83e..3f5750cc2673 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-dev.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-dev.c
@@ -207,6 +207,7 @@ struct dcss_dev *dcss_dev_create(struct device *dev, bool hdmi_output)
ret = dcss_submodules_init(dcss);
if (ret) {
+ of_node_put(dcss->of_port);
dev_err(dev, "submodules initialization failed\n");
goto clks_err;
}
@@ -237,6 +238,8 @@ void dcss_dev_destroy(struct dcss_dev *dcss)
dcss_clocks_disable(dcss);
}
+ of_node_put(dcss->of_port);
+
pm_runtime_disable(dcss->dev);
dcss_submodules_stop(dcss);
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 9c8829f945b2..f7863d6dea80 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -69,7 +69,7 @@ static void ipu_crtc_disable_planes(struct ipu_crtc *ipu_crtc,
drm_atomic_crtc_state_for_each_plane(plane, old_crtc_state) {
if (plane == &ipu_crtc->plane[0]->base)
disable_full = true;
- if (&ipu_crtc->plane[1] && plane == &ipu_crtc->plane[1]->base)
+ if (ipu_crtc->plane[1] && plane == &ipu_crtc->plane[1]->base)
disable_partial = true;
}
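The ipuv3-crtc fix is a classic address-of slip: &ipu_crtc->plane[1] is the address of an array element and can never be NULL, so the old test was unconditionally true, while the stored pointer ipu_crtc->plane[1] is what may actually be NULL. Compilers flag the former with -Waddress. In miniature:

    struct plane;
    struct crtc { struct plane *plane[2]; };

    static bool has_second_plane(struct crtc *c)
    {
            /* &c->plane[1] != NULL would always be true (-Waddress). */
            return c->plane[1] != NULL;     /* test the stored pointer */
    }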
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 4e665c806a14..efe9840e28fa 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -498,10 +498,15 @@ int adreno_hw_init(struct msm_gpu *gpu)
ring->cur = ring->start;
ring->next = ring->start;
-
- /* reset completed fence seqno: */
- ring->memptrs->fence = ring->fctx->completed_fence;
ring->memptrs->rptr = 0;
+
+ /* Detect and clean up an impossible fence, i.e. if the GPU managed
+ * to scribble something invalid, we don't want that to confuse us
+ * into mistakenly believing that submits have completed.
+ */
+ if (fence_before(ring->fctx->last_fence, ring->memptrs->fence)) {
+ ring->memptrs->fence = ring->fctx->last_fence;
+ }
}
return 0;
@@ -1057,7 +1062,8 @@ void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++)
release_firmware(adreno_gpu->fw[i]);
- pm_runtime_disable(&priv->gpu_pdev->dev);
+ if (pm_runtime_enabled(&priv->gpu_pdev->dev))
+ pm_runtime_disable(&priv->gpu_pdev->dev);
msm_gpu_cleanup(&adreno_gpu->base);
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 3a462e327e0e..a1b8c4592943 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -1251,12 +1251,13 @@ static void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
DPU_ATRACE_BEGIN("encoder_vblank_callback");
dpu_enc = to_dpu_encoder_virt(drm_enc);
+ atomic_inc(&phy_enc->vsync_cnt);
+
spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
if (dpu_enc->crtc)
dpu_crtc_vblank_callback(dpu_enc->crtc);
spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
- atomic_inc(&phy_enc->vsync_cnt);
DPU_ATRACE_END("encoder_vblank_callback");
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
index 59da348ff339..0ec809ab06e7 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
@@ -252,11 +252,6 @@ static int dpu_encoder_phys_wb_atomic_check(
DPU_DEBUG("[atomic_check:%d, \"%s\",%d,%d]\n",
phys_enc->wb_idx, mode->name, mode->hdisplay, mode->vdisplay);
- if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
- return 0;
-
- fb = conn_state->writeback_job->fb;
-
if (!conn_state || !conn_state->connector) {
DPU_ERROR("invalid connector state\n");
return -EINVAL;
@@ -267,6 +262,11 @@ static int dpu_encoder_phys_wb_atomic_check(
return -EINVAL;
}
+ if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
+ return 0;
+
+ fb = conn_state->writeback_job->fb;
+
DPU_DEBUG("[fb_id:%u][fb:%u,%u]\n", fb->base.id,
fb->width, fb->height);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
index 399115e4e217..2fd787079f9b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
@@ -11,7 +11,14 @@ static int dpu_wb_conn_get_modes(struct drm_connector *connector)
struct msm_drm_private *priv = dev->dev_private;
struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
- return drm_add_modes_noedid(connector, dpu_kms->catalog->caps->max_linewidth,
+ /*
+ * Ideally we would limit the modes only to the max linewidth, but on
+ * some chipsets that would also allow 4K modes, which then fail the
+ * per-SSPP bandwidth checks. So, until dual-SSPP and source-split
+ * support are added, limit the modes based on max_mixer_width; once
+ * those land, 4K modes can be supported.
+ */
+ return drm_add_modes_noedid(connector, dpu_kms->catalog->caps->max_mixer_width,
dev->mode_config.max_height);
}
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
index fb48c8c19ec3..17cb1fc78379 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
@@ -216,6 +216,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
encoder = mdp4_lcdc_encoder_init(dev, panel_node);
if (IS_ERR(encoder)) {
DRM_DEV_ERROR(dev->dev, "failed to construct LCDC encoder\n");
+ of_node_put(panel_node);
return PTR_ERR(encoder);
}
@@ -225,6 +226,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
connector = mdp4_lvds_connector_init(dev, panel_node, encoder);
if (IS_ERR(connector)) {
DRM_DEV_ERROR(dev->dev, "failed to initialize LVDS connector\n");
+ of_node_put(panel_node);
return PTR_ERR(connector);
}
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index b7f5b8d3bbd6..703249384e7c 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -1534,6 +1534,8 @@ end:
return ret;
}
+static int dp_ctrl_on_stream_phy_test_report(struct dp_ctrl *dp_ctrl);
+
static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl)
{
int ret = 0;
@@ -1557,7 +1559,7 @@ static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl)
ret = dp_ctrl_on_link(&ctrl->dp_ctrl);
if (!ret)
- ret = dp_ctrl_on_stream(&ctrl->dp_ctrl);
+ ret = dp_ctrl_on_stream_phy_test_report(&ctrl->dp_ctrl);
else
DRM_ERROR("failed to enable DP link controller\n");
@@ -1813,7 +1815,27 @@ static int dp_ctrl_link_retrain(struct dp_ctrl_private *ctrl)
return dp_ctrl_setup_main_link(ctrl, &training_step);
}
-int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl)
+static int dp_ctrl_on_stream_phy_test_report(struct dp_ctrl *dp_ctrl)
+{
+ int ret;
+ struct dp_ctrl_private *ctrl;
+
+ ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+ ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
+
+ ret = dp_ctrl_enable_stream_clocks(ctrl);
+ if (ret) {
+ DRM_ERROR("Failed to start pixel clocks. ret=%d\n", ret);
+ return ret;
+ }
+
+ dp_ctrl_send_phy_test_pattern(ctrl);
+
+ return 0;
+}
+
+int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train)
{
int ret = 0;
bool mainlink_ready = false;
@@ -1849,12 +1871,7 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl)
goto end;
}
- if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) {
- dp_ctrl_send_phy_test_pattern(ctrl);
- return 0;
- }
-
- if (!dp_ctrl_channel_eq_ok(ctrl))
+ if (force_link_train || !dp_ctrl_channel_eq_ok(ctrl))
dp_ctrl_link_retrain(ctrl);
/* stop txing train pattern to end link training */
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h
index 0745fde01b45..b563e2e3bfe5 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.h
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.h
@@ -21,7 +21,7 @@ struct dp_ctrl {
};
int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl);
-int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl);
+int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train);
int dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl);
int dp_ctrl_off_link(struct dp_ctrl *dp_ctrl);
int dp_ctrl_off(struct dp_ctrl *dp_ctrl);
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index bce77935394f..239c8e3f2fbd 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -309,12 +309,15 @@ static void dp_display_unbind(struct device *dev, struct device *master,
struct msm_drm_private *priv = dev_get_drvdata(master);
/* disable all HPD interrupts */
- dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false);
+ if (dp->core_initialized)
+ dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false);
kthread_stop(dp->ev_tsk);
dp_power_client_deinit(dp->power);
dp_aux_unregister(dp->aux);
+ dp->drm_dev = NULL;
+ dp->aux->drm_dev = NULL;
priv->dp[dp->id] = NULL;
}
@@ -872,7 +875,7 @@ static int dp_display_enable(struct dp_display_private *dp, u32 data)
return 0;
}
- rc = dp_ctrl_on_stream(dp->ctrl);
+ rc = dp_ctrl_on_stream(dp->ctrl, data);
if (!rc)
dp_display->power_on = true;
@@ -1659,6 +1662,7 @@ void dp_bridge_enable(struct drm_bridge *drm_bridge)
int rc = 0;
struct dp_display_private *dp_display;
u32 state;
+ bool force_link_train = false;
dp_display = container_of(dp, struct dp_display_private, dp_display);
if (!dp_display->dp_mode.drm_mode.clock) {
@@ -1693,10 +1697,12 @@ void dp_bridge_enable(struct drm_bridge *drm_bridge)
state = dp_display->hpd_state;
- if (state == ST_DISPLAY_OFF)
+ if (state == ST_DISPLAY_OFF) {
dp_display_host_phy_init(dp_display);
+ force_link_train = true;
+ }
- dp_display_enable(dp_display, 0);
+ dp_display_enable(dp_display, force_link_train);
rc = dp_display_post_enable(dp);
if (rc) {
@@ -1705,10 +1711,6 @@ void dp_bridge_enable(struct drm_bridge *drm_bridge)
dp_display_unprepare(dp);
}
- /* manual kick off plug event to train link */
- if (state == ST_DISPLAY_OFF)
- dp_add_event(dp_display, EV_IRQ_HPD_INT, 0, 0);
-
/* completed connection */
dp_display->hpd_state = ST_CONNECTED;
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 44485363f37a..14ab9a627d8b 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -964,7 +964,7 @@ static const struct drm_driver msm_driver = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
- .gem_prime_mmap = drm_gem_prime_mmap,
+ .gem_prime_mmap = msm_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = msm_debugfs_init,
#endif
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 08388d742d65..099a67d10c3a 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -246,6 +246,7 @@ unsigned long msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_t
void msm_gem_shrinker_init(struct drm_device *dev);
void msm_gem_shrinker_cleanup(struct drm_device *dev);
+int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
int msm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map);
void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map);
diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c
index 3df255402a33..38e3323bc232 100644
--- a/drivers/gpu/drm/msm/msm_fence.c
+++ b/drivers/gpu/drm/msm/msm_fence.c
@@ -46,12 +46,14 @@ bool msm_fence_completed(struct msm_fence_context *fctx, uint32_t fence)
(int32_t)(*fctx->fenceptr - fence) >= 0;
}
-/* called from workqueue */
+/* called from irq handler and workqueue (in recover path) */
void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence)
{
- spin_lock(&fctx->spinlock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fctx->spinlock, flags);
fctx->completed_fence = max(fence, fctx->completed_fence);
- spin_unlock(&fctx->spinlock);
+ spin_unlock_irqrestore(&fctx->spinlock, flags);
}
struct msm_fence {
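msm_update_fence() is now called from the IRQ handler as well as from the recover worker, so the fence spinlock switches to the irqsave variant; with the plain spin_lock, an interrupt taken while the worker held the lock on the same CPU would deadlock. The standard shape of the rule, sketched:

    #include <linux/minmax.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(fence_lock);
    static u32 completed_fence;

    /* Safe from both hard-IRQ and process context: the process-context
     * side must disable local interrupts while holding the lock. */
    static void update_fence(u32 fence)
    {
            unsigned long flags;

            spin_lock_irqsave(&fence_lock, flags);
            completed_fence = max(fence, completed_fence);
            spin_unlock_irqrestore(&fence_lock, flags);
    }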
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 97d5b4d8b9b0..7f92231785a0 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -439,14 +439,12 @@ int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
return ret;
}
-void msm_gem_unpin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
+void msm_gem_unpin_locked(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
GEM_WARN_ON(!msm_gem_is_locked(obj));
- msm_gem_unpin_vma(vma);
-
msm_obj->pin_count--;
GEM_WARN_ON(msm_obj->pin_count < 0);
@@ -586,7 +584,8 @@ void msm_gem_unpin_iova(struct drm_gem_object *obj,
msm_gem_lock(obj);
vma = lookup_vma(obj, aspace);
if (!GEM_WARN_ON(!vma)) {
- msm_gem_unpin_vma_locked(obj, vma);
+ msm_gem_unpin_vma(vma);
+ msm_gem_unpin_locked(obj);
}
msm_gem_unlock(obj);
}
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index c75d3b879a53..6b7d5bb3b575 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -145,7 +145,7 @@ struct msm_gem_object {
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma);
-void msm_gem_unpin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma);
+void msm_gem_unpin_locked(struct drm_gem_object *obj);
struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace);
int msm_gem_get_iova(struct drm_gem_object *obj,
@@ -377,10 +377,11 @@ struct msm_gem_submit {
} *cmd; /* array of size nr_cmds */
struct {
/* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
-#define BO_VALID 0x8000 /* is current addr in cmdstream correct/valid? */
-#define BO_LOCKED 0x4000 /* obj lock is held */
-#define BO_ACTIVE 0x2000 /* active refcnt is held */
-#define BO_PINNED 0x1000 /* obj is pinned and on active list */
+#define BO_VALID 0x8000 /* is current addr in cmdstream correct/valid? */
+#define BO_LOCKED 0x4000 /* obj lock is held */
+#define BO_ACTIVE 0x2000 /* active refcnt is held */
+#define BO_OBJ_PINNED 0x1000 /* obj (pages) is pinned and on active list */
+#define BO_VMA_PINNED 0x0800 /* vma (virtual address) is pinned */
uint32_t flags;
union {
struct msm_gem_object *obj;
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index 94ab705e9b8a..dcc8a573bc76 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -11,6 +11,21 @@
#include "msm_drv.h"
#include "msm_gem.h"
+int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ int ret;
+
+ /* Ensure the mmap offset is initialized. It is created lazily, so
+ * if the object has not already been mmap'd directly as a GEM
+ * object, the offset will not yet exist.
+ */
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret)
+ return ret;
+
+ return drm_gem_prime_mmap(obj, vma);
+}
+
struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 80975229b4de..c9e4aeb14f4a 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -232,8 +232,11 @@ static void submit_cleanup_bo(struct msm_gem_submit *submit, int i,
*/
submit->bos[i].flags &= ~cleanup_flags;
- if (flags & BO_PINNED)
- msm_gem_unpin_vma_locked(obj, submit->bos[i].vma);
+ if (flags & BO_VMA_PINNED)
+ msm_gem_unpin_vma(submit->bos[i].vma);
+
+ if (flags & BO_OBJ_PINNED)
+ msm_gem_unpin_locked(obj);
if (flags & BO_ACTIVE)
msm_gem_active_put(obj);
@@ -244,7 +247,9 @@ static void submit_cleanup_bo(struct msm_gem_submit *submit, int i,
static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
{
- submit_cleanup_bo(submit, i, BO_PINNED | BO_ACTIVE | BO_LOCKED);
+ unsigned cleanup_flags = BO_VMA_PINNED | BO_OBJ_PINNED |
+ BO_ACTIVE | BO_LOCKED;
+ submit_cleanup_bo(submit, i, cleanup_flags);
if (!(submit->bos[i].flags & BO_VALID))
submit->bos[i].iova = 0;
@@ -375,7 +380,7 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
if (ret)
break;
- submit->bos[i].flags |= BO_PINNED;
+ submit->bos[i].flags |= BO_OBJ_PINNED | BO_VMA_PINNED;
submit->bos[i].vma = vma;
if (vma->iova == submit->bos[i].iova) {
@@ -511,7 +516,7 @@ static void submit_cleanup(struct msm_gem_submit *submit, bool error)
unsigned i;
if (error)
- cleanup_flags |= BO_PINNED | BO_ACTIVE;
+ cleanup_flags |= BO_VMA_PINNED | BO_OBJ_PINNED | BO_ACTIVE;
for (i = 0; i < submit->nr_bos; i++) {
struct msm_gem_object *msm_obj = submit->bos[i].obj;
@@ -529,7 +534,8 @@ void msm_submit_retire(struct msm_gem_submit *submit)
struct drm_gem_object *obj = &submit->bos[i].obj->base;
msm_gem_lock(obj);
- submit_cleanup_bo(submit, i, BO_PINNED | BO_ACTIVE);
+ /* Note, VMA already fence-unpinned before submit: */
+ submit_cleanup_bo(submit, i, BO_OBJ_PINNED | BO_ACTIVE);
msm_gem_unlock(obj);
drm_gem_object_put(obj);
}
@@ -922,7 +928,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
INT_MAX, GFP_KERNEL);
}
if (submit->fence_id < 0) {
- ret = submit->fence_id = 0;
+ ret = submit->fence_id;
submit->fence_id = 0;
}
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index 3c1dc9241831..c471aebcdbab 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -62,8 +62,7 @@ void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
unsigned size = vma->node.size;
/* Print a message if we try to purge a vma in use */
- if (GEM_WARN_ON(msm_gem_vma_inuse(vma)))
- return;
+ GEM_WARN_ON(msm_gem_vma_inuse(vma));
/* Don't do anything if the memory isn't mapped */
if (!vma->mapped)
@@ -128,8 +127,7 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
void msm_gem_close_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma)
{
- if (GEM_WARN_ON(msm_gem_vma_inuse(vma) || vma->mapped))
- return;
+ GEM_WARN_ON(msm_gem_vma_inuse(vma) || vma->mapped);
spin_lock(&aspace->lock);
if (vma->iova)
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index eb8a6663f309..c8cd9bfa3eeb 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -164,24 +164,6 @@ int msm_gpu_hw_init(struct msm_gpu *gpu)
return ret;
}
-static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
- uint32_t fence)
-{
- struct msm_gem_submit *submit;
- unsigned long flags;
-
- spin_lock_irqsave(&ring->submit_lock, flags);
- list_for_each_entry(submit, &ring->submits, node) {
- if (fence_after(submit->seqno, fence))
- break;
-
- msm_update_fence(submit->ring->fctx,
- submit->hw_fence->seqno);
- dma_fence_signal(submit->hw_fence);
- }
- spin_unlock_irqrestore(&ring->submit_lock, flags);
-}
-
#ifdef CONFIG_DEV_COREDUMP
static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset,
size_t count, void *data, size_t datalen)
@@ -436,9 +418,9 @@ static void recover_worker(struct kthread_work *work)
* one more to clear the faulting submit
*/
if (ring == cur_ring)
- fence++;
+ ring->memptrs->fence = ++fence;
- update_fences(gpu, ring, fence);
+ msm_update_fence(ring->fctx, fence);
}
if (msm_gpu_active(gpu)) {
@@ -672,7 +654,6 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
msm_submit_retire(submit);
pm_runtime_mark_last_busy(&gpu->pdev->dev);
- pm_runtime_put_autosuspend(&gpu->pdev->dev);
spin_lock_irqsave(&ring->submit_lock, flags);
list_del(&submit->node);
@@ -686,6 +667,8 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
msm_devfreq_idle(gpu);
mutex_unlock(&gpu->active_lock);
+ pm_runtime_put_autosuspend(&gpu->pdev->dev);
+
msm_gem_submit_put(submit);
}
@@ -735,7 +718,7 @@ void msm_gpu_retire(struct msm_gpu *gpu)
int i;
for (i = 0; i < gpu->nr_rings; i++)
- update_fences(gpu, gpu->rb[i], gpu->rb[i]->memptrs->fence);
+ msm_update_fence(gpu->rb[i]->fctx, gpu->rb[i]->memptrs->fence);
kthread_queue_work(gpu->worker, &gpu->retire_work);
update_sw_cntrs(gpu);
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index bcaddbba564d..a54ed354578b 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -58,7 +58,7 @@ static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
u64 addr = iova;
unsigned int i;
- for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ for_each_sgtable_sg(sgt, sg, i) {
size_t size = sg->length;
phys_addr_t phys = sg_phys(sg);
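for_each_sg(sgt->sgl, sg, sgt->nents, i) walks sgt->nents entries, but after DMA mapping nents counts the (possibly merged) DMA segments while orig_nents counts the CPU-side entries; a loop reading sg_phys() and sg->length wants the latter, which is exactly what for_each_sgtable_sg() iterates. Sketched:

    #include <linux/scatterlist.h>

    static void walk_cpu_entries(struct sg_table *sgt)
    {
            struct scatterlist *sg;
            unsigned int i;

            /* Wrong once DMA-mapped: for_each_sg(sgt->sgl, sg,
             * sgt->nents, i) may stop short of the CPU entries. */
            for_each_sgtable_sg(sgt, sg, i) {   /* uses orig_nents */
                    phys_addr_t phys = sg_phys(sg);

                    pr_debug("phys=%pa len=%u\n", &phys, sg->length);
            }
    }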
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 43066320ff8c..56eecb4a72dc 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -25,7 +25,7 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)
msm_gem_lock(obj);
msm_gem_unpin_vma_fenced(submit->bos[i].vma, fctx);
- submit->bos[i].flags &= ~BO_PINNED;
+ submit->bos[i].flags &= ~BO_VMA_PINNED;
msm_gem_unlock(obj);
}
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index c96014464355..a189982601a4 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -713,7 +713,7 @@ static int generic_edp_panel_probe(struct device *dev, struct panel_edp *panel)
of_property_read_u32(dev->of_node, "hpd-reliable-delay-ms", &reliable_ms);
desc->delay.hpd_reliable = reliable_ms;
of_property_read_u32(dev->of_node, "hpd-absent-delay-ms", &absent_ms);
- desc->delay.hpd_reliable = absent_ms;
+ desc->delay.hpd_absent = absent_ms;
/* Power the panel on so we can read the EDID */
ret = pm_runtime_get_sync(dev);
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 7fcbc2a5b6cd..b1e6d238674f 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -233,6 +233,7 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct panfrost_device *pfdev = dev->dev_private;
+ struct panfrost_file_priv *file_priv = file->driver_priv;
struct drm_panfrost_submit *args = data;
struct drm_syncobj *sync_out = NULL;
struct panfrost_job *job;
@@ -262,12 +263,12 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
job->jc = args->jc;
job->requirements = args->requirements;
job->flush_id = panfrost_gpu_get_latest_flush_id(pfdev);
- job->file_priv = file->driver_priv;
+ job->mmu = file_priv->mmu;
slot = panfrost_job_get_slot(job);
ret = drm_sched_job_init(&job->base,
- &job->file_priv->sched_entity[slot],
+ &file_priv->sched_entity[slot],
NULL);
if (ret)
goto out_put_job;
@@ -432,8 +433,8 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
if (args->retained) {
if (args->madv == PANFROST_MADV_DONTNEED)
- list_add_tail(&bo->base.madv_list,
- &pfdev->shrinker_list);
+ list_move_tail(&bo->base.madv_list,
+ &pfdev->shrinker_list);
else if (args->madv == PANFROST_MADV_WILLNEED)
list_del_init(&bo->base.madv_list);
}
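If a BO marked DONTNEED was already on the shrinker list, list_add_tail() would link its list_head a second time and corrupt the list; list_move_tail() deletes the entry from wherever it currently is before re-adding, making the transition idempotent. The two madvise transitions, sketched:

    #include <linux/list.h>

    static void mark_purgeable(struct list_head *entry,
                               struct list_head *shrinker_list)
    {
            /* list_add_tail() here corrupts if already queued. */
            list_move_tail(entry, shrinker_list);   /* del + add */
    }

    static void mark_needed(struct list_head *entry)
    {
            list_del_init(entry);   /* leaves the entry self-linked */
    }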
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index fda5871aebe3..7c4208476fbd 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -201,7 +201,7 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
return;
}
- cfg = panfrost_mmu_as_get(pfdev, job->file_priv->mmu);
+ cfg = panfrost_mmu_as_get(pfdev, job->mmu);
job_write(pfdev, JS_HEAD_NEXT_LO(js), lower_32_bits(jc_head));
job_write(pfdev, JS_HEAD_NEXT_HI(js), upper_32_bits(jc_head));
@@ -435,7 +435,7 @@ static void panfrost_job_handle_err(struct panfrost_device *pfdev,
job->jc = 0;
}
- panfrost_mmu_as_put(pfdev, job->file_priv->mmu);
+ panfrost_mmu_as_put(pfdev, job->mmu);
panfrost_devfreq_record_idle(&pfdev->pfdevfreq);
if (signal_fence)
@@ -456,7 +456,7 @@ static void panfrost_job_handle_done(struct panfrost_device *pfdev,
* happen when we receive the DONE interrupt while doing a GPU reset).
*/
job->jc = 0;
- panfrost_mmu_as_put(pfdev, job->file_priv->mmu);
+ panfrost_mmu_as_put(pfdev, job->mmu);
panfrost_devfreq_record_idle(&pfdev->pfdevfreq);
dma_fence_signal_locked(job->done_fence);
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.h b/drivers/gpu/drm/panfrost/panfrost_job.h
index 77e6d0e6f612..8becc1ba0eb9 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.h
+++ b/drivers/gpu/drm/panfrost/panfrost_job.h
@@ -17,7 +17,7 @@ struct panfrost_job {
struct kref refcount;
struct panfrost_device *pfdev;
- struct panfrost_file_priv *file_priv;
+ struct panfrost_mmu *mmu;
/* Fence to be signaled by IRQ handler when the job is complete. */
struct dma_fence *done_fence;
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index d3f82b26a631..b285a8001b1d 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -518,7 +518,7 @@ err_map:
err_pages:
drm_gem_shmem_put_pages(&bo->base);
err_bo:
- drm_gem_object_put(&bo->base.base);
+ panfrost_gem_mapping_put(bomapping);
return ret;
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 67d38f53d3e5..13ed33e74457 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -23,6 +23,14 @@
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
+#if defined(CONFIG_ARM_DMA_USE_IOMMU)
+#include <asm/dma-iommu.h>
+#else
+#define arm_iommu_detach_device(...) ({ })
+#define arm_iommu_release_mapping(...) ({ })
+#define to_dma_iommu_mapping(dev) NULL
+#endif
+
#include "rockchip_drm_drv.h"
#include "rockchip_drm_fb.h"
#include "rockchip_drm_gem.h"
@@ -49,6 +57,15 @@ int rockchip_drm_dma_attach_device(struct drm_device *drm_dev,
if (!private->domain)
return 0;
+ if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
+ struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
+
+ if (mapping) {
+ arm_iommu_detach_device(dev);
+ arm_iommu_release_mapping(mapping);
+ }
+ }
+
ret = iommu_attach_device(private->domain, dev);
if (ret) {
DRM_DEV_ERROR(dev, "Failed to attach iommu device\n");
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 191c56064f19..6b25b2f4f5a3 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -190,7 +190,7 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
}
EXPORT_SYMBOL(drm_sched_entity_flush);
-static void drm_sched_entity_kill_jobs_irq_work(struct irq_work *wrk)
+static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk)
{
struct drm_sched_job *job = container_of(wrk, typeof(*job), work);
@@ -207,8 +207,8 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
finish_cb);
- init_irq_work(&job->work, drm_sched_entity_kill_jobs_irq_work);
- irq_work_queue(&job->work);
+ INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work);
+ schedule_work(&job->work);
}
static struct dma_fence *
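drm_sched_entity_kill_jobs_cb() runs as a dma_fence callback, i.e. in atomic (often hard-IRQ) context, but tearing a job down may need to sleep; irq_work only defers to another atomic context, so the patch hands the cleanup to a workqueue instead. The hand-off, sketched with an illustrative job type:

    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct kill_job {                       /* illustrative container */
            struct work_struct work;
            /* resources whose release may sleep */
    };

    static void kill_job_work(struct work_struct *wrk)
    {
            struct kill_job *job = container_of(wrk, struct kill_job, work);

            /* Sleepable context: mutexes, fence waits, kfree, ... */
            kfree(job);
    }

    static void kill_job_from_fence_cb(struct kill_job *job)
    {
            INIT_WORK(&job->work, kill_job_work);   /* atomic-safe */
            schedule_work(&job->work);
    }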
diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c
index 08394444dd6e..f4886e66ff34 100644
--- a/drivers/gpu/drm/solomon/ssd130x.c
+++ b/drivers/gpu/drm/solomon/ssd130x.c
@@ -350,7 +350,7 @@ static int ssd130x_init(struct ssd130x_device *ssd130x)
/* Set precharge period in number of ticks from the internal clock */
precharge = (SSD130X_SET_PRECHARGE_PERIOD1_SET(ssd130x->prechargep1) |
- SSD130X_SET_PRECHARGE_PERIOD1_SET(ssd130x->prechargep2));
+ SSD130X_SET_PRECHARGE_PERIOD2_SET(ssd130x->prechargep2));
ret = ssd130x_write_cmd(ssd130x, 2, SSD130X_SET_PRECHARGE_PERIOD, precharge);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 275f7e4a03ae..6eb1aabdb161 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -7,6 +7,7 @@
*/
#include <linux/component.h>
+#include <linux/dma-mapping.h>
#include <linux/kfifo.h>
#include <linux/module.h>
#include <linux/of_graph.h>
@@ -73,7 +74,6 @@ static int sun4i_drv_bind(struct device *dev)
goto free_drm;
}
- dev_set_drvdata(dev, drm);
drm->dev_private = drv;
INIT_LIST_HEAD(&drv->frontend_list);
INIT_LIST_HEAD(&drv->engine_list);
@@ -114,6 +114,8 @@ static int sun4i_drv_bind(struct device *dev)
drm_fbdev_generic_setup(drm, 32);
+ dev_set_drvdata(dev, drm);
+
return 0;
finish_poll:
@@ -130,6 +132,7 @@ static void sun4i_drv_unbind(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
+ dev_set_drvdata(dev, NULL);
drm_dev_unregister(drm);
drm_kms_helper_poll_fini(drm);
drm_atomic_helper_shutdown(drm);
@@ -367,6 +370,13 @@ static int sun4i_drv_probe(struct platform_device *pdev)
INIT_KFIFO(list.fifo);
+ /*
+ * DE2 and DE3 cores actually support 40-bit addresses, but the
+ * driver does not.
+ */
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ dma_set_max_seg_size(&pdev->dev, UINT_MAX);
+
for (i = 0;; i++) {
struct device_node *pipeline = of_parse_phandle(np,
"allwinner,pipelines",
diff --git a/drivers/gpu/drm/sun4i/sun4i_layer.c b/drivers/gpu/drm/sun4i/sun4i_layer.c
index 6d43080791a0..85fb9e800ddf 100644
--- a/drivers/gpu/drm/sun4i/sun4i_layer.c
+++ b/drivers/gpu/drm/sun4i/sun4i_layer.c
@@ -117,7 +117,7 @@ static bool sun4i_layer_format_mod_supported(struct drm_plane *plane,
struct sun4i_layer *layer = plane_to_sun4i_layer(plane);
if (IS_ERR_OR_NULL(layer->backend->frontend))
- sun4i_backend_format_is_supported(format, modifier);
+ return sun4i_backend_format_is_supported(format, modifier);
return sun4i_backend_format_is_supported(format, modifier) ||
sun4i_frontend_format_is_supported(format, modifier);
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
index a8d75fd7e9f4..477cb6985b4d 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
@@ -93,34 +93,10 @@ crtcs_exit:
return crtcs;
}
-static int sun8i_dw_hdmi_find_connector_pdev(struct device *dev,
- struct platform_device **pdev_out)
-{
- struct platform_device *pdev;
- struct device_node *remote;
-
- remote = of_graph_get_remote_node(dev->of_node, 1, -1);
- if (!remote)
- return -ENODEV;
-
- if (!of_device_is_compatible(remote, "hdmi-connector")) {
- of_node_put(remote);
- return -ENODEV;
- }
-
- pdev = of_find_device_by_node(remote);
- of_node_put(remote);
- if (!pdev)
- return -ENODEV;
-
- *pdev_out = pdev;
- return 0;
-}
-
static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
void *data)
{
- struct platform_device *pdev = to_platform_device(dev), *connector_pdev;
+ struct platform_device *pdev = to_platform_device(dev);
struct dw_hdmi_plat_data *plat_data;
struct drm_device *drm = data;
struct device_node *phy_node;
@@ -167,30 +143,16 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
return dev_err_probe(dev, PTR_ERR(hdmi->regulator),
"Couldn't get regulator\n");
- ret = sun8i_dw_hdmi_find_connector_pdev(dev, &connector_pdev);
- if (!ret) {
- hdmi->ddc_en = gpiod_get_optional(&connector_pdev->dev,
- "ddc-en", GPIOD_OUT_HIGH);
- platform_device_put(connector_pdev);
-
- if (IS_ERR(hdmi->ddc_en)) {
- dev_err(dev, "Couldn't get ddc-en gpio\n");
- return PTR_ERR(hdmi->ddc_en);
- }
- }
-
ret = regulator_enable(hdmi->regulator);
if (ret) {
dev_err(dev, "Failed to enable regulator\n");
- goto err_unref_ddc_en;
+ return ret;
}
- gpiod_set_value(hdmi->ddc_en, 1);
-
ret = reset_control_deassert(hdmi->rst_ctrl);
if (ret) {
dev_err(dev, "Could not deassert ctrl reset control\n");
- goto err_disable_ddc_en;
+ goto err_disable_regulator;
}
ret = clk_prepare_enable(hdmi->clk_tmds);
@@ -245,12 +207,8 @@ err_disable_clk_tmds:
clk_disable_unprepare(hdmi->clk_tmds);
err_assert_ctrl_reset:
reset_control_assert(hdmi->rst_ctrl);
-err_disable_ddc_en:
- gpiod_set_value(hdmi->ddc_en, 0);
+err_disable_regulator:
regulator_disable(hdmi->regulator);
-err_unref_ddc_en:
- if (hdmi->ddc_en)
- gpiod_put(hdmi->ddc_en);
return ret;
}
@@ -264,11 +222,7 @@ static void sun8i_dw_hdmi_unbind(struct device *dev, struct device *master,
sun8i_hdmi_phy_deinit(hdmi->phy);
clk_disable_unprepare(hdmi->clk_tmds);
reset_control_assert(hdmi->rst_ctrl);
- gpiod_set_value(hdmi->ddc_en, 0);
regulator_disable(hdmi->regulator);
-
- if (hdmi->ddc_en)
- gpiod_put(hdmi->ddc_en);
}
static const struct component_ops sun8i_dw_hdmi_ops = {
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
index bffe1b9cd3dc..9ad09522947a 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
@@ -9,7 +9,6 @@
#include <drm/bridge/dw_hdmi.h>
#include <drm/drm_encoder.h>
#include <linux/clk.h>
-#include <linux/gpio/consumer.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
@@ -193,7 +192,6 @@ struct sun8i_dw_hdmi {
struct regulator *regulator;
const struct sun8i_dw_hdmi_quirks *quirks;
struct reset_control *rst_ctrl;
- struct gpio_desc *ddc_en;
};
extern struct platform_driver sun8i_hdmi_phy_driver;
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 75d308ec173d..406e9c324e76 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -109,11 +109,11 @@ void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo,
return;
spin_lock(&bo->bdev->lru_lock);
- if (bo->bulk_move && bo->resource)
- ttm_lru_bulk_move_del(bo->bulk_move, bo->resource);
+ if (bo->resource)
+ ttm_resource_del_bulk_move(bo->resource, bo);
bo->bulk_move = bulk;
- if (bo->bulk_move && bo->resource)
- ttm_lru_bulk_move_add(bo->bulk_move, bo->resource);
+ if (bo->resource)
+ ttm_resource_add_bulk_move(bo->resource, bo);
spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_set_bulk_move);
@@ -689,8 +689,11 @@ void ttm_bo_pin(struct ttm_buffer_object *bo)
{
dma_resv_assert_held(bo->base.resv);
WARN_ON_ONCE(!kref_read(&bo->kref));
- if (!(bo->pin_count++) && bo->bulk_move && bo->resource)
- ttm_lru_bulk_move_del(bo->bulk_move, bo->resource);
+ spin_lock(&bo->bdev->lru_lock);
+ if (bo->resource)
+ ttm_resource_del_bulk_move(bo->resource, bo);
+ ++bo->pin_count;
+ spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_pin);
@@ -707,8 +710,11 @@ void ttm_bo_unpin(struct ttm_buffer_object *bo)
if (WARN_ON_ONCE(!bo->pin_count))
return;
- if (!(--bo->pin_count) && bo->bulk_move && bo->resource)
- ttm_lru_bulk_move_add(bo->bulk_move, bo->resource);
+ spin_lock(&bo->bdev->lru_lock);
+ --bo->pin_count;
+ if (bo->resource)
+ ttm_resource_add_bulk_move(bo->resource, bo);
+ spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_unpin);
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
index a0562ab386f5..e7147e304637 100644
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -156,8 +156,12 @@ int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
ttm_resource_manager_for_each_res(man, &cursor, res) {
struct ttm_buffer_object *bo = res->bo;
- uint32_t num_pages = PFN_UP(bo->base.size);
+ uint32_t num_pages;
+ if (!bo)
+ continue;
+
+ num_pages = PFN_UP(bo->base.size);
ret = ttm_bo_swapout(bo, ctx, gfp_flags);
/* ttm_bo_swapout has dropped the lru_lock */
if (!ret)
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index 65889b3caf50..20f9adcc3235 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -91,8 +91,8 @@ static void ttm_lru_bulk_move_pos_tail(struct ttm_lru_bulk_move_pos *pos,
}
/* Add the resource to a bulk_move cursor */
-void ttm_lru_bulk_move_add(struct ttm_lru_bulk_move *bulk,
- struct ttm_resource *res)
+static void ttm_lru_bulk_move_add(struct ttm_lru_bulk_move *bulk,
+ struct ttm_resource *res)
{
struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res);
@@ -105,8 +105,8 @@ void ttm_lru_bulk_move_add(struct ttm_lru_bulk_move *bulk,
}
/* Remove the resource from a bulk_move range */
-void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk,
- struct ttm_resource *res)
+static void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk,
+ struct ttm_resource *res)
{
struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res);
@@ -122,6 +122,22 @@ void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk,
}
}
+/* Add the resource to a bulk move if the BO is configured for it */
+void ttm_resource_add_bulk_move(struct ttm_resource *res,
+ struct ttm_buffer_object *bo)
+{
+ if (bo->bulk_move && !bo->pin_count)
+ ttm_lru_bulk_move_add(bo->bulk_move, res);
+}
+
+/* Remove the resource from a bulk move if the BO is configured for it */
+void ttm_resource_del_bulk_move(struct ttm_resource *res,
+ struct ttm_buffer_object *bo)
+{
+ if (bo->bulk_move && !bo->pin_count)
+ ttm_lru_bulk_move_del(bo->bulk_move, res);
+}
+
/* Move a resource to the LRU or bulk tail */
void ttm_resource_move_to_lru_tail(struct ttm_resource *res)
{
@@ -169,15 +185,14 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
res->bus.is_iomem = false;
res->bus.caching = ttm_cached;
res->bo = bo;
- INIT_LIST_HEAD(&res->lru);
man = ttm_manager_type(bo->bdev, place->mem_type);
spin_lock(&bo->bdev->lru_lock);
- man->usage += res->num_pages << PAGE_SHIFT;
- if (bo->bulk_move)
- ttm_lru_bulk_move_add(bo->bulk_move, res);
+ if (bo->pin_count)
+ list_add_tail(&res->lru, &bo->bdev->pinned);
else
- ttm_resource_move_to_lru_tail(res);
+ list_add_tail(&res->lru, &man->lru[bo->priority]);
+ man->usage += res->num_pages << PAGE_SHIFT;
spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_resource_init);
@@ -210,8 +225,16 @@ int ttm_resource_alloc(struct ttm_buffer_object *bo,
{
struct ttm_resource_manager *man =
ttm_manager_type(bo->bdev, place->mem_type);
+ int ret;
+
+ ret = man->func->alloc(man, bo, place, res_ptr);
+ if (ret)
+ return ret;
- return man->func->alloc(man, bo, place, res_ptr);
+ spin_lock(&bo->bdev->lru_lock);
+ ttm_resource_add_bulk_move(*res_ptr, bo);
+ spin_unlock(&bo->bdev->lru_lock);
+ return 0;
}
void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
@@ -221,12 +244,9 @@ void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
if (!*res)
return;
- if (bo->bulk_move) {
- spin_lock(&bo->bdev->lru_lock);
- ttm_lru_bulk_move_del(bo->bulk_move, *res);
- spin_unlock(&bo->bdev->lru_lock);
- }
-
+ spin_lock(&bo->bdev->lru_lock);
+ ttm_resource_del_bulk_move(*res, bo);
+ spin_unlock(&bo->bdev->lru_lock);
man = ttm_manager_type(bo->bdev, (*res)->mem_type);
man->func->free(man, *res);
*res = NULL;
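The new helpers reduce bulk-move membership to a single predicate, bulk_move set and pin_count zero, and every caller now evaluates it under bdev->lru_lock, so pinning can no longer race with an LRU walker that still sees the resource on a bulk list. The pin side, reduced to its essentials:

    /* Sketch of the locking shape in ttm_bo_pin() above: the membership
     * change and the pin_count update form one critical section. */
    static void pin_bo(struct ttm_buffer_object *bo)
    {
            spin_lock(&bo->bdev->lru_lock);
            if (bo->resource)
                    ttm_resource_del_bulk_move(bo->resource, bo);
            ++bo->pin_count;
            spin_unlock(&bo->bdev->lru_lock);
    }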
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 49c0f2ac868b..b8d856312846 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -248,6 +248,9 @@ void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo)
{
struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
mutex_lock(&vc4->purgeable.lock);
list_add_tail(&bo->size_head, &vc4->purgeable.list);
vc4->purgeable.num++;
@@ -259,6 +262,9 @@ static void vc4_bo_remove_from_purgeable_pool_locked(struct vc4_bo *bo)
{
struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
/* list_del_init() is used here because the caller might release
* the purgeable lock in order to acquire the madv one and update the
* madv status.
@@ -387,6 +393,9 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_bo *bo;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return ERR_PTR(-ENODEV);
+
bo = kzalloc(sizeof(*bo), GFP_KERNEL);
if (!bo)
return ERR_PTR(-ENOMEM);
@@ -413,6 +422,9 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
struct drm_gem_cma_object *cma_obj;
struct vc4_bo *bo;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return ERR_PTR(-ENODEV);
+
if (size == 0)
return ERR_PTR(-EINVAL);
@@ -471,19 +483,20 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
return bo;
}
-int vc4_dumb_create(struct drm_file *file_priv,
- struct drm_device *dev,
- struct drm_mode_create_dumb *args)
+int vc4_bo_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
{
- int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_bo *bo = NULL;
int ret;
- if (args->pitch < min_pitch)
- args->pitch = min_pitch;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
- if (args->size < args->pitch * args->height)
- args->size = args->pitch * args->height;
+ ret = vc4_dumb_fixup_args(args);
+ if (ret)
+ return ret;
bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_DUMB);
if (IS_ERR(bo))
@@ -601,8 +614,12 @@ static void vc4_bo_cache_time_work(struct work_struct *work)
int vc4_bo_inc_usecnt(struct vc4_bo *bo)
{
+ struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
int ret;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
/* Fast path: if the BO is already retained by someone, no need to
* check the madv status.
*/
@@ -637,6 +654,11 @@ int vc4_bo_inc_usecnt(struct vc4_bo *bo)
void vc4_bo_dec_usecnt(struct vc4_bo *bo)
{
+ struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
+
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
/* Fast path: if the BO is still retained by someone, no need to test
* the madv value.
*/
@@ -756,6 +778,9 @@ int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
struct vc4_bo *bo = NULL;
int ret;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
ret = vc4_grab_bin_bo(vc4, vc4file);
if (ret)
return ret;
@@ -779,9 +804,13 @@ int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_vc4_mmap_bo *args = data;
struct drm_gem_object *gem_obj;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
gem_obj = drm_gem_object_lookup(file_priv, args->handle);
if (!gem_obj) {
DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
@@ -805,6 +834,9 @@ vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
struct vc4_bo *bo = NULL;
int ret;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
if (args->size == 0)
return -EINVAL;
@@ -875,11 +907,15 @@ fail:
int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_vc4_set_tiling *args = data;
struct drm_gem_object *gem_obj;
struct vc4_bo *bo;
bool t_format;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
if (args->flags != 0)
return -EINVAL;
@@ -918,10 +954,14 @@ int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_vc4_get_tiling *args = data;
struct drm_gem_object *gem_obj;
struct vc4_bo *bo;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
if (args->flags != 0 || args->modifier != 0)
return -EINVAL;
@@ -948,6 +988,9 @@ int vc4_bo_cache_init(struct drm_device *dev)
struct vc4_dev *vc4 = to_vc4_dev(dev);
int i;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
/* Create the initial set of BO labels that the kernel will
* use. This lets us avoid a bunch of string reallocation in
* the kernel's draw and BO allocation paths.
@@ -1007,6 +1050,9 @@ int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
struct drm_gem_object *gem_obj;
int ret = 0, label;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
if (!args->len)
return -EINVAL;
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 59b20c8f132b..9355213dc883 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -256,7 +256,7 @@ static u32 vc4_get_fifo_full_level(struct vc4_crtc *vc4_crtc, u32 format)
* Removing 1 from the FIFO full level however
* seems to completely remove that issue.
*/
- if (!vc4->hvs->hvs5)
+ if (!vc4->is_vc5)
return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX - 1;
return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX;
@@ -389,7 +389,7 @@ static void vc4_crtc_config_pv(struct drm_crtc *crtc, struct drm_encoder *encode
if (is_dsi)
CRTC_WRITE(PV_HACT_ACT, mode->hdisplay * pixel_rep);
- if (vc4->hvs->hvs5)
+ if (vc4->is_vc5)
CRTC_WRITE(PV_MUX_CFG,
VC4_SET_FIELD(PV_MUX_CFG_RGB_PIXEL_MUX_MODE_NO_SWAP,
PV_MUX_CFG_RGB_PIXEL_MUX_MODE));
@@ -775,17 +775,18 @@ struct vc4_async_flip_state {
struct drm_framebuffer *old_fb;
struct drm_pending_vblank_event *event;
- struct vc4_seqno_cb cb;
+ union {
+ struct dma_fence_cb fence;
+ struct vc4_seqno_cb seqno;
+ } cb;
};
/* Called when the V3D execution for the BO being flipped to is done, so that
* we can actually update the plane's address to point to it.
*/
static void
-vc4_async_page_flip_complete(struct vc4_seqno_cb *cb)
+vc4_async_page_flip_complete(struct vc4_async_flip_state *flip_state)
{
- struct vc4_async_flip_state *flip_state =
- container_of(cb, struct vc4_async_flip_state, cb);
struct drm_crtc *crtc = flip_state->crtc;
struct drm_device *dev = crtc->dev;
struct drm_plane *plane = crtc->primary;
@@ -802,59 +803,96 @@ vc4_async_page_flip_complete(struct vc4_seqno_cb *cb)
drm_crtc_vblank_put(crtc);
drm_framebuffer_put(flip_state->fb);
- /* Decrement the BO usecnt in order to keep the inc/dec calls balanced
- * when the planes are updated through the async update path.
- * FIXME: we should move to generic async-page-flip when it's
- * available, so that we can get rid of this hand-made cleanup_fb()
- * logic.
- */
- if (flip_state->old_fb) {
- struct drm_gem_cma_object *cma_bo;
- struct vc4_bo *bo;
+ if (flip_state->old_fb)
+ drm_framebuffer_put(flip_state->old_fb);
+
+ kfree(flip_state);
+}
+
+static void vc4_async_page_flip_seqno_complete(struct vc4_seqno_cb *cb)
+{
+ struct vc4_async_flip_state *flip_state =
+ container_of(cb, struct vc4_async_flip_state, cb.seqno);
+ struct vc4_bo *bo = NULL;
- cma_bo = drm_fb_cma_get_gem_obj(flip_state->old_fb, 0);
+ if (flip_state->old_fb) {
+ struct drm_gem_cma_object *cma_bo =
+ drm_fb_cma_get_gem_obj(flip_state->old_fb, 0);
bo = to_vc4_bo(&cma_bo->base);
- vc4_bo_dec_usecnt(bo);
- drm_framebuffer_put(flip_state->old_fb);
}
- kfree(flip_state);
+ vc4_async_page_flip_complete(flip_state);
+
+ /*
+ * Decrement the BO usecnt in order to keep the inc/dec
+ * calls balanced when the planes are updated through
+ * the async update path.
+ *
+ * FIXME: we should move to generic async-page-flip when
+ * it's available, so that we can get rid of this
+ * hand-made cleanup_fb() logic.
+ */
+ if (bo)
+ vc4_bo_dec_usecnt(bo);
}
-/* Implements async (non-vblank-synced) page flips.
- *
- * The page flip ioctl needs to return immediately, so we grab the
- * modeset semaphore on the pipe, and queue the address update for
- * when V3D is done with the BO being flipped to.
- */
-static int vc4_async_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t flags)
+static void vc4_async_page_flip_fence_complete(struct dma_fence *fence,
+ struct dma_fence_cb *cb)
{
- struct drm_device *dev = crtc->dev;
- struct drm_plane *plane = crtc->primary;
- int ret = 0;
- struct vc4_async_flip_state *flip_state;
+ struct vc4_async_flip_state *flip_state =
+ container_of(cb, struct vc4_async_flip_state, cb.fence);
+
+ vc4_async_page_flip_complete(flip_state);
+ dma_fence_put(fence);
+}
+
+static int vc4_async_set_fence_cb(struct drm_device *dev,
+ struct vc4_async_flip_state *flip_state)
+{
+ struct drm_framebuffer *fb = flip_state->fb;
struct drm_gem_cma_object *cma_bo = drm_fb_cma_get_gem_obj(fb, 0);
- struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct dma_fence *fence;
+ int ret;
- /* Increment the BO usecnt here, so that we never end up with an
- * unbalanced number of vc4_bo_{dec,inc}_usecnt() calls when the
- * plane is later updated through the non-async path.
- * FIXME: we should move to generic async-page-flip when it's
- * available, so that we can get rid of this hand-made prepare_fb()
- * logic.
- */
- ret = vc4_bo_inc_usecnt(bo);
+ if (!vc4->is_vc5) {
+ struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);
+
+ return vc4_queue_seqno_cb(dev, &flip_state->cb.seqno, bo->seqno,
+ vc4_async_page_flip_seqno_complete);
+ }
+
+ ret = dma_resv_get_singleton(cma_bo->base.resv, DMA_RESV_USAGE_READ, &fence);
if (ret)
return ret;
+ /* If there's no fence, complete the page flip immediately */
+ if (!fence) {
+ vc4_async_page_flip_fence_complete(fence, &flip_state->cb.fence);
+ return 0;
+ }
+
+ /* If the fence has already been completed, complete the page flip */
+ if (dma_fence_add_callback(fence, &flip_state->cb.fence,
+ vc4_async_page_flip_fence_complete))
+ vc4_async_page_flip_fence_complete(fence, &flip_state->cb.fence);
+
+ return 0;
+}
+
+static int
+vc4_async_page_flip_common(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t flags)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_plane *plane = crtc->primary;
+ struct vc4_async_flip_state *flip_state;
+
flip_state = kzalloc(sizeof(*flip_state), GFP_KERNEL);
- if (!flip_state) {
- vc4_bo_dec_usecnt(bo);
+ if (!flip_state)
return -ENOMEM;
- }
drm_framebuffer_get(fb);
flip_state->fb = fb;
@@ -881,23 +919,79 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
*/
drm_atomic_set_fb_for_plane(plane->state, fb);
- vc4_queue_seqno_cb(dev, &flip_state->cb, bo->seqno,
- vc4_async_page_flip_complete);
+ vc4_async_set_fence_cb(dev, flip_state);
/* Driver takes ownership of state on successful async commit. */
return 0;
}
+/* Implements async (non-vblank-synced) page flips.
+ *
+ * The page flip ioctl needs to return immediately, so we grab the
+ * modeset semaphore on the pipe, and queue the address update for
+ * when V3D is done with the BO being flipped to.
+ */
+static int vc4_async_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t flags)
+{
+ struct drm_device *dev = crtc->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct drm_gem_cma_object *cma_bo = drm_fb_cma_get_gem_obj(fb, 0);
+ struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);
+ int ret;
+
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
+ /*
+ * Increment the BO usecnt here, so that we never end up with an
+ * unbalanced number of vc4_bo_{dec,inc}_usecnt() calls when the
+ * plane is later updated through the non-async path.
+ *
+ * FIXME: we should move to generic async-page-flip when
+ * it's available, so that we can get rid of this
+ * hand-made prepare_fb() logic.
+ */
+ ret = vc4_bo_inc_usecnt(bo);
+ if (ret)
+ return ret;
+
+ ret = vc4_async_page_flip_common(crtc, fb, event, flags);
+ if (ret) {
+ vc4_bo_dec_usecnt(bo);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int vc5_async_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t flags)
+{
+ return vc4_async_page_flip_common(crtc, fb, event, flags);
+}
+
int vc4_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event,
uint32_t flags,
struct drm_modeset_acquire_ctx *ctx)
{
- if (flags & DRM_MODE_PAGE_FLIP_ASYNC)
- return vc4_async_page_flip(crtc, fb, event, flags);
- else
+ if (flags & DRM_MODE_PAGE_FLIP_ASYNC) {
+ struct drm_device *dev = crtc->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ if (vc4->is_vc5)
+ return vc5_async_page_flip(crtc, fb, event, flags);
+ else
+ return vc4_async_page_flip(crtc, fb, event, flags);
+ } else {
return drm_atomic_helper_page_flip(crtc, fb, event, flags, ctx);
+ }
}
struct drm_crtc_state *vc4_crtc_duplicate_state(struct drm_crtc *crtc)
@@ -1149,7 +1243,7 @@ int vc4_crtc_init(struct drm_device *drm, struct vc4_crtc *vc4_crtc,
crtc_funcs, NULL);
drm_crtc_helper_add(crtc, crtc_helper_funcs);
- if (!vc4->hvs->hvs5) {
+ if (!vc4->is_vc5) {
drm_mode_crtc_set_gamma_size(crtc, ARRAY_SIZE(vc4_crtc->lut_r));
drm_crtc_enable_color_mgmt(crtc, 0, false, crtc->gamma_size);
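The new VC5 flip path above keys completion off a dma_fence rather than the V3D seqno. The contract it relies on: dma_fence_add_callback() returns nonzero (-ENOENT) when the fence has already signalled, in which case the caller must run the completion itself, which is exactly what the hunk does. A hedged kernel-context sketch (the dma_fence API is real; the surrounding names are hypothetical):

#include <linux/dma-fence.h>

struct my_flip {
	struct dma_fence_cb cb;	/* embedded, recovered via container_of() */
};

static void my_flip_done(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct my_flip *flip = container_of(cb, struct my_flip, cb);

	/* ... finish the flip using @flip ... */
	dma_fence_put(fence);	/* drop the reference taken by the caller */
}

static void arm_flip(struct dma_fence *fence, struct my_flip *flip)
{
	/* Nonzero return means the fence already signalled: complete inline. */
	if (dma_fence_add_callback(fence, &flip->cb, my_flip_done))
		my_flip_done(fence, &flip->cb);
}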
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 162bc18e7497..0f0f0263e744 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -63,6 +63,32 @@ void __iomem *vc4_ioremap_regs(struct platform_device *pdev, int index)
return map;
}
+int vc4_dumb_fixup_args(struct drm_mode_create_dumb *args)
+{
+ int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+
+ if (args->pitch < min_pitch)
+ args->pitch = min_pitch;
+
+ if (args->size < args->pitch * args->height)
+ args->size = args->pitch * args->height;
+
+ return 0;
+}
+
+static int vc5_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ int ret;
+
+ ret = vc4_dumb_fixup_args(args);
+ if (ret)
+ return ret;
+
+ return drm_gem_cma_dumb_create_internal(file_priv, dev, args);
+}
+
static int vc4_get_param_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -73,6 +99,9 @@ static int vc4_get_param_ioctl(struct drm_device *dev, void *data,
if (args->pad != 0)
return -EINVAL;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
if (!vc4->v3d)
return -ENODEV;
@@ -116,11 +145,16 @@ static int vc4_get_param_ioctl(struct drm_device *dev, void *data,
static int vc4_open(struct drm_device *dev, struct drm_file *file)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_file *vc4file;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
vc4file = kzalloc(sizeof(*vc4file), GFP_KERNEL);
if (!vc4file)
return -ENOMEM;
+ vc4file->dev = vc4;
vc4_perfmon_open_file(vc4file);
file->driver_priv = vc4file;
@@ -132,6 +166,9 @@ static void vc4_close(struct drm_device *dev, struct drm_file *file)
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_file *vc4file = file->driver_priv;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
if (vc4file->bin_bo_used)
vc4_v3d_bin_bo_put(vc4);
@@ -160,7 +197,7 @@ static const struct drm_ioctl_desc vc4_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(VC4_PERFMON_GET_VALUES, vc4_perfmon_get_values_ioctl, DRM_RENDER_ALLOW),
};
-static struct drm_driver vc4_drm_driver = {
+static const struct drm_driver vc4_drm_driver = {
.driver_features = (DRIVER_MODESET |
DRIVER_ATOMIC |
DRIVER_GEM |
@@ -175,7 +212,7 @@ static struct drm_driver vc4_drm_driver = {
.gem_create_object = vc4_create_object,
- DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(vc4_dumb_create),
+ DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(vc4_bo_dumb_create),
.ioctls = vc4_drm_ioctls,
.num_ioctls = ARRAY_SIZE(vc4_drm_ioctls),
@@ -189,6 +226,27 @@ static struct drm_driver vc4_drm_driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
+static const struct drm_driver vc5_drm_driver = {
+ .driver_features = (DRIVER_MODESET |
+ DRIVER_ATOMIC |
+ DRIVER_GEM),
+
+#if defined(CONFIG_DEBUG_FS)
+ .debugfs_init = vc4_debugfs_init,
+#endif
+
+ DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(vc5_dumb_create),
+
+ .fops = &vc4_drm_fops,
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+};
+
static void vc4_match_add_drivers(struct device *dev,
struct component_match **match,
struct platform_driver *const *drivers,
@@ -212,42 +270,49 @@ static void vc4_match_add_drivers(struct device *dev,
static int vc4_drm_bind(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
+ const struct drm_driver *driver;
struct rpi_firmware *firmware = NULL;
struct drm_device *drm;
struct vc4_dev *vc4;
struct device_node *node;
struct drm_crtc *crtc;
+ bool is_vc5;
int ret = 0;
dev->coherent_dma_mask = DMA_BIT_MASK(32);
- /* If VC4 V3D is missing, don't advertise render nodes. */
- node = of_find_matching_node_and_match(NULL, vc4_v3d_dt_match, NULL);
- if (!node || !of_device_is_available(node))
- vc4_drm_driver.driver_features &= ~DRIVER_RENDER;
- of_node_put(node);
+ is_vc5 = of_device_is_compatible(dev->of_node, "brcm,bcm2711-vc5");
+ if (is_vc5)
+ driver = &vc5_drm_driver;
+ else
+ driver = &vc4_drm_driver;
- vc4 = devm_drm_dev_alloc(dev, &vc4_drm_driver, struct vc4_dev, base);
+ vc4 = devm_drm_dev_alloc(dev, driver, struct vc4_dev, base);
if (IS_ERR(vc4))
return PTR_ERR(vc4);
+ vc4->is_vc5 = is_vc5;
drm = &vc4->base;
platform_set_drvdata(pdev, drm);
INIT_LIST_HEAD(&vc4->debugfs_list);
- mutex_init(&vc4->bin_bo_lock);
+ if (!is_vc5) {
+ mutex_init(&vc4->bin_bo_lock);
- ret = vc4_bo_cache_init(drm);
- if (ret)
- return ret;
+ ret = vc4_bo_cache_init(drm);
+ if (ret)
+ return ret;
+ }
ret = drmm_mode_config_init(drm);
if (ret)
return ret;
- ret = vc4_gem_init(drm);
- if (ret)
- return ret;
+ if (!is_vc5) {
+ ret = vc4_gem_init(drm);
+ if (ret)
+ return ret;
+ }
node = of_find_compatible_node(NULL, NULL, "raspberrypi,bcm2835-firmware");
if (node) {
@@ -258,7 +323,7 @@ static int vc4_drm_bind(struct device *dev)
return -EPROBE_DEFER;
}
- ret = drm_aperture_remove_framebuffers(false, &vc4_drm_driver);
+ ret = drm_aperture_remove_framebuffers(false, driver);
if (ret)
return ret;
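The factored-out vc4_dumb_fixup_args() only clamps the user-supplied pitch and size upward. For a 1920x1080 XRGB8888 request (bpp = 32): min_pitch = ceil(1920 * 32 / 8) = 7680 bytes, size = 7680 * 1080 = 8294400 bytes. A standalone, runnable illustration of the same arithmetic (values are examples, not driver code):

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	uint32_t width = 1920, height = 1080, bpp = 32;
	uint32_t pitch = 0, size = 0;	/* as if userspace left them zero */
	uint32_t min_pitch = DIV_ROUND_UP(width * bpp, 8);

	if (pitch < min_pitch)
		pitch = min_pitch;	/* bytes per scanline, rounded up */
	if (size < pitch * height)
		size = pitch * height;	/* whole-buffer size */

	printf("pitch=%u size=%u\n", pitch, size);	/* 7680, 8294400 */
	return 0;
}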
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 15e0c2ac3940..93fd55b9e99e 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -48,6 +48,8 @@ enum vc4_kernel_bo_type {
* done. This way, only events related to a specific job will be counted.
*/
struct vc4_perfmon {
+ struct vc4_dev *dev;
+
/* Tracks the number of users of the perfmon, when this counter reaches
* zero the perfmon is destroyed.
*/
@@ -74,6 +76,8 @@ struct vc4_perfmon {
struct vc4_dev {
struct drm_device base;
+ bool is_vc5;
+
unsigned int irq;
struct vc4_hvs *hvs;
@@ -316,6 +320,7 @@ struct vc4_v3d {
};
struct vc4_hvs {
+ struct vc4_dev *vc4;
struct platform_device *pdev;
void __iomem *regs;
u32 __iomem *dlist;
@@ -333,9 +338,6 @@ struct vc4_hvs {
struct drm_mm_node mitchell_netravali_filter;
struct debugfs_regset32 regset;
-
- /* HVS version 5 flag, therefore requires updated dlist structures */
- bool hvs5;
};
struct vc4_plane {
@@ -580,6 +582,8 @@ to_vc4_crtc_state(struct drm_crtc_state *crtc_state)
#define VC4_REG32(reg) { .name = #reg, .offset = reg }
struct vc4_exec_info {
+ struct vc4_dev *dev;
+
/* Sequence number for this bin/render job. */
uint64_t seqno;
@@ -701,6 +705,8 @@ struct vc4_exec_info {
* released when the DRM file is closed should be placed here.
*/
struct vc4_file {
+ struct vc4_dev *dev;
+
struct {
struct idr idr;
struct mutex lock;
@@ -814,9 +820,9 @@ struct vc4_validated_shader_info {
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size);
struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size,
bool from_cache, enum vc4_kernel_bo_type type);
-int vc4_dumb_create(struct drm_file *file_priv,
- struct drm_device *dev,
- struct drm_mode_create_dumb *args);
+int vc4_bo_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
@@ -885,6 +891,7 @@ static inline void vc4_debugfs_add_regset32(struct drm_device *drm,
/* vc4_drv.c */
void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index);
+int vc4_dumb_fixup_args(struct drm_mode_create_dumb *args);
/* vc4_dpi.c */
extern struct platform_driver vc4_dpi_driver;
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 9eaf304fc20d..fe10d9c3fff8 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -76,6 +76,9 @@ vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
u32 i;
int ret = 0;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
if (!vc4->v3d) {
DRM_DEBUG("VC4_GET_HANG_STATE with no VC4 V3D probed\n");
return -ENODEV;
@@ -386,6 +389,9 @@ vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, uint64_t timeout_ns,
unsigned long timeout_expire;
DEFINE_WAIT(wait);
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
if (vc4->finished_seqno >= seqno)
return 0;
@@ -468,6 +474,9 @@ vc4_submit_next_bin_job(struct drm_device *dev)
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_exec_info *exec;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
again:
exec = vc4_first_bin_job(vc4);
if (!exec)
@@ -513,6 +522,9 @@ vc4_submit_next_render_job(struct drm_device *dev)
if (!exec)
return;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
/* A previous RCL may have written to one of our textures, and
* our full cache flush at bin time may have occurred before
* that RCL completed. Flush the texture cache now, but not
@@ -531,6 +543,9 @@ vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec)
struct vc4_dev *vc4 = to_vc4_dev(dev);
bool was_empty = list_empty(&vc4->render_job_list);
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
list_move_tail(&exec->head, &vc4->render_job_list);
if (was_empty)
vc4_submit_next_render_job(dev);
@@ -997,6 +1012,9 @@ vc4_job_handle_completed(struct vc4_dev *vc4)
unsigned long irqflags;
struct vc4_seqno_cb *cb, *cb_temp;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
spin_lock_irqsave(&vc4->job_lock, irqflags);
while (!list_empty(&vc4->job_done_list)) {
struct vc4_exec_info *exec =
@@ -1033,6 +1051,9 @@ int vc4_queue_seqno_cb(struct drm_device *dev,
struct vc4_dev *vc4 = to_vc4_dev(dev);
unsigned long irqflags;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
cb->func = func;
INIT_WORK(&cb->work, vc4_seqno_cb_work);
@@ -1083,8 +1104,12 @@ int
vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_vc4_wait_seqno *args = data;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
return vc4_wait_for_seqno_ioctl_helper(dev, args->seqno,
&args->timeout_ns);
}
@@ -1093,11 +1118,15 @@ int
vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
int ret;
struct drm_vc4_wait_bo *args = data;
struct drm_gem_object *gem_obj;
struct vc4_bo *bo;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
if (args->pad != 0)
return -EINVAL;
@@ -1144,6 +1173,9 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
args->shader_rec_size,
args->bo_handle_count);
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
if (!vc4->v3d) {
DRM_DEBUG("VC4_SUBMIT_CL with no VC4 V3D probed\n");
return -ENODEV;
@@ -1167,6 +1199,7 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
DRM_ERROR("malloc failure on exec struct\n");
return -ENOMEM;
}
+ exec->dev = vc4;
ret = vc4_v3d_pm_get(vc4);
if (ret) {
@@ -1276,6 +1309,9 @@ int vc4_gem_init(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
vc4->dma_fence_context = dma_fence_context_alloc(1);
INIT_LIST_HEAD(&vc4->bin_job_list);
@@ -1321,11 +1357,15 @@ static void vc4_gem_destroy(struct drm_device *dev, void *unused)
int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_vc4_gem_madvise *args = data;
struct drm_gem_object *gem_obj;
struct vc4_bo *bo;
int ret;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
switch (args->madv) {
case VC4_MADV_DONTNEED:
case VC4_MADV_WILLNEED:
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 823d812f4982..ce9d16666d91 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -1481,7 +1481,7 @@ vc4_hdmi_encoder_compute_mode_clock(const struct drm_display_mode *mode,
unsigned int bpc,
enum vc4_hdmi_output_format fmt)
{
- unsigned long long clock = mode->clock * 1000;
+ unsigned long long clock = mode->clock * 1000ULL;
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
clock = clock * 2;
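The 1000ULL suffix matters because mode->clock is an int in kHz: without it, clock * 1000 is evaluated in 32-bit int first (signed overflow, undefined behaviour) and only then widened to unsigned long long. A small runnable demonstration of the hazard, using an illustrative 8K-class pixel clock; it computes the product safely in 64 bits rather than triggering the overflow:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	int clock_khz = 2376000;	/* illustrative 8K-class mode clock */
	/* The cast does what the 1000ULL suffix achieves: widen first. */
	long long wide = (long long)clock_khz * 1000;

	printf("%lld fits in int? %s (INT_MAX=%d)\n",
	       wide, wide <= INT_MAX ? "yes" : "no", INT_MAX);
	return 0;
}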
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index 2a58fc421cf6..ba2c8e5a9b64 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -220,10 +220,11 @@ u8 vc4_hvs_get_fifo_frame_count(struct vc4_hvs *hvs, unsigned int fifo)
int vc4_hvs_get_fifo_from_output(struct vc4_hvs *hvs, unsigned int output)
{
+ struct vc4_dev *vc4 = hvs->vc4;
u32 reg;
int ret;
- if (!hvs->hvs5)
+ if (!vc4->is_vc5)
return output;
switch (output) {
@@ -273,6 +274,7 @@ int vc4_hvs_get_fifo_from_output(struct vc4_hvs *hvs, unsigned int output)
static int vc4_hvs_init_channel(struct vc4_hvs *hvs, struct drm_crtc *crtc,
struct drm_display_mode *mode, bool oneshot)
{
+ struct vc4_dev *vc4 = hvs->vc4;
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
struct vc4_crtc_state *vc4_crtc_state = to_vc4_crtc_state(crtc->state);
unsigned int chan = vc4_crtc_state->assigned_channel;
@@ -291,7 +293,7 @@ static int vc4_hvs_init_channel(struct vc4_hvs *hvs, struct drm_crtc *crtc,
*/
dispctrl = SCALER_DISPCTRLX_ENABLE;
- if (!hvs->hvs5)
+ if (!vc4->is_vc5)
dispctrl |= VC4_SET_FIELD(mode->hdisplay,
SCALER_DISPCTRLX_WIDTH) |
VC4_SET_FIELD(mode->vdisplay,
@@ -312,7 +314,7 @@ static int vc4_hvs_init_channel(struct vc4_hvs *hvs, struct drm_crtc *crtc,
HVS_WRITE(SCALER_DISPBKGNDX(chan), dispbkgndx |
SCALER_DISPBKGND_AUTOHS |
- ((!hvs->hvs5) ? SCALER_DISPBKGND_GAMMA : 0) |
+ ((!vc4->is_vc5) ? SCALER_DISPBKGND_GAMMA : 0) |
(interlace ? SCALER_DISPBKGND_INTERLACE : 0));
/* Reload the LUT, since the SRAMs would have been disabled if
@@ -617,11 +619,9 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
if (!hvs)
return -ENOMEM;
+ hvs->vc4 = vc4;
hvs->pdev = pdev;
- if (of_device_is_compatible(pdev->dev.of_node, "brcm,bcm2711-hvs"))
- hvs->hvs5 = true;
-
hvs->regs = vc4_ioremap_regs(pdev, 0);
if (IS_ERR(hvs->regs))
return PTR_ERR(hvs->regs);
@@ -630,7 +630,7 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
hvs->regset.regs = hvs_regs;
hvs->regset.nregs = ARRAY_SIZE(hvs_regs);
- if (hvs->hvs5) {
+ if (vc4->is_vc5) {
hvs->core_clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(hvs->core_clk)) {
dev_err(&pdev->dev, "Couldn't get core clock\n");
@@ -644,7 +644,7 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
}
}
- if (!hvs->hvs5)
+ if (!vc4->is_vc5)
hvs->dlist = hvs->regs + SCALER_DLIST_START;
else
hvs->dlist = hvs->regs + SCALER5_DLIST_START;
@@ -665,7 +665,7 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
* between planes when they don't overlap on the screen, but
* for now we just allocate globally.
*/
- if (!hvs->hvs5)
+ if (!vc4->is_vc5)
/* 48k words of 2x12-bit pixels */
drm_mm_init(&hvs->lbm_mm, 0, 48 * 1024);
else
diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
index 4342fb43e8c1..2eacfb6773d2 100644
--- a/drivers/gpu/drm/vc4/vc4_irq.c
+++ b/drivers/gpu/drm/vc4/vc4_irq.c
@@ -265,6 +265,9 @@ vc4_irq_enable(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
if (!vc4->v3d)
return;
@@ -279,6 +282,9 @@ vc4_irq_disable(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
if (!vc4->v3d)
return;
@@ -296,8 +302,12 @@ vc4_irq_disable(struct drm_device *dev)
int vc4_irq_install(struct drm_device *dev, int irq)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
int ret;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
if (irq == IRQ_NOTCONNECTED)
return -ENOTCONN;
@@ -316,6 +326,9 @@ void vc4_irq_uninstall(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
vc4_irq_disable(dev);
free_irq(vc4->irq, dev);
}
@@ -326,6 +339,9 @@ void vc4_irq_reset(struct drm_device *dev)
struct vc4_dev *vc4 = to_vc4_dev(dev);
unsigned long irqflags;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
/* Acknowledge any stale IRQs. */
V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index c169bd72e53b..893d831b24aa 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -393,7 +393,7 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
old_hvs_state->fifo_state[channel].pending_commit = NULL;
}
- if (vc4->hvs->hvs5) {
+ if (vc4->is_vc5) {
unsigned long state_rate = max(old_hvs_state->core_clock_rate,
new_hvs_state->core_clock_rate);
unsigned long core_rate = max_t(unsigned long,
@@ -412,7 +412,7 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
vc4_ctm_commit(vc4, state);
- if (vc4->hvs->hvs5)
+ if (vc4->is_vc5)
vc5_hvs_pv_muxing_commit(vc4, state);
else
vc4_hvs_pv_muxing_commit(vc4, state);
@@ -430,7 +430,7 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_cleanup_planes(dev, state);
- if (vc4->hvs->hvs5) {
+ if (vc4->is_vc5) {
drm_dbg(dev, "Running the core clock at %lu Hz\n",
new_hvs_state->core_clock_rate);
@@ -479,8 +479,12 @@ static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_mode_fb_cmd2 mode_cmd_local;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return ERR_PTR(-ENODEV);
+
/* If the user didn't specify a modifier, use the
* vc4_set_tiling_ioctl() state for the BO.
*/
@@ -997,11 +1001,15 @@ static const struct drm_mode_config_funcs vc4_mode_funcs = {
.fb_create = vc4_fb_create,
};
+static const struct drm_mode_config_funcs vc5_mode_funcs = {
+ .atomic_check = vc4_atomic_check,
+ .atomic_commit = drm_atomic_helper_commit,
+ .fb_create = drm_gem_fb_create,
+};
+
int vc4_kms_load(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
- bool is_vc5 = of_device_is_compatible(dev->dev->of_node,
- "brcm,bcm2711-vc5");
int ret;
/*
@@ -1009,7 +1017,7 @@ int vc4_kms_load(struct drm_device *dev)
* the BCM2711, but the load tracker computations are used for
* the core clock rate calculation.
*/
- if (!is_vc5) {
+ if (!vc4->is_vc5) {
/* Start with the load tracker enabled. Can be
* disabled through the debugfs load_tracker file.
*/
@@ -1025,7 +1033,7 @@ int vc4_kms_load(struct drm_device *dev)
return ret;
}
- if (is_vc5) {
+ if (vc4->is_vc5) {
dev->mode_config.max_width = 7680;
dev->mode_config.max_height = 7680;
} else {
@@ -1033,7 +1041,7 @@ int vc4_kms_load(struct drm_device *dev)
dev->mode_config.max_height = 2048;
}
- dev->mode_config.funcs = &vc4_mode_funcs;
+ dev->mode_config.funcs = vc4->is_vc5 ? &vc5_mode_funcs : &vc4_mode_funcs;
dev->mode_config.helper_private = &vc4_mode_config_helpers;
dev->mode_config.preferred_depth = 24;
dev->mode_config.async_page_flip = true;
diff --git a/drivers/gpu/drm/vc4/vc4_perfmon.c b/drivers/gpu/drm/vc4/vc4_perfmon.c
index 18abc06335c1..79a74184d732 100644
--- a/drivers/gpu/drm/vc4/vc4_perfmon.c
+++ b/drivers/gpu/drm/vc4/vc4_perfmon.c
@@ -17,13 +17,30 @@
void vc4_perfmon_get(struct vc4_perfmon *perfmon)
{
- if (perfmon)
- refcount_inc(&perfmon->refcnt);
+ struct vc4_dev *vc4;
+
+ if (!perfmon)
+ return;
+
+ vc4 = perfmon->dev;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
+ refcount_inc(&perfmon->refcnt);
}
void vc4_perfmon_put(struct vc4_perfmon *perfmon)
{
- if (perfmon && refcount_dec_and_test(&perfmon->refcnt))
+ struct vc4_dev *vc4;
+
+ if (!perfmon)
+ return;
+
+ vc4 = perfmon->dev;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
+ if (refcount_dec_and_test(&perfmon->refcnt))
kfree(perfmon);
}
@@ -32,6 +49,9 @@ void vc4_perfmon_start(struct vc4_dev *vc4, struct vc4_perfmon *perfmon)
unsigned int i;
u32 mask;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
if (WARN_ON_ONCE(!perfmon || vc4->active_perfmon))
return;
@@ -49,6 +69,9 @@ void vc4_perfmon_stop(struct vc4_dev *vc4, struct vc4_perfmon *perfmon,
{
unsigned int i;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
if (WARN_ON_ONCE(!vc4->active_perfmon ||
perfmon != vc4->active_perfmon))
return;
@@ -64,8 +87,12 @@ void vc4_perfmon_stop(struct vc4_dev *vc4, struct vc4_perfmon *perfmon,
struct vc4_perfmon *vc4_perfmon_find(struct vc4_file *vc4file, int id)
{
+ struct vc4_dev *vc4 = vc4file->dev;
struct vc4_perfmon *perfmon;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return NULL;
+
mutex_lock(&vc4file->perfmon.lock);
perfmon = idr_find(&vc4file->perfmon.idr, id);
vc4_perfmon_get(perfmon);
@@ -76,8 +103,14 @@ struct vc4_perfmon *vc4_perfmon_find(struct vc4_file *vc4file, int id)
void vc4_perfmon_open_file(struct vc4_file *vc4file)
{
+ struct vc4_dev *vc4 = vc4file->dev;
+
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
mutex_init(&vc4file->perfmon.lock);
idr_init_base(&vc4file->perfmon.idr, VC4_PERFMONID_MIN);
+ vc4file->dev = vc4;
}
static int vc4_perfmon_idr_del(int id, void *elem, void *data)
@@ -91,6 +124,11 @@ static int vc4_perfmon_idr_del(int id, void *elem, void *data)
void vc4_perfmon_close_file(struct vc4_file *vc4file)
{
+ struct vc4_dev *vc4 = vc4file->dev;
+
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
mutex_lock(&vc4file->perfmon.lock);
idr_for_each(&vc4file->perfmon.idr, vc4_perfmon_idr_del, NULL);
idr_destroy(&vc4file->perfmon.idr);
@@ -107,6 +145,9 @@ int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data,
unsigned int i;
int ret;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
if (!vc4->v3d) {
DRM_DEBUG("Creating perfmon no VC4 V3D probed\n");
return -ENODEV;
@@ -127,6 +168,7 @@ int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data,
GFP_KERNEL);
if (!perfmon)
return -ENOMEM;
+ perfmon->dev = vc4;
for (i = 0; i < req->ncounters; i++)
perfmon->events[i] = req->events[i];
@@ -157,6 +199,9 @@ int vc4_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_vc4_perfmon_destroy *req = data;
struct vc4_perfmon *perfmon;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
if (!vc4->v3d) {
DRM_DEBUG("Destroying perfmon no VC4 V3D probed\n");
return -ENODEV;
@@ -182,6 +227,9 @@ int vc4_perfmon_get_values_ioctl(struct drm_device *dev, void *data,
struct vc4_perfmon *perfmon;
int ret;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
if (!vc4->v3d) {
DRM_DEBUG("Getting perfmon no VC4 V3D probed\n");
return -ENODEV;
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index b3438f4a81ce..1e866dc00ac3 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -489,10 +489,10 @@ static u32 vc4_lbm_size(struct drm_plane_state *state)
}
/* Align it to 64 or 128 (hvs5) bytes */
- lbm = roundup(lbm, vc4->hvs->hvs5 ? 128 : 64);
+ lbm = roundup(lbm, vc4->is_vc5 ? 128 : 64);
/* Each "word" of the LBM memory contains 2 or 4 (hvs5) pixels */
- lbm /= vc4->hvs->hvs5 ? 4 : 2;
+ lbm /= vc4->is_vc5 ? 4 : 2;
return lbm;
}
@@ -608,7 +608,7 @@ static int vc4_plane_allocate_lbm(struct drm_plane_state *state)
ret = drm_mm_insert_node_generic(&vc4->hvs->lbm_mm,
&vc4_state->lbm,
lbm_size,
- vc4->hvs->hvs5 ? 64 : 32,
+ vc4->is_vc5 ? 64 : 32,
0, 0);
spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);
@@ -917,7 +917,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
mix_plane_alpha = state->alpha != DRM_BLEND_ALPHA_OPAQUE &&
fb->format->has_alpha;
- if (!vc4->hvs->hvs5) {
+ if (!vc4->is_vc5) {
/* Control word */
vc4_dlist_write(vc4_state,
SCALER_CTL0_VALID |
@@ -1321,6 +1321,10 @@ static int vc4_plane_atomic_async_check(struct drm_plane *plane,
old_vc4_state = to_vc4_plane_state(plane->state);
new_vc4_state = to_vc4_plane_state(new_plane_state);
+
+ if (!new_vc4_state->hw_dlist)
+ return -EINVAL;
+
if (old_vc4_state->dlist_count != new_vc4_state->dlist_count ||
old_vc4_state->pos0_offset != new_vc4_state->pos0_offset ||
old_vc4_state->pos2_offset != new_vc4_state->pos2_offset ||
@@ -1385,6 +1389,13 @@ static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = {
.atomic_async_update = vc4_plane_atomic_async_update,
};
+static const struct drm_plane_helper_funcs vc5_plane_helper_funcs = {
+ .atomic_check = vc4_plane_atomic_check,
+ .atomic_update = vc4_plane_atomic_update,
+ .atomic_async_check = vc4_plane_atomic_async_check,
+ .atomic_async_update = vc4_plane_atomic_async_update,
+};
+
static bool vc4_format_mod_supported(struct drm_plane *plane,
uint32_t format,
uint64_t modifier)
@@ -1453,14 +1464,13 @@ static const struct drm_plane_funcs vc4_plane_funcs = {
struct drm_plane *vc4_plane_init(struct drm_device *dev,
enum drm_plane_type type)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_plane *plane = NULL;
struct vc4_plane *vc4_plane;
u32 formats[ARRAY_SIZE(hvs_formats)];
int num_formats = 0;
int ret = 0;
unsigned i;
- bool hvs5 = of_device_is_compatible(dev->dev->of_node,
- "brcm,bcm2711-vc5");
static const uint64_t modifiers[] = {
DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED,
DRM_FORMAT_MOD_BROADCOM_SAND128,
@@ -1476,7 +1486,7 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
return ERR_PTR(-ENOMEM);
for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) {
- if (!hvs_formats[i].hvs5_only || hvs5) {
+ if (!hvs_formats[i].hvs5_only || vc4->is_vc5) {
formats[num_formats] = hvs_formats[i].drm;
num_formats++;
}
@@ -1490,7 +1500,10 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
if (ret)
return ERR_PTR(ret);
- drm_plane_helper_add(plane, &vc4_plane_helper_funcs);
+ if (vc4->is_vc5)
+ drm_plane_helper_add(plane, &vc5_plane_helper_funcs);
+ else
+ drm_plane_helper_add(plane, &vc4_plane_helper_funcs);
drm_plane_create_alpha_property(plane);
drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c
index 3c918eeaf56e..f6b7dc3df08c 100644
--- a/drivers/gpu/drm/vc4/vc4_render_cl.c
+++ b/drivers/gpu/drm/vc4/vc4_render_cl.c
@@ -593,11 +593,15 @@ vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec,
int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_rcl_setup setup = {0};
struct drm_vc4_submit_cl *args = exec->args;
bool has_bin = args->bin_cl_size != 0;
int ret;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
if (args->min_x_tile > args->max_x_tile ||
args->min_y_tile > args->max_y_tile) {
DRM_DEBUG("Bad render tile set (%d,%d)-(%d,%d)\n",
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index 7bb3067f8425..cc714dcfe1f2 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -127,6 +127,9 @@ static int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused)
int
vc4_v3d_pm_get(struct vc4_dev *vc4)
{
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
mutex_lock(&vc4->power_lock);
if (vc4->power_refcount++ == 0) {
int ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
@@ -145,6 +148,9 @@ vc4_v3d_pm_get(struct vc4_dev *vc4)
void
vc4_v3d_pm_put(struct vc4_dev *vc4)
{
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
mutex_lock(&vc4->power_lock);
if (--vc4->power_refcount == 0) {
pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
@@ -172,6 +178,9 @@ int vc4_v3d_get_bin_slot(struct vc4_dev *vc4)
uint64_t seqno = 0;
struct vc4_exec_info *exec;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
try_again:
spin_lock_irqsave(&vc4->job_lock, irqflags);
slot = ffs(~vc4->bin_alloc_used);
@@ -316,6 +325,9 @@ int vc4_v3d_bin_bo_get(struct vc4_dev *vc4, bool *used)
{
int ret = 0;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
mutex_lock(&vc4->bin_bo_lock);
if (used && *used)
@@ -348,6 +360,9 @@ static void bin_bo_release(struct kref *ref)
void vc4_v3d_bin_bo_put(struct vc4_dev *vc4)
{
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return;
+
mutex_lock(&vc4->bin_bo_lock);
kref_put(&vc4->bin_bo_kref, bin_bo_release);
mutex_unlock(&vc4->bin_bo_lock);
diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c
index eec76af49f04..2feba55bcef7 100644
--- a/drivers/gpu/drm/vc4/vc4_validate.c
+++ b/drivers/gpu/drm/vc4/vc4_validate.c
@@ -105,9 +105,13 @@ size_is_lt(uint32_t width, uint32_t height, int cpp)
struct drm_gem_cma_object *
vc4_use_bo(struct vc4_exec_info *exec, uint32_t hindex)
{
+ struct vc4_dev *vc4 = exec->dev;
struct drm_gem_cma_object *obj;
struct vc4_bo *bo;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return NULL;
+
if (hindex >= exec->bo_count) {
DRM_DEBUG("BO index %d greater than BO count %d\n",
hindex, exec->bo_count);
@@ -160,10 +164,14 @@ vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_cma_object *fbo,
uint32_t offset, uint8_t tiling_format,
uint32_t width, uint32_t height, uint8_t cpp)
{
+ struct vc4_dev *vc4 = exec->dev;
uint32_t aligned_width, aligned_height, stride, size;
uint32_t utile_w = utile_width(cpp);
uint32_t utile_h = utile_height(cpp);
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return false;
+
/* The shaded vertex format stores signed 12.4 fixed point
* (-2048,2047) offsets from the viewport center, so we should
* never have a render target larger than 4096. The texture
@@ -482,10 +490,14 @@ vc4_validate_bin_cl(struct drm_device *dev,
void *unvalidated,
struct vc4_exec_info *exec)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
uint32_t len = exec->args->bin_cl_size;
uint32_t dst_offset = 0;
uint32_t src_offset = 0;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
while (src_offset < len) {
void *dst_pkt = validated + dst_offset;
void *src_pkt = unvalidated + src_offset;
@@ -926,9 +938,13 @@ int
vc4_validate_shader_recs(struct drm_device *dev,
struct vc4_exec_info *exec)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
uint32_t i;
int ret = 0;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return -ENODEV;
+
for (i = 0; i < exec->shader_state_count; i++) {
ret = validate_gl_shader_rec(dev, exec, &exec->shader_state[i]);
if (ret)
diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
index 7cf82b071de2..e315aeb5fef5 100644
--- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c
+++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
@@ -778,6 +778,7 @@ vc4_handle_branch_target(struct vc4_shader_validation_state *validation_state)
struct vc4_validated_shader_info *
vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
{
+ struct vc4_dev *vc4 = to_vc4_dev(shader_obj->base.dev);
bool found_shader_end = false;
int shader_end_ip = 0;
uint32_t last_thread_switch_ip = -3;
@@ -785,6 +786,9 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
struct vc4_validated_shader_info *validated_shader = NULL;
struct vc4_shader_validation_state validation_state;
+ if (WARN_ON_ONCE(vc4->is_vc5))
+ return NULL;
+
memset(&validation_state, 0, sizeof(validation_state));
validation_state.shader = shader_obj->vaddr;
validation_state.max_ip = shader_obj->base.size / sizeof(uint64_t);
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c
index 5a5bf4e5b717..e31554d7139f 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_gem.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c
@@ -71,7 +71,7 @@ static int xen_drm_front_gem_object_mmap(struct drm_gem_object *gem_obj,
* the whole buffer.
*/
vma->vm_flags &= ~VM_PFNMAP;
- vma->vm_flags |= VM_MIXEDMAP;
+ vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
vma->vm_pgoff = 0;
/*
diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
index 978ee2aab2d4..e0bc73124196 100644
--- a/drivers/hid/hid-hyperv.c
+++ b/drivers/hid/hid-hyperv.c
@@ -199,7 +199,8 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device,
if (!input_device->hid_desc)
goto cleanup;
- input_device->report_desc_size = desc->desc[0].wDescriptorLength;
+ input_device->report_desc_size = le16_to_cpu(
+ desc->desc[0].wDescriptorLength);
if (input_device->report_desc_size == 0) {
input_device->dev_info_status = -EINVAL;
goto cleanup;
@@ -217,7 +218,7 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device,
memcpy(input_device->report_desc,
((unsigned char *)desc) + desc->bLength,
- desc->desc[0].wDescriptorLength);
+ le16_to_cpu(desc->desc[0].wDescriptorLength));
/* Send the ack */
memset(&ack, 0, sizeof(struct mousevsc_prt_msg));
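wDescriptorLength in the HID descriptor is little-endian on the wire, so reading it raw gives a byte-swapped length on big-endian hosts; hence the le16_to_cpu() wrappers above. A runnable userspace analogue of the decode (the kernel helper itself is real; this program just shows the byte-wise equivalent):

#include <stdint.h>
#include <stdio.h>

/* Decode a 16-bit little-endian field independent of host endianness. */
static uint16_t le16_decode(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
	const uint8_t wire[2] = { 0x3f, 0x00 };	/* 63, little-endian */

	printf("len=%u\n", le16_decode(wire));	/* 63 on any host */
	return 0;
}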
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index b60f13481bdc..5b120402d405 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -21,6 +21,7 @@
#include <linux/cpu.h>
#include <linux/hyperv.h>
#include <asm/mshyperv.h>
+#include <linux/sched/isolation.h>
#include "hyperv_vmbus.h"
@@ -638,6 +639,7 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
*/
if (newchannel->offermsg.offer.sub_channel_index == 0) {
mutex_unlock(&vmbus_connection.channel_mutex);
+ cpus_read_unlock();
/*
* Don't call free_channel(), because newchannel->kobj
* is not initialized yet.
@@ -728,16 +730,20 @@ static void init_vp_index(struct vmbus_channel *channel)
u32 i, ncpu = num_online_cpus();
cpumask_var_t available_mask;
struct cpumask *allocated_mask;
+ const struct cpumask *hk_mask = housekeeping_cpumask(HK_TYPE_MANAGED_IRQ);
u32 target_cpu;
int numa_node;
if (!perf_chn ||
- !alloc_cpumask_var(&available_mask, GFP_KERNEL)) {
+ !alloc_cpumask_var(&available_mask, GFP_KERNEL) ||
+ cpumask_empty(hk_mask)) {
/*
* If the channel is not a performance critical
* channel, bind it to VMBUS_CONNECT_CPU.
* In case alloc_cpumask_var() fails, bind it to
* VMBUS_CONNECT_CPU.
+ * If all the cpus are isolated, bind it to
+ * VMBUS_CONNECT_CPU.
*/
channel->target_cpu = VMBUS_CONNECT_CPU;
if (perf_chn)
@@ -758,17 +764,19 @@ static void init_vp_index(struct vmbus_channel *channel)
}
allocated_mask = &hv_context.hv_numa_map[numa_node];
- if (cpumask_equal(allocated_mask, cpumask_of_node(numa_node))) {
+retry:
+ cpumask_xor(available_mask, allocated_mask, cpumask_of_node(numa_node));
+ cpumask_and(available_mask, available_mask, hk_mask);
+
+ if (cpumask_empty(available_mask)) {
/*
* We have cycled through all the CPUs in the node;
* reset the allocated map.
*/
cpumask_clear(allocated_mask);
+ goto retry;
}
- cpumask_xor(available_mask, allocated_mask,
- cpumask_of_node(numa_node));
-
target_cpu = cpumask_first(available_mask);
cpumask_set_cpu(target_cpu, allocated_mask);
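The rewritten loop above first removes already-used CPUs (xor against the node mask), then drops isolated CPUs (and against the housekeeping mask); only when nothing survives does it reset the allocated map and retry. A hedged kernel-context sketch of just that selection step (cpumask API is real; the function and parameters are assumptions for illustration):

#include <linux/cpumask.h>

/* Caller must guarantee hk_mask intersects node_mask, as the hunk
 * above does by bailing out earlier when hk_mask is empty; otherwise
 * this loop would not terminate. */
static int pick_target_cpu(struct cpumask *available,
			   struct cpumask *allocated,
			   const struct cpumask *node_mask,
			   const struct cpumask *hk_mask)
{
	int cpu;

retry:
	cpumask_xor(available, allocated, node_mask);	/* not yet used */
	cpumask_and(available, available, hk_mask);	/* minus isolated */

	if (cpumask_empty(available)) {
		cpumask_clear(allocated);	/* cycled the node: restart */
		goto retry;
	}

	cpu = cpumask_first(available);
	cpumask_set_cpu(cpu, allocated);
	return cpu;
}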
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index c698592b83e4..d35b60c06114 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -394,7 +394,7 @@ kvp_send_key(struct work_struct *dummy)
in_msg = kvp_transaction.kvp_msg;
/*
- * The key/value strings sent from the host are encoded in
+ * The key/value strings sent from the host are encoded
* in utf16; convert it to utf8 strings.
* The host assures us that the utf16 strings will not exceed
* the max lengths specified. We will however, reserve room
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 714d549b7b46..547ae334e5cd 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -21,6 +21,7 @@
#include <linux/kernel_stat.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
+#include <linux/sched/isolation.h>
#include <linux/sched/task_stack.h>
#include <linux/delay.h>
@@ -1770,6 +1771,9 @@ static ssize_t target_cpu_store(struct vmbus_channel *channel,
if (target_cpu >= nr_cpumask_bits)
return -EINVAL;
+ if (!cpumask_test_cpu(target_cpu, housekeeping_cpumask(HK_TYPE_MANAGED_IRQ)))
+ return -EINVAL;
+
/* No CPUs should come up or down during this. */
cpus_read_lock();
diff --git a/drivers/hwmon/asus-ec-sensors.c b/drivers/hwmon/asus-ec-sensors.c
index 57e11b2bab74..3633ab691662 100644
--- a/drivers/hwmon/asus-ec-sensors.c
+++ b/drivers/hwmon/asus-ec-sensors.c
@@ -259,7 +259,7 @@ static const struct ec_board_info board_info[] = {
},
{
.board_names = {
- "ROG CROSSHAIR VIII FORMULA"
+ "ROG CROSSHAIR VIII FORMULA",
"ROG CROSSHAIR VIII HERO",
"ROG CROSSHAIR VIII HERO (WI-FI)",
},
diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
index 5c4cf742f5ae..157e232aace0 100644
--- a/drivers/hwmon/ibmaem.c
+++ b/drivers/hwmon/ibmaem.c
@@ -550,7 +550,7 @@ static int aem_init_aem1_inst(struct aem_ipmi_data *probe, u8 module_handle)
res = platform_device_add(data->pdev);
if (res)
- goto ipmi_err;
+ goto dev_add_err;
platform_set_drvdata(data->pdev, data);
@@ -598,7 +598,9 @@ hwmon_reg_err:
ipmi_destroy_user(data->ipmi.user);
ipmi_err:
platform_set_drvdata(data->pdev, NULL);
- platform_device_unregister(data->pdev);
+ platform_device_del(data->pdev);
+dev_add_err:
+ platform_device_put(data->pdev);
dev_err:
ida_free(&aem_ida, data->id);
id_err:
@@ -690,7 +692,7 @@ static int aem_init_aem2_inst(struct aem_ipmi_data *probe,
res = platform_device_add(data->pdev);
if (res)
- goto ipmi_err;
+ goto dev_add_err;
platform_set_drvdata(data->pdev, data);
@@ -738,7 +740,9 @@ hwmon_reg_err:
ipmi_destroy_user(data->ipmi.user);
ipmi_err:
platform_set_drvdata(data->pdev, NULL);
- platform_device_unregister(data->pdev);
+ platform_device_del(data->pdev);
+dev_add_err:
+ platform_device_put(data->pdev);
dev_err:
ida_free(&aem_ida, data->id);
id_err:
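The ibmaem fix separates the two failure points: once platform_device_add() has failed, the device was never registered, so platform_device_unregister() (which performs del + put) is wrong; only the allocation reference may be dropped with platform_device_put(). After a successful add, teardown is platform_device_del() followed by platform_device_put(). A hedged sketch of the corrected ladder (real platform-device API; names illustrative):

#include <linux/platform_device.h>

static int register_child(void)
{
	struct platform_device *pdev;
	int ret;

	pdev = platform_device_alloc("example-dev", 0);
	if (!pdev)
		return -ENOMEM;

	ret = platform_device_add(pdev);
	if (ret)
		goto err_put;	/* never added: put only, no del */

	return 0;

err_put:
	platform_device_put(pdev);	/* drop the alloc reference */
	return ret;
}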
diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c
index d78f4bebc718..157b73a3da29 100644
--- a/drivers/hwmon/occ/common.c
+++ b/drivers/hwmon/occ/common.c
@@ -145,7 +145,7 @@ static int occ_poll(struct occ *occ)
cmd[6] = 0; /* checksum lsb */
/* mutex should already be locked if necessary */
- rc = occ->send_cmd(occ, cmd, sizeof(cmd));
+ rc = occ->send_cmd(occ, cmd, sizeof(cmd), &occ->resp, sizeof(occ->resp));
if (rc) {
occ->last_error = rc;
if (occ->error_count++ > OCC_ERROR_COUNT_THRESHOLD)
@@ -182,6 +182,7 @@ static int occ_set_user_power_cap(struct occ *occ, u16 user_power_cap)
{
int rc;
u8 cmd[8];
+ u8 resp[8];
__be16 user_power_cap_be = cpu_to_be16(user_power_cap);
cmd[0] = 0; /* sequence number */
@@ -198,7 +199,7 @@ static int occ_set_user_power_cap(struct occ *occ, u16 user_power_cap)
if (rc)
return rc;
- rc = occ->send_cmd(occ, cmd, sizeof(cmd));
+ rc = occ->send_cmd(occ, cmd, sizeof(cmd), resp, sizeof(resp));
mutex_unlock(&occ->lock);
@@ -1228,10 +1229,15 @@ EXPORT_SYMBOL_GPL(occ_setup);
void occ_shutdown(struct occ *occ)
{
+ mutex_lock(&occ->lock);
+
occ_shutdown_sysfs(occ);
if (occ->hwmon)
hwmon_device_unregister(occ->hwmon);
+ occ->hwmon = NULL;
+
+ mutex_unlock(&occ->lock);
}
EXPORT_SYMBOL_GPL(occ_shutdown);
diff --git a/drivers/hwmon/occ/common.h b/drivers/hwmon/occ/common.h
index 64d5ec7e169b..7ac4b2febce6 100644
--- a/drivers/hwmon/occ/common.h
+++ b/drivers/hwmon/occ/common.h
@@ -96,7 +96,8 @@ struct occ {
int powr_sample_time_us; /* average power sample time */
u8 poll_cmd_data; /* to perform OCC poll command */
- int (*send_cmd)(struct occ *occ, u8 *cmd, size_t len);
+ int (*send_cmd)(struct occ *occ, u8 *cmd, size_t len, void *resp,
+ size_t resp_len);
unsigned long next_update;
struct mutex lock; /* lock OCC access */
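The widened send_cmd() hook lets each caller choose and bound its own response buffer instead of always writing into occ->resp, which is what allows occ_set_user_power_cap() above to use a small stack array. A sketch of a call through the hook (struct occ and the hook signature come from the patch; this is not the driver code itself):

/* Illustrative caller: the response lands in the caller's buffer and
 * is bounded by its size, rather than implicitly using occ->resp. */
static int do_small_command(struct occ *occ)
{
	u8 cmd[8] = { 0 };
	u8 resp[8];

	return occ->send_cmd(occ, cmd, sizeof(cmd), resp, sizeof(resp));
}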
diff --git a/drivers/hwmon/occ/p8_i2c.c b/drivers/hwmon/occ/p8_i2c.c
index da39ea28df31..b221be1f35f3 100644
--- a/drivers/hwmon/occ/p8_i2c.c
+++ b/drivers/hwmon/occ/p8_i2c.c
@@ -111,7 +111,8 @@ static int p8_i2c_occ_putscom_be(struct i2c_client *client, u32 address,
be32_to_cpu(data1));
}
-static int p8_i2c_occ_send_cmd(struct occ *occ, u8 *cmd, size_t len)
+static int p8_i2c_occ_send_cmd(struct occ *occ, u8 *cmd, size_t len,
+ void *resp, size_t resp_len)
{
int i, rc;
unsigned long start;
@@ -120,7 +121,7 @@ static int p8_i2c_occ_send_cmd(struct occ *occ, u8 *cmd, size_t len)
const long wait_time = msecs_to_jiffies(OCC_CMD_IN_PRG_WAIT_MS);
struct p8_i2c_occ *ctx = to_p8_i2c_occ(occ);
struct i2c_client *client = ctx->client;
- struct occ_response *resp = &occ->resp;
+ struct occ_response *or = (struct occ_response *)resp;
start = jiffies;
@@ -151,7 +152,7 @@ static int p8_i2c_occ_send_cmd(struct occ *occ, u8 *cmd, size_t len)
return rc;
/* wait for OCC */
- if (resp->return_status == OCC_RESP_CMD_IN_PRG) {
+ if (or->return_status == OCC_RESP_CMD_IN_PRG) {
rc = -EALREADY;
if (time_after(jiffies, start + timeout))
@@ -163,7 +164,7 @@ static int p8_i2c_occ_send_cmd(struct occ *occ, u8 *cmd, size_t len)
} while (rc);
/* check the OCC response */
- switch (resp->return_status) {
+ switch (or->return_status) {
case OCC_RESP_CMD_IN_PRG:
rc = -ETIMEDOUT;
break;
@@ -192,8 +193,8 @@ static int p8_i2c_occ_send_cmd(struct occ *occ, u8 *cmd, size_t len)
if (rc < 0)
return rc;
- data_length = get_unaligned_be16(&resp->data_length);
- if (data_length > OCC_RESP_DATA_BYTES)
+ data_length = get_unaligned_be16(&or->data_length);
+ if ((data_length + 7) > resp_len)
return -EMSGSIZE;
/* fetch the rest of the response data */
diff --git a/drivers/hwmon/occ/p9_sbe.c b/drivers/hwmon/occ/p9_sbe.c
index 42fc7b97bb34..a91937e28e12 100644
--- a/drivers/hwmon/occ/p9_sbe.c
+++ b/drivers/hwmon/occ/p9_sbe.c
@@ -78,11 +78,10 @@ done:
return notify;
}
-static int p9_sbe_occ_send_cmd(struct occ *occ, u8 *cmd, size_t len)
+static int p9_sbe_occ_send_cmd(struct occ *occ, u8 *cmd, size_t len,
+ void *resp, size_t resp_len)
{
- struct occ_response *resp = &occ->resp;
struct p9_sbe_occ *ctx = to_p9_sbe_occ(occ);
- size_t resp_len = sizeof(*resp);
int rc;
rc = fsi_occ_submit(ctx->sbe, cmd, len, resp, &resp_len);
@@ -96,7 +95,7 @@ static int p9_sbe_occ_send_cmd(struct occ *occ, u8 *cmd, size_t len)
return rc;
}
- switch (resp->return_status) {
+ switch (((struct occ_response *)resp)->return_status) {
case OCC_RESP_CMD_IN_PRG:
rc = -ETIMEDOUT;
break;
diff --git a/drivers/hwmon/pmbus/ucd9200.c b/drivers/hwmon/pmbus/ucd9200.c
index 6bc3273e31e7..3ad375a76f3e 100644
--- a/drivers/hwmon/pmbus/ucd9200.c
+++ b/drivers/hwmon/pmbus/ucd9200.c
@@ -148,7 +148,7 @@ static int ucd9200_probe(struct i2c_client *client)
* This only affects the READ_IOUT and READ_TEMPERATURE2 registers.
* READ_IOUT will return the sum of currents of all phases of a rail,
* and READ_TEMPERATURE2 will return the maximum temperature detected
- * for the the phases of the rail.
+ * for the phases of the rail.
*/
for (i = 0; i < info->pages; i++) {
/*
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index b4c1ad19cdae..630cfa4ddd46 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -388,9 +388,9 @@ static irqreturn_t cdns_i2c_slave_isr(void *ptr)
*/
static irqreturn_t cdns_i2c_master_isr(void *ptr)
{
- unsigned int isr_status, avail_bytes, updatetx;
+ unsigned int isr_status, avail_bytes;
unsigned int bytes_to_send;
- bool hold_quirk;
+ bool updatetx;
struct cdns_i2c *id = ptr;
/* Signal completion only after everything is updated */
int done_flag = 0;
@@ -410,11 +410,7 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr)
* Check if transfer size register needs to be updated again for a
* large data receive operation.
*/
- updatetx = 0;
- if (id->recv_count > id->curr_recv_count)
- updatetx = 1;
-
- hold_quirk = (id->quirks & CDNS_I2C_BROKEN_HOLD_BIT) && updatetx;
+ updatetx = id->recv_count > id->curr_recv_count;
/* When receiving, handle data interrupt and completion interrupt */
if (id->p_recv_buf &&
@@ -445,7 +441,7 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr)
break;
}
- if (cdns_is_holdquirk(id, hold_quirk))
+ if (cdns_is_holdquirk(id, updatetx))
break;
}
@@ -456,7 +452,7 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr)
* maintain transfer size non-zero while performing a large
* receive operation.
*/
- if (cdns_is_holdquirk(id, hold_quirk)) {
+ if (cdns_is_holdquirk(id, updatetx)) {
/* wait while fifo is full */
while (cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET) !=
(id->curr_recv_count - CDNS_I2C_FIFO_DEPTH))
@@ -478,22 +474,6 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr)
CDNS_I2C_XFER_SIZE_OFFSET);
id->curr_recv_count = id->recv_count;
}
- } else if (id->recv_count && !hold_quirk &&
- !id->curr_recv_count) {
-
- /* Set the slave address in address register*/
- cdns_i2c_writereg(id->p_msg->addr & CDNS_I2C_ADDR_MASK,
- CDNS_I2C_ADDR_OFFSET);
-
- if (id->recv_count > CDNS_I2C_TRANSFER_SIZE) {
- cdns_i2c_writereg(CDNS_I2C_TRANSFER_SIZE,
- CDNS_I2C_XFER_SIZE_OFFSET);
- id->curr_recv_count = CDNS_I2C_TRANSFER_SIZE;
- } else {
- cdns_i2c_writereg(id->recv_count,
- CDNS_I2C_XFER_SIZE_OFFSET);
- id->curr_recv_count = id->recv_count;
- }
}
/* Clear hold (if not repeated start) and signal completion */
@@ -1338,6 +1318,7 @@ static int cdns_i2c_probe(struct platform_device *pdev)
return 0;
err_clk_dis:
+ clk_notifier_unregister(id->clk, &id->clk_rate_change_nb);
clk_disable_unprepare(id->clk);
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c
index e7d316b1401a..c023b691441e 100644
--- a/drivers/i2c/busses/i2c-designware-common.c
+++ b/drivers/i2c/busses/i2c-designware-common.c
@@ -477,9 +477,6 @@ int i2c_dw_prepare_clk(struct dw_i2c_dev *dev, bool prepare)
{
int ret;
- if (IS_ERR(dev->clk))
- return PTR_ERR(dev->clk);
-
if (prepare) {
/* Optional interface clock */
ret = clk_prepare_enable(dev->pclk);
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 70ade5306e45..ba043b547393 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -320,8 +320,17 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
goto exit_reset;
}
- dev->clk = devm_clk_get(&pdev->dev, NULL);
- if (!i2c_dw_prepare_clk(dev, true)) {
+ dev->clk = devm_clk_get_optional(&pdev->dev, NULL);
+ if (IS_ERR(dev->clk)) {
+ ret = PTR_ERR(dev->clk);
+ goto exit_reset;
+ }
+
+ ret = i2c_dw_prepare_clk(dev, true);
+ if (ret)
+ goto exit_reset;
+
+ if (dev->clk) {
u64 clk_khz;
dev->get_clk_rate_khz = i2c_dw_get_clk_rate_khz;
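devm_clk_get_optional() is the key to the probe rework above: it returns NULL when the clock simply isn't described (not an error) and ERR_PTR() for genuine failures such as -EPROBE_DEFER, and the clk_*() calls treat a NULL clock as a no-op. A hedged sketch of the idiom (real clk API; function name illustrative):

#include <linux/clk.h>

static int probe_clock(struct device *dev, struct clk **out)
{
	struct clk *clk = devm_clk_get_optional(dev, NULL);

	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* real failure: bail out */

	*out = clk;	/* may be NULL: clk_prepare_enable(NULL) returns 0 */
	return clk_prepare_enable(clk);
}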
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index e9e2db68b9fb..78fb1a4274a6 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -66,7 +66,7 @@
/* IMX I2C registers:
* the I2C register offset is different between SoCs,
- * to provid support for all these chips, split the
+ * to provide support for all these chips, split the
* register offset into a fixed base address and a
* variable shift value, then the full register offset
* will be calculated by
diff --git a/drivers/i2c/busses/i2c-mlxcpld.c b/drivers/i2c/busses/i2c-mlxcpld.c
index 56aa424fd71d..815cc561386b 100644
--- a/drivers/i2c/busses/i2c-mlxcpld.c
+++ b/drivers/i2c/busses/i2c-mlxcpld.c
@@ -49,7 +49,7 @@
#define MLXCPLD_LPCI2C_NACK_IND 2
#define MLXCPLD_I2C_FREQ_1000KHZ_SET 0x04
-#define MLXCPLD_I2C_FREQ_400KHZ_SET 0x0c
+#define MLXCPLD_I2C_FREQ_400KHZ_SET 0x0e
#define MLXCPLD_I2C_FREQ_100KHZ_SET 0x42
enum mlxcpld_i2c_frequency {
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index bdecb78bfc26..8e6985354fd5 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -1420,17 +1420,22 @@ static int mtk_i2c_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(&pdev->dev,
"Request I2C IRQ %d fail\n", irq);
- return ret;
+ goto err_bulk_unprepare;
}
i2c_set_adapdata(&i2c->adap, i2c);
ret = i2c_add_adapter(&i2c->adap);
if (ret)
- return ret;
+ goto err_bulk_unprepare;
platform_set_drvdata(pdev, i2c);
return 0;
+
+err_bulk_unprepare:
+ clk_bulk_unprepare(I2C_MT65XX_CLK_MAX, i2c->clocks);
+
+ return ret;
}
static int mtk_i2c_remove(struct platform_device *pdev)
diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c
index 5960ccde6574..aede9d551130 100644
--- a/drivers/i2c/busses/i2c-npcm7xx.c
+++ b/drivers/i2c/busses/i2c-npcm7xx.c
@@ -2372,8 +2372,7 @@ static struct platform_driver npcm_i2c_bus_driver = {
static int __init npcm_i2c_init(void)
{
npcm_i2c_debugfs_dir = debugfs_create_dir("npcm_i2c", NULL);
- platform_driver_register(&npcm_i2c_bus_driver);
- return 0;
+ return platform_driver_register(&npcm_i2c_bus_driver);
}
module_init(npcm_i2c_init);
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index ac8e7d60672a..39cb1b7bb865 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -161,7 +161,6 @@ static const char *piix4_aux_port_name_sb800 = " port 1";
struct sb800_mmio_cfg {
void __iomem *addr;
- struct resource *res;
bool use_mmio;
};
@@ -179,13 +178,11 @@ static int piix4_sb800_region_request(struct device *dev,
struct sb800_mmio_cfg *mmio_cfg)
{
if (mmio_cfg->use_mmio) {
- struct resource *res;
void __iomem *addr;
- res = request_mem_region_muxed(SB800_PIIX4_FCH_PM_ADDR,
- SB800_PIIX4_FCH_PM_SIZE,
- "sb800_piix4_smb");
- if (!res) {
+ if (!request_mem_region_muxed(SB800_PIIX4_FCH_PM_ADDR,
+ SB800_PIIX4_FCH_PM_SIZE,
+ "sb800_piix4_smb")) {
dev_err(dev,
"SMBus base address memory region 0x%x already in use.\n",
SB800_PIIX4_FCH_PM_ADDR);
@@ -195,12 +192,12 @@ static int piix4_sb800_region_request(struct device *dev,
addr = ioremap(SB800_PIIX4_FCH_PM_ADDR,
SB800_PIIX4_FCH_PM_SIZE);
if (!addr) {
- release_resource(res);
+ release_mem_region(SB800_PIIX4_FCH_PM_ADDR,
+ SB800_PIIX4_FCH_PM_SIZE);
dev_err(dev, "SMBus base address mapping failed.\n");
return -ENOMEM;
}
- mmio_cfg->res = res;
mmio_cfg->addr = addr;
return 0;
@@ -222,7 +219,8 @@ static void piix4_sb800_region_release(struct device *dev,
{
if (mmio_cfg->use_mmio) {
iounmap(mmio_cfg->addr);
- release_resource(mmio_cfg->res);
+ release_mem_region(SB800_PIIX4_FCH_PM_ADDR,
+ SB800_PIIX4_FCH_PM_SIZE);
return;
}
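A sketch of the acquire/release pairing introduced above: request_mem_region_muxed() is balanced by release_mem_region() with the same base and size, so the struct resource pointer no longer needs to be stored (base/size here stand in for the SB800 constants).

	if (!request_mem_region_muxed(base, size, "example"))
		return -EBUSY;

	addr = ioremap(base, size);
	if (!addr) {
		release_mem_region(base, size);	/* same base/size as the request */
		return -ENOMEM;
	}
	/* teardown mirrors this: iounmap(addr); release_mem_region(base, size); */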
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index b9bb94bd0f67..f5c6802aa6c3 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -47,11 +47,13 @@
#include <linux/tick.h>
#include <trace/events/power.h>
#include <linux/sched.h>
+#include <linux/sched/smt.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/moduleparam.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
+#include <asm/nospec-branch.h>
#include <asm/mwait.h>
#include <asm/msr.h>
@@ -106,6 +108,12 @@ static unsigned int mwait_substates __initdata;
#define CPUIDLE_FLAG_ALWAYS_ENABLE BIT(15)
/*
+ * Disable IBRS across idle (when KERNEL_IBRS); mutually exclusive with
+ * IRQ_ENABLE above.
+ */
+#define CPUIDLE_FLAG_IBRS BIT(16)
+
+/*
* MWAIT takes an 8-bit "hint" in EAX "suggesting"
* the C-state (top nibble) and sub-state (bottom nibble)
* 0x00 means "MWAIT(C1)", 0x10 means "MWAIT(C2)" etc.
@@ -115,6 +123,18 @@ static unsigned int mwait_substates __initdata;
#define flg2MWAIT(flags) (((flags) >> 24) & 0xFF)
#define MWAIT2flg(eax) ((eax & 0xFF) << 24)
+static __always_inline int __intel_idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
+{
+ struct cpuidle_state *state = &drv->states[index];
+ unsigned long eax = flg2MWAIT(state->flags);
+ unsigned long ecx = 1; /* break on interrupt flag */
+
+ mwait_idle_with_hints(eax, ecx);
+
+ return index;
+}
+
/**
* intel_idle - Ask the processor to enter the given idle state.
* @dev: cpuidle device of the target CPU.
@@ -132,16 +152,37 @@ static unsigned int mwait_substates __initdata;
static __cpuidle int intel_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
- struct cpuidle_state *state = &drv->states[index];
- unsigned long eax = flg2MWAIT(state->flags);
- unsigned long ecx = 1; /* break on interrupt flag */
+ return __intel_idle(dev, drv, index);
+}
- if (state->flags & CPUIDLE_FLAG_IRQ_ENABLE)
- local_irq_enable();
+static __cpuidle int intel_idle_irq(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
+{
+ int ret;
- mwait_idle_with_hints(eax, ecx);
+ raw_local_irq_enable();
+ ret = __intel_idle(dev, drv, index);
+ raw_local_irq_disable();
- return index;
+ return ret;
+}
+
+static __cpuidle int intel_idle_ibrs(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
+{
+ bool smt_active = sched_smt_active();
+ u64 spec_ctrl = spec_ctrl_current();
+ int ret;
+
+ if (smt_active)
+ wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+
+ ret = __intel_idle(dev, drv, index);
+
+ if (smt_active)
+ wrmsrl(MSR_IA32_SPEC_CTRL, spec_ctrl);
+
+ return ret;
}
/**
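The save/clear/restore sequence in intel_idle_ibrs(), condensed: with SMT active, IBRS left set in MSR_IA32_SPEC_CTRL penalizes the sibling thread for as long as this CPU idles, so it is cleared before MWAIT and restored on wakeup.

	u64 saved = spec_ctrl_current();

	if (sched_smt_active())
		wrmsrl(MSR_IA32_SPEC_CTRL, 0);		/* drop IBRS across idle */

	mwait_idle_with_hints(eax, ecx);		/* may sleep indefinitely */

	if (sched_smt_active())
		wrmsrl(MSR_IA32_SPEC_CTRL, saved);	/* restore on wake */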
@@ -665,7 +706,7 @@ static struct cpuidle_state skl_cstates[] __initdata = {
{
.name = "C6",
.desc = "MWAIT 0x20",
- .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 85,
.target_residency = 200,
.enter = &intel_idle,
@@ -673,7 +714,7 @@ static struct cpuidle_state skl_cstates[] __initdata = {
{
.name = "C7s",
.desc = "MWAIT 0x33",
- .flags = MWAIT2flg(0x33) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .flags = MWAIT2flg(0x33) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 124,
.target_residency = 800,
.enter = &intel_idle,
@@ -681,7 +722,7 @@ static struct cpuidle_state skl_cstates[] __initdata = {
{
.name = "C8",
.desc = "MWAIT 0x40",
- .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 200,
.target_residency = 800,
.enter = &intel_idle,
@@ -689,7 +730,7 @@ static struct cpuidle_state skl_cstates[] __initdata = {
{
.name = "C9",
.desc = "MWAIT 0x50",
- .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 480,
.target_residency = 5000,
.enter = &intel_idle,
@@ -697,7 +738,7 @@ static struct cpuidle_state skl_cstates[] __initdata = {
{
.name = "C10",
.desc = "MWAIT 0x60",
- .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 890,
.target_residency = 5000,
.enter = &intel_idle,
@@ -726,7 +767,7 @@ static struct cpuidle_state skx_cstates[] __initdata = {
{
.name = "C6",
.desc = "MWAIT 0x20",
- .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 133,
.target_residency = 600,
.enter = &intel_idle,
@@ -1801,6 +1842,15 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
/* Structure copy. */
drv->states[drv->state_count] = cpuidle_state_table[cstate];
+ if (cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_IRQ_ENABLE)
+ drv->states[drv->state_count].enter = intel_idle_irq;
+
+ if (cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS) &&
+ cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_IBRS) {
+ WARN_ON_ONCE(cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_IRQ_ENABLE);
+ drv->states[drv->state_count].enter = intel_idle_ibrs;
+ }
+
if ((disabled_states_mask & BIT(drv->state_count)) ||
((icpu->use_acpi || force_use_acpi) &&
intel_idle_off_by_default(mwait_hint) &&
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index 4f73bc827eec..9c9e98578667 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -1006,11 +1006,12 @@ static int bma180_probe(struct i2c_client *client,
data->trig->ops = &bma180_trigger_ops;
iio_trigger_set_drvdata(data->trig, indio_dev);
- indio_dev->trig = iio_trigger_get(data->trig);
ret = iio_trigger_register(data->trig);
if (ret)
goto err_trigger_free;
+
+ indio_dev->trig = iio_trigger_get(data->trig);
}
ret = iio_triggered_buffer_setup(indio_dev, NULL,
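The same ordering fix recurs in several IIO drivers below (kxcjk-1013, mxc4005, ccs811, hts221): take the extra trigger reference only after registration succeeds, so a failed probe does not leak it. Condensed:

	ret = iio_trigger_register(trig);
	if (ret)
		goto err_trigger_free;	/* no extra reference to drop */

	indio_dev->trig = iio_trigger_get(trig);	/* ref only on success */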
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index ac74cdcd2bc8..748b35c2f0c3 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -1554,12 +1554,12 @@ static int kxcjk1013_probe(struct i2c_client *client,
data->dready_trig->ops = &kxcjk1013_trigger_ops;
iio_trigger_set_drvdata(data->dready_trig, indio_dev);
- indio_dev->trig = data->dready_trig;
- iio_trigger_get(indio_dev->trig);
ret = iio_trigger_register(data->dready_trig);
if (ret)
goto err_poweroff;
+ indio_dev->trig = iio_trigger_get(data->dready_trig);
+
data->motion_trig->ops = &kxcjk1013_trigger_ops;
iio_trigger_set_drvdata(data->motion_trig, indio_dev);
ret = iio_trigger_register(data->motion_trig);
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index 912a447e6310..c7d9ca96dbaa 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -1511,10 +1511,14 @@ static int mma8452_reset(struct i2c_client *client)
int i;
int ret;
- ret = i2c_smbus_write_byte_data(client, MMA8452_CTRL_REG2,
+ /*
+	 * On fxls8471, the chip resets immediately once the reset bit is
+	 * set and does not ACK, so do not check the return value here.
+	 * The code below reads back the reset register to check whether
+	 * the reset actually worked.
+ */
+ i2c_smbus_write_byte_data(client, MMA8452_CTRL_REG2,
MMA8452_CTRL_REG2_RST);
- if (ret < 0)
- return ret;
for (i = 0; i < 10; i++) {
usleep_range(100, 200);
@@ -1557,11 +1561,13 @@ static int mma8452_probe(struct i2c_client *client,
mutex_init(&data->lock);
data->chip_info = device_get_match_data(&client->dev);
- if (!data->chip_info && id) {
- data->chip_info = &mma_chip_info_table[id->driver_data];
- } else {
- dev_err(&client->dev, "unknown device model\n");
- return -ENODEV;
+ if (!data->chip_info) {
+ if (id) {
+ data->chip_info = &mma_chip_info_table[id->driver_data];
+ } else {
+ dev_err(&client->dev, "unknown device model\n");
+ return -ENODEV;
+ }
}
ret = iio_read_mount_matrix(&client->dev, &data->orientation);
diff --git a/drivers/iio/accel/mxc4005.c b/drivers/iio/accel/mxc4005.c
index b3afbf064915..df600d2917c0 100644
--- a/drivers/iio/accel/mxc4005.c
+++ b/drivers/iio/accel/mxc4005.c
@@ -456,8 +456,6 @@ static int mxc4005_probe(struct i2c_client *client,
data->dready_trig->ops = &mxc4005_trigger_ops;
iio_trigger_set_drvdata(data->dready_trig, indio_dev);
- indio_dev->trig = data->dready_trig;
- iio_trigger_get(indio_dev->trig);
ret = devm_iio_trigger_register(&client->dev,
data->dready_trig);
if (ret) {
@@ -465,6 +463,8 @@ static int mxc4005_probe(struct i2c_client *client,
"failed to register trigger\n");
return ret;
}
+
+ indio_dev->trig = iio_trigger_get(data->dready_trig);
}
return devm_iio_device_register(&client->dev, indio_dev);
diff --git a/drivers/iio/adc/adi-axi-adc.c b/drivers/iio/adc/adi-axi-adc.c
index a73e3c2d212f..a9e655e69eaa 100644
--- a/drivers/iio/adc/adi-axi-adc.c
+++ b/drivers/iio/adc/adi-axi-adc.c
@@ -322,16 +322,19 @@ static struct adi_axi_adc_client *adi_axi_adc_attach_client(struct device *dev)
if (!try_module_get(cl->dev->driver->owner)) {
mutex_unlock(&registered_clients_lock);
+ of_node_put(cln);
return ERR_PTR(-ENODEV);
}
get_device(cl->dev);
cl->info = info;
mutex_unlock(&registered_clients_lock);
+ of_node_put(cln);
return cl;
}
mutex_unlock(&registered_clients_lock);
+ of_node_put(cln);
return ERR_PTR(-EPROBE_DEFER);
}
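A sketch of the refcount rule behind these of_node_put() additions (names illustrative): OF lookups such as of_parse_phandle() return the node with its refcount raised, and every exit path, including early errors, must drop it.

	struct device_node *np;

	np = of_parse_phandle(dev->of_node, "example-phandle", 0);
	if (!np)
		return -ENODEV;

	ret = consume_node(np);	/* hypothetical consumer */
	of_node_put(np);	/* balance the implicit get on every path */
	return ret;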
diff --git a/drivers/iio/adc/aspeed_adc.c b/drivers/iio/adc/aspeed_adc.c
index 0793d2474cdc..9341e0e0eb55 100644
--- a/drivers/iio/adc/aspeed_adc.c
+++ b/drivers/iio/adc/aspeed_adc.c
@@ -186,6 +186,7 @@ static int aspeed_adc_set_trim_data(struct iio_dev *indio_dev)
return -EOPNOTSUPP;
}
scu = syscon_node_to_regmap(syscon);
+ of_node_put(syscon);
if (IS_ERR(scu)) {
dev_warn(data->dev, "Failed to get syscon regmap\n");
return -EOPNOTSUPP;
diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c
index a4b8be5b8f88..580361bd9849 100644
--- a/drivers/iio/adc/axp288_adc.c
+++ b/drivers/iio/adc/axp288_adc.c
@@ -196,6 +196,14 @@ static const struct dmi_system_id axp288_adc_ts_bias_override[] = {
},
.driver_data = (void *)(uintptr_t)AXP288_ADC_TS_BIAS_80UA,
},
+ {
+ /* Nuvision Solo 10 Draw */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TMAX"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TM101W610L"),
+ },
+ .driver_data = (void *)(uintptr_t)AXP288_ADC_TS_BIAS_80UA,
+ },
{}
};
diff --git a/drivers/iio/adc/rzg2l_adc.c b/drivers/iio/adc/rzg2l_adc.c
index 7585144b9715..5b09a93fdf34 100644
--- a/drivers/iio/adc/rzg2l_adc.c
+++ b/drivers/iio/adc/rzg2l_adc.c
@@ -334,11 +334,15 @@ static int rzg2l_adc_parse_properties(struct platform_device *pdev, struct rzg2l
i = 0;
device_for_each_child_node(&pdev->dev, fwnode) {
ret = fwnode_property_read_u32(fwnode, "reg", &channel);
- if (ret)
+ if (ret) {
+ fwnode_handle_put(fwnode);
return ret;
+ }
- if (channel >= RZG2L_ADC_MAX_CHANNELS)
+ if (channel >= RZG2L_ADC_MAX_CHANNELS) {
+ fwnode_handle_put(fwnode);
return -EINVAL;
+ }
chan_array[i].type = IIO_VOLTAGE;
chan_array[i].indexed = 1;
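The fwnode_handle_put() calls added here follow the iterator contract, sketched below: device_for_each_child_node() holds a reference on the current child that is dropped when the loop advances, so any early return must drop it by hand.

	device_for_each_child_node(dev, child) {
		if (fwnode_property_read_u32(child, "reg", &val)) {
			fwnode_handle_put(child);	/* early exit: drop the ref */
			return -EINVAL;
		}
		/* normal iteration releases the reference automatically */
	}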
diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
index 142656232157..3efb8c404ccc 100644
--- a/drivers/iio/adc/stm32-adc-core.c
+++ b/drivers/iio/adc/stm32-adc-core.c
@@ -64,6 +64,7 @@ struct stm32_adc_priv;
* @max_clk_rate_hz: maximum analog clock rate (Hz, from datasheet)
* @has_syscfg: SYSCFG capability flags
* @num_irqs: number of interrupt lines
+ * @num_adcs: maximum number of ADC instances in the common registers
*/
struct stm32_adc_priv_cfg {
const struct stm32_adc_common_regs *regs;
@@ -71,6 +72,7 @@ struct stm32_adc_priv_cfg {
u32 max_clk_rate_hz;
unsigned int has_syscfg;
unsigned int num_irqs;
+ unsigned int num_adcs;
};
/**
@@ -352,7 +354,7 @@ static void stm32_adc_irq_handler(struct irq_desc *desc)
* before invoking the interrupt handler (e.g. call ISR only for
* IRQ-enabled ADCs).
*/
- for (i = 0; i < priv->cfg->num_irqs; i++) {
+ for (i = 0; i < priv->cfg->num_adcs; i++) {
if ((status & priv->cfg->regs->eoc_msk[i] &&
stm32_adc_eoc_enabled(priv, i)) ||
(status & priv->cfg->regs->ovr_msk[i]))
@@ -792,6 +794,7 @@ static const struct stm32_adc_priv_cfg stm32f4_adc_priv_cfg = {
.clk_sel = stm32f4_adc_clk_sel,
.max_clk_rate_hz = 36000000,
.num_irqs = 1,
+ .num_adcs = 3,
};
static const struct stm32_adc_priv_cfg stm32h7_adc_priv_cfg = {
@@ -800,14 +803,16 @@ static const struct stm32_adc_priv_cfg stm32h7_adc_priv_cfg = {
.max_clk_rate_hz = 36000000,
.has_syscfg = HAS_VBOOSTER,
.num_irqs = 1,
+ .num_adcs = 2,
};
static const struct stm32_adc_priv_cfg stm32mp1_adc_priv_cfg = {
.regs = &stm32h7_adc_common_regs,
.clk_sel = stm32h7_adc_clk_sel,
- .max_clk_rate_hz = 40000000,
+ .max_clk_rate_hz = 36000000,
.has_syscfg = HAS_VBOOSTER | HAS_ANASWVDD,
.num_irqs = 2,
+ .num_adcs = 2,
};
static const struct of_device_id stm32_adc_of_match[] = {
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index a68ecbda6480..11ef873d6453 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -1365,7 +1365,7 @@ static int stm32_adc_read_raw(struct iio_dev *indio_dev,
else
ret = -EINVAL;
- if (mask == IIO_CHAN_INFO_PROCESSED && adc->vrefint.vrefint_cal)
+ if (mask == IIO_CHAN_INFO_PROCESSED)
*val = STM32_ADC_VREFINT_VOLTAGE * adc->vrefint.vrefint_cal / *val;
iio_device_release_direct_mode(indio_dev);
@@ -1407,7 +1407,6 @@ static irqreturn_t stm32_adc_threaded_isr(int irq, void *data)
struct stm32_adc *adc = iio_priv(indio_dev);
const struct stm32_adc_regspec *regs = adc->cfg->regs;
u32 status = stm32_adc_readl(adc, regs->isr_eoc.reg);
- u32 mask = stm32_adc_readl(adc, regs->ier_eoc.reg);
/* Check ovr status right now, as ovr mask should be already disabled */
if (status & regs->isr_ovr.mask) {
@@ -1422,11 +1421,6 @@ static irqreturn_t stm32_adc_threaded_isr(int irq, void *data)
return IRQ_HANDLED;
}
- if (!(status & mask))
- dev_err_ratelimited(&indio_dev->dev,
- "Unexpected IRQ: IER=0x%08x, ISR=0x%08x\n",
- mask, status);
-
return IRQ_NONE;
}
@@ -1436,10 +1430,6 @@ static irqreturn_t stm32_adc_isr(int irq, void *data)
struct stm32_adc *adc = iio_priv(indio_dev);
const struct stm32_adc_regspec *regs = adc->cfg->regs;
u32 status = stm32_adc_readl(adc, regs->isr_eoc.reg);
- u32 mask = stm32_adc_readl(adc, regs->ier_eoc.reg);
-
- if (!(status & mask))
- return IRQ_WAKE_THREAD;
if (status & regs->isr_ovr.mask) {
/*
@@ -1979,10 +1969,10 @@ static int stm32_adc_populate_int_ch(struct iio_dev *indio_dev, const char *ch_n
for (i = 0; i < STM32_ADC_INT_CH_NB; i++) {
if (!strncmp(stm32_adc_ic[i].name, ch_name, STM32_ADC_CH_SZ)) {
- adc->int_ch[i] = chan;
-
- if (stm32_adc_ic[i].idx != STM32_ADC_INT_CH_VREFINT)
- continue;
+ if (stm32_adc_ic[i].idx != STM32_ADC_INT_CH_VREFINT) {
+ adc->int_ch[i] = chan;
+ break;
+ }
/* Get calibration data for vrefint channel */
ret = nvmem_cell_read_u16(&indio_dev->dev, "vrefint", &vrefint);
@@ -1990,10 +1980,15 @@ static int stm32_adc_populate_int_ch(struct iio_dev *indio_dev, const char *ch_n
return dev_err_probe(indio_dev->dev.parent, ret,
"nvmem access error\n");
}
- if (ret == -ENOENT)
- dev_dbg(&indio_dev->dev, "vrefint calibration not found\n");
- else
- adc->vrefint.vrefint_cal = vrefint;
+ if (ret == -ENOENT) {
+ dev_dbg(&indio_dev->dev, "vrefint calibration not found. Skip vrefint channel\n");
+ return ret;
+ } else if (!vrefint) {
+ dev_dbg(&indio_dev->dev, "Null vrefint calibration value. Skip vrefint channel\n");
+ return -ENOENT;
+ }
+ adc->int_ch[i] = chan;
+ adc->vrefint.vrefint_cal = vrefint;
}
}
@@ -2030,7 +2025,9 @@ static int stm32_adc_generic_chan_init(struct iio_dev *indio_dev,
}
strncpy(adc->chan_name[val], name, STM32_ADC_CH_SZ);
ret = stm32_adc_populate_int_ch(indio_dev, name, val);
- if (ret)
+ if (ret == -ENOENT)
+ continue;
+ else if (ret)
goto err;
} else if (ret != -EINVAL) {
dev_err(&indio_dev->dev, "Invalid label %d\n", ret);
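Taken together, the stm32-adc hunks make a missing or zero vrefint calibration non-fatal: the internal channel is simply not registered, which is also why read_raw no longer tests vrefint_cal (an unset calibration can no longer produce a bogus zero result). The policy, condensed, assuming the caller treats -ENOENT as "skip this channel":

	ret = nvmem_cell_read_u16(dev, "vrefint", &vrefint);
	if (ret == -ENOENT || (!ret && !vrefint))
		return -ENOENT;	/* no or null calibration: skip channel */
	if (ret)
		return ret;	/* real nvmem error */
	adc->vrefint.vrefint_cal = vrefint;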
diff --git a/drivers/iio/adc/ti-ads131e08.c b/drivers/iio/adc/ti-ads131e08.c
index 0c2025a22575..80a09817c119 100644
--- a/drivers/iio/adc/ti-ads131e08.c
+++ b/drivers/iio/adc/ti-ads131e08.c
@@ -739,7 +739,7 @@ static int ads131e08_alloc_channels(struct iio_dev *indio_dev)
device_for_each_child_node(dev, node) {
ret = fwnode_property_read_u32(node, "reg", &channel);
if (ret)
- return ret;
+ goto err_child_out;
ret = fwnode_property_read_u32(node, "ti,gain", &tmp);
if (ret) {
@@ -747,7 +747,7 @@ static int ads131e08_alloc_channels(struct iio_dev *indio_dev)
} else {
ret = ads131e08_pga_gain_to_field_value(st, tmp);
if (ret < 0)
- return ret;
+ goto err_child_out;
channel_config[i].pga_gain = tmp;
}
@@ -758,7 +758,7 @@ static int ads131e08_alloc_channels(struct iio_dev *indio_dev)
} else {
ret = ads131e08_validate_channel_mux(st, tmp);
if (ret)
- return ret;
+ goto err_child_out;
channel_config[i].mux = tmp;
}
@@ -784,6 +784,10 @@ static int ads131e08_alloc_channels(struct iio_dev *indio_dev)
st->channel_config = channel_config;
return 0;
+
+err_child_out:
+ fwnode_handle_put(node);
+ return ret;
}
static void ads131e08_regulator_disable(void *data)
diff --git a/drivers/iio/adc/xilinx-ams.c b/drivers/iio/adc/xilinx-ams.c
index a55396c1f8b2..a7687706012d 100644
--- a/drivers/iio/adc/xilinx-ams.c
+++ b/drivers/iio/adc/xilinx-ams.c
@@ -1409,7 +1409,7 @@ static int ams_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0)
- return ret;
+ return irq;
ret = devm_request_irq(&pdev->dev, irq, &ams_irq, 0, "ams-irq",
indio_dev);
diff --git a/drivers/iio/afe/iio-rescale.c b/drivers/iio/afe/iio-rescale.c
index c6cf709f0f05..6949d2151025 100644
--- a/drivers/iio/afe/iio-rescale.c
+++ b/drivers/iio/afe/iio-rescale.c
@@ -277,7 +277,7 @@ static int rescale_configure_channel(struct device *dev,
chan->ext_info = rescale->ext_info;
chan->type = rescale->cfg->type;
- if (iio_channel_has_info(schan, IIO_CHAN_INFO_RAW) ||
+ if (iio_channel_has_info(schan, IIO_CHAN_INFO_RAW) &&
iio_channel_has_info(schan, IIO_CHAN_INFO_SCALE)) {
dev_info(dev, "using raw+scale source channel\n");
} else if (iio_channel_has_info(schan, IIO_CHAN_INFO_PROCESSED)) {
diff --git a/drivers/iio/chemical/ccs811.c b/drivers/iio/chemical/ccs811.c
index 847194fa1e46..80ef1aa9aae3 100644
--- a/drivers/iio/chemical/ccs811.c
+++ b/drivers/iio/chemical/ccs811.c
@@ -499,11 +499,11 @@ static int ccs811_probe(struct i2c_client *client,
data->drdy_trig->ops = &ccs811_trigger_ops;
iio_trigger_set_drvdata(data->drdy_trig, indio_dev);
- indio_dev->trig = data->drdy_trig;
- iio_trigger_get(indio_dev->trig);
ret = iio_trigger_register(data->drdy_trig);
if (ret)
goto err_poweroff;
+
+ indio_dev->trig = iio_trigger_get(data->drdy_trig);
}
ret = iio_triggered_buffer_setup(indio_dev, NULL,
diff --git a/drivers/iio/frequency/admv1014.c b/drivers/iio/frequency/admv1014.c
index a7994f8e6b9b..1aac5665b5de 100644
--- a/drivers/iio/frequency/admv1014.c
+++ b/drivers/iio/frequency/admv1014.c
@@ -700,8 +700,10 @@ static int admv1014_init(struct admv1014_state *st)
ADMV1014_DET_EN_MSK;
enable_reg = FIELD_PREP(ADMV1014_P1DB_COMPENSATION_MSK, st->p1db_comp ? 3 : 0) |
- FIELD_PREP(ADMV1014_IF_AMP_PD_MSK, !(st->input_mode)) |
- FIELD_PREP(ADMV1014_BB_AMP_PD_MSK, st->input_mode) |
+ FIELD_PREP(ADMV1014_IF_AMP_PD_MSK,
+ (st->input_mode == ADMV1014_IF_MODE) ? 0 : 1) |
+ FIELD_PREP(ADMV1014_BB_AMP_PD_MSK,
+ (st->input_mode == ADMV1014_IF_MODE) ? 1 : 0) |
FIELD_PREP(ADMV1014_DET_EN_MSK, st->det_en);
return __admv1014_spi_update_bits(st, ADMV1014_REG_ENABLE, enable_reg_msk, enable_reg);
diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c
index 4f19dc7ffe57..5908a96ca8af 100644
--- a/drivers/iio/gyro/mpu3050-core.c
+++ b/drivers/iio/gyro/mpu3050-core.c
@@ -875,6 +875,7 @@ static int mpu3050_power_up(struct mpu3050 *mpu3050)
ret = regmap_update_bits(mpu3050->map, MPU3050_PWR_MGM,
MPU3050_PWR_MGM_SLEEP, 0);
if (ret) {
+ regulator_bulk_disable(ARRAY_SIZE(mpu3050->regs), mpu3050->regs);
dev_err(mpu3050->dev, "error setting power mode\n");
return ret;
}
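The rule behind this one-liner, sketched: anything enabled earlier in the function must be disabled on every later error path, or a failed power-up leaves the regulators on.

	ret = regulator_bulk_enable(ARRAY_SIZE(regs), regs);
	if (ret)
		return ret;

	ret = regmap_update_bits(map, MPU3050_PWR_MGM, MPU3050_PWR_MGM_SLEEP, 0);
	if (ret) {
		regulator_bulk_disable(ARRAY_SIZE(regs), regs);	/* undo enable */
		return ret;
	}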
diff --git a/drivers/iio/humidity/hts221_buffer.c b/drivers/iio/humidity/hts221_buffer.c
index f29692b9d2db..66b32413cf5e 100644
--- a/drivers/iio/humidity/hts221_buffer.c
+++ b/drivers/iio/humidity/hts221_buffer.c
@@ -135,9 +135,12 @@ int hts221_allocate_trigger(struct iio_dev *iio_dev)
iio_trigger_set_drvdata(hw->trig, iio_dev);
hw->trig->ops = &hts221_trigger_ops;
+
+ err = devm_iio_trigger_register(hw->dev, hw->trig);
+
iio_dev->trig = iio_trigger_get(hw->trig);
- return devm_iio_trigger_register(hw->dev, hw->trig);
+ return err;
}
static int hts221_buffer_preenable(struct iio_dev *iio_dev)
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600.h b/drivers/iio/imu/inv_icm42600/inv_icm42600.h
index c0f5059b13b3..995a9dc06521 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600.h
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600.h
@@ -17,6 +17,7 @@
#include "inv_icm42600_buffer.h"
enum inv_icm42600_chip {
+ INV_CHIP_INVALID,
INV_CHIP_ICM42600,
INV_CHIP_ICM42602,
INV_CHIP_ICM42605,
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
index 86858da9cc38..ca85fccc9839 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
@@ -565,7 +565,7 @@ int inv_icm42600_core_probe(struct regmap *regmap, int chip, int irq,
bool open_drain;
int ret;
- if (chip < 0 || chip >= INV_CHIP_NB) {
+ if (chip <= INV_CHIP_INVALID || chip >= INV_CHIP_NB) {
dev_err(dev, "invalid chip = %d\n", chip);
return -ENODEV;
}
diff --git a/drivers/iio/magnetometer/yamaha-yas530.c b/drivers/iio/magnetometer/yamaha-yas530.c
index 9ff7b0e56cf6..b2bc637150bf 100644
--- a/drivers/iio/magnetometer/yamaha-yas530.c
+++ b/drivers/iio/magnetometer/yamaha-yas530.c
@@ -639,7 +639,7 @@ static int yas532_get_calibration_data(struct yas5xx *yas5xx)
dev_dbg(yas5xx->dev, "calibration data: %*ph\n", 14, data);
/* Sanity check, is this all zeroes? */
- if (memchr_inv(data, 0x00, 13)) {
+ if (memchr_inv(data, 0x00, 13) == NULL) {
if (!(data[13] & BIT(7)))
dev_warn(yas5xx->dev, "calibration is blank!\n");
}
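The fix flips the sense of the test: memchr_inv(buf, 0x00, n) returns NULL when all n bytes are zero, so only "== NULL" detects blank calibration data. A small userspace analogue (memchr_inv reimplemented here purely for illustration):

	#include <stdio.h>
	#include <stddef.h>

	/* same contract as the kernel's memchr_inv() */
	static const void *memchr_inv_ex(const void *s, int c, size_t n)
	{
		const unsigned char *p = s;

		for (; n; n--, p++)
			if (*p != (unsigned char)c)
				return p;	/* first byte differing from c */
		return NULL;			/* all bytes equal c */
	}

	int main(void)
	{
		unsigned char data[13] = { 0 };

		if (memchr_inv_ex(data, 0x00, sizeof(data)) == NULL)
			printf("calibration is blank!\n");
		return 0;
	}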
diff --git a/drivers/iio/proximity/sx9324.c b/drivers/iio/proximity/sx9324.c
index 70c37f664f6d..63fbcaa4cac8 100644
--- a/drivers/iio/proximity/sx9324.c
+++ b/drivers/iio/proximity/sx9324.c
@@ -885,6 +885,9 @@ sx9324_get_default_reg(struct device *dev, int idx,
break;
ret = device_property_read_u32_array(dev, prop, pin_defs,
ARRAY_SIZE(pin_defs));
+ if (ret)
+ break;
+
for (pin = 0; pin < SX9324_NUM_PINS; pin++)
raw |= (pin_defs[pin] << (2 * pin)) &
SX9324_REG_AFE_PH0_PIN_MASK(pin);
diff --git a/drivers/iio/test/Kconfig b/drivers/iio/test/Kconfig
index 56ca0ad7e77a..4c66c3f18c34 100644
--- a/drivers/iio/test/Kconfig
+++ b/drivers/iio/test/Kconfig
@@ -6,7 +6,7 @@
# Keep in alphabetical order
config IIO_RESCALE_KUNIT_TEST
bool "Test IIO rescale conversion functions"
- depends on KUNIT=y && !IIO_RESCALE
+ depends on KUNIT=y && IIO_RESCALE=y
default KUNIT_ALL_TESTS
help
If you want to run tests on the iio-rescale code say Y here.
diff --git a/drivers/iio/test/Makefile b/drivers/iio/test/Makefile
index f15ae0a6394f..880360f8d02c 100644
--- a/drivers/iio/test/Makefile
+++ b/drivers/iio/test/Makefile
@@ -4,6 +4,6 @@
#
# Keep in alphabetical order
-obj-$(CONFIG_IIO_RESCALE_KUNIT_TEST) += iio-test-rescale.o ../afe/iio-rescale.o
+obj-$(CONFIG_IIO_RESCALE_KUNIT_TEST) += iio-test-rescale.o
obj-$(CONFIG_IIO_TEST_FORMAT) += iio-test-format.o
CFLAGS_iio-test-format.o += $(DISABLE_STRUCTLEAK_PLUGIN)
diff --git a/drivers/iio/trigger/iio-trig-sysfs.c b/drivers/iio/trigger/iio-trig-sysfs.c
index f1a8704e6cc1..d6c5e9644738 100644
--- a/drivers/iio/trigger/iio-trig-sysfs.c
+++ b/drivers/iio/trigger/iio-trig-sysfs.c
@@ -190,6 +190,7 @@ static int iio_sysfs_trigger_remove(int id)
}
iio_trigger_unregister(t->trig);
+ irq_work_sync(&t->work);
iio_trigger_free(t->trig);
list_del(&t->l);
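The teardown ordering added here, condensed: a sysfs trigger fires through irq_work, so the work must be synced after unregistering and before freeing, or a late work item could touch freed memory.

	iio_trigger_unregister(t->trig);	/* no new work after this */
	irq_work_sync(&t->work);		/* wait out in-flight work */
	iio_trigger_free(t->trig);		/* now safe to free */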
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 1c107d6d03b9..b985e0d9bc05 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -1252,8 +1252,10 @@ struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
return ERR_CAST(cm_id_priv);
err = cm_init_listen(cm_id_priv, service_id, 0);
- if (err)
+ if (err) {
+ ib_destroy_cm_id(&cm_id_priv->id);
return ERR_PTR(err);
+ }
spin_lock_irq(&cm_id_priv->lock);
listen_id_priv = cm_insert_listen(cm_id_priv, cm_handler);
diff --git a/drivers/infiniband/hw/irdma/cm.c b/drivers/infiniband/hw/irdma/cm.c
index 638bf4a1ed94..646fa8677490 100644
--- a/drivers/infiniband/hw/irdma/cm.c
+++ b/drivers/infiniband/hw/irdma/cm.c
@@ -4231,10 +4231,6 @@ void irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr,
struct irdma_cm_node *cm_node;
struct list_head teardown_list;
struct ib_qp_attr attr;
- struct irdma_sc_vsi *vsi = &iwdev->vsi;
- struct irdma_sc_qp *sc_qp;
- struct irdma_qp *qp;
- int i;
INIT_LIST_HEAD(&teardown_list);
@@ -4251,52 +4247,6 @@ void irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr,
irdma_cm_disconn(cm_node->iwqp);
irdma_rem_ref_cm_node(cm_node);
}
- if (!iwdev->roce_mode)
- return;
-
- INIT_LIST_HEAD(&teardown_list);
- for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
- mutex_lock(&vsi->qos[i].qos_mutex);
- list_for_each_safe (list_node, list_core_temp,
- &vsi->qos[i].qplist) {
- u32 qp_ip[4];
-
- sc_qp = container_of(list_node, struct irdma_sc_qp,
- list);
- if (sc_qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_RC)
- continue;
-
- qp = sc_qp->qp_uk.back_qp;
- if (!disconnect_all) {
- if (nfo->ipv4)
- qp_ip[0] = qp->udp_info.local_ipaddr[3];
- else
- memcpy(qp_ip,
- &qp->udp_info.local_ipaddr[0],
- sizeof(qp_ip));
- }
-
- if (disconnect_all ||
- (nfo->vlan_id == (qp->udp_info.vlan_tag & VLAN_VID_MASK) &&
- !memcmp(qp_ip, ipaddr, nfo->ipv4 ? 4 : 16))) {
- spin_lock(&iwdev->rf->qptable_lock);
- if (iwdev->rf->qp_table[sc_qp->qp_uk.qp_id]) {
- irdma_qp_add_ref(&qp->ibqp);
- list_add(&qp->teardown_entry,
- &teardown_list);
- }
- spin_unlock(&iwdev->rf->qptable_lock);
- }
- }
- mutex_unlock(&vsi->qos[i].qos_mutex);
- }
-
- list_for_each_safe (list_node, list_core_temp, &teardown_list) {
- qp = container_of(list_node, struct irdma_qp, teardown_entry);
- attr.qp_state = IB_QPS_ERR;
- irdma_modify_qp_roce(&qp->ibqp, &attr, IB_QP_STATE, NULL);
- irdma_qp_rem_ref(&qp->ibqp);
- }
}
/**
diff --git a/drivers/infiniband/hw/irdma/i40iw_hw.c b/drivers/infiniband/hw/irdma/i40iw_hw.c
index e46fc110004d..50299f58b6b3 100644
--- a/drivers/infiniband/hw/irdma/i40iw_hw.c
+++ b/drivers/infiniband/hw/irdma/i40iw_hw.c
@@ -201,6 +201,7 @@ void i40iw_init_hw(struct irdma_sc_dev *dev)
dev->hw_attrs.uk_attrs.max_hw_read_sges = I40IW_MAX_SGE_RD;
dev->hw_attrs.max_hw_device_pages = I40IW_MAX_PUSH_PAGE_COUNT;
dev->hw_attrs.uk_attrs.max_hw_inline = I40IW_MAX_INLINE_DATA_SIZE;
+ dev->hw_attrs.page_size_cap = SZ_4K | SZ_2M;
dev->hw_attrs.max_hw_ird = I40IW_MAX_IRD_SIZE;
dev->hw_attrs.max_hw_ord = I40IW_MAX_ORD_SIZE;
dev->hw_attrs.max_hw_wqes = I40IW_MAX_WQ_ENTRIES;
diff --git a/drivers/infiniband/hw/irdma/icrdma_hw.c b/drivers/infiniband/hw/irdma/icrdma_hw.c
index cf53b17510cd..5986fd906308 100644
--- a/drivers/infiniband/hw/irdma/icrdma_hw.c
+++ b/drivers/infiniband/hw/irdma/icrdma_hw.c
@@ -139,6 +139,7 @@ void icrdma_init_hw(struct irdma_sc_dev *dev)
dev->cqp_db = dev->hw_regs[IRDMA_CQPDB];
dev->cq_ack_db = dev->hw_regs[IRDMA_CQACK];
dev->irq_ops = &icrdma_irq_ops;
+ dev->hw_attrs.page_size_cap = SZ_4K | SZ_2M | SZ_1G;
dev->hw_attrs.max_hw_ird = ICRDMA_MAX_IRD_SIZE;
dev->hw_attrs.max_hw_ord = ICRDMA_MAX_ORD_SIZE;
dev->hw_attrs.max_stat_inst = ICRDMA_MAX_STATS_COUNT;
diff --git a/drivers/infiniband/hw/irdma/irdma.h b/drivers/infiniband/hw/irdma/irdma.h
index 46c12334c735..4789e85d717b 100644
--- a/drivers/infiniband/hw/irdma/irdma.h
+++ b/drivers/infiniband/hw/irdma/irdma.h
@@ -127,6 +127,7 @@ struct irdma_hw_attrs {
u64 max_hw_outbound_msg_size;
u64 max_hw_inbound_msg_size;
u64 max_mr_size;
+ u64 page_size_cap;
u32 min_hw_qp_id;
u32 min_hw_aeq_size;
u32 max_hw_aeq_size;
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index c4412ece5a6d..96135a228f26 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -32,7 +32,7 @@ static int irdma_query_device(struct ib_device *ibdev,
props->vendor_part_id = pcidev->device;
props->hw_ver = rf->pcidev->revision;
- props->page_size_cap = SZ_4K | SZ_2M | SZ_1G;
+ props->page_size_cap = hw_attrs->page_size_cap;
props->max_mr_size = hw_attrs->max_mr_size;
props->max_qp = rf->max_qp - rf->used_qps;
props->max_qp_wr = hw_attrs->max_qp_wr;
@@ -2781,7 +2781,7 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
if (req.reg_type == IRDMA_MEMREG_TYPE_MEM) {
iwmr->page_size = ib_umem_find_best_pgsz(region,
- SZ_4K | SZ_2M | SZ_1G,
+ iwdev->rf->sc_dev.hw_attrs.page_size_cap,
virt);
if (unlikely(!iwmr->page_size)) {
kfree(iwmr);
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index 8def88cfa300..db9ef3e1eb97 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -418,6 +418,7 @@ struct qedr_qp {
u32 sq_psn;
u32 qkey;
u32 dest_qp_num;
+ u8 timeout;
/* Relevant to qps created from kernel space only (ULPs) */
u8 prev_wqe_size;
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index f0f43b6db89e..03ed7c0fae50 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -2613,6 +2613,8 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1 << max_t(int, attr->timeout - 8, 0);
else
qp_params.ack_timeout = 0;
+
+ qp->timeout = attr->timeout;
}
if (attr_mask & IB_QP_RETRY_CNT) {
@@ -2772,7 +2774,7 @@ int qedr_query_qp(struct ib_qp *ibqp,
rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid.bytes[0]);
rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
rdma_ah_set_sl(&qp_attr->ah_attr, 0);
- qp_attr->timeout = params.timeout;
+ qp_attr->timeout = qp->timeout;
qp_attr->rnr_retry = params.rnr_retry;
qp_attr->retry_cnt = params.retry_cnt;
qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig
index 505a032e2786..9dcf3f51f2dd 100644
--- a/drivers/input/joystick/Kconfig
+++ b/drivers/input/joystick/Kconfig
@@ -402,6 +402,7 @@ config JOYSTICK_N64
config JOYSTICK_SENSEHAT
tristate "Raspberry Pi Sense HAT joystick"
depends on INPUT && I2C
+ depends on HAS_IOMEM
select MFD_SIMPLE_MFD_I2C
help
Say Y here if you want to enable the driver for the
diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c
index cbb1599a520e..480476121c01 100644
--- a/drivers/input/misc/soc_button_array.c
+++ b/drivers/input/misc/soc_button_array.c
@@ -85,13 +85,13 @@ static const struct dmi_system_id dmi_use_low_level_irq[] = {
},
{
/*
- * Lenovo Yoga Tab2 1051L, something messes with the home-button
+ * Lenovo Yoga Tab2 1051F/1051L, something messes with the home-button
		 * IRQ settings, leading to a non-working home-button.
*/
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "60073"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "1051L"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "1051"),
},
},
{} /* Terminating entry */
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index 59a14505b9cd..ca150618d32f 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -942,17 +942,22 @@ static int bcm5974_probe(struct usb_interface *iface,
if (!dev->tp_data)
goto err_free_bt_buffer;
- if (dev->bt_urb)
+ if (dev->bt_urb) {
usb_fill_int_urb(dev->bt_urb, udev,
usb_rcvintpipe(udev, cfg->bt_ep),
dev->bt_data, dev->cfg.bt_datalen,
bcm5974_irq_button, dev, 1);
+ dev->bt_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ }
+
usb_fill_int_urb(dev->tp_urb, udev,
usb_rcvintpipe(udev, cfg->tp_ep),
dev->tp_data, dev->cfg.tp_datalen,
bcm5974_irq_trackpad, dev, 1);
+ dev->tp_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
/* create bcm5974 device */
usb_make_path(udev, dev->phys, sizeof(dev->phys));
strlcat(dev->phys, "/input0", sizeof(dev->phys));
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index 3ad9870db108..aa45a9fee6a0 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -900,6 +900,11 @@ static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts)
} else {
dev_warn(dev, "Unexpected ACPI resources: gpio_count %d, gpio_int_idx %d\n",
ts->gpio_count, ts->gpio_int_idx);
+ /*
+ * On some devices _PS0 does a reset for us and
+ * sometimes this is necessary for things to work.
+ */
+ acpi_device_fix_up_power(ACPI_COMPANION(dev));
return -EINVAL;
}
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index 43c521f50c85..3dda6eaabdab 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -1654,6 +1654,9 @@ static int usbtouch_probe(struct usb_interface *intf,
if (id->driver_info == DEVTYPE_IGNORE)
return -ENODEV;
+ if (id->driver_info >= ARRAY_SIZE(usbtouch_dev_info))
+ return -ENODEV;
+
endpoint = usbtouch_get_input_endpoint(intf->cur_altsetting);
if (!endpoint)
return -ENXIO;
diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c
index 2757c7768ffe..f51ab5614532 100644
--- a/drivers/input/touchscreen/wm97xx-core.c
+++ b/drivers/input/touchscreen/wm97xx-core.c
@@ -758,7 +758,9 @@ batt_err:
static int wm97xx_mfd_remove(struct platform_device *pdev)
{
- return wm97xx_remove(&pdev->dev);
+ wm97xx_remove(&pdev->dev);
+
+ return 0;
}
static int __maybe_unused wm97xx_suspend(struct device *dev)
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index 592c1e1a5d4b..9699ca101c62 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -382,7 +382,7 @@ static int dmar_pci_bus_notifier(struct notifier_block *nb,
static struct notifier_block dmar_pci_bus_nb = {
.notifier_call = dmar_pci_bus_notifier,
- .priority = INT_MIN,
+ .priority = 1,
};
static struct dmar_drhd_unit *
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 44016594831d..5c0dce78586a 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -320,30 +320,6 @@ EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);
-/*
- * Iterate over elements in device_domain_list and call the specified
- * callback @fn against each element.
- */
-int for_each_device_domain(int (*fn)(struct device_domain_info *info,
- void *data), void *data)
-{
- int ret = 0;
- unsigned long flags;
- struct device_domain_info *info;
-
- spin_lock_irqsave(&device_domain_lock, flags);
- list_for_each_entry(info, &device_domain_list, global) {
- ret = fn(info, data);
- if (ret) {
- spin_unlock_irqrestore(&device_domain_lock, flags);
- return ret;
- }
- }
- spin_unlock_irqrestore(&device_domain_lock, flags);
-
- return 0;
-}
-
const struct iommu_ops intel_iommu_ops;
static bool translation_pre_enabled(struct intel_iommu *iommu)
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index cb4c1d0cf25c..17cad7c1f62d 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -86,54 +86,6 @@ void vcmd_free_pasid(struct intel_iommu *iommu, u32 pasid)
/*
* Per device pasid table management:
*/
-static inline void
-device_attach_pasid_table(struct device_domain_info *info,
- struct pasid_table *pasid_table)
-{
- info->pasid_table = pasid_table;
- list_add(&info->table, &pasid_table->dev);
-}
-
-static inline void
-device_detach_pasid_table(struct device_domain_info *info,
- struct pasid_table *pasid_table)
-{
- info->pasid_table = NULL;
- list_del(&info->table);
-}
-
-struct pasid_table_opaque {
- struct pasid_table **pasid_table;
- int segment;
- int bus;
- int devfn;
-};
-
-static int search_pasid_table(struct device_domain_info *info, void *opaque)
-{
- struct pasid_table_opaque *data = opaque;
-
- if (info->iommu->segment == data->segment &&
- info->bus == data->bus &&
- info->devfn == data->devfn &&
- info->pasid_table) {
- *data->pasid_table = info->pasid_table;
- return 1;
- }
-
- return 0;
-}
-
-static int get_alias_pasid_table(struct pci_dev *pdev, u16 alias, void *opaque)
-{
- struct pasid_table_opaque *data = opaque;
-
- data->segment = pci_domain_nr(pdev->bus);
- data->bus = PCI_BUS_NUM(alias);
- data->devfn = alias & 0xff;
-
- return for_each_device_domain(&search_pasid_table, data);
-}
/*
* Allocate a pasid table for @dev. It should be called in a
@@ -143,28 +95,18 @@ int intel_pasid_alloc_table(struct device *dev)
{
struct device_domain_info *info;
struct pasid_table *pasid_table;
- struct pasid_table_opaque data;
struct page *pages;
u32 max_pasid = 0;
- int ret, order;
- int size;
+ int order, size;
might_sleep();
info = dev_iommu_priv_get(dev);
if (WARN_ON(!info || !dev_is_pci(dev) || info->pasid_table))
return -EINVAL;
- /* DMA alias device already has a pasid table, use it: */
- data.pasid_table = &pasid_table;
- ret = pci_for_each_dma_alias(to_pci_dev(dev),
- &get_alias_pasid_table, &data);
- if (ret)
- goto attach_out;
-
pasid_table = kzalloc(sizeof(*pasid_table), GFP_KERNEL);
if (!pasid_table)
return -ENOMEM;
- INIT_LIST_HEAD(&pasid_table->dev);
if (info->pasid_supported)
max_pasid = min_t(u32, pci_max_pasids(to_pci_dev(dev)),
@@ -182,9 +124,7 @@ int intel_pasid_alloc_table(struct device *dev)
pasid_table->table = page_address(pages);
pasid_table->order = order;
pasid_table->max_pasid = 1 << (order + PAGE_SHIFT + 3);
-
-attach_out:
- device_attach_pasid_table(info, pasid_table);
+ info->pasid_table = pasid_table;
return 0;
}
@@ -202,10 +142,7 @@ void intel_pasid_free_table(struct device *dev)
return;
pasid_table = info->pasid_table;
- device_detach_pasid_table(info, pasid_table);
-
- if (!list_empty(&pasid_table->dev))
- return;
+ info->pasid_table = NULL;
/* Free scalable mode PASID directory tables: */
dir = pasid_table->table;
diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
index 583ea67fc783..bf5b937848b4 100644
--- a/drivers/iommu/intel/pasid.h
+++ b/drivers/iommu/intel/pasid.h
@@ -74,7 +74,6 @@ struct pasid_table {
void *table; /* pasid table pointer */
int order; /* page order of pasid table */
u32 max_pasid; /* max pasid */
- struct list_head dev; /* device list */
};
/* Get PRESENT bit of a PASID directory entry. */
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 8fdb84b3642b..1d42084d0276 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -987,7 +987,7 @@ static const struct of_device_id ipmmu_of_ids[] = {
.compatible = "renesas,ipmmu-r8a779a0",
.data = &ipmmu_features_rcar_gen4,
}, {
- .compatible = "renesas,rcar-gen4-ipmmu",
+ .compatible = "renesas,rcar-gen4-ipmmu-vmsa",
.data = &ipmmu_features_rcar_gen4,
}, {
/* Terminator */
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 4ab1038b5482..bbb11cb8b0f7 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -298,7 +298,7 @@ config XTENSA_MX
config XILINX_INTC
bool "Xilinx Interrupt Controller IP"
- depends on MICROBLAZE || ARCH_ZYNQ || ARCH_ZYNQMP
+ depends on OF_ADDRESS
select IRQ_DOMAIN
help
Support for the Xilinx Interrupt Controller IP core.
diff --git a/drivers/irqchip/irq-apple-aic.c b/drivers/irqchip/irq-apple-aic.c
index 12dd48727a15..1c2813ad8bbe 100644
--- a/drivers/irqchip/irq-apple-aic.c
+++ b/drivers/irqchip/irq-apple-aic.c
@@ -228,7 +228,7 @@
#define AIC_TMR_EL02_PHYS AIC_TMR_GUEST_PHYS
#define AIC_TMR_EL02_VIRT AIC_TMR_GUEST_VIRT
-DEFINE_STATIC_KEY_TRUE(use_fast_ipi);
+static DEFINE_STATIC_KEY_TRUE(use_fast_ipi);
struct aic_info {
int version;
@@ -1035,6 +1035,7 @@ static void build_fiq_affinity(struct aic_irq_chip *ic, struct device_node *aff)
continue;
cpu = of_cpu_node_to_id(cpu_node);
+ of_node_put(cpu_node);
if (WARN_ON(cpu < 0))
continue;
@@ -1143,6 +1144,7 @@ static int __init aic_of_ic_init(struct device_node *node, struct device_node *p
for_each_child_of_node(affs, chld)
build_fiq_affinity(irqc, chld);
}
+ of_node_put(affs);
set_handle_irq(aic_handle_irq);
set_handle_fiq(aic_handle_fiq);
diff --git a/drivers/irqchip/irq-gic-realview.c b/drivers/irqchip/irq-gic-realview.c
index b4c1924f0255..38fab02ffe9d 100644
--- a/drivers/irqchip/irq-gic-realview.c
+++ b/drivers/irqchip/irq-gic-realview.c
@@ -57,6 +57,7 @@ realview_gic_of_init(struct device_node *node, struct device_node *parent)
/* The PB11MPCore GIC needs to be configured in the syscon */
map = syscon_node_to_regmap(np);
+ of_node_put(np);
if (!IS_ERR(map)) {
/* new irq mode with no DCC */
regmap_write(map, REALVIEW_SYS_LOCK_OFFSET,
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 2be8dea6b6b0..2d25bca63d2a 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -1932,7 +1932,7 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
gic_data.ppi_descs = kcalloc(gic_data.ppi_nr, sizeof(*gic_data.ppi_descs), GFP_KERNEL);
if (!gic_data.ppi_descs)
- return;
+ goto out_put_node;
nr_parts = of_get_child_count(parts_node);
@@ -1973,12 +1973,15 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
continue;
cpu = of_cpu_node_to_id(cpu_node);
- if (WARN_ON(cpu < 0))
+ if (WARN_ON(cpu < 0)) {
+ of_node_put(cpu_node);
continue;
+ }
pr_cont("%pOF[%d] ", cpu_node, cpu);
cpumask_set_cpu(cpu, &part->mask);
+ of_node_put(cpu_node);
}
pr_cont("}\n");
@@ -2039,15 +2042,40 @@ static void __init gic_of_setup_kvm_info(struct device_node *node)
vgic_set_kvm_info(&gic_v3_kvm_info);
}
+static void gic_request_region(resource_size_t base, resource_size_t size,
+ const char *name)
+{
+ if (!request_mem_region(base, size, name))
+ pr_warn_once(FW_BUG "%s region %pa has overlapping address\n",
+ name, &base);
+}
+
+static void __iomem *gic_of_iomap(struct device_node *node, int idx,
+ const char *name, struct resource *res)
+{
+ void __iomem *base;
+ int ret;
+
+ ret = of_address_to_resource(node, idx, res);
+ if (ret)
+ return IOMEM_ERR_PTR(ret);
+
+ gic_request_region(res->start, resource_size(res), name);
+ base = of_iomap(node, idx);
+
+ return base ?: IOMEM_ERR_PTR(-ENOMEM);
+}
+
static int __init gic_of_init(struct device_node *node, struct device_node *parent)
{
void __iomem *dist_base;
struct redist_region *rdist_regs;
+ struct resource res;
u64 redist_stride;
u32 nr_redist_regions;
int err, i;
- dist_base = of_io_request_and_map(node, 0, "GICD");
+ dist_base = gic_of_iomap(node, 0, "GICD", &res);
if (IS_ERR(dist_base)) {
pr_err("%pOF: unable to map gic dist registers\n", node);
return PTR_ERR(dist_base);
@@ -2070,12 +2098,8 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare
}
for (i = 0; i < nr_redist_regions; i++) {
- struct resource res;
- int ret;
-
- ret = of_address_to_resource(node, 1 + i, &res);
- rdist_regs[i].redist_base = of_io_request_and_map(node, 1 + i, "GICR");
- if (ret || IS_ERR(rdist_regs[i].redist_base)) {
+ rdist_regs[i].redist_base = gic_of_iomap(node, 1 + i, "GICR", &res);
+ if (IS_ERR(rdist_regs[i].redist_base)) {
pr_err("%pOF: couldn't map region %d\n", node, i);
err = -ENODEV;
goto out_unmap_rdist;
@@ -2148,7 +2172,7 @@ gic_acpi_parse_madt_redist(union acpi_subtable_headers *header,
pr_err("Couldn't map GICR region @%llx\n", redist->base_address);
return -ENOMEM;
}
- request_mem_region(redist->base_address, redist->length, "GICR");
+ gic_request_region(redist->base_address, redist->length, "GICR");
gic_acpi_register_redist(redist->base_address, redist_base);
return 0;
@@ -2171,7 +2195,7 @@ gic_acpi_parse_madt_gicc(union acpi_subtable_headers *header,
redist_base = ioremap(gicc->gicr_base_address, size);
if (!redist_base)
return -ENOMEM;
- request_mem_region(gicc->gicr_base_address, size, "GICR");
+ gic_request_region(gicc->gicr_base_address, size, "GICR");
gic_acpi_register_redist(gicc->gicr_base_address, redist_base);
return 0;
@@ -2373,7 +2397,7 @@ gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end)
pr_err("Unable to map GICD registers\n");
return -ENOMEM;
}
- request_mem_region(dist->base_address, ACPI_GICV3_DIST_MEM_SIZE, "GICD");
+ gic_request_region(dist->base_address, ACPI_GICV3_DIST_MEM_SIZE, "GICD");
err = gic_validate_dist_version(acpi_data.dist_base);
if (err) {
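Net effect of gic_request_region()/gic_of_iomap() above, for both the DT and ACPI paths: the memory-region claim becomes advisory, an overlap logs a one-time FW_BUG warning instead of failing GIC init, and the mapping proceeds regardless. Usage, condensed from the DT path:

	struct resource res;
	void __iomem *base;

	base = gic_of_iomap(node, 0, "GICD", &res);
	if (IS_ERR(base))
		return PTR_ERR(base);	/* only the mapping itself can fail */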
diff --git a/drivers/irqchip/irq-loongson-liointc.c b/drivers/irqchip/irq-loongson-liointc.c
index aed88857d90f..8d05d8bcf56f 100644
--- a/drivers/irqchip/irq-loongson-liointc.c
+++ b/drivers/irqchip/irq-loongson-liointc.c
@@ -39,6 +39,12 @@
#define LIOINTC_ERRATA_IRQ 10
+#if defined(CONFIG_MIPS)
+#define liointc_core_id get_ebase_cpunum()
+#else
+#define liointc_core_id get_csr_cpuid()
+#endif
+
struct liointc_handler_data {
struct liointc_priv *priv;
u32 parent_int_map;
@@ -57,7 +63,7 @@ static void liointc_chained_handle_irq(struct irq_desc *desc)
struct liointc_handler_data *handler = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
struct irq_chip_generic *gc = handler->priv->gc;
- int core = cpu_logical_map(smp_processor_id()) % LIOINTC_NUM_CORES;
+ int core = liointc_core_id % LIOINTC_NUM_CORES;
u32 pending;
chained_irq_enter(chip, desc);
diff --git a/drivers/irqchip/irq-or1k-pic.c b/drivers/irqchip/irq-or1k-pic.c
index 49b47e787644..f289ccd95291 100644
--- a/drivers/irqchip/irq-or1k-pic.c
+++ b/drivers/irqchip/irq-or1k-pic.c
@@ -66,7 +66,6 @@ static struct or1k_pic_dev or1k_pic_level = {
.name = "or1k-PIC-level",
.irq_unmask = or1k_pic_unmask,
.irq_mask = or1k_pic_mask,
- .irq_mask_ack = or1k_pic_mask_ack,
},
.handle = handle_level_irq,
.flags = IRQ_LEVEL | IRQ_NOPROBE,
diff --git a/drivers/irqchip/irq-realtek-rtl.c b/drivers/irqchip/irq-realtek-rtl.c
index 50a56820c99b..56bf502d9c67 100644
--- a/drivers/irqchip/irq-realtek-rtl.c
+++ b/drivers/irqchip/irq-realtek-rtl.c
@@ -134,9 +134,9 @@ static int __init map_interrupts(struct device_node *node, struct irq_domain *do
if (!cpu_ictl)
return -EINVAL;
ret = of_property_read_u32(cpu_ictl, "#interrupt-cells", &tmp);
+ of_node_put(cpu_ictl);
if (ret || tmp != 1)
return -EINVAL;
- of_node_put(cpu_ictl);
cpu_int = be32_to_cpup(imap + 2);
if (cpu_int > 7 || cpu_int < 2)
diff --git a/drivers/irqchip/irq-uniphier-aidet.c b/drivers/irqchip/irq-uniphier-aidet.c
index 89121b39be26..716b1bb88bf2 100644
--- a/drivers/irqchip/irq-uniphier-aidet.c
+++ b/drivers/irqchip/irq-uniphier-aidet.c
@@ -237,6 +237,7 @@ static const struct of_device_id uniphier_aidet_match[] = {
{ .compatible = "socionext,uniphier-ld11-aidet" },
{ .compatible = "socionext,uniphier-ld20-aidet" },
{ .compatible = "socionext,uniphier-pxs3-aidet" },
+ { .compatible = "socionext,uniphier-nx1-aidet" },
{ /* sentinel */ }
};
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index d21648a923ea..c954ff91870e 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -33,6 +33,14 @@ struct dm_kobject_holder {
* access their members!
*/
+/*
+ * For mempool pre-allocation at table load time.
+ */
+struct dm_md_mempools {
+ struct bio_set bs;
+ struct bio_set io_bs;
+};
+
struct mapped_device {
struct mutex suspend_lock;
@@ -110,8 +118,7 @@ struct mapped_device {
/*
* io objects are allocated from here.
*/
- struct bio_set io_bs;
- struct bio_set bs;
+ struct dm_md_mempools *mempools;
/* kobject and completion */
struct dm_kobject_holder kobj_holder;
@@ -265,6 +272,7 @@ struct dm_io {
atomic_t io_count;
struct mapped_device *md;
+ struct bio *split_bio;
/* The three fields represent mapped part of original bio */
struct bio *orig_bio;
unsigned int sector_offset; /* offset to end of orig_bio */
diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
index 1f6bf152b3c7..e92c1afc3677 100644
--- a/drivers/md/dm-era-target.c
+++ b/drivers/md/dm-era-target.c
@@ -1400,7 +1400,7 @@ static void start_worker(struct era *era)
static void stop_worker(struct era *era)
{
atomic_set(&era->suspended, 1);
- flush_workqueue(era->wq);
+ drain_workqueue(era->wq);
}
/*----------------------------------------------------------------
@@ -1570,6 +1570,12 @@ static void era_postsuspend(struct dm_target *ti)
}
stop_worker(era);
+
+ r = metadata_commit(era->md);
+ if (r) {
+ DMERR("%s: metadata_commit failed", __func__);
+ /* FIXME: fail mode */
+ }
}
static int era_preresume(struct dm_target *ti)
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 06f328928a7f..0c6620e7b7bf 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -415,8 +415,7 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
/*
* Work out how many "unsigned long"s we need to hold the bitset.
*/
- bitset_size = dm_round_up(region_count,
- sizeof(*lc->clean_bits) << BYTE_SHIFT);
+ bitset_size = dm_round_up(region_count, BITS_PER_LONG);
bitset_size >>= BYTE_SHIFT;
lc->bitset_uint32_count = bitset_size / sizeof(*lc->clean_bits);
@@ -616,7 +615,7 @@ static int disk_resume(struct dm_dirty_log *log)
log_clear_bit(lc, lc->clean_bits, i);
/* clear any old bits -- device has shrunk */
- for (i = lc->region_count; i % (sizeof(*lc->clean_bits) << BYTE_SHIFT); i++)
+ for (i = lc->region_count; i % BITS_PER_LONG; i++)
log_clear_bit(lc, lc->clean_bits, i);
/* copy clean across to sync */
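The sizing bug fixed above: the clean-bits bitset is manipulated with word-based bitmap helpers, so its length must be rounded up to BITS_PER_LONG; the old formula rounded to 32 bits (sizeof(*lc->clean_bits) << BYTE_SHIFT) and could leave the tail of the last 64-bit word uncovered. A userspace check of the corrected arithmetic (dm_round_up reimplemented for illustration):

	#include <stdio.h>

	#define BYTE_SHIFT	3
	#define BITS_PER_LONG	(8 * (unsigned long)sizeof(long))

	/* same contract as dm_round_up(): round n up to a multiple of m */
	static unsigned long round_up_ex(unsigned long n, unsigned long m)
	{
		return ((n + m - 1) / m) * m;
	}

	int main(void)
	{
		unsigned long region_count = 1000;	/* illustrative */
		unsigned long bits = round_up_ex(region_count, BITS_PER_LONG);

		printf("%lu regions -> %lu bits -> %lu bytes of bitset\n",
		       region_count, bits, bits >> BYTE_SHIFT);
		return 0;
	}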
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 5e41fbae3f6b..80c9f7134e9b 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -1001,12 +1001,13 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
static int validate_raid_redundancy(struct raid_set *rs)
{
unsigned int i, rebuild_cnt = 0;
- unsigned int rebuilds_per_group = 0, copies;
+ unsigned int rebuilds_per_group = 0, copies, raid_disks;
unsigned int group_size, last_group_start;
- for (i = 0; i < rs->md.raid_disks; i++)
- if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
- !rs->dev[i].rdev.sb_page)
+ for (i = 0; i < rs->raid_disks; i++)
+ if (!test_bit(FirstUse, &rs->dev[i].rdev.flags) &&
+ ((!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
+ !rs->dev[i].rdev.sb_page)))
rebuild_cnt++;
switch (rs->md.level) {
@@ -1046,8 +1047,9 @@ static int validate_raid_redundancy(struct raid_set *rs)
* A A B B C
* C D D E E
*/
+ raid_disks = min(rs->raid_disks, rs->md.raid_disks);
if (__is_raid10_near(rs->md.new_layout)) {
- for (i = 0; i < rs->md.raid_disks; i++) {
+ for (i = 0; i < raid_disks; i++) {
if (!(i % copies))
rebuilds_per_group = 0;
if ((!rs->dev[i].rdev.sb_page ||
@@ -1070,10 +1072,10 @@ static int validate_raid_redundancy(struct raid_set *rs)
* results in the need to treat the last (potentially larger)
* set differently.
*/
- group_size = (rs->md.raid_disks / copies);
- last_group_start = (rs->md.raid_disks / group_size) - 1;
+ group_size = (raid_disks / copies);
+ last_group_start = (raid_disks / group_size) - 1;
last_group_start *= group_size;
- for (i = 0; i < rs->md.raid_disks; i++) {
+ for (i = 0; i < raid_disks; i++) {
if (!(i % copies) && !(i > last_group_start))
rebuilds_per_group = 0;
if ((!rs->dev[i].rdev.sb_page ||
@@ -1588,7 +1590,7 @@ static sector_t __rdev_sectors(struct raid_set *rs)
{
int i;
- for (i = 0; i < rs->md.raid_disks; i++) {
+ for (i = 0; i < rs->raid_disks; i++) {
struct md_rdev *rdev = &rs->dev[i].rdev;
if (!test_bit(Journal, &rdev->flags) &&
@@ -3725,7 +3727,7 @@ static int raid_message(struct dm_target *ti, unsigned int argc, char **argv,
if (!strcasecmp(argv[0], "idle") || !strcasecmp(argv[0], "frozen")) {
if (mddev->sync_thread) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- md_reap_sync_thread(mddev, false);
+ md_reap_sync_thread(mddev);
}
} else if (decipher_sync_action(mddev, mddev->recovery) != st_idle)
return -EBUSY;
@@ -3766,13 +3768,13 @@ static int raid_iterate_devices(struct dm_target *ti,
unsigned int i;
int r = 0;
- for (i = 0; !r && i < rs->md.raid_disks; i++)
- if (rs->dev[i].data_dev)
- r = fn(ti,
- rs->dev[i].data_dev,
- 0, /* No offset on data devs */
- rs->md.dev_sectors,
- data);
+ for (i = 0; !r && i < rs->raid_disks; i++) {
+ if (rs->dev[i].data_dev) {
+ r = fn(ti, rs->dev[i].data_dev,
+ 0, /* No offset on data devs */
+ rs->md.dev_sectors, data);
+ }
+ }
return r;
}
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 6087cdcaad46..a83b98a8d2a9 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -319,7 +319,7 @@ static int setup_clone(struct request *clone, struct request *rq,
{
int r;
- r = blk_rq_prep_clone(clone, rq, &tio->md->bs, gfp_mask,
+ r = blk_rq_prep_clone(clone, rq, &tio->md->mempools->bs, gfp_mask,
dm_rq_bio_constructor, tio);
if (r)
return r;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 0e833a154b31..bd539afbfe88 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1038,17 +1038,6 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
return 0;
}
-void dm_table_free_md_mempools(struct dm_table *t)
-{
- dm_free_md_mempools(t->mempools);
- t->mempools = NULL;
-}
-
-struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t)
-{
- return t->mempools;
-}
-
static int setup_indexes(struct dm_table *t)
{
int i;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index dfb0a551bd88..2b75f1ef7386 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -136,14 +136,6 @@ static int get_swap_bios(void)
return latch;
}
-/*
- * For mempools pre-allocation at the table loading time.
- */
-struct dm_md_mempools {
- struct bio_set bs;
- struct bio_set io_bs;
-};
-
struct table_device {
struct list_head list;
refcount_t count;
@@ -563,6 +555,10 @@ static void dm_start_io_acct(struct dm_io *io, struct bio *clone)
unsigned long flags;
/* Can afford locking given DM_TIO_IS_DUPLICATE_BIO */
spin_lock_irqsave(&io->lock, flags);
+ if (dm_io_flagged(io, DM_IO_ACCOUNTED)) {
+ spin_unlock_irqrestore(&io->lock, flags);
+ return;
+ }
dm_io_set_flag(io, DM_IO_ACCOUNTED);
spin_unlock_irqrestore(&io->lock, flags);
}
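The guard added to dm_start_io_acct() is a test-and-set under the io lock: re-check the flag while holding the lock so two racing callers cannot both account the same io. Condensed:

	spin_lock_irqsave(&io->lock, flags);
	if (dm_io_flagged(io, DM_IO_ACCOUNTED)) {
		spin_unlock_irqrestore(&io->lock, flags);
		return;				/* already accounted */
	}
	dm_io_set_flag(io, DM_IO_ACCOUNTED);	/* we win: mark, then account */
	spin_unlock_irqrestore(&io->lock, flags);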
@@ -581,7 +577,7 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
struct dm_target_io *tio;
struct bio *clone;
- clone = bio_alloc_clone(NULL, bio, GFP_NOIO, &md->io_bs);
+ clone = bio_alloc_clone(NULL, bio, GFP_NOIO, &md->mempools->io_bs);
/* Set default bdev, but target must bio_set_dev() before issuing IO */
clone->bi_bdev = md->disk->part0;
@@ -598,6 +594,7 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
atomic_set(&io->io_count, 2);
this_cpu_inc(*md->pending_io);
io->orig_bio = bio;
+ io->split_bio = NULL;
io->md = md;
spin_lock_init(&io->lock);
io->start_time = jiffies;
@@ -628,7 +625,8 @@ static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti,
} else {
struct mapped_device *md = ci->io->md;
- clone = bio_alloc_clone(NULL, ci->bio, gfp_mask, &md->bs);
+ clone = bio_alloc_clone(NULL, ci->bio, gfp_mask,
+ &md->mempools->bs);
if (!clone)
return NULL;
/* Set default bdev, but target must bio_set_dev() before issuing IO */
@@ -718,18 +716,18 @@ static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
}
static inline struct dm_table *dm_get_live_table_bio(struct mapped_device *md,
- int *srcu_idx, struct bio *bio)
+ int *srcu_idx, unsigned bio_opf)
{
- if (bio->bi_opf & REQ_NOWAIT)
+ if (bio_opf & REQ_NOWAIT)
return dm_get_live_table_fast(md);
else
return dm_get_live_table(md, srcu_idx);
}
static inline void dm_put_live_table_bio(struct mapped_device *md, int srcu_idx,
- struct bio *bio)
+ unsigned bio_opf)
{
- if (bio->bi_opf & REQ_NOWAIT)
+ if (bio_opf & REQ_NOWAIT)
dm_put_live_table_fast(md);
else
dm_put_live_table(md, srcu_idx);
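These helpers now take a snapshot of bi_opf rather than the bio itself: dm_submit_bio() copies the flags up front because the bio may be completed and freed before the matching table put runs. A sketch of the snapshot-before-submit shape; table_get(), table_put() and process() are hypothetical stand-ins:

static void submit_one(struct mapped_device *md, struct bio *bio)
{
	unsigned int bio_opf = bio->bi_opf;	/* copy while the bio is ours */
	int srcu_idx;
	struct dm_table *map;

	map = table_get(md, &srcu_idx, bio_opf);

	process(md, map, bio);			/* may complete and free bio */

	table_put(md, srcu_idx, bio_opf);	/* must not dereference bio */
}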
@@ -890,7 +888,7 @@ static void dm_io_complete(struct dm_io *io)
{
blk_status_t io_error;
struct mapped_device *md = io->md;
- struct bio *bio = io->orig_bio;
+ struct bio *bio = io->split_bio ? io->split_bio : io->orig_bio;
if (io->status == BLK_STS_DM_REQUEUE) {
unsigned long flags;
@@ -942,9 +940,11 @@ static void dm_io_complete(struct dm_io *io)
if (io_error == BLK_STS_AGAIN) {
/* io_uring doesn't handle BLK_STS_AGAIN (yet) */
queue_io(md, bio);
+ return;
}
}
- return;
+ if (io_error == BLK_STS_DM_REQUEUE)
+ return;
}
if (bio_is_flush_with_data(bio)) {
@@ -1023,23 +1023,19 @@ static void clone_endio(struct bio *bio)
struct dm_io *io = tio->io;
struct mapped_device *md = io->md;
- if (likely(bio->bi_bdev != md->disk->part0)) {
- struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-
- if (unlikely(error == BLK_STS_TARGET)) {
- if (bio_op(bio) == REQ_OP_DISCARD &&
- !bdev_max_discard_sectors(bio->bi_bdev))
- disable_discard(md);
- else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
- !q->limits.max_write_zeroes_sectors)
- disable_write_zeroes(md);
- }
-
- if (static_branch_unlikely(&zoned_enabled) &&
- unlikely(blk_queue_is_zoned(q)))
- dm_zone_endio(io, bio);
+ if (unlikely(error == BLK_STS_TARGET)) {
+ if (bio_op(bio) == REQ_OP_DISCARD &&
+ !bdev_max_discard_sectors(bio->bi_bdev))
+ disable_discard(md);
+ else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
+ !bdev_write_zeroes_sectors(bio->bi_bdev))
+ disable_write_zeroes(md);
}
+ if (static_branch_unlikely(&zoned_enabled) &&
+ unlikely(blk_queue_is_zoned(bdev_get_queue(bio->bi_bdev))))
+ dm_zone_endio(io, bio);
+
if (endio) {
int r = endio(ti, bio, &error);
switch (r) {
@@ -1620,7 +1616,12 @@ static blk_status_t __split_and_process_bio(struct clone_info *ci)
ti = dm_table_find_target(ci->map, ci->sector);
if (unlikely(!ti))
return BLK_STS_IOERR;
- else if (unlikely(ci->is_abnormal_io))
+
+ if (unlikely((ci->bio->bi_opf & REQ_NOWAIT) != 0) &&
+ unlikely(!dm_target_supports_nowait(ti->type)))
+ return BLK_STS_NOTSUPP;
+
+ if (unlikely(ci->is_abnormal_io))
return __process_abnormal_io(ci, ti);
/*
@@ -1693,9 +1694,11 @@ static void dm_split_and_process_bio(struct mapped_device *md,
* Remainder must be passed to submit_bio_noacct() so it gets handled
* *after* bios already submitted have been completely processed.
*/
- bio_trim(bio, io->sectors, ci.sector_count);
- trace_block_split(bio, bio->bi_iter.bi_sector);
- bio_inc_remaining(bio);
+ WARN_ON_ONCE(!dm_io_flagged(io, DM_IO_WAS_SPLIT));
+ io->split_bio = bio_split(bio, io->sectors, GFP_NOIO,
+ &md->queue->bio_split);
+ bio_chain(io->split_bio, bio);
+ trace_block_split(io->split_bio, bio->bi_iter.bi_sector);
submit_bio_noacct(bio);
out:
/*
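The hunk above replaces the bio_trim()/bio_inc_remaining() scheme with an explicit split: DM keeps processing the split head while the chained parent carries the remainder back to the block layer. A minimal sketch of the split-and-chain idiom with the real helpers:

#include <linux/bio.h>

/* Peel off the first 'sectors' of 'bio'. The parent completes only
 * after both the returned head and the resubmitted remainder end.
 */
static struct bio *split_head(struct bio *bio, unsigned int sectors,
			      struct bio_set *bs)
{
	struct bio *head = bio_split(bio, sectors, GFP_NOIO, bs);

	bio_chain(head, bio);		/* 'bio' becomes head's parent */
	submit_bio_noacct(bio);		/* remainder back to the stack */
	return head;			/* caller processes the head */
}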
@@ -1722,8 +1725,9 @@ static void dm_submit_bio(struct bio *bio)
struct mapped_device *md = bio->bi_bdev->bd_disk->private_data;
int srcu_idx;
struct dm_table *map;
+ unsigned bio_opf = bio->bi_opf;
- map = dm_get_live_table_bio(md, &srcu_idx, bio);
+ map = dm_get_live_table_bio(md, &srcu_idx, bio_opf);
/* If suspended, or map not yet available, queue this IO for later */
if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) ||
@@ -1739,7 +1743,7 @@ static void dm_submit_bio(struct bio *bio)
dm_split_and_process_bio(md, map, bio);
out:
- dm_put_live_table_bio(md, srcu_idx, bio);
+ dm_put_live_table_bio(md, srcu_idx, bio_opf);
}
static bool dm_poll_dm_io(struct dm_io *io, struct io_comp_batch *iob,
@@ -1876,8 +1880,7 @@ static void cleanup_mapped_device(struct mapped_device *md)
{
if (md->wq)
destroy_workqueue(md->wq);
- bioset_exit(&md->bs);
- bioset_exit(&md->io_bs);
+ dm_free_md_mempools(md->mempools);
if (md->dax_dev) {
dax_remove_host(md->disk);
@@ -2049,48 +2052,6 @@ static void free_dev(struct mapped_device *md)
kvfree(md);
}
-static int __bind_mempools(struct mapped_device *md, struct dm_table *t)
-{
- struct dm_md_mempools *p = dm_table_get_md_mempools(t);
- int ret = 0;
-
- if (dm_table_bio_based(t)) {
- /*
- * The md may already have mempools that need changing.
- * If so, reload bioset because front_pad may have changed
- * because a different table was loaded.
- */
- bioset_exit(&md->bs);
- bioset_exit(&md->io_bs);
-
- } else if (bioset_initialized(&md->bs)) {
- /*
- * There's no need to reload with request-based dm
- * because the size of front_pad doesn't change.
- * Note for future: If you are to reload bioset,
- * prep-ed requests in the queue may refer
- * to bio from the old bioset, so you must walk
- * through the queue to unprep.
- */
- goto out;
- }
-
- BUG_ON(!p ||
- bioset_initialized(&md->bs) ||
- bioset_initialized(&md->io_bs));
-
- ret = bioset_init_from_src(&md->bs, &p->bs);
- if (ret)
- goto out;
- ret = bioset_init_from_src(&md->io_bs, &p->io_bs);
- if (ret)
- bioset_exit(&md->bs);
-out:
- /* mempool bind completed, no longer need any mempools in the table */
- dm_table_free_md_mempools(t);
- return ret;
-}
-
/*
* Bind a table to the device.
*/
@@ -2144,12 +2105,28 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
* immutable singletons - used to optimize dm_mq_queue_rq.
*/
md->immutable_target = dm_table_get_immutable_target(t);
- }
- ret = __bind_mempools(md, t);
- if (ret) {
- old_map = ERR_PTR(ret);
- goto out;
+ /*
+ * There is no need to reload with request-based dm because the
+ * size of front_pad doesn't change.
+ *
+ * Note for future: if you reload the bioset, prepped
+ * requests in the queue may refer to bios from the old bioset,
+ * so you must walk through the queue to unprep them.
+ */
+ if (!md->mempools) {
+ md->mempools = t->mempools;
+ t->mempools = NULL;
+ }
+ } else {
+ /*
+ * The md may already have mempools that need changing.
+ * If so, reload the bioset because front_pad may have changed
+ * when a different table was loaded.
+ */
+ dm_free_md_mempools(md->mempools);
+ md->mempools = t->mempools;
+ t->mempools = NULL;
}
ret = dm_table_set_restrictions(t, md->queue, limits);
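With dm_table_free_md_mempools()/dm_table_get_md_mempools() gone and the bioset_init_from_src() copies removed, __bind() now simply transfers ownership of the table's mempools pointer to the mapped_device. A sketch of that ownership hand-off (helper name illustrative):

static void adopt_mempools(struct mapped_device *md, struct dm_table *t,
			   bool request_based)
{
	if (request_based && md->mempools)
		return;				/* front_pad is stable: keep the old set */

	dm_free_md_mempools(md->mempools);	/* tolerates NULL */
	md->mempools = t->mempools;		/* steal the reference... */
	t->mempools = NULL;			/* ...so the table cannot double-free */
}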
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 3f89664fea01..a8405ce305a9 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -71,8 +71,6 @@ struct dm_target *dm_table_get_immutable_target(struct dm_table *t);
struct dm_target *dm_table_get_wildcard_target(struct dm_table *t);
bool dm_table_bio_based(struct dm_table *t);
bool dm_table_request_based(struct dm_table *t);
-void dm_table_free_md_mempools(struct dm_table *t);
-struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);
void dm_lock_md_type(struct mapped_device *md);
void dm_unlock_md_type(struct mapped_device *md);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 8273ac5eef06..c7ecb0bffda0 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -4831,7 +4831,7 @@ action_store(struct mddev *mddev, const char *page, size_t len)
flush_workqueue(md_misc_wq);
if (mddev->sync_thread) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- md_reap_sync_thread(mddev, true);
+ md_reap_sync_thread(mddev);
}
mddev_unlock(mddev);
}
@@ -6197,7 +6197,7 @@ static void __md_stop_writes(struct mddev *mddev)
flush_workqueue(md_misc_wq);
if (mddev->sync_thread) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- md_reap_sync_thread(mddev, true);
+ md_reap_sync_thread(mddev);
}
del_timer_sync(&mddev->safemode_timer);
@@ -9303,7 +9303,7 @@ void md_check_recovery(struct mddev *mddev)
* ->spare_active and clear saved_raid_disk
*/
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- md_reap_sync_thread(mddev, true);
+ md_reap_sync_thread(mddev);
clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
@@ -9338,7 +9338,7 @@ void md_check_recovery(struct mddev *mddev)
goto unlock;
}
if (mddev->sync_thread) {
- md_reap_sync_thread(mddev, true);
+ md_reap_sync_thread(mddev);
goto unlock;
}
/* Set RUNNING before clearing NEEDED to avoid
@@ -9411,18 +9411,14 @@ void md_check_recovery(struct mddev *mddev)
}
EXPORT_SYMBOL(md_check_recovery);
-void md_reap_sync_thread(struct mddev *mddev, bool reconfig_mutex_held)
+void md_reap_sync_thread(struct mddev *mddev)
{
struct md_rdev *rdev;
sector_t old_dev_sectors = mddev->dev_sectors;
bool is_reshaped = false;
- if (reconfig_mutex_held)
- mddev_unlock(mddev);
/* resync has finished, collect result */
md_unregister_thread(&mddev->sync_thread);
- if (reconfig_mutex_held)
- mddev_lock_nointr(mddev);
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
mddev->degraded != mddev->raid_disks) {
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 5f62c46ac2d3..cf2cbb17acbd 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -719,7 +719,7 @@ extern struct md_thread *md_register_thread(
extern void md_unregister_thread(struct md_thread **threadp);
extern void md_wakeup_thread(struct md_thread *thread);
extern void md_check_recovery(struct mddev *mddev);
-extern void md_reap_sync_thread(struct mddev *mddev, bool reconfig_mutex_held);
+extern void md_reap_sync_thread(struct mddev *mddev);
extern int mddev_init_writes_pending(struct mddev *mddev);
extern bool md_write_start(struct mddev *mddev, struct bio *bi);
extern void md_write_inc(struct mddev *mddev, struct bio *bi);
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index 973e2e06f19c..0a2e4806b1ec 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -629,9 +629,9 @@ static void ppl_do_flush(struct ppl_io_unit *io)
if (bdev) {
struct bio *bio;
- bio = bio_alloc_bioset(bdev, 0, GFP_NOIO,
+ bio = bio_alloc_bioset(bdev, 0,
REQ_OP_WRITE | REQ_PREFLUSH,
- &ppl_conf->flush_bs);
+ GFP_NOIO, &ppl_conf->flush_bs);
bio->bi_private = io;
bio->bi_end_io = ppl_flush_endio;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 5d09256d7f81..c8539d0e12dd 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -7304,7 +7304,9 @@ static struct r5conf *setup_conf(struct mddev *mddev)
goto abort;
conf->mddev = mddev;
- if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
+ ret = -ENOMEM;
+ conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!conf->stripe_hashtbl)
goto abort;
/* We init hash_locks[0] separately to that it can be used
@@ -7933,7 +7935,7 @@ static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
int err = 0;
int number = rdev->raid_disk;
struct md_rdev __rcu **rdevp;
- struct disk_info *p = conf->disks + number;
+ struct disk_info *p;
struct md_rdev *tmp;
print_raid5_conf(conf);
@@ -7952,6 +7954,9 @@ static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
log_exit(conf);
return 0;
}
+ if (unlikely(number >= conf->pool_size))
+ return 0;
+ p = conf->disks + number;
if (rdev == rcu_access_pointer(p->rdev))
rdevp = &p->rdev;
else if (rdev == rcu_access_pointer(p->replacement))
@@ -8062,6 +8067,7 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
*/
if (rdev->saved_raid_disk >= 0 &&
rdev->saved_raid_disk >= first &&
+ rdev->saved_raid_disk <= last &&
conf->disks[rdev->saved_raid_disk].rdev == NULL)
first = rdev->saved_raid_disk;
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index b7800b37af78..ac1a411648d8 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -105,6 +105,7 @@ config TI_EMIF
config OMAP_GPMC
tristate "Texas Instruments OMAP SoC GPMC driver"
depends on OF_ADDRESS
+ depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
select GPIOLIB
help
This driver is for the General Purpose Memory Controller (GPMC)
diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
index 86a3d34f418e..4c5154e0bf00 100644
--- a/drivers/memory/mtk-smi.c
+++ b/drivers/memory/mtk-smi.c
@@ -404,13 +404,16 @@ static int mtk_smi_device_link_common(struct device *dev, struct device **com_de
of_node_put(smi_com_node);
if (smi_com_pdev) {
/* smi-common is the supplier; make sure it is ready before linking */
- if (!platform_get_drvdata(smi_com_pdev))
+ if (!platform_get_drvdata(smi_com_pdev)) {
+ put_device(&smi_com_pdev->dev);
return -EPROBE_DEFER;
+ }
smi_com_dev = &smi_com_pdev->dev;
link = device_link_add(dev, smi_com_dev,
DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS);
if (!link) {
dev_err(dev, "Unable to link smi-common dev\n");
+ put_device(&smi_com_pdev->dev);
return -ENODEV;
}
*com_dev = smi_com_dev;
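The lookup that produced smi_com_pdev holds a reference on the device, so both early returns added here must drop it. A sketch of the balanced-reference pattern, assuming the usual of_find_device_by_node() lookup:

#include <linux/device.h>
#include <linux/of_platform.h>

static int link_supplier(struct device *consumer, struct device_node *np)
{
	struct platform_device *pdev = of_find_device_by_node(np);

	if (!pdev)
		return -ENODEV;

	if (!platform_get_drvdata(pdev)) {	/* supplier not probed yet */
		put_device(&pdev->dev);		/* balance the lookup */
		return -EPROBE_DEFER;
	}

	if (!device_link_add(consumer, &pdev->dev,
			     DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS)) {
		put_device(&pdev->dev);
		return -ENODEV;
	}

	return 0;	/* success path keeps the reference */
}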
diff --git a/drivers/memory/samsung/exynos5422-dmc.c b/drivers/memory/samsung/exynos5422-dmc.c
index 4733e7898ffe..c491cd549644 100644
--- a/drivers/memory/samsung/exynos5422-dmc.c
+++ b/drivers/memory/samsung/exynos5422-dmc.c
@@ -1187,33 +1187,39 @@ static int of_get_dram_timings(struct exynos5_dmc *dmc)
dmc->timing_row = devm_kmalloc_array(dmc->dev, TIMING_COUNT,
sizeof(u32), GFP_KERNEL);
- if (!dmc->timing_row)
- return -ENOMEM;
+ if (!dmc->timing_row) {
+ ret = -ENOMEM;
+ goto put_node;
+ }
dmc->timing_data = devm_kmalloc_array(dmc->dev, TIMING_COUNT,
sizeof(u32), GFP_KERNEL);
- if (!dmc->timing_data)
- return -ENOMEM;
+ if (!dmc->timing_data) {
+ ret = -ENOMEM;
+ goto put_node;
+ }
dmc->timing_power = devm_kmalloc_array(dmc->dev, TIMING_COUNT,
sizeof(u32), GFP_KERNEL);
- if (!dmc->timing_power)
- return -ENOMEM;
+ if (!dmc->timing_power) {
+ ret = -ENOMEM;
+ goto put_node;
+ }
dmc->timings = of_lpddr3_get_ddr_timings(np_ddr, dmc->dev,
DDR_TYPE_LPDDR3,
&dmc->timings_arr_size);
if (!dmc->timings) {
- of_node_put(np_ddr);
dev_warn(dmc->dev, "could not get timings from DT\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto put_node;
}
dmc->min_tck = of_lpddr3_get_min_tck(np_ddr, dmc->dev);
if (!dmc->min_tck) {
- of_node_put(np_ddr);
dev_warn(dmc->dev, "could not get tck from DT\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto put_node;
}
/* Sorted array of OPPs with frequency ascending */
@@ -1227,13 +1233,14 @@ static int of_get_dram_timings(struct exynos5_dmc *dmc)
clk_period_ps);
}
- of_node_put(np_ddr);
/* Take the highest frequency's timings as 'bypass' */
dmc->bypass_timing_row = dmc->timing_row[idx - 1];
dmc->bypass_timing_data = dmc->timing_data[idx - 1];
dmc->bypass_timing_power = dmc->timing_power[idx - 1];
+put_node:
+ of_node_put(np_ddr);
return ret;
}
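The conversion above routes every exit, including the -ENOMEM paths that previously leaked np_ddr, through one label that drops the node reference. The general single-exit shape (parsing details elided):

static int parse_timings(struct device *dev, struct device_node *np_ddr)
{
	u32 *rows;
	int ret = 0;

	rows = devm_kmalloc_array(dev, 8, sizeof(u32), GFP_KERNEL);
	if (!rows) {
		ret = -ENOMEM;
		goto put_node;
	}

	/* ... further allocations and parsing, all jumping to put_node ... */

put_node:
	of_node_put(np_ddr);	/* balanced on success and on every error */
	return ret;
}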
diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c
index d6cd5537126c..69f9b0336410 100644
--- a/drivers/misc/atmel-ssc.c
+++ b/drivers/misc/atmel-ssc.c
@@ -232,9 +232,9 @@ static int ssc_probe(struct platform_device *pdev)
clk_disable_unprepare(ssc->clk);
ssc->irq = platform_get_irq(pdev, 0);
- if (!ssc->irq) {
+ if (ssc->irq < 0) {
dev_dbg(&pdev->dev, "could not get irq\n");
- return -ENXIO;
+ return ssc->irq;
}
mutex_lock(&user_lock);
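platform_get_irq() returns the IRQ number or a negative errno; on current kernels it never returns 0, so the old `if (!ssc->irq)` test was dead code and also discarded -EPROBE_DEFER. The canonical check:

	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* propagates -EPROBE_DEFER, -ENXIO, ... */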
diff --git a/drivers/misc/cardreader/rts5261.c b/drivers/misc/cardreader/rts5261.c
index 749cc5a46d13..b1e76030cafd 100644
--- a/drivers/misc/cardreader/rts5261.c
+++ b/drivers/misc/cardreader/rts5261.c
@@ -407,6 +407,8 @@ static void rts5261_init_from_hw(struct rtsx_pcr *pcr)
// default
setting_reg1 = PCR_SETTING_REG1;
setting_reg2 = PCR_SETTING_REG2;
+ } else {
+ return;
}
pci_read_config_dword(pdev, setting_reg2, &lval2);
diff --git a/drivers/misc/cardreader/rtsx_usb.c b/drivers/misc/cardreader/rtsx_usb.c
index 1ef9b61077c4..f150d8769f19 100644
--- a/drivers/misc/cardreader/rtsx_usb.c
+++ b/drivers/misc/cardreader/rtsx_usb.c
@@ -631,16 +631,20 @@ static int rtsx_usb_probe(struct usb_interface *intf,
ucr->pusb_dev = usb_dev;
- ucr->iobuf = usb_alloc_coherent(ucr->pusb_dev, IOBUF_SIZE,
- GFP_KERNEL, &ucr->iobuf_dma);
- if (!ucr->iobuf)
+ ucr->cmd_buf = kmalloc(IOBUF_SIZE, GFP_KERNEL);
+ if (!ucr->cmd_buf)
return -ENOMEM;
+ ucr->rsp_buf = kmalloc(IOBUF_SIZE, GFP_KERNEL);
+ if (!ucr->rsp_buf) {
+ ret = -ENOMEM;
+ goto out_free_cmd_buf;
+ }
+
usb_set_intfdata(intf, ucr);
ucr->vendor_id = id->idVendor;
ucr->product_id = id->idProduct;
- ucr->cmd_buf = ucr->rsp_buf = ucr->iobuf;
mutex_init(&ucr->dev_mutex);
@@ -668,8 +672,11 @@ static int rtsx_usb_probe(struct usb_interface *intf,
out_init_fail:
usb_set_intfdata(ucr->pusb_intf, NULL);
- usb_free_coherent(ucr->pusb_dev, IOBUF_SIZE, ucr->iobuf,
- ucr->iobuf_dma);
+ kfree(ucr->rsp_buf);
+ ucr->rsp_buf = NULL;
+out_free_cmd_buf:
+ kfree(ucr->cmd_buf);
+ ucr->cmd_buf = NULL;
return ret;
}
@@ -682,8 +689,12 @@ static void rtsx_usb_disconnect(struct usb_interface *intf)
mfd_remove_devices(&intf->dev);
usb_set_intfdata(ucr->pusb_intf, NULL);
- usb_free_coherent(ucr->pusb_dev, IOBUF_SIZE, ucr->iobuf,
- ucr->iobuf_dma);
+
+ kfree(ucr->cmd_buf);
+ ucr->cmd_buf = NULL;
+
+ kfree(ucr->rsp_buf);
+ ucr->rsp_buf = NULL;
}
#ifdef CONFIG_PM
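The shared DMA-coherent iobuf becomes two plain kmalloc() buffers, with the probe path unwinding the first when the second allocation fails. The goto-unwind shape in isolation:

static int alloc_bufs(struct rtsx_ucr *ucr)
{
	ucr->cmd_buf = kmalloc(IOBUF_SIZE, GFP_KERNEL);
	if (!ucr->cmd_buf)
		return -ENOMEM;

	ucr->rsp_buf = kmalloc(IOBUF_SIZE, GFP_KERNEL);
	if (!ucr->rsp_buf)
		goto free_cmd;

	return 0;

free_cmd:
	kfree(ucr->cmd_buf);
	ucr->cmd_buf = NULL;	/* keep disconnect()'s kfree() NULL-safe */
	return -ENOMEM;
}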
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
index 8d169a35cf13..bdffc6543f6f 100644
--- a/drivers/misc/eeprom/at25.c
+++ b/drivers/misc/eeprom/at25.c
@@ -79,6 +79,10 @@ static int at25_ee_read(void *priv, unsigned int offset,
{
struct at25_data *at25 = priv;
char *buf = val;
+ size_t max_chunk = spi_max_transfer_size(at25->spi);
+ unsigned int msg_offset = offset;
+ size_t bytes_left = count;
+ size_t segment;
u8 *cp;
ssize_t status;
struct spi_transfer t[2];
@@ -92,54 +96,58 @@ static int at25_ee_read(void *priv, unsigned int offset,
if (unlikely(!count))
return -EINVAL;
- cp = at25->command;
+ do {
+ segment = min(bytes_left, max_chunk);
+ cp = at25->command;
- instr = AT25_READ;
- if (at25->chip.flags & EE_INSTR_BIT3_IS_ADDR)
- if (offset >= BIT(at25->addrlen * 8))
- instr |= AT25_INSTR_BIT3;
+ instr = AT25_READ;
+ if (at25->chip.flags & EE_INSTR_BIT3_IS_ADDR)
+ if (msg_offset >= BIT(at25->addrlen * 8))
+ instr |= AT25_INSTR_BIT3;
- mutex_lock(&at25->lock);
+ mutex_lock(&at25->lock);
- *cp++ = instr;
-
- /* 8/16/24-bit address is written MSB first */
- switch (at25->addrlen) {
- default: /* case 3 */
- *cp++ = offset >> 16;
- fallthrough;
- case 2:
- *cp++ = offset >> 8;
- fallthrough;
- case 1:
- case 0: /* can't happen: for better code generation */
- *cp++ = offset >> 0;
- }
+ *cp++ = instr;
- spi_message_init(&m);
- memset(t, 0, sizeof(t));
+ /* 8/16/24-bit address is written MSB first */
+ switch (at25->addrlen) {
+ default: /* case 3 */
+ *cp++ = msg_offset >> 16;
+ fallthrough;
+ case 2:
+ *cp++ = msg_offset >> 8;
+ fallthrough;
+ case 1:
+ case 0: /* can't happen: for better code generation */
+ *cp++ = msg_offset >> 0;
+ }
- t[0].tx_buf = at25->command;
- t[0].len = at25->addrlen + 1;
- spi_message_add_tail(&t[0], &m);
+ spi_message_init(&m);
+ memset(t, 0, sizeof(t));
- t[1].rx_buf = buf;
- t[1].len = count;
- spi_message_add_tail(&t[1], &m);
+ t[0].tx_buf = at25->command;
+ t[0].len = at25->addrlen + 1;
+ spi_message_add_tail(&t[0], &m);
- /*
- * Read it all at once.
- *
- * REVISIT that's potentially a problem with large chips, if
- * other devices on the bus need to be accessed regularly or
- * this chip is clocked very slowly.
- */
- status = spi_sync(at25->spi, &m);
- dev_dbg(&at25->spi->dev, "read %zu bytes at %d --> %zd\n",
- count, offset, status);
+ t[1].rx_buf = buf;
+ t[1].len = segment;
+ spi_message_add_tail(&t[1], &m);
- mutex_unlock(&at25->lock);
- return status;
+ status = spi_sync(at25->spi, &m);
+
+ mutex_unlock(&at25->lock);
+
+ if (status)
+ return status;
+
+ msg_offset += segment;
+ buf += segment;
+ bytes_left -= segment;
+ } while (bytes_left > 0);
+
+ dev_dbg(&at25->spi->dev, "read %zu bytes at %d\n",
+ count, offset);
+ return 0;
}
/* Read extra registers as ID or serial number */
@@ -190,6 +198,7 @@ ATTRIBUTE_GROUPS(sernum);
static int at25_ee_write(void *priv, unsigned int off, void *val, size_t count)
{
struct at25_data *at25 = priv;
+ size_t maxsz = spi_max_transfer_size(at25->spi);
const char *buf = val;
int status = 0;
unsigned buf_size;
@@ -218,7 +227,7 @@ static int at25_ee_write(void *priv, unsigned int off, void *val, size_t count)
do {
unsigned long timeout, retries;
unsigned segment;
- unsigned offset = (unsigned) off;
+ unsigned offset = off;
u8 *cp = bounce;
int sr;
u8 instr;
@@ -253,6 +262,8 @@ static int at25_ee_write(void *priv, unsigned int off, void *val, size_t count)
segment = buf_size - (offset % buf_size);
if (segment > count)
segment = count;
+ if (segment > maxsz)
+ segment = maxsz;
memcpy(cp, buf, segment);
status = spi_write(at25->spi, bounce,
segment + at25->addrlen + 1);
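Both the read loop added above and this write-path clamp bound every SPI message by spi_max_transfer_size(), then advance offset, buffer and remaining count per iteration. The chunking loop in isolation; do_one_transfer() is a hypothetical stand-in for building and syncing a single message:

#include <linux/spi/spi.h>

int do_one_transfer(struct spi_device *spi, unsigned int offset,
		    const u8 *buf, size_t len);	/* hypothetical */

static int xfer_chunked(struct spi_device *spi, unsigned int offset,
			const u8 *buf, size_t count)
{
	size_t max_chunk = spi_max_transfer_size(spi);

	while (count) {
		size_t segment = min(count, max_chunk);
		int ret = do_one_transfer(spi, offset, buf, segment);

		if (ret)
			return ret;
		offset += segment;
		buf += segment;
		count -= segment;
	}
	return 0;
}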
diff --git a/drivers/misc/lkdtm/Makefile b/drivers/misc/lkdtm/Makefile
index 2e0aa74ac185..95ef971b5e1c 100644
--- a/drivers/misc/lkdtm/Makefile
+++ b/drivers/misc/lkdtm/Makefile
@@ -13,10 +13,13 @@ lkdtm-$(CONFIG_LKDTM) += cfi.o
lkdtm-$(CONFIG_LKDTM) += fortify.o
lkdtm-$(CONFIG_PPC_64S_HASH_MMU) += powerpc.o
-KASAN_SANITIZE_rodata.o := n
KASAN_SANITIZE_stackleak.o := n
-KCOV_INSTRUMENT_rodata.o := n
-CFLAGS_REMOVE_rodata.o += $(CC_FLAGS_LTO)
+
+KASAN_SANITIZE_rodata.o := n
+KCSAN_SANITIZE_rodata.o := n
+KCOV_INSTRUMENT_rodata.o := n
+OBJECT_FILES_NON_STANDARD_rodata.o := y
+CFLAGS_REMOVE_rodata.o += $(CC_FLAGS_LTO) $(RETHUNK_CFLAGS)
OBJCOPYFLAGS :=
OBJCOPYFLAGS_rodata_objcopy.o := \
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index cebcca6d6d3e..cf2b8261da14 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -1351,7 +1351,8 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
dev->hbm_state != MEI_HBM_CAP_SETUP) {
- if (dev->dev_state == MEI_DEV_POWER_DOWN) {
+ if (dev->dev_state == MEI_DEV_POWER_DOWN ||
+ dev->dev_state == MEI_DEV_POWERING_DOWN) {
dev_dbg(dev->dev, "hbm: capabilities response: on shutdown, ignoring\n");
return 0;
}
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index 64ce3f830262..15e8e2b322b1 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -109,6 +109,8 @@
#define MEI_DEV_ID_ADP_P 0x51E0 /* Alder Lake Point P */
#define MEI_DEV_ID_ADP_N 0x54E0 /* Alder Lake Point N */
+#define MEI_DEV_ID_RPL_S 0x7A68 /* Raptor Lake Point S */
+
/*
* MEI HW Section
*/
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index 9870bf717979..befa491e3344 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -1154,6 +1154,8 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
ret = mei_me_d0i3_exit_sync(dev);
if (ret)
return ret;
+ } else {
+ hw->pg_state = MEI_PG_OFF;
}
}
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 33e58821e478..5435604327a7 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -116,6 +116,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_P, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_N, MEI_ME_PCH15_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_CFG)},
+
/* required last entry */
{0, }
};
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 1259ca22d625..f4a1281658db 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1499,8 +1499,7 @@ void mmc_blk_cqe_recovery(struct mmc_queue *mq)
err = mmc_cqe_recovery(host);
if (err)
mmc_blk_reset(mq->blkdata, host, MMC_BLK_CQE_RECOVERY);
- else
- mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY);
+ mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY);
pr_debug("%s: CQE recovery done\n", mmc_hostname(host));
}
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 195dc897188b..9da4489dc345 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -1356,7 +1356,7 @@ static void msdc_data_xfer_next(struct msdc_host *host, struct mmc_request *mrq)
msdc_request_done(host, mrq);
}
-static bool msdc_data_xfer_done(struct msdc_host *host, u32 events,
+static void msdc_data_xfer_done(struct msdc_host *host, u32 events,
struct mmc_request *mrq, struct mmc_data *data)
{
struct mmc_command *stop;
@@ -1376,7 +1376,7 @@ static bool msdc_data_xfer_done(struct msdc_host *host, u32 events,
spin_unlock_irqrestore(&host->lock, flags);
if (done)
- return true;
+ return;
stop = data->stop;
if (check_data || (stop && stop->error)) {
@@ -1385,12 +1385,15 @@ static bool msdc_data_xfer_done(struct msdc_host *host, u32 events,
sdr_set_field(host->base + MSDC_DMA_CTRL, MSDC_DMA_CTRL_STOP,
1);
+ ret = readl_poll_timeout_atomic(host->base + MSDC_DMA_CTRL, val,
+ !(val & MSDC_DMA_CTRL_STOP), 1, 20000);
+ if (ret)
+ dev_dbg(host->dev, "DMA stop timed out\n");
+
ret = readl_poll_timeout_atomic(host->base + MSDC_DMA_CFG, val,
!(val & MSDC_DMA_CFG_STS), 1, 20000);
- if (ret) {
- dev_dbg(host->dev, "DMA stop timed out\n");
- return false;
- }
+ if (ret)
+ dev_dbg(host->dev, "DMA inactive timed out\n");
sdr_clr_bits(host->base + MSDC_INTEN, data_ints_mask);
dev_dbg(host->dev, "DMA stop\n");
@@ -1415,9 +1418,7 @@ static bool msdc_data_xfer_done(struct msdc_host *host, u32 events,
}
msdc_data_xfer_next(host, mrq);
- done = true;
}
- return done;
}
static void msdc_set_buswidth(struct msdc_host *host, u32 width)
@@ -2416,6 +2417,9 @@ static void msdc_cqe_disable(struct mmc_host *mmc, bool recovery)
if (recovery) {
sdr_set_field(host->base + MSDC_DMA_CTRL,
MSDC_DMA_CTRL_STOP, 1);
+ if (WARN_ON(readl_poll_timeout(host->base + MSDC_DMA_CTRL, val,
+ !(val & MSDC_DMA_CTRL_STOP), 1, 3000)))
+ return;
if (WARN_ON(readl_poll_timeout(host->base + MSDC_DMA_CFG, val,
!(val & MSDC_DMA_CFG_STS), 1, 3000)))
return;
diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
index 86e867ffbb10..033be559a730 100644
--- a/drivers/mmc/host/sdhci-omap.c
+++ b/drivers/mmc/host/sdhci-omap.c
@@ -1298,8 +1298,9 @@ static int sdhci_omap_probe(struct platform_device *pdev)
/*
* omap_device_pm_domain has callbacks to enable the main
* functional clock, interface clock and also configure the
- * SYSCONFIG register of omap devices. The callback will be invoked
- * as part of pm_runtime_get_sync.
+ * SYSCONFIG register to clear any boot-loader-set voltage
+ * capabilities before calling sdhci_setup_host(). The
+ * callback will be invoked as part of pm_runtime_get_sync.
*/
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, 50);
@@ -1441,7 +1442,8 @@ static int __maybe_unused sdhci_omap_runtime_suspend(struct device *dev)
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
- sdhci_runtime_suspend_host(host);
+ if (omap_host->con != -EINVAL)
+ sdhci_runtime_suspend_host(host);
sdhci_omap_context_save(omap_host);
@@ -1458,10 +1460,10 @@ static int __maybe_unused sdhci_omap_runtime_resume(struct device *dev)
pinctrl_pm_select_default_state(dev);
- if (omap_host->con != -EINVAL)
+ if (omap_host->con != -EINVAL) {
sdhci_omap_context_restore(omap_host);
-
- sdhci_runtime_resume_host(host, 0);
+ sdhci_runtime_resume_host(host, 0);
+ }
return 0;
}
diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
index 1499a64ec3aa..f13c08db3da5 100644
--- a/drivers/mmc/host/sdhci-pci-gli.c
+++ b/drivers/mmc/host/sdhci-pci-gli.c
@@ -982,6 +982,9 @@ static int gl9763e_runtime_resume(struct sdhci_pci_chip *chip)
struct sdhci_host *host = slot->host;
u16 clock;
+ if (host->mmc->ios.power_mode != MMC_POWER_ON)
+ return 0;
+
clock = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
clock |= SDHCI_CLOCK_PLL_EN;
diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
index 92c20cb8074a..0d4d343dbb77 100644
--- a/drivers/mmc/host/sdhci-pci-o2micro.c
+++ b/drivers/mmc/host/sdhci-pci-o2micro.c
@@ -152,6 +152,8 @@ static int sdhci_o2_get_cd(struct mmc_host *mmc)
if (!(sdhci_readw(host, O2_PLL_DLL_WDT_CONTROL1) & O2_PLL_LOCK_STATUS))
sdhci_o2_enable_internal_clock(host);
+ else
+ sdhci_o2_wait_card_detect_stable(host);
return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
}
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
index 0b68d05846e1..93da23682d86 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
@@ -850,9 +850,10 @@ static int gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
unsigned int tRP_ps;
bool use_half_period;
int sample_delay_ps, sample_delay_factor;
- u16 busy_timeout_cycles;
+ unsigned int busy_timeout_cycles;
u8 wrn_dly_sel;
unsigned long clk_rate, min_rate;
+ u64 busy_timeout_ps;
if (sdr->tRC_min >= 30000) {
/* ONFI non-EDO modes [0-3] */
@@ -885,12 +886,13 @@ static int gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
addr_setup_cycles = TO_CYCLES(sdr->tALS_min, period_ps);
data_setup_cycles = TO_CYCLES(sdr->tDS_min, period_ps);
data_hold_cycles = TO_CYCLES(sdr->tDH_min, period_ps);
- busy_timeout_cycles = TO_CYCLES(sdr->tWB_max + sdr->tR_max, period_ps);
+ busy_timeout_ps = max(sdr->tBERS_max, sdr->tPROG_max);
+ busy_timeout_cycles = TO_CYCLES(busy_timeout_ps, period_ps);
hw->timing0 = BF_GPMI_TIMING0_ADDRESS_SETUP(addr_setup_cycles) |
BF_GPMI_TIMING0_DATA_HOLD(data_hold_cycles) |
BF_GPMI_TIMING0_DATA_SETUP(data_setup_cycles);
- hw->timing1 = BF_GPMI_TIMING1_BUSY_TIMEOUT(busy_timeout_cycles * 4096);
+ hw->timing1 = BF_GPMI_TIMING1_BUSY_TIMEOUT(DIV_ROUND_UP(busy_timeout_cycles, 4096));
/*
* Derive NFC ideal delay from {3}:
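Two fixes share this hunk: the timeout is now derived from max(tBERS_max, tPROG_max) in a 64-bit picosecond value (the old tWB+tR sum overflowed the u16 cycle count), and the BUSY_TIMEOUT register field counts blocks of 4096 GPMI cycles, so the cycle count must be divided, rounding up, not multiplied. A worked example with assumed numbers:

/* Assume period_ps = 10000 (a 100 MHz GPMI clock) and
 * tBERS_max = 10 ms = 10^10 ps:
 *
 *   busy_timeout_cycles = 10^10 / 10^4 = 1,000,000 cycles
 *   DIV_ROUND_UP(1000000, 4096)       = 245  -> fits the 16-bit field
 *
 * The old 'busy_timeout_cycles * 4096' would try to program
 * 4,096,000,000; the 16-bit field keeps only the low bits (here 0),
 * yielding a timeout far shorter than intended.
 */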
diff --git a/drivers/mtd/nand/raw/nand_ids.c b/drivers/mtd/nand/raw/nand_ids.c
index 88c2440b47d8..dacc5529b3df 100644
--- a/drivers/mtd/nand/raw/nand_ids.c
+++ b/drivers/mtd/nand/raw/nand_ids.c
@@ -29,9 +29,6 @@ struct nand_flash_dev nand_flash_ids[] = {
{"TC58NVG0S3E 1G 3.3V 8-bit",
{ .id = {0x98, 0xd1, 0x90, 0x15, 0x76, 0x14, 0x01, 0x00} },
SZ_2K, SZ_128, SZ_128K, 0, 8, 64, NAND_ECC_INFO(1, SZ_512), },
- {"TC58NVG0S3HTA00 1G 3.3V 8-bit",
- { .id = {0x98, 0xf1, 0x80, 0x15} },
- SZ_2K, SZ_128, SZ_128K, 0, 4, 128, NAND_ECC_INFO(8, SZ_512), },
{"TC58NVG2S0F 4G 3.3V 8-bit",
{ .id = {0x98, 0xdc, 0x90, 0x26, 0x76, 0x15, 0x01, 0x08} },
SZ_4K, SZ_512, SZ_256K, 0, 8, 224, NAND_ECC_INFO(4, SZ_512) },
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index b2a4f998c180..8c1eeb5a8db8 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -94,6 +94,7 @@ config WIREGUARD
select CRYPTO_CURVE25519_NEON if ARM && KERNEL_MODE_NEON
select CRYPTO_CHACHA_MIPS if CPU_MIPS32_R2
select CRYPTO_POLY1305_MIPS if MIPS
+ select CRYPTO_CHACHA_S390 if S390
help
WireGuard is a secure, fast, and easy to use replacement for IPSec
that uses modern cryptography and clever networking tricks. It's
diff --git a/drivers/net/amt.c b/drivers/net/amt.c
index ebee5f07a208..e019526e1df6 100644
--- a/drivers/net/amt.c
+++ b/drivers/net/amt.c
@@ -51,6 +51,7 @@ static char *status_str[] = {
};
static char *type_str[] = {
+ "", /* Type 0 is not defined */
"AMT_MSG_DISCOVERY",
"AMT_MSG_ADVERTISEMENT",
"AMT_MSG_REQUEST",
@@ -562,7 +563,7 @@ static struct sk_buff *amt_build_igmp_gq(struct amt_dev *amt)
ihv3->nsrcs = 0;
ihv3->resv = 0;
ihv3->suppress = false;
- ihv3->qrv = amt->net->ipv4.sysctl_igmp_qrv;
+ ihv3->qrv = READ_ONCE(amt->net->ipv4.sysctl_igmp_qrv);
ihv3->csum = 0;
csum = &ihv3->csum;
csum_start = (void *)ihv3;
@@ -576,14 +577,14 @@ static struct sk_buff *amt_build_igmp_gq(struct amt_dev *amt)
return skb;
}
-static void __amt_update_gw_status(struct amt_dev *amt, enum amt_status status,
- bool validate)
+static void amt_update_gw_status(struct amt_dev *amt, enum amt_status status,
+ bool validate)
{
if (validate && amt->status >= status)
return;
netdev_dbg(amt->dev, "Update GW status %s -> %s",
status_str[amt->status], status_str[status]);
- amt->status = status;
+ WRITE_ONCE(amt->status, status);
}
static void __amt_update_relay_status(struct amt_tunnel_list *tunnel,
@@ -599,14 +600,6 @@ static void __amt_update_relay_status(struct amt_tunnel_list *tunnel,
tunnel->status = status;
}
-static void amt_update_gw_status(struct amt_dev *amt, enum amt_status status,
- bool validate)
-{
- spin_lock_bh(&amt->lock);
- __amt_update_gw_status(amt, status, validate);
- spin_unlock_bh(&amt->lock);
-}
-
static void amt_update_relay_status(struct amt_tunnel_list *tunnel,
enum amt_status status, bool validate)
{
@@ -699,9 +692,7 @@ static void amt_send_discovery(struct amt_dev *amt)
if (unlikely(net_xmit_eval(err)))
amt->dev->stats.tx_errors++;
- spin_lock_bh(&amt->lock);
- __amt_update_gw_status(amt, AMT_STATUS_SENT_DISCOVERY, true);
- spin_unlock_bh(&amt->lock);
+ amt_update_gw_status(amt, AMT_STATUS_SENT_DISCOVERY, true);
out:
rcu_read_unlock();
}
@@ -899,6 +890,28 @@ static void amt_send_mld_gq(struct amt_dev *amt, struct amt_tunnel_list *tunnel)
}
#endif
+static bool amt_queue_event(struct amt_dev *amt, enum amt_event event,
+ struct sk_buff *skb)
+{
+ int index;
+
+ spin_lock_bh(&amt->lock);
+ if (amt->nr_events >= AMT_MAX_EVENTS) {
+ spin_unlock_bh(&amt->lock);
+ return true;
+ }
+
+ index = (amt->event_idx + amt->nr_events) % AMT_MAX_EVENTS;
+ amt->events[index].event = event;
+ amt->events[index].skb = skb;
+ amt->nr_events++;
+ amt->event_idx %= AMT_MAX_EVENTS;
+ queue_work(amt_wq, &amt->event_wq);
+ spin_unlock_bh(&amt->lock);
+
+ return false;
+}
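amt_queue_event() is a fixed-capacity ring guarded by amt->lock: the tail slot is (event_idx + nr_events) % AMT_MAX_EVENTS and the worker consumes from event_idx. The same ring shape in isolation (names and size illustrative):

#define RING_SIZE 16

struct ring {
	spinlock_t	lock;
	unsigned int	head;	/* next slot to consume */
	unsigned int	len;	/* occupied slots */
	void		*slot[RING_SIZE];
};

static bool ring_push(struct ring *r, void *item)
{
	bool queued = false;

	spin_lock_bh(&r->lock);
	if (r->len < RING_SIZE) {
		r->slot[(r->head + r->len) % RING_SIZE] = item;
		r->len++;
		queued = true;
	}
	spin_unlock_bh(&r->lock);
	return queued;		/* on false the caller reschedules */
}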
+
static void amt_secret_work(struct work_struct *work)
{
struct amt_dev *amt = container_of(to_delayed_work(work),
@@ -912,58 +925,72 @@ static void amt_secret_work(struct work_struct *work)
msecs_to_jiffies(AMT_SECRET_TIMEOUT));
}
-static void amt_discovery_work(struct work_struct *work)
+static void amt_event_send_discovery(struct amt_dev *amt)
{
- struct amt_dev *amt = container_of(to_delayed_work(work),
- struct amt_dev,
- discovery_wq);
-
- spin_lock_bh(&amt->lock);
if (amt->status > AMT_STATUS_SENT_DISCOVERY)
goto out;
get_random_bytes(&amt->nonce, sizeof(__be32));
- spin_unlock_bh(&amt->lock);
amt_send_discovery(amt);
- spin_lock_bh(&amt->lock);
out:
mod_delayed_work(amt_wq, &amt->discovery_wq,
msecs_to_jiffies(AMT_DISCOVERY_TIMEOUT));
- spin_unlock_bh(&amt->lock);
}
-static void amt_req_work(struct work_struct *work)
+static void amt_discovery_work(struct work_struct *work)
{
struct amt_dev *amt = container_of(to_delayed_work(work),
struct amt_dev,
- req_wq);
+ discovery_wq);
+
+ if (amt_queue_event(amt, AMT_EVENT_SEND_DISCOVERY, NULL))
+ mod_delayed_work(amt_wq, &amt->discovery_wq,
+ msecs_to_jiffies(AMT_DISCOVERY_TIMEOUT));
+}
+
+static void amt_event_send_request(struct amt_dev *amt)
+{
u32 exp;
- spin_lock_bh(&amt->lock);
if (amt->status < AMT_STATUS_RECEIVED_ADVERTISEMENT)
goto out;
if (amt->req_cnt > AMT_MAX_REQ_COUNT) {
netdev_dbg(amt->dev, "Gateway is not ready");
amt->qi = AMT_INIT_REQ_TIMEOUT;
- amt->ready4 = false;
- amt->ready6 = false;
+ WRITE_ONCE(amt->ready4, false);
+ WRITE_ONCE(amt->ready6, false);
amt->remote_ip = 0;
- __amt_update_gw_status(amt, AMT_STATUS_INIT, false);
+ amt_update_gw_status(amt, AMT_STATUS_INIT, false);
amt->req_cnt = 0;
+ amt->nonce = 0;
goto out;
}
- spin_unlock_bh(&amt->lock);
+
+ if (!amt->req_cnt) {
+ WRITE_ONCE(amt->ready4, false);
+ WRITE_ONCE(amt->ready6, false);
+ get_random_bytes(&amt->nonce, sizeof(__be32));
+ }
amt_send_request(amt, false);
amt_send_request(amt, true);
- spin_lock_bh(&amt->lock);
- __amt_update_gw_status(amt, AMT_STATUS_SENT_REQUEST, true);
+ amt_update_gw_status(amt, AMT_STATUS_SENT_REQUEST, true);
amt->req_cnt++;
out:
exp = min_t(u32, (1 * (1 << amt->req_cnt)), AMT_MAX_REQ_TIMEOUT);
mod_delayed_work(amt_wq, &amt->req_wq, msecs_to_jiffies(exp * 1000));
- spin_unlock_bh(&amt->lock);
+}
+
+static void amt_req_work(struct work_struct *work)
+{
+ struct amt_dev *amt = container_of(to_delayed_work(work),
+ struct amt_dev,
+ req_wq);
+
+ if (amt_queue_event(amt, AMT_EVENT_SEND_REQUEST, NULL))
+ mod_delayed_work(amt_wq, &amt->req_wq,
+ msecs_to_jiffies(100));
}
static bool amt_send_membership_update(struct amt_dev *amt,
@@ -1219,7 +1246,8 @@ static netdev_tx_t amt_dev_xmit(struct sk_buff *skb, struct net_device *dev)
/* Gateway only passes IGMP/MLD packets */
if (!report)
goto free;
- if ((!v6 && !amt->ready4) || (v6 && !amt->ready6))
+ if ((!v6 && !READ_ONCE(amt->ready4)) ||
+ (v6 && !READ_ONCE(amt->ready6)))
goto free;
if (amt_send_membership_update(amt, skb, v6))
goto free;
@@ -2220,8 +2248,7 @@ static bool amt_advertisement_handler(struct amt_dev *amt, struct sk_buff *skb)
struct amt_header_advertisement *amta;
int hdr_size;
- hdr_size = sizeof(*amta) - sizeof(struct amt_header);
-
+ hdr_size = sizeof(*amta) + sizeof(struct udphdr);
if (!pskb_may_pull(skb, hdr_size))
return true;
@@ -2236,6 +2263,10 @@ static bool amt_advertisement_handler(struct amt_dev *amt, struct sk_buff *skb)
ipv4_is_zeronet(amta->ip4))
return true;
+ if (amt->status != AMT_STATUS_SENT_DISCOVERY ||
+ amt->nonce != amta->nonce)
+ return true;
+
amt->remote_ip = amta->ip4;
netdev_dbg(amt->dev, "advertised remote ip = %pI4\n", &amt->remote_ip);
mod_delayed_work(amt_wq, &amt->req_wq, 0);
@@ -2251,19 +2282,30 @@ static bool amt_multicast_data_handler(struct amt_dev *amt, struct sk_buff *skb)
struct ethhdr *eth;
struct iphdr *iph;
+ if (READ_ONCE(amt->status) != AMT_STATUS_SENT_UPDATE)
+ return true;
+
+ hdr_size = sizeof(*amtmd) + sizeof(struct udphdr);
+ if (!pskb_may_pull(skb, hdr_size))
+ return true;
+
amtmd = (struct amt_header_mcast_data *)(udp_hdr(skb) + 1);
if (amtmd->reserved || amtmd->version)
return true;
- hdr_size = sizeof(*amtmd) + sizeof(struct udphdr);
if (iptunnel_pull_header(skb, hdr_size, htons(ETH_P_IP), false))
return true;
+
skb_reset_network_header(skb);
skb_push(skb, sizeof(*eth));
skb_reset_mac_header(skb);
skb_pull(skb, sizeof(*eth));
eth = eth_hdr(skb);
+
+ if (!pskb_may_pull(skb, sizeof(*iph)))
+ return true;
iph = ip_hdr(skb);
+
if (iph->version == 4) {
if (!ipv4_is_multicast(iph->daddr))
return true;
@@ -2274,6 +2316,9 @@ static bool amt_multicast_data_handler(struct amt_dev *amt, struct sk_buff *skb)
} else if (iph->version == 6) {
struct ipv6hdr *ip6h;
+ if (!pskb_may_pull(skb, sizeof(*ip6h)))
+ return true;
+
ip6h = ipv6_hdr(skb);
if (!ipv6_addr_is_multicast(&ip6h->daddr))
return true;
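Each header dereference in these handlers is now preceded by pskb_may_pull(): skb payload may live in fragments, and ip_hdr()/ipv6_hdr() are only valid for bytes already in the linear area. The pattern, including the pointer re-read after the pull (which may reallocate the skb head):

#include <linux/ip.h>
#include <linux/in.h>

static bool check_v4_mcast(struct sk_buff *skb)
{
	struct iphdr *iph;

	if (!pskb_may_pull(skb, sizeof(*iph)))
		return true;		/* drop: header not fully present */

	iph = ip_hdr(skb);		/* re-read: pull may move the data */
	return !ipv4_is_multicast(iph->daddr);	/* true means drop */
}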
@@ -2306,8 +2351,7 @@ static bool amt_membership_query_handler(struct amt_dev *amt,
struct iphdr *iph;
int hdr_size, len;
- hdr_size = sizeof(*amtmq) - sizeof(struct amt_header);
-
+ hdr_size = sizeof(*amtmq) + sizeof(struct udphdr);
if (!pskb_may_pull(skb, hdr_size))
return true;
@@ -2315,54 +2359,66 @@ static bool amt_membership_query_handler(struct amt_dev *amt,
if (amtmq->reserved || amtmq->version)
return true;
- hdr_size = sizeof(*amtmq) + sizeof(struct udphdr) - sizeof(*eth);
+ if (amtmq->nonce != amt->nonce)
+ return true;
+
+ hdr_size -= sizeof(*eth);
if (iptunnel_pull_header(skb, hdr_size, htons(ETH_P_TEB), false))
return true;
+
oeth = eth_hdr(skb);
skb_reset_mac_header(skb);
skb_pull(skb, sizeof(*eth));
skb_reset_network_header(skb);
eth = eth_hdr(skb);
+ if (!pskb_may_pull(skb, sizeof(*iph)))
+ return true;
+
iph = ip_hdr(skb);
if (iph->version == 4) {
- if (!ipv4_is_multicast(iph->daddr))
+ if (READ_ONCE(amt->ready4))
return true;
+
if (!pskb_may_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS +
sizeof(*ihv3)))
return true;
+ if (!ipv4_is_multicast(iph->daddr))
+ return true;
+
ihv3 = skb_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS);
skb_reset_transport_header(skb);
skb_push(skb, sizeof(*iph) + AMT_IPHDR_OPTS);
- spin_lock_bh(&amt->lock);
- amt->ready4 = true;
+ WRITE_ONCE(amt->ready4, true);
amt->mac = amtmq->response_mac;
amt->req_cnt = 0;
amt->qi = ihv3->qqic;
- spin_unlock_bh(&amt->lock);
skb->protocol = htons(ETH_P_IP);
eth->h_proto = htons(ETH_P_IP);
ip_eth_mc_map(iph->daddr, eth->h_dest);
#if IS_ENABLED(CONFIG_IPV6)
} else if (iph->version == 6) {
- struct ipv6hdr *ip6h = ipv6_hdr(skb);
struct mld2_query *mld2q;
+ struct ipv6hdr *ip6h;
- if (!ipv6_addr_is_multicast(&ip6h->daddr))
+ if (READ_ONCE(amt->ready6))
return true;
+
if (!pskb_may_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS +
sizeof(*mld2q)))
return true;
+ ip6h = ipv6_hdr(skb);
+ if (!ipv6_addr_is_multicast(&ip6h->daddr))
+ return true;
+
mld2q = skb_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS);
skb_reset_transport_header(skb);
skb_push(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS);
- spin_lock_bh(&amt->lock);
- amt->ready6 = true;
+ WRITE_ONCE(amt->ready6, true);
amt->mac = amtmq->response_mac;
amt->req_cnt = 0;
amt->qi = mld2q->mld2q_qqic;
- spin_unlock_bh(&amt->lock);
skb->protocol = htons(ETH_P_IPV6);
eth->h_proto = htons(ETH_P_IPV6);
ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
@@ -2375,12 +2431,14 @@ static bool amt_membership_query_handler(struct amt_dev *amt,
skb->pkt_type = PACKET_MULTICAST;
skb->ip_summed = CHECKSUM_NONE;
len = skb->len;
+ local_bh_disable();
if (__netif_rx(skb) == NET_RX_SUCCESS) {
amt_update_gw_status(amt, AMT_STATUS_RECEIVED_QUERY, true);
dev_sw_netstats_rx_add(amt->dev, len);
} else {
amt->dev->stats.rx_dropped++;
}
+ local_bh_enable();
return false;
}
@@ -2389,23 +2447,23 @@ static bool amt_update_handler(struct amt_dev *amt, struct sk_buff *skb)
{
struct amt_header_membership_update *amtmu;
struct amt_tunnel_list *tunnel;
- struct udphdr *udph;
struct ethhdr *eth;
struct iphdr *iph;
- int len;
+ int len, hdr_size;
iph = ip_hdr(skb);
- udph = udp_hdr(skb);
- if (__iptunnel_pull_header(skb, sizeof(*udph), skb->protocol,
- false, false))
+ hdr_size = sizeof(*amtmu) + sizeof(struct udphdr);
+ if (!pskb_may_pull(skb, hdr_size))
return true;
- amtmu = (struct amt_header_membership_update *)skb->data;
+ amtmu = (struct amt_header_membership_update *)(udp_hdr(skb) + 1);
if (amtmu->reserved || amtmu->version)
return true;
- skb_pull(skb, sizeof(*amtmu));
+ if (iptunnel_pull_header(skb, hdr_size, skb->protocol, false))
+ return true;
+
skb_reset_network_header(skb);
list_for_each_entry_rcu(tunnel, &amt->tunnel_list, list) {
@@ -2426,6 +2484,9 @@ static bool amt_update_handler(struct amt_dev *amt, struct sk_buff *skb)
return true;
report:
+ if (!pskb_may_pull(skb, sizeof(*iph)))
+ return true;
+
iph = ip_hdr(skb);
if (iph->version == 4) {
if (ip_mc_check_igmp(skb)) {
@@ -2618,7 +2679,9 @@ static bool amt_request_handler(struct amt_dev *amt, struct sk_buff *skb)
if (tunnel->ip4 == iph->saddr)
goto send;
+ spin_lock_bh(&amt->lock);
if (amt->nr_tunnels >= amt->max_tunnels) {
+ spin_unlock_bh(&amt->lock);
icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
return true;
}
@@ -2626,8 +2689,10 @@ static bool amt_request_handler(struct amt_dev *amt, struct sk_buff *skb)
tunnel = kzalloc(sizeof(*tunnel) +
(sizeof(struct hlist_head) * amt->hash_buckets),
GFP_ATOMIC);
- if (!tunnel)
+ if (!tunnel) {
+ spin_unlock_bh(&amt->lock);
return true;
+ }
tunnel->source_port = udph->source;
tunnel->ip4 = iph->saddr;
@@ -2640,10 +2705,9 @@ static bool amt_request_handler(struct amt_dev *amt, struct sk_buff *skb)
INIT_DELAYED_WORK(&tunnel->gc_wq, amt_tunnel_expire);
- spin_lock_bh(&amt->lock);
list_add_tail_rcu(&tunnel->list, &amt->tunnel_list);
tunnel->key = amt->key;
- amt_update_relay_status(tunnel, AMT_STATUS_RECEIVED_REQUEST, true);
+ __amt_update_relay_status(tunnel, AMT_STATUS_RECEIVED_REQUEST, true);
amt->nr_tunnels++;
mod_delayed_work(amt_wq, &tunnel->gc_wq,
msecs_to_jiffies(amt_gmi(amt)));
@@ -2668,6 +2732,38 @@ send:
return false;
}
+static void amt_gw_rcv(struct amt_dev *amt, struct sk_buff *skb)
+{
+ int type = amt_parse_type(skb);
+ int err = 1;
+
+ if (type == -1)
+ goto drop;
+
+ if (amt->mode == AMT_MODE_GATEWAY) {
+ switch (type) {
+ case AMT_MSG_ADVERTISEMENT:
+ err = amt_advertisement_handler(amt, skb);
+ break;
+ case AMT_MSG_MEMBERSHIP_QUERY:
+ err = amt_membership_query_handler(amt, skb);
+ if (!err)
+ return;
+ break;
+ default:
+ netdev_dbg(amt->dev, "Invalid type of Gateway\n");
+ break;
+ }
+ }
+drop:
+ if (err) {
+ amt->dev->stats.rx_dropped++;
+ kfree_skb(skb);
+ } else {
+ consume_skb(skb);
+ }
+}
+
static int amt_rcv(struct sock *sk, struct sk_buff *skb)
{
struct amt_dev *amt;
@@ -2679,7 +2775,8 @@ static int amt_rcv(struct sock *sk, struct sk_buff *skb)
amt = rcu_dereference_sk_user_data(sk);
if (!amt) {
err = true;
- goto drop;
+ kfree_skb(skb);
+ goto out;
}
skb->dev = amt->dev;
@@ -2698,8 +2795,12 @@ static int amt_rcv(struct sock *sk, struct sk_buff *skb)
err = true;
goto drop;
}
- err = amt_advertisement_handler(amt, skb);
- break;
+ if (amt_queue_event(amt, AMT_EVENT_RECEIVE, skb)) {
+ netdev_dbg(amt->dev, "AMT Event queue full\n");
+ err = true;
+ goto drop;
+ }
+ goto out;
case AMT_MSG_MULTICAST_DATA:
if (iph->saddr != amt->remote_ip) {
netdev_dbg(amt->dev, "Invalid Relay IP\n");
@@ -2717,11 +2818,12 @@ static int amt_rcv(struct sock *sk, struct sk_buff *skb)
err = true;
goto drop;
}
- err = amt_membership_query_handler(amt, skb);
- if (err)
+ if (amt_queue_event(amt, AMT_EVENT_RECEIVE, skb)) {
+ netdev_dbg(amt->dev, "AMT Event queue full\n");
+ err = true;
goto drop;
- else
- goto out;
+ }
+ goto out;
default:
err = true;
netdev_dbg(amt->dev, "Invalid type of Gateway\n");
@@ -2759,6 +2861,46 @@ out:
return 0;
}
+static void amt_event_work(struct work_struct *work)
+{
+ struct amt_dev *amt = container_of(work, struct amt_dev, event_wq);
+ struct sk_buff *skb;
+ u8 event;
+ int i;
+
+ for (i = 0; i < AMT_MAX_EVENTS; i++) {
+ spin_lock_bh(&amt->lock);
+ if (amt->nr_events == 0) {
+ spin_unlock_bh(&amt->lock);
+ return;
+ }
+ event = amt->events[amt->event_idx].event;
+ skb = amt->events[amt->event_idx].skb;
+ amt->events[amt->event_idx].event = AMT_EVENT_NONE;
+ amt->events[amt->event_idx].skb = NULL;
+ amt->nr_events--;
+ amt->event_idx++;
+ amt->event_idx %= AMT_MAX_EVENTS;
+ spin_unlock_bh(&amt->lock);
+
+ switch (event) {
+ case AMT_EVENT_RECEIVE:
+ amt_gw_rcv(amt, skb);
+ break;
+ case AMT_EVENT_SEND_DISCOVERY:
+ amt_event_send_discovery(amt);
+ break;
+ case AMT_EVENT_SEND_REQUEST:
+ amt_event_send_request(amt);
+ break;
+ default:
+ if (skb)
+ kfree_skb(skb);
+ break;
+ }
+ }
+}
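The worker is the ring's single consumer: it pops under the lock, handles the event with the lock dropped, and is bounded to one ring's worth of work per invocation. Continuing the ring sketch above, assuming struct ring also embeds a struct work_struct named work; handle_event() is a hypothetical dispatcher:

static void handle_event(void *item);	/* hypothetical */

static void ring_work(struct work_struct *work)
{
	struct ring *r = container_of(work, struct ring, work);
	int i;

	for (i = 0; i < RING_SIZE; i++) {
		void *item;

		spin_lock_bh(&r->lock);
		if (!r->len) {
			spin_unlock_bh(&r->lock);
			return;
		}
		item = r->slot[r->head];
		r->head = (r->head + 1) % RING_SIZE;
		r->len--;
		spin_unlock_bh(&r->lock);

		handle_event(item);	/* never under the lock */
	}
}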
+
static int amt_err_lookup(struct sock *sk, struct sk_buff *skb)
{
struct amt_dev *amt;
@@ -2783,7 +2925,7 @@ static int amt_err_lookup(struct sock *sk, struct sk_buff *skb)
break;
case AMT_MSG_REQUEST:
case AMT_MSG_MEMBERSHIP_UPDATE:
- if (amt->status >= AMT_STATUS_RECEIVED_ADVERTISEMENT)
+ if (READ_ONCE(amt->status) >= AMT_STATUS_RECEIVED_ADVERTISEMENT)
mod_delayed_work(amt_wq, &amt->req_wq, 0);
break;
default:
@@ -2846,6 +2988,8 @@ static int amt_dev_open(struct net_device *dev)
amt->ready4 = false;
amt->ready6 = false;
+ amt->event_idx = 0;
+ amt->nr_events = 0;
err = amt_socket_create(amt);
if (err)
@@ -2853,6 +2997,7 @@ static int amt_dev_open(struct net_device *dev)
amt->req_cnt = 0;
amt->remote_ip = 0;
+ amt->nonce = 0;
get_random_bytes(&amt->key, sizeof(siphash_key_t));
amt->status = AMT_STATUS_INIT;
@@ -2871,6 +3016,8 @@ static int amt_dev_stop(struct net_device *dev)
struct amt_dev *amt = netdev_priv(dev);
struct amt_tunnel_list *tunnel, *tmp;
struct socket *sock;
+ struct sk_buff *skb;
+ int i;
cancel_delayed_work_sync(&amt->req_wq);
cancel_delayed_work_sync(&amt->discovery_wq);
@@ -2883,6 +3030,15 @@ static int amt_dev_stop(struct net_device *dev)
if (sock)
udp_tunnel_sock_release(sock);
+ cancel_work_sync(&amt->event_wq);
+ for (i = 0; i < AMT_MAX_EVENTS; i++) {
+ skb = amt->events[i].skb;
+ if (skb)
+ kfree_skb(skb);
+ amt->events[i].event = AMT_EVENT_NONE;
+ amt->events[i].skb = NULL;
+ }
+
amt->ready4 = false;
amt->ready6 = false;
amt->req_cnt = 0;
@@ -3074,7 +3230,7 @@ static int amt_newlink(struct net *net, struct net_device *dev,
goto err;
}
if (amt->mode == AMT_MODE_RELAY) {
- amt->qrv = amt->net->ipv4.sysctl_igmp_qrv;
+ amt->qrv = READ_ONCE(amt->net->ipv4.sysctl_igmp_qrv);
amt->qri = 10;
dev->needed_headroom = amt->stream_dev->needed_headroom +
AMT_RELAY_HLEN;
@@ -3125,8 +3281,8 @@ static int amt_newlink(struct net *net, struct net_device *dev,
INIT_DELAYED_WORK(&amt->discovery_wq, amt_discovery_work);
INIT_DELAYED_WORK(&amt->req_wq, amt_req_work);
INIT_DELAYED_WORK(&amt->secret_wq, amt_secret_work);
+ INIT_WORK(&amt->event_wq, amt_event_work);
INIT_LIST_HEAD(&amt->tunnel_list);
-
return 0;
err:
dev_put(amt->stream_dev);
@@ -3259,7 +3415,7 @@ static int __init amt_init(void)
if (err < 0)
goto unregister_notifier;
- amt_wq = alloc_workqueue("amt", WQ_UNBOUND, 1);
+ amt_wq = alloc_workqueue("amt", WQ_UNBOUND, 0);
if (!amt_wq) {
err = -ENOMEM;
goto rtnl_unregister;
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index a86b1f71762e..d7fb33c078e8 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2228,7 +2228,8 @@ void bond_3ad_unbind_slave(struct slave *slave)
temp_aggregator->num_of_ports--;
if (__agg_active_ports(temp_aggregator) == 0) {
select_new_active_agg = temp_aggregator->is_active;
- ad_clear_agg(temp_aggregator);
+ if (temp_aggregator->num_of_ports == 0)
+ ad_clear_agg(temp_aggregator);
if (select_new_active_agg) {
slave_info(bond->dev, slave->dev, "Removing an active aggregator\n");
/* select new active aggregator */
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 303c8d32d451..007d43e46dcb 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -1302,12 +1302,12 @@ int bond_alb_initialize(struct bonding *bond, int rlb_enabled)
return res;
if (rlb_enabled) {
- bond->alb_info.rlb_enabled = 1;
res = rlb_initialize(bond);
if (res) {
tlb_deinitialize(bond);
return res;
}
+ bond->alb_info.rlb_enabled = 1;
} else {
bond->alb_info.rlb_enabled = 0;
}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index f85372adf042..6ba4c83fe5fc 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3684,9 +3684,11 @@ re_arm:
if (!rtnl_trylock())
return;
- if (should_notify_peers)
+ if (should_notify_peers) {
+ bond->send_peer_notif--;
call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
bond->dev);
+ }
if (should_notify_rtnl) {
bond_slave_state_notify(bond);
bond_slave_link_notify(bond);
diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c
index 5458f57177a0..0b0f234b0b50 100644
--- a/drivers/net/caif/caif_virtio.c
+++ b/drivers/net/caif/caif_virtio.c
@@ -722,13 +722,21 @@ static int cfv_probe(struct virtio_device *vdev)
/* Carrier is off until netdevice is opened */
netif_carrier_off(netdev);
+ /* serialize netdev register + virtio_device_ready() with ndo_open() */
+ rtnl_lock();
+
/* register Netdev */
- err = register_netdev(netdev);
+ err = register_netdevice(netdev);
if (err) {
+ rtnl_unlock();
dev_err(&vdev->dev, "Unable to register netdev (%d)\n", err);
goto err;
}
+ virtio_device_ready(vdev);
+
+ rtnl_unlock();
+
debugfs_init(cfv);
return 0;
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index 76df4807d366..4c47c1055eff 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -1646,7 +1646,6 @@ static int grcan_probe(struct platform_device *ofdev)
*/
sysid_parent = of_find_node_by_path("/ambapp0");
if (sysid_parent) {
- of_node_get(sysid_parent);
err = of_property_read_u32(sysid_parent, "systemid", &sysid);
if (!err && ((sysid & GRLIB_VERSION_MASK) >=
GRCAN_TXBUG_SAFE_GRLIB_VERSION))
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 5d0c82d8b9a9..7931f9c71ef3 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -529,7 +529,7 @@ static int m_can_read_fifo(struct net_device *dev, u32 rxfs)
/* acknowledge rx fifo 0 */
m_can_write(cdev, M_CAN_RXF0A, fgi);
- timestamp = FIELD_GET(RX_BUF_RXTS_MASK, fifo_header.dlc);
+ timestamp = FIELD_GET(RX_BUF_RXTS_MASK, fifo_header.dlc) << 16;
m_can_receive_skb(cdev, skb, timestamp);
@@ -1030,7 +1030,7 @@ static int m_can_echo_tx_event(struct net_device *dev)
}
msg_mark = FIELD_GET(TX_EVENT_MM_MASK, txe);
- timestamp = FIELD_GET(TX_EVENT_TXTS_MASK, txe);
+ timestamp = FIELD_GET(TX_EVENT_TXTS_MASK, txe) << 16;
/* ack txe element */
m_can_write(cdev, M_CAN_TXEFA, FIELD_PREP(TXEFA_EFAI_MASK,
@@ -1351,7 +1351,9 @@ static void m_can_chip_config(struct net_device *dev)
/* enable internal timestamp generation, with a prescaler of 16. The
* prescaler is applied to the nominal bit timing
*/
- m_can_write(cdev, M_CAN_TSCC, FIELD_PREP(TSCC_TCP_MASK, 0xf));
+ m_can_write(cdev, M_CAN_TSCC,
+ FIELD_PREP(TSCC_TCP_MASK, 0xf) |
+ FIELD_PREP(TSCC_TSS_MASK, TSCC_TSS_INTERNAL));
m_can_config_endisable(cdev, false);
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index 40a11445d021..cb0321ea853c 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -1332,7 +1332,10 @@ static void rcar_canfd_set_bittiming(struct net_device *dev)
cfg = (RCANFD_DCFG_DTSEG1(gpriv, tseg1) | RCANFD_DCFG_DBRP(brp) |
RCANFD_DCFG_DSJW(sjw) | RCANFD_DCFG_DTSEG2(gpriv, tseg2));
- rcar_canfd_write(priv->base, RCANFD_F_DCFG(ch), cfg);
+ if (is_v3u(gpriv))
+ rcar_canfd_write(priv->base, RCANFD_V3U_DCFG(ch), cfg);
+ else
+ rcar_canfd_write(priv->base, RCANFD_F_DCFG(ch), cfg);
netdev_dbg(priv->ndev, "drate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n",
brp, sjw, tseg1, tseg2);
} else {
@@ -1840,6 +1843,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
of_child = of_get_child_by_name(pdev->dev.of_node, name);
if (of_child && of_device_is_available(of_child))
channels_mask |= BIT(i);
+ of_node_put(of_child);
}
if (chip_id != RENESAS_RZG2L) {
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
index b21252390216..bc6518504fd4 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
@@ -12,6 +12,7 @@
// Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
//
+#include <asm/unaligned.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/device.h>
@@ -1650,6 +1651,7 @@ static int mcp251xfd_stop(struct net_device *ndev)
netif_stop_queue(ndev);
set_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
hrtimer_cancel(&priv->rx_irq_timer);
+ hrtimer_cancel(&priv->tx_irq_timer);
mcp251xfd_chip_interrupts_disable(priv);
free_irq(ndev->irq, priv);
can_rx_offload_disable(&priv->offload);
@@ -1688,8 +1690,8 @@ static int mcp251xfd_register_chip_detect(struct mcp251xfd_priv *priv)
u32 osc;
int err;
- /* The OSC_LPMEN is only supported on MCP2518FD, so use it to
- * autodetect the model.
+ /* The OSC_LPMEN is only supported on MCP2518FD and MCP251863,
+ * so use it to autodetect the model.
*/
err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_OSC,
MCP251XFD_REG_OSC_LPMEN,
@@ -1701,10 +1703,18 @@ static int mcp251xfd_register_chip_detect(struct mcp251xfd_priv *priv)
if (err)
return err;
- if (osc & MCP251XFD_REG_OSC_LPMEN)
- devtype_data = &mcp251xfd_devtype_data_mcp2518fd;
- else
+ if (osc & MCP251XFD_REG_OSC_LPMEN) {
+ /* We cannot distinguish between MCP2518FD and
+ * MCP251863. If firmware specifies MCP251863, keep
+ * it, otherwise set to MCP2518FD.
+ */
+ if (mcp251xfd_is_251863(priv))
+ devtype_data = &mcp251xfd_devtype_data_mcp251863;
+ else
+ devtype_data = &mcp251xfd_devtype_data_mcp2518fd;
+ } else {
devtype_data = &mcp251xfd_devtype_data_mcp2517fd;
+ }
if (!mcp251xfd_is_251XFD(priv) &&
priv->devtype_data.model != devtype_data->model) {
@@ -1777,7 +1787,7 @@ mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv, u32 *dev_id,
xfer[0].len = sizeof(buf_tx->cmd);
xfer[0].speed_hz = priv->spi_max_speed_hz_slow;
xfer[1].rx_buf = buf_rx->data;
- xfer[1].len = sizeof(dev_id);
+ xfer[1].len = sizeof(*dev_id);
xfer[1].speed_hz = priv->spi_max_speed_hz_fast;
mcp251xfd_spi_cmd_read_nocrc(&buf_tx->cmd, MCP251XFD_REG_DEVID);
@@ -1786,7 +1796,7 @@ mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv, u32 *dev_id,
if (err)
goto out_kfree_buf_tx;
- *dev_id = be32_to_cpup((__be32 *)buf_rx->data);
+ *dev_id = get_unaligned_le32(buf_rx->data);
*effective_speed_hz_slow = xfer[0].effective_speed_hz;
*effective_speed_hz_fast = xfer[1].effective_speed_hz;
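Two classic bugs are fixed in this register read: sizeof(dev_id) measured the pointer (8 bytes on 64-bit), not the 4-byte value it points to, and the DEVID bytes arrive little-endian at arbitrary alignment, so get_unaligned_le32() is the right accessor while be32_to_cpup() assumed both aligned and big-endian data. In isolation:

#include <asm/unaligned.h>

/* Read a 4-byte little-endian field from a raw byte buffer. */
static u32 devid_from_rx(const u8 *buf)
{
	return get_unaligned_le32(buf);
}

/* And for the transfer length, size the pointee, not the pointer:
 *	xfer.len = sizeof(*dev_id);	-- 4 bytes
 * not:
 *	xfer.len = sizeof(dev_id);	-- sizeof(u32 *)
 */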
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
index 217510c12af5..92b7bc7f14b9 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
@@ -334,19 +334,21 @@ mcp251xfd_regmap_crc_read(void *context,
* register. It increments once per SYS clock tick,
* which is 20 or 40 MHz.
*
- * Observation shows that if the lowest byte (which is
- * transferred first on the SPI bus) of that register
- * is 0x00 or 0x80 the calculated CRC doesn't always
- * match the transferred one.
+ * Observation on the mcp2518fd shows that if the
+ * lowest byte (which is transferred first on the SPI
+ * bus) of that register is 0x00 or 0x80 the
+ * calculated CRC doesn't always match the transferred
+ * one. On the mcp2517fd this problem is not limited
+ * to the first byte being 0x00 or 0x80.
*
* If the highest bit in the lowest byte is flipped
* the transferred CRC matches the calculated one. We
- * assume for now the CRC calculation in the chip
- * works on wrong data and the transferred data is
- * correct.
+ * assume for now the CRC operates on the correct
+ * data.
*/
if (reg == MCP251XFD_REG_TBC &&
- (buf_rx->data[0] == 0x0 || buf_rx->data[0] == 0x80)) {
+ ((buf_rx->data[0] & 0xf8) == 0x0 ||
+ (buf_rx->data[0] & 0xf8) == 0x80)) {
/* Flip highest bit in lowest byte of le32 */
buf_rx->data[0] ^= 0x80;
@@ -356,10 +358,8 @@ mcp251xfd_regmap_crc_read(void *context,
val_len);
if (!err) {
/* If CRC is now correct, assume
- * transferred data was OK, flip bit
- * back to original value.
+ * flipped data is OK.
*/
- buf_rx->data[0] ^= 0x80;
goto out;
}
}
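/*
 * Editor's aside -- an illustrative sketch of the widened trigger
 * above, not part of the patch. Masking with 0xf8 keeps the upper five
 * bits and ignores the low three, so the workaround now fires for
 * 0x00..0x07 and 0x80..0x87 instead of only the exact values 0x00 and
 * 0x80.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool tbc_quirk_old(uint8_t b)
{
	return b == 0x00 || b == 0x80;
}

static bool tbc_quirk_new(uint8_t b)
{
	return (b & 0xf8) == 0x00 || (b & 0xf8) == 0x80;
}

int main(void)
{
	unsigned int b, old_hits = 0, new_hits = 0;

	for (b = 0; b <= 0xff; b++) {
		old_hits += tbc_quirk_old(b);
		new_hits += tbc_quirk_new(b);
	}
	printf("old: %u byte values, new: %u\n", old_hits, new_hits); /* 2 vs 16 */
	return 0;
}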
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index b29ba9138866..d3a658b444b5 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -268,6 +268,8 @@ struct gs_can {
struct usb_anchor tx_submitted;
atomic_t active_tx_urbs;
+ void *rxbuf[GS_MAX_RX_URBS];
+ dma_addr_t rxbuf_dma[GS_MAX_RX_URBS];
};
/* usb interface struct */
@@ -742,6 +744,7 @@ static int gs_can_open(struct net_device *netdev)
for (i = 0; i < GS_MAX_RX_URBS; i++) {
struct urb *urb;
u8 *buf;
+ dma_addr_t buf_dma;
/* alloc rx urb */
urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -752,7 +755,7 @@ static int gs_can_open(struct net_device *netdev)
buf = usb_alloc_coherent(dev->udev,
dev->parent->hf_size_rx,
GFP_KERNEL,
- &urb->transfer_dma);
+ &buf_dma);
if (!buf) {
netdev_err(netdev,
"No memory left for USB buffer\n");
@@ -760,6 +763,8 @@ static int gs_can_open(struct net_device *netdev)
return -ENOMEM;
}
+ urb->transfer_dma = buf_dma;
+
/* fill, anchor, and submit rx urb */
usb_fill_bulk_urb(urb,
dev->udev,
@@ -781,10 +786,17 @@ static int gs_can_open(struct net_device *netdev)
"usb_submit failed (err=%d)\n", rc);
usb_unanchor_urb(urb);
+ usb_free_coherent(dev->udev,
+ sizeof(struct gs_host_frame),
+ buf,
+ buf_dma);
usb_free_urb(urb);
break;
}
+ dev->rxbuf[i] = buf;
+ dev->rxbuf_dma[i] = buf_dma;
+
/* Drop reference,
* USB core will take care of freeing it
*/
@@ -842,13 +854,20 @@ static int gs_can_close(struct net_device *netdev)
int rc;
struct gs_can *dev = netdev_priv(netdev);
struct gs_usb *parent = dev->parent;
+ unsigned int i;
netif_stop_queue(netdev);
/* Stop polling */
parent->active_channels--;
- if (!parent->active_channels)
+ if (!parent->active_channels) {
usb_kill_anchored_urbs(&parent->rx_submitted);
+ for (i = 0; i < GS_MAX_RX_URBS; i++)
+ usb_free_coherent(dev->udev,
+ sizeof(struct gs_host_frame),
+ dev->rxbuf[i],
+ dev->rxbuf_dma[i]);
+ }
/* Stop sending URBs */
usb_kill_anchored_urbs(&dev->tx_submitted);
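/*
 * Editor's aside -- a hedged sketch of the pattern the gs_usb hunks
 * above implement; my_dev, MY_NUM_RX_URBS and the helper names are
 * hypothetical, and this is not a drop-in implementation.
 * usb_kill_anchored_urbs() stops the URBs but does not release buffers
 * obtained with usb_alloc_coherent(), so the driver must remember each
 * (cpu address, dma handle) pair at open time and hand both back to
 * usb_free_coherent() at close time.
 */
#include <linux/usb.h>

#define MY_NUM_RX_URBS 30

struct my_dev {
	struct usb_device *udev;
	void *rxbuf[MY_NUM_RX_URBS];
	dma_addr_t rxbuf_dma[MY_NUM_RX_URBS];
};

static int my_open_one_rx(struct my_dev *dev, size_t buf_size, int i)
{
	dma_addr_t buf_dma;
	void *buf;

	buf = usb_alloc_coherent(dev->udev, buf_size, GFP_KERNEL, &buf_dma);
	if (!buf)
		return -ENOMEM;

	/* remember the pair so close() can free it */
	dev->rxbuf[i] = buf;
	dev->rxbuf_dma[i] = buf_dma;
	return 0;
}

static void my_close_rx(struct my_dev *dev, size_t buf_size)
{
	unsigned int i;

	/* only after every RX URB has been killed */
	for (i = 0; i < MY_NUM_RX_URBS; i++)
		usb_free_coherent(dev->udev, buf_size,
				  dev->rxbuf[i], dev->rxbuf_dma[i]);
}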
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
index 3a49257f9fa6..eefcbe3aadce 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
@@ -35,9 +35,10 @@
#define KVASER_USB_RX_BUFFER_SIZE 3072
#define KVASER_USB_MAX_NET_DEVICES 5
-/* USB devices features */
-#define KVASER_USB_HAS_SILENT_MODE BIT(0)
-#define KVASER_USB_HAS_TXRX_ERRORS BIT(1)
+/* Kvaser USB device quirks */
+#define KVASER_USB_QUIRK_HAS_SILENT_MODE BIT(0)
+#define KVASER_USB_QUIRK_HAS_TXRX_ERRORS BIT(1)
+#define KVASER_USB_QUIRK_IGNORE_CLK_FREQ BIT(2)
/* Device capabilities */
#define KVASER_USB_CAP_BERR_CAP 0x01
@@ -65,12 +66,7 @@ struct kvaser_usb_dev_card_data_hydra {
struct kvaser_usb_dev_card_data {
u32 ctrlmode_supported;
u32 capabilities;
- union {
- struct {
- enum kvaser_usb_leaf_family family;
- } leaf;
- struct kvaser_usb_dev_card_data_hydra hydra;
- };
+ struct kvaser_usb_dev_card_data_hydra hydra;
};
/* Context for an outstanding, not yet ACKed, transmission */
@@ -83,7 +79,7 @@ struct kvaser_usb {
struct usb_device *udev;
struct usb_interface *intf;
struct kvaser_usb_net_priv *nets[KVASER_USB_MAX_NET_DEVICES];
- const struct kvaser_usb_dev_ops *ops;
+ const struct kvaser_usb_driver_info *driver_info;
const struct kvaser_usb_dev_cfg *cfg;
struct usb_endpoint_descriptor *bulk_in, *bulk_out;
@@ -165,6 +161,12 @@ struct kvaser_usb_dev_ops {
u16 transid);
};
+struct kvaser_usb_driver_info {
+ u32 quirks;
+ enum kvaser_usb_leaf_family family;
+ const struct kvaser_usb_dev_ops *ops;
+};
+
struct kvaser_usb_dev_cfg {
const struct can_clock clock;
const unsigned int timestamp_freq;
@@ -184,4 +186,7 @@ int kvaser_usb_send_cmd_async(struct kvaser_usb_net_priv *priv, void *cmd,
int len);
int kvaser_usb_can_rx_over_error(struct net_device *netdev);
+
+extern const struct can_bittiming_const kvaser_usb_flexc_bittiming_const;
+
#endif /* KVASER_USB_H */
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
index e67658b53d02..f211bfcb1d97 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -61,8 +61,6 @@
#define USB_USBCAN_R_V2_PRODUCT_ID 294
#define USB_LEAF_LIGHT_R_V2_PRODUCT_ID 295
#define USB_LEAF_LIGHT_HS_V2_OEM2_PRODUCT_ID 296
-#define USB_LEAF_PRODUCT_ID_END \
- USB_LEAF_LIGHT_HS_V2_OEM2_PRODUCT_ID
/* Kvaser USBCan-II devices product ids */
#define USB_USBCAN_REVB_PRODUCT_ID 2
@@ -89,116 +87,153 @@
#define USB_USBCAN_PRO_4HS_PRODUCT_ID 276
#define USB_HYBRID_CANLIN_PRODUCT_ID 277
#define USB_HYBRID_PRO_CANLIN_PRODUCT_ID 278
-#define USB_HYDRA_PRODUCT_ID_END \
- USB_HYBRID_PRO_CANLIN_PRODUCT_ID
-static inline bool kvaser_is_leaf(const struct usb_device_id *id)
-{
- return (id->idProduct >= USB_LEAF_DEVEL_PRODUCT_ID &&
- id->idProduct <= USB_CAN_R_PRODUCT_ID) ||
- (id->idProduct >= USB_LEAF_LITE_V2_PRODUCT_ID &&
- id->idProduct <= USB_LEAF_PRODUCT_ID_END);
-}
+static const struct kvaser_usb_driver_info kvaser_usb_driver_info_hydra = {
+ .quirks = 0,
+ .ops = &kvaser_usb_hydra_dev_ops,
+};
-static inline bool kvaser_is_usbcan(const struct usb_device_id *id)
-{
- return id->idProduct >= USB_USBCAN_REVB_PRODUCT_ID &&
- id->idProduct <= USB_MEMORATOR_PRODUCT_ID;
-}
+static const struct kvaser_usb_driver_info kvaser_usb_driver_info_usbcan = {
+ .quirks = KVASER_USB_QUIRK_HAS_TXRX_ERRORS |
+ KVASER_USB_QUIRK_HAS_SILENT_MODE,
+ .family = KVASER_USBCAN,
+ .ops = &kvaser_usb_leaf_dev_ops,
+};
-static inline bool kvaser_is_hydra(const struct usb_device_id *id)
-{
- return id->idProduct >= USB_BLACKBIRD_V2_PRODUCT_ID &&
- id->idProduct <= USB_HYDRA_PRODUCT_ID_END;
-}
+static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leaf = {
+ .quirks = KVASER_USB_QUIRK_IGNORE_CLK_FREQ,
+ .family = KVASER_LEAF,
+ .ops = &kvaser_usb_leaf_dev_ops,
+};
+
+static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leaf_err = {
+ .quirks = KVASER_USB_QUIRK_HAS_TXRX_ERRORS |
+ KVASER_USB_QUIRK_IGNORE_CLK_FREQ,
+ .family = KVASER_LEAF,
+ .ops = &kvaser_usb_leaf_dev_ops,
+};
+
+static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leaf_err_listen = {
+ .quirks = KVASER_USB_QUIRK_HAS_TXRX_ERRORS |
+ KVASER_USB_QUIRK_HAS_SILENT_MODE |
+ KVASER_USB_QUIRK_IGNORE_CLK_FREQ,
+ .family = KVASER_LEAF,
+ .ops = &kvaser_usb_leaf_dev_ops,
+};
+
+static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leafimx = {
+ .quirks = 0,
+ .ops = &kvaser_usb_leaf_dev_ops,
+};
static const struct usb_device_id kvaser_usb_table[] = {
- /* Leaf USB product IDs */
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_DEVEL_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_PRODUCT_ID) },
+ /* Leaf M32C USB product IDs */
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_DEVEL_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LS_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_SWC_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LIN_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_LS_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_SWC_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_DEVEL_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSHS_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_UPRO_HSHS_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_GI_PRODUCT_ID) },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_GI_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_OBDII_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSLS_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_CH_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_SPRO_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_MERCURY_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_LEAF_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_CAN_R_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_2HS_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_2HS_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_R_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_R_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM2_PRODUCT_ID) },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err },
+
+ /* Leaf i.MX28 USB product IDs */
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_2HS_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_2HS_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_R_V2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_R_V2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
/* USBCANII USB product IDs */
{ USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN2_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_usbcan },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_REVB_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_usbcan },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_MEMORATOR_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_usbcan },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_VCI2_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_usbcan },
/* Minihydra USB product IDs */
- { USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_5HS_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_5HS_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_4HS_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_HS_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_2HS_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_2HS_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_2HS_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_2CANLIN_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_USBCAN_PRO_2HS_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_MEMO_PRO_2HS_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_2CANLIN_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_U100_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_U100P_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_U100S_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_4HS_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_CANLIN_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_CANLIN_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_V2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_5HS_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_5HS_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_4HS_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_HS_V2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_2HS_V2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_2HS_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_2HS_V2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_2CANLIN_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_USBCAN_PRO_2HS_V2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_MEMO_PRO_2HS_V2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_2CANLIN_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_U100_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_U100P_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_U100S_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_4HS_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_CANLIN_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_CANLIN_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
{ }
};
MODULE_DEVICE_TABLE(usb, kvaser_usb_table);
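/*
 * Editor's aside -- the lookup idiom the table above relies on, in
 * minimal form with hypothetical names (my_info, my_probe); not part
 * of the patch. usb_device_id.driver_info is a kernel_ulong_t, wide
 * enough to carry a pointer, so each table entry can reference a
 * per-device struct that probe() recovers with a cast -- replacing the
 * old product-ID range checks.
 */
#include <linux/usb.h>

struct my_info {
	u32 quirks;
};

static int my_probe(struct usb_interface *intf,
		    const struct usb_device_id *id)
{
	const struct my_info *info;

	info = (const struct my_info *)id->driver_info;
	if (!info)			/* entry carries no info: reject */
		return -ENODEV;

	/* ... apply info->quirks ... */
	return 0;
}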
@@ -285,6 +320,7 @@ int kvaser_usb_can_rx_over_error(struct net_device *netdev)
static void kvaser_usb_read_bulk_callback(struct urb *urb)
{
struct kvaser_usb *dev = urb->context;
+ const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
int err;
unsigned int i;
@@ -301,8 +337,8 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
goto resubmit_urb;
}
- dev->ops->dev_read_bulk_callback(dev, urb->transfer_buffer,
- urb->actual_length);
+ ops->dev_read_bulk_callback(dev, urb->transfer_buffer,
+ urb->actual_length);
resubmit_urb:
usb_fill_bulk_urb(urb, dev->udev,
@@ -396,6 +432,7 @@ static int kvaser_usb_open(struct net_device *netdev)
{
struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
struct kvaser_usb *dev = priv->dev;
+ const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
int err;
err = open_candev(netdev);
@@ -406,11 +443,11 @@ static int kvaser_usb_open(struct net_device *netdev)
if (err)
goto error;
- err = dev->ops->dev_set_opt_mode(priv);
+ err = ops->dev_set_opt_mode(priv);
if (err)
goto error;
- err = dev->ops->dev_start_chip(priv);
+ err = ops->dev_start_chip(priv);
if (err) {
netdev_warn(netdev, "Cannot start device, error %d\n", err);
goto error;
@@ -467,22 +504,23 @@ static int kvaser_usb_close(struct net_device *netdev)
{
struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
struct kvaser_usb *dev = priv->dev;
+ const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
int err;
netif_stop_queue(netdev);
- err = dev->ops->dev_flush_queue(priv);
+ err = ops->dev_flush_queue(priv);
if (err)
netdev_warn(netdev, "Cannot flush queue, error %d\n", err);
- if (dev->ops->dev_reset_chip) {
- err = dev->ops->dev_reset_chip(dev, priv->channel);
+ if (ops->dev_reset_chip) {
+ err = ops->dev_reset_chip(dev, priv->channel);
if (err)
netdev_warn(netdev, "Cannot reset card, error %d\n",
err);
}
- err = dev->ops->dev_stop_chip(priv);
+ err = ops->dev_stop_chip(priv);
if (err)
netdev_warn(netdev, "Cannot stop device, error %d\n", err);
@@ -521,6 +559,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
{
struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
struct kvaser_usb *dev = priv->dev;
+ const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
struct net_device_stats *stats = &netdev->stats;
struct kvaser_usb_tx_urb_context *context = NULL;
struct urb *urb;
@@ -563,8 +602,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
goto freeurb;
}
- buf = dev->ops->dev_frame_to_cmd(priv, skb, &cmd_len,
- context->echo_index);
+ buf = ops->dev_frame_to_cmd(priv, skb, &cmd_len, context->echo_index);
if (!buf) {
stats->tx_dropped++;
dev_kfree_skb(skb);
@@ -648,15 +686,16 @@ static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev)
}
}
-static int kvaser_usb_init_one(struct kvaser_usb *dev,
- const struct usb_device_id *id, int channel)
+static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel)
{
struct net_device *netdev;
struct kvaser_usb_net_priv *priv;
+ const struct kvaser_usb_driver_info *driver_info = dev->driver_info;
+ const struct kvaser_usb_dev_ops *ops = driver_info->ops;
int err;
- if (dev->ops->dev_reset_chip) {
- err = dev->ops->dev_reset_chip(dev, channel);
+ if (ops->dev_reset_chip) {
+ err = ops->dev_reset_chip(dev, channel);
if (err)
return err;
}
@@ -685,20 +724,19 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev,
priv->can.state = CAN_STATE_STOPPED;
priv->can.clock.freq = dev->cfg->clock.freq;
priv->can.bittiming_const = dev->cfg->bittiming_const;
- priv->can.do_set_bittiming = dev->ops->dev_set_bittiming;
- priv->can.do_set_mode = dev->ops->dev_set_mode;
- if ((id->driver_info & KVASER_USB_HAS_TXRX_ERRORS) ||
+ priv->can.do_set_bittiming = ops->dev_set_bittiming;
+ priv->can.do_set_mode = ops->dev_set_mode;
+ if ((driver_info->quirks & KVASER_USB_QUIRK_HAS_TXRX_ERRORS) ||
(priv->dev->card_data.capabilities & KVASER_USB_CAP_BERR_CAP))
- priv->can.do_get_berr_counter = dev->ops->dev_get_berr_counter;
- if (id->driver_info & KVASER_USB_HAS_SILENT_MODE)
+ priv->can.do_get_berr_counter = ops->dev_get_berr_counter;
+ if (driver_info->quirks & KVASER_USB_QUIRK_HAS_SILENT_MODE)
priv->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY;
priv->can.ctrlmode_supported |= dev->card_data.ctrlmode_supported;
if (priv->can.ctrlmode_supported & CAN_CTRLMODE_FD) {
priv->can.data_bittiming_const = dev->cfg->data_bittiming_const;
- priv->can.do_set_data_bittiming =
- dev->ops->dev_set_data_bittiming;
+ priv->can.do_set_data_bittiming = ops->dev_set_data_bittiming;
}
netdev->flags |= IFF_ECHO;
@@ -729,29 +767,22 @@ static int kvaser_usb_probe(struct usb_interface *intf,
struct kvaser_usb *dev;
int err;
int i;
+ const struct kvaser_usb_driver_info *driver_info;
+ const struct kvaser_usb_dev_ops *ops;
+
+ driver_info = (const struct kvaser_usb_driver_info *)id->driver_info;
+ if (!driver_info)
+ return -ENODEV;
dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
- if (kvaser_is_leaf(id)) {
- dev->card_data.leaf.family = KVASER_LEAF;
- dev->ops = &kvaser_usb_leaf_dev_ops;
- } else if (kvaser_is_usbcan(id)) {
- dev->card_data.leaf.family = KVASER_USBCAN;
- dev->ops = &kvaser_usb_leaf_dev_ops;
- } else if (kvaser_is_hydra(id)) {
- dev->ops = &kvaser_usb_hydra_dev_ops;
- } else {
- dev_err(&intf->dev,
- "Product ID (%d) is not a supported Kvaser USB device\n",
- id->idProduct);
- return -ENODEV;
- }
-
dev->intf = intf;
+ dev->driver_info = driver_info;
+ ops = driver_info->ops;
- err = dev->ops->dev_setup_endpoints(dev);
+ err = ops->dev_setup_endpoints(dev);
if (err) {
dev_err(&intf->dev, "Cannot get usb endpoint(s)");
return err;
@@ -765,22 +796,22 @@ static int kvaser_usb_probe(struct usb_interface *intf,
dev->card_data.ctrlmode_supported = 0;
dev->card_data.capabilities = 0;
- err = dev->ops->dev_init_card(dev);
+ err = ops->dev_init_card(dev);
if (err) {
dev_err(&intf->dev,
"Failed to initialize card, error %d\n", err);
return err;
}
- err = dev->ops->dev_get_software_info(dev);
+ err = ops->dev_get_software_info(dev);
if (err) {
dev_err(&intf->dev,
"Cannot get software info, error %d\n", err);
return err;
}
- if (dev->ops->dev_get_software_details) {
- err = dev->ops->dev_get_software_details(dev);
+ if (ops->dev_get_software_details) {
+ err = ops->dev_get_software_details(dev);
if (err) {
dev_err(&intf->dev,
"Cannot get software details, error %d\n", err);
@@ -798,14 +829,14 @@ static int kvaser_usb_probe(struct usb_interface *intf,
dev_dbg(&intf->dev, "Max outstanding tx = %d URBs\n", dev->max_tx_urbs);
- err = dev->ops->dev_get_card_info(dev);
+ err = ops->dev_get_card_info(dev);
if (err) {
dev_err(&intf->dev, "Cannot get card info, error %d\n", err);
return err;
}
- if (dev->ops->dev_get_capabilities) {
- err = dev->ops->dev_get_capabilities(dev);
+ if (ops->dev_get_capabilities) {
+ err = ops->dev_get_capabilities(dev);
if (err) {
dev_err(&intf->dev,
"Cannot get capabilities, error %d\n", err);
@@ -815,7 +846,7 @@ static int kvaser_usb_probe(struct usb_interface *intf,
}
for (i = 0; i < dev->nchannels; i++) {
- err = kvaser_usb_init_one(dev, id, i);
+ err = kvaser_usb_init_one(dev, i);
if (err) {
kvaser_usb_remove_interfaces(dev);
return err;
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
index a26823c5b62a..5d70844ac030 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
@@ -375,7 +375,7 @@ static const struct can_bittiming_const kvaser_usb_hydra_kcan_bittiming_c = {
.brp_inc = 1,
};
-static const struct can_bittiming_const kvaser_usb_hydra_flexc_bittiming_c = {
+const struct can_bittiming_const kvaser_usb_flexc_bittiming_const = {
.name = "kvaser_usb_flex",
.tseg1_min = 4,
.tseg1_max = 16,
@@ -2052,7 +2052,7 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_flexc = {
.freq = 24 * MEGA /* Hz */,
},
.timestamp_freq = 1,
- .bittiming_const = &kvaser_usb_hydra_flexc_bittiming_c,
+ .bittiming_const = &kvaser_usb_flexc_bittiming_const,
};
static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_rt = {
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
index c805b999c543..cc809ecd1e62 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
@@ -101,16 +101,6 @@
#define USBCAN_ERROR_STATE_RX_ERROR BIT(1)
#define USBCAN_ERROR_STATE_BUSERROR BIT(2)
-/* bittiming parameters */
-#define KVASER_USB_TSEG1_MIN 1
-#define KVASER_USB_TSEG1_MAX 16
-#define KVASER_USB_TSEG2_MIN 1
-#define KVASER_USB_TSEG2_MAX 8
-#define KVASER_USB_SJW_MAX 4
-#define KVASER_USB_BRP_MIN 1
-#define KVASER_USB_BRP_MAX 64
-#define KVASER_USB_BRP_INC 1
-
/* ctrl modes */
#define KVASER_CTRL_MODE_NORMAL 1
#define KVASER_CTRL_MODE_SILENT 2
@@ -343,48 +333,68 @@ struct kvaser_usb_err_summary {
};
};
-static const struct can_bittiming_const kvaser_usb_leaf_bittiming_const = {
- .name = "kvaser_usb",
- .tseg1_min = KVASER_USB_TSEG1_MIN,
- .tseg1_max = KVASER_USB_TSEG1_MAX,
- .tseg2_min = KVASER_USB_TSEG2_MIN,
- .tseg2_max = KVASER_USB_TSEG2_MAX,
- .sjw_max = KVASER_USB_SJW_MAX,
- .brp_min = KVASER_USB_BRP_MIN,
- .brp_max = KVASER_USB_BRP_MAX,
- .brp_inc = KVASER_USB_BRP_INC,
+static const struct can_bittiming_const kvaser_usb_leaf_m16c_bittiming_const = {
+ .name = "kvaser_usb_ucii",
+ .tseg1_min = 4,
+ .tseg1_max = 16,
+ .tseg2_min = 2,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 16,
+ .brp_inc = 1,
+};
+
+static const struct can_bittiming_const kvaser_usb_leaf_m32c_bittiming_const = {
+ .name = "kvaser_usb_leaf",
+ .tseg1_min = 3,
+ .tseg1_max = 16,
+ .tseg2_min = 2,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 2,
+ .brp_max = 128,
+ .brp_inc = 2,
};
-static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_8mhz = {
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_usbcan_dev_cfg = {
.clock = {
.freq = 8 * MEGA /* Hz */,
},
.timestamp_freq = 1,
- .bittiming_const = &kvaser_usb_leaf_bittiming_const,
+ .bittiming_const = &kvaser_usb_leaf_m16c_bittiming_const,
+};
+
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_m32c_dev_cfg = {
+ .clock = {
+ .freq = 16 * MEGA /* Hz */,
+ },
+ .timestamp_freq = 1,
+ .bittiming_const = &kvaser_usb_leaf_m32c_bittiming_const,
};
-static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_16mhz = {
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_16mhz = {
.clock = {
.freq = 16 * MEGA /* Hz */,
},
.timestamp_freq = 1,
- .bittiming_const = &kvaser_usb_leaf_bittiming_const,
+ .bittiming_const = &kvaser_usb_flexc_bittiming_const,
};
-static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_24mhz = {
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_24mhz = {
.clock = {
.freq = 24 * MEGA /* Hz */,
},
.timestamp_freq = 1,
- .bittiming_const = &kvaser_usb_leaf_bittiming_const,
+ .bittiming_const = &kvaser_usb_flexc_bittiming_const,
};
-static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_32mhz = {
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_32mhz = {
.clock = {
.freq = 32 * MEGA /* Hz */,
},
.timestamp_freq = 1,
- .bittiming_const = &kvaser_usb_leaf_bittiming_const,
+ .bittiming_const = &kvaser_usb_flexc_bittiming_const,
};
static void *
@@ -404,7 +414,7 @@ kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv,
sizeof(struct kvaser_cmd_tx_can);
cmd->u.tx_can.channel = priv->channel;
- switch (dev->card_data.leaf.family) {
+ switch (dev->driver_info->family) {
case KVASER_LEAF:
cmd_tx_can_flags = &cmd->u.tx_can.leaf.flags;
break;
@@ -524,16 +534,23 @@ static void kvaser_usb_leaf_get_software_info_leaf(struct kvaser_usb *dev,
dev->fw_version = le32_to_cpu(softinfo->fw_version);
dev->max_tx_urbs = le16_to_cpu(softinfo->max_outstanding_tx);
- switch (sw_options & KVASER_USB_LEAF_SWOPTION_FREQ_MASK) {
- case KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK:
- dev->cfg = &kvaser_usb_leaf_dev_cfg_16mhz;
- break;
- case KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK:
- dev->cfg = &kvaser_usb_leaf_dev_cfg_24mhz;
- break;
- case KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK:
- dev->cfg = &kvaser_usb_leaf_dev_cfg_32mhz;
- break;
+ if (dev->driver_info->quirks & KVASER_USB_QUIRK_IGNORE_CLK_FREQ) {
+ /* Firmware expects bittiming parameters calculated for 16MHz
+ * clock, regardless of the actual clock
+ */
+ dev->cfg = &kvaser_usb_leaf_m32c_dev_cfg;
+ } else {
+ switch (sw_options & KVASER_USB_LEAF_SWOPTION_FREQ_MASK) {
+ case KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK:
+ dev->cfg = &kvaser_usb_leaf_imx_dev_cfg_16mhz;
+ break;
+ case KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK:
+ dev->cfg = &kvaser_usb_leaf_imx_dev_cfg_24mhz;
+ break;
+ case KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK:
+ dev->cfg = &kvaser_usb_leaf_imx_dev_cfg_32mhz;
+ break;
+ }
}
}
@@ -550,7 +567,7 @@ static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev)
if (err)
return err;
- switch (dev->card_data.leaf.family) {
+ switch (dev->driver_info->family) {
case KVASER_LEAF:
kvaser_usb_leaf_get_software_info_leaf(dev, &cmd.u.leaf.softinfo);
break;
@@ -558,7 +575,7 @@ static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev)
dev->fw_version = le32_to_cpu(cmd.u.usbcan.softinfo.fw_version);
dev->max_tx_urbs =
le16_to_cpu(cmd.u.usbcan.softinfo.max_outstanding_tx);
- dev->cfg = &kvaser_usb_leaf_dev_cfg_8mhz;
+ dev->cfg = &kvaser_usb_leaf_usbcan_dev_cfg;
break;
}
@@ -597,7 +614,7 @@ static int kvaser_usb_leaf_get_card_info(struct kvaser_usb *dev)
dev->nchannels = cmd.u.cardinfo.nchannels;
if (dev->nchannels > KVASER_USB_MAX_NET_DEVICES ||
- (dev->card_data.leaf.family == KVASER_USBCAN &&
+ (dev->driver_info->family == KVASER_USBCAN &&
dev->nchannels > MAX_USBCAN_NET_DEVICES))
return -EINVAL;
@@ -730,7 +747,7 @@ kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
new_state < CAN_STATE_BUS_OFF)
priv->can.can_stats.restarts++;
- switch (dev->card_data.leaf.family) {
+ switch (dev->driver_info->family) {
case KVASER_LEAF:
if (es->leaf.error_factor) {
priv->can.can_stats.bus_error++;
@@ -809,7 +826,7 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
}
}
- switch (dev->card_data.leaf.family) {
+ switch (dev->driver_info->family) {
case KVASER_LEAF:
if (es->leaf.error_factor) {
cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
@@ -999,7 +1016,7 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev,
stats = &priv->netdev->stats;
if ((cmd->u.rx_can_header.flag & MSG_FLAG_ERROR_FRAME) &&
- (dev->card_data.leaf.family == KVASER_LEAF &&
+ (dev->driver_info->family == KVASER_LEAF &&
cmd->id == CMD_LEAF_LOG_MESSAGE)) {
kvaser_usb_leaf_leaf_rx_error(dev, cmd);
return;
@@ -1015,7 +1032,7 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev,
return;
}
- switch (dev->card_data.leaf.family) {
+ switch (dev->driver_info->family) {
case KVASER_LEAF:
rx_data = cmd->u.leaf.rx_can.data;
break;
@@ -1030,7 +1047,7 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev,
return;
}
- if (dev->card_data.leaf.family == KVASER_LEAF && cmd->id ==
+ if (dev->driver_info->family == KVASER_LEAF && cmd->id ==
CMD_LEAF_LOG_MESSAGE) {
cf->can_id = le32_to_cpu(cmd->u.leaf.log_message.id);
if (cf->can_id & KVASER_EXTENDED_FRAME)
@@ -1128,14 +1145,14 @@ static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev,
break;
case CMD_LEAF_LOG_MESSAGE:
- if (dev->card_data.leaf.family != KVASER_LEAF)
+ if (dev->driver_info->family != KVASER_LEAF)
goto warn;
kvaser_usb_leaf_rx_can_msg(dev, cmd);
break;
case CMD_CHIP_STATE_EVENT:
case CMD_CAN_ERROR_EVENT:
- if (dev->card_data.leaf.family == KVASER_LEAF)
+ if (dev->driver_info->family == KVASER_LEAF)
kvaser_usb_leaf_leaf_rx_error(dev, cmd);
else
kvaser_usb_leaf_usbcan_rx_error(dev, cmd);
@@ -1147,12 +1164,12 @@ static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev,
/* Ignored commands */
case CMD_USBCAN_CLOCK_OVERFLOW_EVENT:
- if (dev->card_data.leaf.family != KVASER_USBCAN)
+ if (dev->driver_info->family != KVASER_USBCAN)
goto warn;
break;
case CMD_FLUSH_QUEUE_REPLY:
- if (dev->card_data.leaf.family != KVASER_LEAF)
+ if (dev->driver_info->family != KVASER_LEAF)
goto warn;
break;
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 8a3b7b103ca4..e179d311aa28 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -258,7 +258,7 @@ static const struct can_bittiming_const xcan_bittiming_const_canfd2 = {
.tseg2_min = 1,
.tseg2_max = 128,
.sjw_max = 128,
- .brp_min = 2,
+ .brp_min = 1,
.brp_max = 256,
.brp_inc = 1,
};
@@ -271,7 +271,7 @@ static const struct can_bittiming_const xcan_data_bittiming_const_canfd2 = {
.tseg2_min = 1,
.tseg2_max = 16,
.sjw_max = 16,
- .brp_min = 2,
+ .brp_min = 1,
.brp_max = 256,
.brp_inc = 1,
};
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 87e81c636339..be0edfa093d0 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -878,6 +878,11 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
if (duplex == DUPLEX_FULL)
reg |= DUPLX_MODE;
+ if (tx_pause)
+ reg |= TXFLOW_CNTL;
+ if (rx_pause)
+ reg |= RXFLOW_CNTL;
+
core_writel(priv, reg, offset);
}
diff --git a/drivers/net/dsa/hirschmann/hellcreek_ptp.c b/drivers/net/dsa/hirschmann/hellcreek_ptp.c
index 2572c6087bb5..b28baab6d56a 100644
--- a/drivers/net/dsa/hirschmann/hellcreek_ptp.c
+++ b/drivers/net/dsa/hirschmann/hellcreek_ptp.c
@@ -300,6 +300,7 @@ static int hellcreek_led_setup(struct hellcreek *hellcreek)
const char *label, *state;
int ret = -EINVAL;
+ of_node_get(hellcreek->dev->of_node);
leds = of_find_node_by_name(hellcreek->dev->of_node, "leds");
if (!leds) {
dev_err(hellcreek->dev, "No LEDs specified in device tree!\n");
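/*
 * Editor's aside -- why the of_node_get() above is needed; a hedged
 * sketch, not part of the patch. of_find_node_by_name() consumes a
 * reference on its @from argument, but hellcreek->dev->of_node is a
 * reference the driver does not own and must not give away, so it has
 * to be pinned first.
 */
#include <linux/of.h>

static struct device_node *find_leds_node(struct device_node *parent)
{
	/* take a reference to balance the put inside the search below */
	of_node_get(parent);
	/* returns "leds" with its refcount held; caller must of_node_put() */
	return of_find_node_by_name(parent, "leds");
}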
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index 8af4def38a98..e531b93f3cb2 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -2070,8 +2070,10 @@ static int gswip_gphy_fw_list(struct gswip_priv *priv,
for_each_available_child_of_node(gphy_fw_list_np, gphy_fw_np) {
err = gswip_gphy_fw_probe(priv, &priv->gphy_fw[i],
gphy_fw_np, i);
- if (err)
+ if (err) {
+ of_node_put(gphy_fw_np);
goto remove_gphy;
+ }
i++;
}
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 9ca8c8d7740f..92a500e1ccd2 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -1038,18 +1038,21 @@ int ksz_switch_register(struct ksz_device *dev,
ports = of_get_child_by_name(dev->dev->of_node, "ethernet-ports");
if (!ports)
ports = of_get_child_by_name(dev->dev->of_node, "ports");
- if (ports)
+ if (ports) {
for_each_available_child_of_node(ports, port) {
if (of_property_read_u32(port, "reg",
&port_num))
continue;
if (!(dev->port_mask & BIT(port_num))) {
of_node_put(port);
+ of_node_put(ports);
return -EINVAL;
}
of_get_phy_mode(port,
&dev->ports[port_num].interface);
}
+ of_node_put(ports);
+ }
dev->synclko_125 = of_property_read_bool(dev->dev->of_node,
"microchip,synclko-125");
dev->synclko_disable = of_property_read_bool(dev->dev->of_node,
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
index 7b37d45bc9fb..d94150d8f3f4 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.c
+++ b/drivers/net/dsa/mv88e6xxx/serdes.c
@@ -50,22 +50,25 @@ static int mv88e6390_serdes_write(struct mv88e6xxx_chip *chip,
}
static int mv88e6xxx_serdes_pcs_get_state(struct mv88e6xxx_chip *chip,
- u16 ctrl, u16 status, u16 lpa,
+ u16 bmsr, u16 lpa, u16 status,
struct phylink_link_state *state)
{
+ state->link = false;
+
+ /* If the BMSR reports that the link had failed, report this to
+ * phylink.
+ */
+ if (!(bmsr & BMSR_LSTATUS))
+ return 0;
+
state->link = !!(status & MV88E6390_SGMII_PHY_STATUS_LINK);
+ state->an_complete = !!(bmsr & BMSR_ANEGCOMPLETE);
if (status & MV88E6390_SGMII_PHY_STATUS_SPD_DPL_VALID) {
/* The Speed and Duplex Resolved register is 1 if AN is enabled
* and complete, or if AN is disabled. So with disabled AN we
- * still get here on link up. But we want to set an_complete
- * only if AN was enabled, thus we look at BMCR_ANENABLE.
- * (According to 802.3-2008 section 22.2.4.2.10, we should be
- * able to get this same value from BMSR_ANEGCAPABLE, but tests
- * show that these Marvell PHYs don't conform to this part of
- * the specificaion - BMSR_ANEGCAPABLE is simply always 1.)
+ * still get here on link up.
*/
- state->an_complete = !!(ctrl & BMCR_ANENABLE);
state->duplex = status &
MV88E6390_SGMII_PHY_STATUS_DUPLEX_FULL ?
DUPLEX_FULL : DUPLEX_HALF;
@@ -191,12 +194,12 @@ int mv88e6352_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
int lane, struct phylink_link_state *state)
{
- u16 lpa, status, ctrl;
+ u16 bmsr, lpa, status;
int err;
- err = mv88e6352_serdes_read(chip, MII_BMCR, &ctrl);
+ err = mv88e6352_serdes_read(chip, MII_BMSR, &bmsr);
if (err) {
- dev_err(chip->dev, "can't read Serdes PHY control: %d\n", err);
+ dev_err(chip->dev, "can't read Serdes PHY BMSR: %d\n", err);
return err;
}
@@ -212,7 +215,7 @@ int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
return err;
}
- return mv88e6xxx_serdes_pcs_get_state(chip, ctrl, status, lpa, state);
+ return mv88e6xxx_serdes_pcs_get_state(chip, bmsr, lpa, status, state);
}
int mv88e6352_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port,
@@ -918,13 +921,13 @@ int mv88e6390_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip,
int port, int lane, struct phylink_link_state *state)
{
- u16 lpa, status, ctrl;
+ u16 bmsr, lpa, status;
int err;
err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_SGMII_BMCR, &ctrl);
+ MV88E6390_SGMII_BMSR, &bmsr);
if (err) {
- dev_err(chip->dev, "can't read Serdes PHY control: %d\n", err);
+ dev_err(chip->dev, "can't read Serdes PHY BMSR: %d\n", err);
return err;
}
@@ -942,7 +945,7 @@ static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip,
return err;
}
- return mv88e6xxx_serdes_pcs_get_state(chip, ctrl, status, lpa, state);
+ return mv88e6xxx_serdes_pcs_get_state(chip, bmsr, lpa, status, state);
}
static int mv88e6390_serdes_pcs_get_state_10g(struct mv88e6xxx_chip *chip,
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 570d0204b7be..9c27b9b0128d 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -1886,6 +1886,8 @@ static void vsc9959_psfp_sgi_table_del(struct ocelot *ocelot,
static void vsc9959_psfp_counters_get(struct ocelot *ocelot, u32 index,
struct felix_stream_filter_counters *counters)
{
+ mutex_lock(&ocelot->stats_lock);
+
ocelot_rmw(ocelot, SYS_STAT_CFG_STAT_VIEW(index),
SYS_STAT_CFG_STAT_VIEW_M,
SYS_STAT_CFG);
@@ -1900,6 +1902,8 @@ static void vsc9959_psfp_counters_get(struct ocelot *ocelot, u32 index,
SYS_STAT_CFG_STAT_VIEW(index) |
SYS_STAT_CFG_STAT_CLEAR_SHOT(0x10),
SYS_STAT_CFG);
+
+ mutex_unlock(&ocelot->stats_lock);
}
static int vsc9959_psfp_filter_add(struct ocelot *ocelot, int port,
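/*
 * Editor's aside -- the race closed by the mutex above, in schematic
 * form with hypothetical helpers (my_select_view, my_read_selected);
 * not part of the patch. The switch exposes one shared STAT_VIEW
 * selector: reading a counter means programming the selector and then
 * reading, so two unsynchronized readers can interleave those steps
 * and read each other's view. One lock around both steps serializes
 * them.
 */
#include <linux/mutex.h>

struct my_hw {
	struct mutex stats_lock;
};

void my_select_view(struct my_hw *hw, u32 index);	/* program selector */
u32 my_read_selected(struct my_hw *hw);			/* read its output */

static u32 my_read_counter(struct my_hw *hw, u32 index)
{
	u32 val;

	mutex_lock(&hw->stats_lock);
	my_select_view(hw, index);	/* step 1: shared selector */
	val = my_read_selected(hw);	/* step 2: must see *our* view */
	mutex_unlock(&hw->stats_lock);

	return val;
}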
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index 2727d3169c25..1cbb05b0323f 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -2334,6 +2334,7 @@ static int
qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
struct qca8k_priv *priv = ds->priv;
+ int ret;
/* We only have a general MTU setting.
* DSA always sets the CPU port's MTU to the largest MTU of the slave
@@ -2344,8 +2345,27 @@ qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
if (!dsa_is_cpu_port(ds, port))
return 0;
+ /* To change the MAX_FRAME_SIZE the cpu ports must be off or
+ * the switch panics.
+ * Turn off both cpu ports before applying the new value to prevent
+ * this.
+ */
+ if (priv->port_enabled_map & BIT(0))
+ qca8k_port_set_status(priv, 0, 0);
+
+ if (priv->port_enabled_map & BIT(6))
+ qca8k_port_set_status(priv, 6, 0);
+
/* Include L2 header / FCS length */
- return qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, new_mtu + ETH_HLEN + ETH_FCS_LEN);
+ ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, new_mtu + ETH_HLEN + ETH_FCS_LEN);
+
+ if (priv->port_enabled_map & BIT(0))
+ qca8k_port_set_status(priv, 0, 1);
+
+ if (priv->port_enabled_map & BIT(6))
+ qca8k_port_set_status(priv, 6, 1);
+
+ return ret;
}
static int
diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h
index 04408e11402a..ec58d0e80a70 100644
--- a/drivers/net/dsa/qca8k.h
+++ b/drivers/net/dsa/qca8k.h
@@ -15,7 +15,7 @@
#define QCA8K_ETHERNET_MDIO_PRIORITY 7
#define QCA8K_ETHERNET_PHY_PRIORITY 6
-#define QCA8K_ETHERNET_TIMEOUT 100
+#define QCA8K_ETHERNET_TIMEOUT 5
#define QCA8K_NUM_PORTS 7
#define QCA8K_NUM_CPU_PORTS 2
diff --git a/drivers/net/dsa/realtek/rtl8365mb.c b/drivers/net/dsa/realtek/rtl8365mb.c
index 3bb42a9f236d..769f672e9128 100644
--- a/drivers/net/dsa/realtek/rtl8365mb.c
+++ b/drivers/net/dsa/realtek/rtl8365mb.c
@@ -955,35 +955,21 @@ static int rtl8365mb_ext_config_forcemode(struct realtek_priv *priv, int port,
return 0;
}
-static bool rtl8365mb_phy_mode_supported(struct dsa_switch *ds, int port,
- phy_interface_t interface)
-{
- int ext_int;
-
- ext_int = rtl8365mb_extint_port_map[port];
-
- if (ext_int < 0 &&
- (interface == PHY_INTERFACE_MODE_NA ||
- interface == PHY_INTERFACE_MODE_INTERNAL ||
- interface == PHY_INTERFACE_MODE_GMII))
- /* Internal PHY */
- return true;
- else if ((ext_int >= 1) &&
- phy_interface_mode_is_rgmii(interface))
- /* Extension MAC */
- return true;
-
- return false;
-}
-
static void rtl8365mb_phylink_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
- if (dsa_is_user_port(ds, port))
+ if (dsa_is_user_port(ds, port)) {
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
config->supported_interfaces);
- else if (dsa_is_cpu_port(ds, port))
+
+ /* GMII is the default interface mode for phylib, so
+ * we have to support it for ports with integrated PHY.
+ */
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
+ } else if (dsa_is_cpu_port(ds, port)) {
phy_interface_set_rgmii(config->supported_interfaces);
+ }
config->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
MAC_10 | MAC_100 | MAC_1000FD;
@@ -996,12 +982,6 @@ static void rtl8365mb_phylink_mac_config(struct dsa_switch *ds, int port,
struct realtek_priv *priv = ds->priv;
int ret;
- if (!rtl8365mb_phy_mode_supported(ds, port, state->interface)) {
- dev_err(priv->dev, "phy mode %s is unsupported on port %d\n",
- phy_modes(state->interface), port);
- return;
- }
-
if (mode != MLO_AN_PHY && mode != MLO_AN_FIXED) {
dev_err(priv->dev,
"port %d supports only conventional PHY or fixed-link\n",
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 72b6fc1932b5..698c7d1fb45c 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -3382,12 +3382,28 @@ static const struct of_device_id sja1105_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, sja1105_dt_ids);
+static const struct spi_device_id sja1105_spi_ids[] = {
+ { "sja1105e" },
+ { "sja1105t" },
+ { "sja1105p" },
+ { "sja1105q" },
+ { "sja1105r" },
+ { "sja1105s" },
+ { "sja1110a" },
+ { "sja1110b" },
+ { "sja1110c" },
+ { "sja1110d" },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, sja1105_spi_ids);
+
static struct spi_driver sja1105_driver = {
.driver = {
.name = "sja1105",
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(sja1105_dt_ids),
},
+ .id_table = sja1105_spi_ids,
.probe = sja1105_probe,
.remove = sja1105_remove,
.shutdown = sja1105_shutdown,
diff --git a/drivers/net/dsa/vitesse-vsc73xx-spi.c b/drivers/net/dsa/vitesse-vsc73xx-spi.c
index 3110895358d8..97a92e6da60d 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-spi.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-spi.c
@@ -205,10 +205,20 @@ static const struct of_device_id vsc73xx_of_match[] = {
};
MODULE_DEVICE_TABLE(of, vsc73xx_of_match);
+static const struct spi_device_id vsc73xx_spi_ids[] = {
+ { "vsc7385" },
+ { "vsc7388" },
+ { "vsc7395" },
+ { "vsc7398" },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, vsc73xx_spi_ids);
+
static struct spi_driver vsc73xx_spi_driver = {
.probe = vsc73xx_spi_probe,
.remove = vsc73xx_spi_remove,
.shutdown = vsc73xx_spi_shutdown,
+ .id_table = vsc73xx_spi_ids,
.driver = {
.name = "vsc73xx-spi",
.of_match_table = vsc73xx_of_match,
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index a3816264c35c..8c5828582c21 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -163,7 +163,8 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
mdio = mdiobus_alloc();
if (mdio == NULL) {
netdev_err(dev, "Error allocating MDIO bus\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto put_node;
}
mdio->name = ALTERA_TSE_RESOURCE_NAME;
@@ -180,6 +181,7 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
mdio->id);
goto out_free_mdio;
}
+ of_node_put(mdio_node);
if (netif_msg_drv(priv))
netdev_info(dev, "MDIO bus %s: created\n", mdio->id);
@@ -189,6 +191,8 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
out_free_mdio:
mdiobus_free(mdio);
mdio = NULL;
+put_node:
+ of_node_put(mdio_node);
return ret;
}
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index c6f003975621..d5f2c6989221 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -820,7 +820,7 @@ static int au1000_rx(struct net_device *dev)
pr_cont("\n");
}
}
- prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE);
+ prxd->buff_stat = lower_32_bits(pDB->dma_addr) | RX_DMA_ENABLE;
aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1);
wmb(); /* drain writebuffer */
@@ -996,7 +996,7 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
ps->tx_packets++;
ps->tx_bytes += ptxd->len;
- ptxd->buff_stat = pDB->dma_addr | TX_DMA_ENABLE;
+ ptxd->buff_stat = lower_32_bits(pDB->dma_addr) | TX_DMA_ENABLE;
wmb(); /* drain writebuffer */
dev_kfree_skb(skb);
aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1);
@@ -1131,9 +1131,9 @@ static int au1000_probe(struct platform_device *pdev)
/* Allocate the data buffers
* Snooping works fine with eth on all au1xxx
*/
- aup->vaddr = (u32)dma_alloc_coherent(&pdev->dev, MAX_BUF_SIZE *
- (NUM_TX_BUFFS + NUM_RX_BUFFS),
- &aup->dma_addr, 0);
+ aup->vaddr = dma_alloc_coherent(&pdev->dev, MAX_BUF_SIZE *
+ (NUM_TX_BUFFS + NUM_RX_BUFFS),
+ &aup->dma_addr, 0);
if (!aup->vaddr) {
dev_err(&pdev->dev, "failed to allocate data buffers\n");
err = -ENOMEM;
@@ -1234,8 +1234,8 @@ static int au1000_probe(struct platform_device *pdev)
for (i = 0; i < (NUM_TX_BUFFS+NUM_RX_BUFFS); i++) {
pDB->pnext = pDBfree;
pDBfree = pDB;
- pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i);
- pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
+ pDB->vaddr = aup->vaddr + MAX_BUF_SIZE * i;
+ pDB->dma_addr = aup->dma_addr + MAX_BUF_SIZE * i;
pDB++;
}
aup->pDBfree = pDBfree;
@@ -1246,7 +1246,7 @@ static int au1000_probe(struct platform_device *pdev)
if (!pDB)
goto err_out;
- aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
+ aup->rx_dma_ring[i]->buff_stat = lower_32_bits(pDB->dma_addr);
aup->rx_db_inuse[i] = pDB;
}
@@ -1255,7 +1255,7 @@ static int au1000_probe(struct platform_device *pdev)
if (!pDB)
goto err_out;
- aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
+ aup->tx_dma_ring[i]->buff_stat = lower_32_bits(pDB->dma_addr);
aup->tx_dma_ring[i]->len = 0;
aup->tx_db_inuse[i] = pDB;
}
@@ -1310,7 +1310,7 @@ err_remap2:
iounmap(aup->mac);
err_remap1:
dma_free_coherent(&pdev->dev, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
- (void *)aup->vaddr, aup->dma_addr);
+ aup->vaddr, aup->dma_addr);
err_vaddr:
free_netdev(dev);
err_alloc:
@@ -1343,7 +1343,7 @@ static int au1000_remove(struct platform_device *pdev)
au1000_ReleaseDB(aup, aup->tx_db_inuse[i]);
dma_free_coherent(&pdev->dev, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
- (void *)aup->vaddr, aup->dma_addr);
+ aup->vaddr, aup->dma_addr);
iounmap(aup->macdma);
iounmap(aup->mac);
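/*
 * Editor's aside -- illustrative only, not part of the patch.
 * dma_addr_t may be 64 bits wide even when the device register is 32
 * bits; lower_32_bits() makes the truncation explicit instead of
 * burying it in a cast. A userspace rendering of the kernel macro:
 */
#include <stdio.h>
#include <stdint.h>

#define lower_32_bits(n) ((uint32_t)((n) & 0xffffffff))

int main(void)
{
	uint64_t dma_addr = 0x100001000ULL;	/* bit 32 set */

	printf("reg = 0x%08x\n", lower_32_bits(dma_addr)); /* 0x00001000 */
	return 0;
}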
diff --git a/drivers/net/ethernet/amd/au1000_eth.h b/drivers/net/ethernet/amd/au1000_eth.h
index e3a3ed29db61..2489c2f4fd8a 100644
--- a/drivers/net/ethernet/amd/au1000_eth.h
+++ b/drivers/net/ethernet/amd/au1000_eth.h
@@ -106,8 +106,8 @@ struct au1000_private {
struct mac_reg *mac; /* mac registers */
u32 *enable; /* address of MAC Enable Register */
void __iomem *macdma; /* base of MAC DMA port */
- u32 vaddr; /* virtual address of rx/tx buffers */
- dma_addr_t dma_addr; /* dma address of rx/tx buffers */
+ void *vaddr; /* virtual address of rx/tx buffers */
+ dma_addr_t dma_addr; /* dma address of rx/tx buffers */
spinlock_t lock; /* Serialise access to device */
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index a3593290886f..4d46780fad13 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -2784,7 +2784,7 @@ void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
netdev_dbg(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source);
- netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto));
+ netdev_dbg(netdev, "Protocol: %#06x\n", ntohs(eth->h_proto));
for (i = 0; i < skb->len; i += 32) {
unsigned int len = min(skb->len - i, 32U);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
index 4ebd2410185a..4d790a89fe77 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
@@ -338,7 +338,7 @@ static int xgbe_platform_probe(struct platform_device *pdev)
* the PHY resources listed last
*/
phy_memnum = xgbe_resource_count(pdev, IORESOURCE_MEM) - 3;
- phy_irqnum = xgbe_resource_count(pdev, IORESOURCE_IRQ) - 1;
+ phy_irqnum = platform_irq_count(pdev) - 1;
dma_irqnum = 1;
dma_irqend = phy_irqnum;
} else {
@@ -348,7 +348,7 @@ static int xgbe_platform_probe(struct platform_device *pdev)
phy_memnum = 0;
phy_irqnum = 0;
dma_irqnum = 1;
- dma_irqend = xgbe_resource_count(pdev, IORESOURCE_IRQ);
+ dma_irqend = platform_irq_count(pdev);
}
/* Obtain the mmio areas for the device */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 831833911a52..8647125d60ae 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -379,7 +379,7 @@ static void aq_pci_shutdown(struct pci_dev *pdev)
}
}
-static int aq_suspend_common(struct device *dev, bool deep)
+static int aq_suspend_common(struct device *dev)
{
struct aq_nic_s *nic = pci_get_drvdata(to_pci_dev(dev));
@@ -392,17 +392,15 @@ static int aq_suspend_common(struct device *dev, bool deep)
if (netif_running(nic->ndev))
aq_nic_stop(nic);
- if (deep) {
- aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol);
- aq_nic_set_power(nic);
- }
+ aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol);
+ aq_nic_set_power(nic);
rtnl_unlock();
return 0;
}
-static int atl_resume_common(struct device *dev, bool deep)
+static int atl_resume_common(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct aq_nic_s *nic;
@@ -415,11 +413,6 @@ static int atl_resume_common(struct device *dev, bool deep)
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
- if (deep) {
- /* Reinitialize Nic/Vecs objects */
- aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol);
- }
-
if (netif_running(nic->ndev)) {
ret = aq_nic_init(nic);
if (ret)
@@ -444,22 +437,22 @@ err_exit:
static int aq_pm_freeze(struct device *dev)
{
- return aq_suspend_common(dev, true);
+ return aq_suspend_common(dev);
}
static int aq_pm_suspend_poweroff(struct device *dev)
{
- return aq_suspend_common(dev, true);
+ return aq_suspend_common(dev);
}
static int aq_pm_thaw(struct device *dev)
{
- return atl_resume_common(dev, true);
+ return atl_resume_common(dev);
}
static int aq_pm_resume_restore(struct device *dev)
{
- return atl_resume_common(dev, true);
+ return atl_resume_common(dev);
}
static const struct dev_pm_ops aq_pm_ops = {
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
index 086739e4f40a..9b83d5361699 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
@@ -234,6 +234,7 @@ struct mii_bus *bcma_mdio_mii_register(struct bgmac *bgmac)
np = of_get_child_by_name(core->dev.of_node, "mdio");
err = of_mdiobus_register(mii_bus, np);
+ of_node_put(np);
if (err) {
dev_err(&core->dev, "Registration of mii bus failed\n");
goto err_free_bus;
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c
index e6f48786949c..02bd3cf9a260 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
@@ -332,7 +332,6 @@ static void bgmac_remove(struct bcma_device *core)
bcma_mdio_mii_unregister(bgmac->mii_bus);
bgmac_enet_remove(bgmac);
bcma_set_drvdata(core, NULL);
- kfree(bgmac);
}
static struct bcma_driver bgmac_bcma_driver = {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 56b46b8206a7..cf9b00576ed3 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -7790,7 +7790,7 @@ hwrm_dbg_qcaps_exit:
static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
-static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
+int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
int rc;
@@ -10065,7 +10065,8 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
resc_reinit = true;
- if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
+ if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE ||
+ test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
fw_reset = true;
else
bnxt_remap_fw_health_regs(bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index a1dca8c58f54..075c6206325c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -2314,6 +2314,7 @@ int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset);
int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp);
int bnxt_hwrm_free_wol_fltr(struct bnxt *bp);
int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all);
+int bnxt_hwrm_func_qcaps(struct bnxt *bp);
int bnxt_hwrm_fw_set_time(struct bnxt *);
int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_half_open_nic(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 3528ce9849e6..6b3d4f4c2a75 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -979,9 +979,11 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
if (rc)
return rc;
- rc = bnxt_dl_livepatch_info_put(bp, req, BNXT_FW_SRT_PATCH);
- if (rc)
- return rc;
+ if (BNXT_CHIP_P5(bp)) {
+ rc = bnxt_dl_livepatch_info_put(bp, req, BNXT_FW_SRT_PATCH);
+ if (rc)
+ return rc;
+ }
return bnxt_dl_livepatch_info_put(bp, req, BNXT_FW_CRT_PATCH);
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index 562f8f68a47d..7f3c0875b6f5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -76,14 +76,23 @@ static int bnxt_refclk_read(struct bnxt *bp, struct ptp_system_timestamp *sts,
u64 *ns)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ u32 high_before, high_now, low;
if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
return -EIO;
+ high_before = readl(bp->bar0 + ptp->refclk_mapped_regs[1]);
ptp_read_system_prets(sts);
- *ns = readl(bp->bar0 + ptp->refclk_mapped_regs[0]);
+ low = readl(bp->bar0 + ptp->refclk_mapped_regs[0]);
ptp_read_system_postts(sts);
- *ns |= (u64)readl(bp->bar0 + ptp->refclk_mapped_regs[1]) << 32;
+ high_now = readl(bp->bar0 + ptp->refclk_mapped_regs[1]);
+ if (high_now != high_before) {
+ ptp_read_system_prets(sts);
+ low = readl(bp->bar0 + ptp->refclk_mapped_regs[0]);
+ ptp_read_system_postts(sts);
+ }
+ *ns = ((u64)high_now << 32) | low;
+
return 0;
}
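
The reworked bnxt_refclk_read() samples the high word before and after reading the low word and retries the low read once if the high word moved, so a low-word rollover between the two 32-bit reads cannot produce a torn 64-bit value. A stand-alone sketch of the same single-retry scheme, assuming a simulated counter in place of the PHC registers:

    #include <stdint.h>
    #include <stdio.h>

    /* Simulated free-running 64-bit counter exposed as two 32-bit registers;
     * every register access advances it, standing in for elapsed time. */
    static uint64_t hw_counter = 0xFFFFFFF0ull;

    static uint32_t reg_read(int hi)
    {
        hw_counter += 7;
        return hi ? (uint32_t)(hw_counter >> 32) : (uint32_t)hw_counter;
    }

    static uint64_t read_counter64(void)
    {
        uint32_t high_before = reg_read(1);
        uint32_t low = reg_read(0);
        uint32_t high_now = reg_read(1);

        if (high_now != high_before)  /* low word wrapped between reads */
            low = reg_read(0);        /* pair a fresh low with high_now */
        return ((uint64_t)high_now << 32) | low;
    }

    int main(void)
    {
        printf("counter = 0x%016llx\n",
               (unsigned long long)read_counter64());
        return 0;
    }
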
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index ddf2f3963abe..a1a2c7a64fd5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -823,8 +823,10 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
goto err_out2;
rc = pci_enable_sriov(bp->pdev, *num_vfs);
- if (rc)
+ if (rc) {
+ bnxt_ulp_sriov_cfg(bp, 0);
goto err_out2;
+ }
return 0;
@@ -832,6 +834,9 @@ err_out2:
/* Free the resources reserved for various VFs */
bnxt_hwrm_func_vf_resource_free(bp, *num_vfs);
+ /* Restore the max resources */
+ bnxt_hwrm_func_qcaps(bp);
+
err_out1:
bnxt_free_vf_resources(bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index f02fe906dedb..f53387ed0167 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -28,7 +28,7 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
struct xdp_buff *xdp)
{
struct skb_shared_info *sinfo;
- struct bnxt_sw_tx_bd *tx_buf, *first_buf;
+ struct bnxt_sw_tx_bd *tx_buf;
struct tx_bd *txbd;
int num_frags = 0;
u32 flags;
@@ -43,13 +43,14 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
/* fill up the first buffer */
prod = txr->tx_prod;
tx_buf = &txr->tx_buf_ring[prod];
- first_buf = tx_buf;
tx_buf->nr_frags = num_frags;
if (xdp)
tx_buf->page = virt_to_head_page(xdp->data);
txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
- flags = ((len) << TX_BD_LEN_SHIFT) | ((num_frags + 1) << TX_BD_FLAGS_BD_CNT_SHIFT);
+ flags = (len << TX_BD_LEN_SHIFT) |
+ ((num_frags + 1) << TX_BD_FLAGS_BD_CNT_SHIFT) |
+ bnxt_lhint_arr[len >> 9];
txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
txbd->tx_bd_opaque = prod;
txbd->tx_bd_haddr = cpu_to_le64(mapping);
@@ -82,7 +83,6 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
flags = frag_len << TX_BD_LEN_SHIFT;
txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
- txbd->tx_bd_opaque = prod;
txbd->tx_bd_haddr = cpu_to_le64(frag_mapping);
len = frag_len;
@@ -96,7 +96,7 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
prod = NEXT_TX(prod);
txr->tx_prod = prod;
- return first_buf;
+ return tx_buf;
}
static void __bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
index 4af5561cbfc5..ddfe9208529a 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
@@ -1236,8 +1236,8 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
csk->sndbuf = newsk->sk_sndbuf;
csk->smac_idx = ((struct port_info *)netdev_priv(ndev))->smt_idx;
RCV_WSCALE(tp) = select_rcv_wscale(tcp_full_space(newsk),
- sock_net(newsk)->
- ipv4.sysctl_tcp_window_scaling,
+ READ_ONCE(sock_net(newsk)->
+ ipv4.sysctl_tcp_window_scaling),
tp->window_clamp);
neigh_release(n);
inet_inherit_port(&tcp_hashinfo, lsk, newsk);
@@ -1384,7 +1384,7 @@ static void chtls_pass_accept_request(struct sock *sk,
#endif
}
if (req->tcpopt.wsf <= 14 &&
- sock_net(sk)->ipv4.sysctl_tcp_window_scaling) {
+ READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling)) {
inet_rsk(oreq)->wscale_ok = 1;
inet_rsk(oreq)->snd_wscale = req->tcpopt.wsf;
}
@@ -1392,7 +1392,7 @@ static void chtls_pass_accept_request(struct sock *sk,
th_ecn = tcph->ece && tcph->cwr;
if (th_ecn) {
ect = !INET_ECN_is_not_ect(ip_dsfield);
- ecn_ok = sock_net(sk)->ipv4.sysctl_tcp_ecn;
+ ecn_ok = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn);
if ((!ect && ecn_ok) || tcp_ca_needs_ecn(sk))
inet_rsk(oreq)->ecn_ok = 1;
}
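
The chtls hunks wrap lockless sysctl reads in READ_ONCE() so each value is fetched exactly once, without compiler tearing or refetching. A rough user-space rendition of the annotation (GCC/Clang __typeof__ assumed; this mirrors the intent of the kernel macro, not its exact definition):

    #include <stdio.h>

    /* Force a single, untorn access by reading through a volatile lvalue. */
    #define READ_ONCE(x)      (*(const volatile __typeof__(x) *)&(x))
    #define WRITE_ONCE(x, v)  (*(volatile __typeof__(x) *)&(x) = (v))

    static int tcp_ecn_sysctl; /* stands in for net->ipv4.sysctl_tcp_ecn */

    static int ecn_enabled(void)
    {
        /* One load; the compiler may not re-read or split it. */
        return READ_ONCE(tcp_ecn_sysctl);
    }

    int main(void)
    {
        WRITE_ONCE(tcp_ecn_sysctl, 1);
        printf("ecn_ok = %d\n", ecn_enabled());
        return 0;
    }
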
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 528eb0f223b1..b4f5e57d0285 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -2287,7 +2287,7 @@ err:
/* Uses sync mcc */
int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
- u8 page_num, u8 *data)
+ u8 page_num, u32 off, u32 len, u8 *data)
{
struct be_dma_mem cmd;
struct be_mcc_wrb *wrb;
@@ -2321,10 +2321,10 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
req->port = cpu_to_le32(adapter->hba_port_num);
req->page_num = cpu_to_le32(page_num);
status = be_mcc_notify_wait(adapter);
- if (!status) {
+ if (!status && len > 0) {
struct be_cmd_resp_port_type *resp = cmd.va;
- memcpy(data, resp->page_data, PAGE_DATA_LEN);
+ memcpy(data, resp->page_data + off, len);
}
err:
mutex_unlock(&adapter->mcc_lock);
@@ -2415,7 +2415,7 @@ int be_cmd_query_cable_type(struct be_adapter *adapter)
int status;
status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
- page_data);
+ 0, PAGE_DATA_LEN, page_data);
if (!status) {
switch (adapter->phy.interface_type) {
case PHY_TYPE_QSFP:
@@ -2440,7 +2440,7 @@ int be_cmd_query_sfp_info(struct be_adapter *adapter)
int status;
status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
- page_data);
+ 0, PAGE_DATA_LEN, page_data);
if (!status) {
strlcpy(adapter->phy.vendor_name, page_data +
SFP_VENDOR_NAME_OFFSET, SFP_VENDOR_NAME_LEN - 1);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index db1f3b908582..e2085c68c0ee 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -2427,7 +2427,7 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, u8 beacon,
int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num,
u32 *state);
int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
- u8 page_num, u8 *data);
+ u8 page_num, u32 off, u32 len, u8 *data);
int be_cmd_query_cable_type(struct be_adapter *adapter);
int be_cmd_query_sfp_info(struct be_adapter *adapter);
int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index dfa784339781..bd0df189d871 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -1344,7 +1344,7 @@ static int be_get_module_info(struct net_device *netdev,
return -EOPNOTSUPP;
status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
- page_data);
+ 0, PAGE_DATA_LEN, page_data);
if (!status) {
if (!page_data[SFP_PLUS_SFF_8472_COMP]) {
modinfo->type = ETH_MODULE_SFF_8079;
@@ -1362,25 +1362,32 @@ static int be_get_module_eeprom(struct net_device *netdev,
{
struct be_adapter *adapter = netdev_priv(netdev);
int status;
+ u32 begin, end;
if (!check_privilege(adapter, MAX_PRIVILEGES))
return -EOPNOTSUPP;
- status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
- data);
- if (status)
- goto err;
+ begin = eeprom->offset;
+ end = eeprom->offset + eeprom->len;
+
+ if (begin < PAGE_DATA_LEN) {
+ status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0, begin,
+ min_t(u32, end, PAGE_DATA_LEN) - begin,
+ data);
+ if (status)
+ goto err;
+
+ data += PAGE_DATA_LEN - begin;
+ begin = PAGE_DATA_LEN;
+ }
- if (eeprom->offset + eeprom->len > PAGE_DATA_LEN) {
- status = be_cmd_read_port_transceiver_data(adapter,
- TR_PAGE_A2,
- data +
- PAGE_DATA_LEN);
+ if (end > PAGE_DATA_LEN) {
+ status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A2,
+ begin - PAGE_DATA_LEN,
+ end - begin, data);
if (status)
goto err;
}
- if (eeprom->offset)
- memcpy(data, data + eeprom->offset, eeprom->len);
err:
return be_cmd_status(status);
}
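
With the new off/len parameters, be_get_module_eeprom() translates the requested (offset, len) window into at most two page-relative reads: the bytes below PAGE_DATA_LEN come from page A0 and the remainder from page A2. The split arithmetic in isolation (a sketch; the 256-byte page size matches PAGE_DATA_LEN, the other names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_DATA_LEN 256u

    /* Split an absolute (offset, len) EEPROM window into per-page chunks. */
    static void split_eeprom_window(uint32_t offset, uint32_t len)
    {
        uint32_t begin = offset, end = offset + len;

        if (begin < PAGE_DATA_LEN) {
            uint32_t n = (end < PAGE_DATA_LEN ? end : PAGE_DATA_LEN) - begin;

            printf("page A0: off=%u len=%u\n", begin, n);
            begin = PAGE_DATA_LEN; /* anything left starts at page A2 */
        }
        if (end > PAGE_DATA_LEN)
            printf("page A2: off=%u len=%u\n", begin - PAGE_DATA_LEN,
                   end - begin);
    }

    int main(void)
    {
        split_eeprom_window(200, 120); /* straddles the A0/A2 boundary */
        return 0;
    }
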
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 5231818943c6..c03663785a8d 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1764,6 +1764,19 @@ cleanup_clk:
return rc;
}
+static bool ftgmac100_has_child_node(struct device_node *np, const char *name)
+{
+ struct device_node *child_np = of_get_child_by_name(np, name);
+ bool ret = false;
+
+ if (child_np) {
+ ret = true;
+ of_node_put(child_np);
+ }
+
+ return ret;
+}
+
static int ftgmac100_probe(struct platform_device *pdev)
{
struct resource *res;
@@ -1883,7 +1896,7 @@ static int ftgmac100_probe(struct platform_device *pdev)
/* Display what we found */
phy_attached_info(phy);
- } else if (np && !of_get_child_by_name(np, "mdio")) {
+ } else if (np && !ftgmac100_has_child_node(np, "mdio")) {
/* Support legacy ASPEED devicetree descriptions that describe a
* MAC with an embedded MDIO controller but have no "mdio"
* child node. Automatically scan the MDIO bus for available
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 8a3a446219f7..94f80e1c4020 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -769,6 +769,7 @@ struct hnae3_tc_info {
u8 prio_tc[HNAE3_MAX_USER_PRIO]; /* TC indexed by prio */
u16 tqp_count[HNAE3_MAX_TC];
u16 tqp_offset[HNAE3_MAX_TC];
+ u8 max_tc; /* Total number of TCs */
u8 num_tc; /* Total number of enabled TCs */
bool mqprio_active;
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 6d20974519fe..4c7988e308a2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -1129,7 +1129,7 @@ hns3_is_ringparam_changed(struct net_device *ndev,
if (old_ringparam->tx_desc_num == new_ringparam->tx_desc_num &&
old_ringparam->rx_desc_num == new_ringparam->rx_desc_num &&
old_ringparam->rx_buf_len == new_ringparam->rx_buf_len) {
- netdev_info(ndev, "ringparam not changed\n");
+ netdev_info(ndev, "descriptor number and rx buffer length not changed\n");
return false;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 1ebad0e50e6a..fae79764dc44 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -3268,7 +3268,7 @@ static int hclge_tp_port_init(struct hclge_dev *hdev)
static int hclge_update_port_info(struct hclge_dev *hdev)
{
struct hclge_mac *mac = &hdev->hw.mac;
- int speed = HCLGE_MAC_SPEED_UNKNOWN;
+ int speed;
int ret;
/* get the port info from SFP cmd if not copper port */
@@ -3279,10 +3279,13 @@ static int hclge_update_port_info(struct hclge_dev *hdev)
if (!hdev->support_sfp_query)
return 0;
- if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
+ speed = mac->speed;
ret = hclge_get_sfp_info(hdev, mac);
- else
+ } else {
+ speed = HCLGE_MAC_SPEED_UNKNOWN;
ret = hclge_get_sfp_speed(hdev, &speed);
+ }
if (ret == -EOPNOTSUPP) {
hdev->support_sfp_query = false;
@@ -3294,6 +3297,8 @@ static int hclge_update_port_info(struct hclge_dev *hdev)
if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
if (mac->speed_type == QUERY_ACTIVE_SPEED) {
hclge_update_port_capability(hdev, mac);
+ if (mac->speed != speed)
+ (void)hclge_tm_port_shaper_cfg(hdev);
return 0;
}
return hclge_cfg_mac_speed_dup(hdev, mac->speed,
@@ -3376,6 +3381,12 @@ static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
link_state_old = vport->vf_info.link_state;
vport->vf_info.link_state = link_state;
+ /* return success directly if the VF is not alive; the VF will
+ * query the link state itself when it starts work.
+ */
+ if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
+ return 0;
+
ret = hclge_push_vf_link_status(vport);
if (ret) {
vport->vf_info.link_state = link_state_old;
@@ -10117,6 +10128,7 @@ static int hclge_modify_port_base_vlan_tag(struct hclge_vport *vport,
if (ret)
return ret;
+ vport->port_base_vlan_cfg.tbl_sta = false;
/* remove old VLAN tag */
if (old_info->vlan_tag == 0)
ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 1f87a8a3fe32..2f33b036a47a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -282,8 +282,8 @@ static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
-static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
- u16 qs_id, u8 pri)
+static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev, u16 qs_id, u8 pri,
+ bool link_vld)
{
struct hclge_qs_to_pri_link_cmd *map;
struct hclge_desc desc;
@@ -294,7 +294,7 @@ static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
map->qs_id = cpu_to_le16(qs_id);
map->priority = pri;
- map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK;
+ map->link_vld = link_vld ? HCLGE_TM_QS_PRI_LINK_VLD_MSK : 0;
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
@@ -420,7 +420,7 @@ static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
-static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
+int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
{
struct hclge_port_shapping_cmd *shap_cfg_cmd;
struct hclge_shaper_ir_para ir_para;
@@ -642,11 +642,13 @@ static void hclge_tm_update_kinfo_rss_size(struct hclge_vport *vport)
* one tc for VF for simplicity. VF's vport_id is non zero.
*/
if (vport->vport_id) {
+ kinfo->tc_info.max_tc = 1;
kinfo->tc_info.num_tc = 1;
vport->qs_offset = HNAE3_MAX_TC +
vport->vport_id - HCLGE_VF_VPORT_START_NUM;
vport_max_rss_size = hdev->vf_rss_size_max;
} else {
+ kinfo->tc_info.max_tc = hdev->tc_max;
kinfo->tc_info.num_tc =
min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
vport->qs_offset = 0;
@@ -679,7 +681,9 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
kinfo->num_tqps = hclge_vport_get_tqp_num(vport);
vport->dwrr = 100; /* 100 percent as init */
vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;
- hdev->rss_cfg.rss_size = kinfo->rss_size;
+
+ if (vport->vport_id == PF_VPORT_ID)
+ hdev->rss_cfg.rss_size = kinfo->rss_size;
/* when mqprio is enabled, the tc_info has been updated. */
if (kinfo->tc_info.mqprio_active)
@@ -714,14 +718,22 @@ static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
{
- u8 i;
+ u8 i, tc_sch_mode;
+ u32 bw_limit;
+
+ for (i = 0; i < hdev->tc_max; i++) {
+ if (i < hdev->tm_info.num_tc) {
+ tc_sch_mode = HCLGE_SCH_MODE_DWRR;
+ bw_limit = hdev->tm_info.pg_info[0].bw_limit;
+ } else {
+ tc_sch_mode = HCLGE_SCH_MODE_SP;
+ bw_limit = 0;
+ }
- for (i = 0; i < hdev->tm_info.num_tc; i++) {
hdev->tm_info.tc_info[i].tc_id = i;
- hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
+ hdev->tm_info.tc_info[i].tc_sch_mode = tc_sch_mode;
hdev->tm_info.tc_info[i].pgid = 0;
- hdev->tm_info.tc_info[i].bw_limit =
- hdev->tm_info.pg_info[0].bw_limit;
+ hdev->tm_info.tc_info[i].bw_limit = bw_limit;
}
for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
@@ -926,10 +938,13 @@ static int hclge_tm_pri_q_qs_cfg_tc_base(struct hclge_dev *hdev)
for (k = 0; k < hdev->num_alloc_vport; k++) {
struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo;
- for (i = 0; i < kinfo->tc_info.num_tc; i++) {
+ for (i = 0; i < kinfo->tc_info.max_tc; i++) {
+ u8 pri = i < kinfo->tc_info.num_tc ? i : 0;
+ bool link_vld = i < kinfo->tc_info.num_tc;
+
ret = hclge_tm_qs_to_pri_map_cfg(hdev,
vport[k].qs_offset + i,
- i);
+ pri, link_vld);
if (ret)
return ret;
}
@@ -949,7 +964,7 @@ static int hclge_tm_pri_q_qs_cfg_vnet_base(struct hclge_dev *hdev)
for (i = 0; i < HNAE3_MAX_TC; i++) {
ret = hclge_tm_qs_to_pri_map_cfg(hdev,
vport[k].qs_offset + i,
- k);
+ k, true);
if (ret)
return ret;
}
@@ -989,33 +1004,39 @@ static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
{
u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
struct hclge_shaper_ir_para ir_para;
- u32 shaper_para;
+ u32 shaper_para_c, shaper_para_p;
int ret;
u32 i;
- for (i = 0; i < hdev->tm_info.num_tc; i++) {
+ for (i = 0; i < hdev->tc_max; i++) {
u32 rate = hdev->tm_info.tc_info[i].bw_limit;
- ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PRI,
- &ir_para, max_tm_rate);
- if (ret)
- return ret;
+ if (rate) {
+ ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PRI,
+ &ir_para, max_tm_rate);
+ if (ret)
+ return ret;
+
+ shaper_para_c = hclge_tm_get_shapping_para(0, 0, 0,
+ HCLGE_SHAPER_BS_U_DEF,
+ HCLGE_SHAPER_BS_S_DEF);
+ shaper_para_p = hclge_tm_get_shapping_para(ir_para.ir_b,
+ ir_para.ir_u,
+ ir_para.ir_s,
+ HCLGE_SHAPER_BS_U_DEF,
+ HCLGE_SHAPER_BS_S_DEF);
+ } else {
+ shaper_para_c = 0;
+ shaper_para_p = 0;
+ }
- shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
- HCLGE_SHAPER_BS_U_DEF,
- HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i,
- shaper_para, rate);
+ shaper_para_c, rate);
if (ret)
return ret;
- shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b,
- ir_para.ir_u,
- ir_para.ir_s,
- HCLGE_SHAPER_BS_U_DEF,
- HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i,
- shaper_para, rate);
+ shaper_para_p, rate);
if (ret)
return ret;
}
@@ -1125,7 +1146,7 @@ static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
int ret;
u32 i, k;
- for (i = 0; i < hdev->tm_info.num_tc; i++) {
+ for (i = 0; i < hdev->tc_max; i++) {
pg_info =
&hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
dwrr = pg_info->tc_dwrr[i];
@@ -1135,9 +1156,15 @@ static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
return ret;
for (k = 0; k < hdev->num_alloc_vport; k++) {
+ struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo;
+
+ if (i >= kinfo->tc_info.max_tc)
+ continue;
+
+ dwrr = i < kinfo->tc_info.num_tc ? vport[k].dwrr : 0;
ret = hclge_tm_qs_weight_cfg(
hdev, vport[k].qs_offset + i,
- vport[k].dwrr);
+ dwrr);
if (ret)
return ret;
}
@@ -1303,6 +1330,7 @@ static int hclge_tm_schd_mode_tc_base_cfg(struct hclge_dev *hdev, u8 pri_id)
{
struct hclge_vport *vport = hdev->vport;
int ret;
+ u8 mode;
u16 i;
ret = hclge_tm_pri_schd_mode_cfg(hdev, pri_id);
@@ -1310,9 +1338,16 @@ static int hclge_tm_schd_mode_tc_base_cfg(struct hclge_dev *hdev, u8 pri_id)
return ret;
for (i = 0; i < hdev->num_alloc_vport; i++) {
+ struct hnae3_knic_private_info *kinfo = &vport[i].nic.kinfo;
+
+ if (pri_id >= kinfo->tc_info.max_tc)
+ continue;
+
+ mode = pri_id < kinfo->tc_info.num_tc ? HCLGE_SCH_MODE_DWRR :
+ HCLGE_SCH_MODE_SP;
ret = hclge_tm_qs_schd_mode_cfg(hdev,
vport[i].qs_offset + pri_id,
- HCLGE_SCH_MODE_DWRR);
+ mode);
if (ret)
return ret;
}
@@ -1353,7 +1388,7 @@ static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
u8 i;
if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
- for (i = 0; i < hdev->tm_info.num_tc; i++) {
+ for (i = 0; i < hdev->tc_max; i++) {
ret = hclge_tm_schd_mode_tc_base_cfg(hdev, i);
if (ret)
return ret;
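
The hclge changes above configure every TC up to tc_max rather than only the enabled ones, parking the unused TCs in strict-priority mode with zero bandwidth so stale DWRR settings cannot linger after the TC count shrinks. The selection rule alone, with hypothetical values:

    #include <stdio.h>

    enum sch_mode { SCH_DWRR, SCH_SP };

    int main(void)
    {
        unsigned int tc_max = 8, num_tc = 4; /* e.g. 4 of 8 TCs enabled */
        unsigned int i;

        for (i = 0; i < tc_max; i++) {
            /* enabled TCs share bandwidth via DWRR; the rest are parked */
            enum sch_mode mode = i < num_tc ? SCH_DWRR : SCH_SP;
            unsigned int bw = i < num_tc ? 100 : 0; /* 0 = shaper disabled */

            printf("tc%u: mode=%s bw=%u\n", i,
                   mode == SCH_DWRR ? "dwrr" : "sp", bw);
        }
        return 0;
    }
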
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index 619cc30a2dfc..d943943912f7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -237,6 +237,7 @@ int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr);
void hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats);
void hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats);
int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate);
+int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev);
int hclge_tm_get_qset_num(struct hclge_dev *hdev, u16 *qset_num);
int hclge_tm_get_pri_num(struct hclge_dev *hdev, u8 *pri_num);
int hclge_tm_get_qset_map_pri(struct hclge_dev *hdev, u16 qset_id, u8 *priority,
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
index 60ae8bfc5f69..1749d26f4bef 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
@@ -43,9 +43,7 @@ static bool check_image_valid(struct hinic_devlink_priv *priv, const u8 *buf,
for (i = 0; i < fw_image->fw_info.fw_section_cnt; i++) {
len += fw_image->fw_section_info[i].fw_section_len;
- memcpy(&host_image->image_section_info[i],
- &fw_image->fw_section_info[i],
- sizeof(struct fw_section_info_st));
+ host_image->image_section_info[i] = fw_image->fw_section_info[i];
}
if (len != fw_image->fw_len ||
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 7e7fe5bdf1f8..5ab7c0f81e9a 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -5981,6 +5981,15 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
release_sub_crqs(adapter, 0);
rc = init_sub_crqs(adapter);
} else {
+ /* no need to reinitialize completely, but we do
+ * need to clean up transmits that were in flight
+ * when we processed the reset. Failure to do so
+ * will confound the upper layer, usually TCP, by
+ * creating the illusion of transmits that are
+ * awaiting completion.
+ */
+ clean_tx_pools(adapter);
+
rc = reset_sub_crq_queues(adapter);
}
} else {
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index 13382df2f2ef..bcf680e83811 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -630,7 +630,6 @@ struct e1000_phy_info {
bool disable_polarity_correction;
bool is_mdix;
bool polarity_correction;
- bool reset_disable;
bool speed_downgraded;
bool autoneg_wait_to_complete;
};
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index e6c8e6d5234f..9466f65a6da7 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -2050,10 +2050,6 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
bool blocked = false;
int i = 0;
- /* Check the PHY (LCD) reset flag */
- if (hw->phy.reset_disable)
- return true;
-
while ((blocked = !(er32(FWSM) & E1000_ICH_FWSM_RSPCIPHY)) &&
(i++ < 30))
usleep_range(10000, 11000);
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 638a3ddd7ada..2504b11c3169 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -271,7 +271,6 @@
#define I217_CGFREG_ENABLE_MTA_RESET 0x0002
#define I217_MEMPWR PHY_REG(772, 26)
#define I217_MEMPWR_DISABLE_SMB_RELEASE 0x0010
-#define I217_MEMPWR_MOEM 0x1000
/* Receive Address Initial CRC Calculation */
#define E1000_PCH_RAICC(_n) (0x05F50 + ((_n) * 4))
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index fa06f68c8c80..f1729940e46c 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -6494,6 +6494,10 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID &&
hw->mac.type >= e1000_pch_adp) {
+ /* Keep the GPT clock enabled for CSME */
+ mac_data = er32(FEXTNVM);
+ mac_data |= BIT(3);
+ ew32(FEXTNVM, mac_data);
/* Request ME unconfigure the device from S0ix */
mac_data = er32(H2ME);
mac_data &= ~E1000_H2ME_START_DPG;
@@ -6987,21 +6991,8 @@ static __maybe_unused int e1000e_pm_suspend(struct device *dev)
struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
struct e1000_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = to_pci_dev(dev);
- struct e1000_hw *hw = &adapter->hw;
- u16 phy_data;
int rc;
- if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID &&
- hw->mac.type >= e1000_pch_adp) {
- /* Mask OEM Bits / Gig Disable / Restart AN (772_26[12] = 1) */
- e1e_rphy(hw, I217_MEMPWR, &phy_data);
- phy_data |= I217_MEMPWR_MOEM;
- e1e_wphy(hw, I217_MEMPWR, phy_data);
-
- /* Disable LCD reset */
- hw->phy.reset_disable = true;
- }
-
e1000e_flush_lpic(pdev);
e1000e_pm_freeze(dev);
@@ -7023,8 +7014,6 @@ static __maybe_unused int e1000e_pm_resume(struct device *dev)
struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
struct e1000_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = to_pci_dev(dev);
- struct e1000_hw *hw = &adapter->hw;
- u16 phy_data;
int rc;
/* Introduce S0ix implementation */
@@ -7035,17 +7024,6 @@ static __maybe_unused int e1000e_pm_resume(struct device *dev)
if (rc)
return rc;
- if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID &&
- hw->mac.type >= e1000_pch_adp) {
- /* Unmask OEM Bits / Gig Disable / Restart AN 772_26[12] = 0 */
- e1e_rphy(hw, I217_MEMPWR, &phy_data);
- phy_data &= ~I217_MEMPWR_MOEM;
- e1e_wphy(hw, I217_MEMPWR, phy_data);
-
- /* Enable LCD reset */
- hw->phy.reset_disable = false;
- }
-
return e1000e_pm_thaw(dev);
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 18558a019353..407fe8f340a0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -37,6 +37,7 @@
#include <net/tc_act/tc_mirred.h>
#include <net/udp_tunnel.h>
#include <net/xdp_sock.h>
+#include <linux/bitfield.h>
#include "i40e_type.h"
#include "i40e_prototype.h"
#include <linux/net/intel/i40e_client.h>
@@ -1092,6 +1093,21 @@ static inline void i40e_write_fd_input_set(struct i40e_pf *pf,
(u32)(val & 0xFFFFFFFFULL));
}
+/**
+ * i40e_get_pf_count - get PCI PF count.
+ * @hw: pointer to a hw.
+ *
+ * Reports the function number of the highest PCI physical
+ * function plus 1 as it is loaded from the NVM.
+ *
+ * Return: PCI PF count.
+ **/
+static inline u32 i40e_get_pf_count(struct i40e_hw *hw)
+{
+ return FIELD_GET(I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK,
+ rd32(hw, I40E_GLGEN_PCIFCNCNT));
+}
+
/* needed by i40e_ethtool.c */
int i40e_up(struct i40e_vsi *vsi);
void i40e_down(struct i40e_vsi *vsi);
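
i40e_get_pf_count() extracts the PF-count field with FIELD_GET() from <linux/bitfield.h>, which derives the shift from the mask. A self-contained sketch of that extraction (the masks mirror the GLGEN_PCIFCNCNT definitions added later in this series; the sample register value is made up, and the divide-by-lowest-set-bit trick is equivalent to FIELD_GET() for contiguous masks):

    #include <stdint.h>
    #include <stdio.h>

    /* Derive the field shift from its mask, as FIELD_GET() does. */
    #define FIELD_GET_U32(mask, reg) \
        (((reg) & (mask)) / ((mask) & -(mask)))

    #define PCIPFCNT_MASK 0x0000001Fu  /* bits 4:0 of GLGEN_PCIFCNCNT */
    #define PCIVFCNT_MASK 0x00FF0000u  /* bits 23:16 */

    int main(void)
    {
        uint32_t reg = 0x00400004; /* sample value: 64 VFs, 4 PFs */

        printf("PF count = %u\n", FIELD_GET_U32(PCIPFCNT_MASK, reg));
        printf("VF count = %u\n", FIELD_GET_U32(PCIVFCNT_MASK, reg));
        return 0;
    }
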
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 610f00cbaff9..19704f5c8291 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -2586,15 +2586,16 @@ static void i40e_diag_test(struct net_device *netdev,
set_bit(__I40E_TESTING, pf->state);
+ if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
+ test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
+ dev_warn(&pf->pdev->dev,
+ "Cannot start offline testing when PF is in reset state.\n");
+ goto skip_ol_tests;
+ }
+
if (i40e_active_vfs(pf) || i40e_active_vmdqs(pf)) {
dev_warn(&pf->pdev->dev,
"Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n");
- data[I40E_ETH_TEST_REG] = 1;
- data[I40E_ETH_TEST_EEPROM] = 1;
- data[I40E_ETH_TEST_INTR] = 1;
- data[I40E_ETH_TEST_LINK] = 1;
- eth_test->flags |= ETH_TEST_FL_FAILED;
- clear_bit(__I40E_TESTING, pf->state);
goto skip_ol_tests;
}
@@ -2641,9 +2642,17 @@ static void i40e_diag_test(struct net_device *netdev,
data[I40E_ETH_TEST_INTR] = 0;
}
-skip_ol_tests:
-
netif_info(pf, drv, netdev, "testing finished\n");
+ return;
+
+skip_ol_tests:
+ data[I40E_ETH_TEST_REG] = 1;
+ data[I40E_ETH_TEST_EEPROM] = 1;
+ data[I40E_ETH_TEST_INTR] = 1;
+ data[I40E_ETH_TEST_LINK] = 1;
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ clear_bit(__I40E_TESTING, pf->state);
+ netif_info(pf, drv, netdev, "testing failed\n");
}
static void i40e_get_wol(struct net_device *netdev,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 332a608dbaa6..7f1a0d90dc51 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -551,6 +551,47 @@ void i40e_pf_reset_stats(struct i40e_pf *pf)
}
/**
+ * i40e_compute_pci_to_hw_id - compute index from PCI function.
+ * @vsi: ptr to the VSI to read from.
+ * @hw: ptr to the hardware info.
+ **/
+static u32 i40e_compute_pci_to_hw_id(struct i40e_vsi *vsi, struct i40e_hw *hw)
+{
+ int pf_count = i40e_get_pf_count(hw);
+
+ if (vsi->type == I40E_VSI_SRIOV)
+ return (hw->port * BIT(7)) / pf_count + vsi->vf_id;
+
+ return hw->port + BIT(7);
+}
+
+/**
+ * i40e_stat_update64 - read and update a 64 bit stat from the chip.
+ * @hw: ptr to the hardware info.
+ * @hireg: the high 32 bit reg to read.
+ * @loreg: the low 32 bit reg to read.
+ * @offset_loaded: has the initial offset been loaded yet.
+ * @offset: ptr to current offset value.
+ * @stat: ptr to the stat.
+ *
+ * Since the device stats are not reset at PFReset, they will not
+ * be zeroed when the driver starts. We'll save the first values read
+ * and use them as offsets to be subtracted from the raw values in order
+ * to report stats that count from zero.
+ **/
+static void i40e_stat_update64(struct i40e_hw *hw, u32 hireg, u32 loreg,
+ bool offset_loaded, u64 *offset, u64 *stat)
+{
+ u64 new_data;
+
+ new_data = rd64(hw, loreg);
+
+ if (!offset_loaded || new_data < *offset)
+ *offset = new_data;
+ *stat = new_data - *offset;
+}
+
+/**
* i40e_stat_update48 - read and update a 48 bit stat from the chip
* @hw: ptr to the hardware info
* @hireg: the high 32 bit reg to read
@@ -622,6 +663,34 @@ static void i40e_stat_update_and_clear32(struct i40e_hw *hw, u32 reg, u64 *stat)
}
/**
+ * i40e_stats_update_rx_discards - update rx_discards.
+ * @vsi: ptr to the VSI to be updated.
+ * @hw: ptr to the hardware info.
+ * @stat_idx: VSI's stat_counter_idx.
+ * @offset_loaded: ptr to the VSI's stat_offsets_loaded.
+ * @stat_offset: ptr to stat_offset to store first read of specific register.
+ * @stat: ptr to VSI's stat to be updated.
+ **/
+static void
+i40e_stats_update_rx_discards(struct i40e_vsi *vsi, struct i40e_hw *hw,
+ int stat_idx, bool offset_loaded,
+ struct i40e_eth_stats *stat_offset,
+ struct i40e_eth_stats *stat)
+{
+ u64 rx_rdpc, rx_rxerr;
+
+ i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx), offset_loaded,
+ &stat_offset->rx_discards, &rx_rdpc);
+ i40e_stat_update64(hw,
+ I40E_GL_RXERR1H(i40e_compute_pci_to_hw_id(vsi, hw)),
+ I40E_GL_RXERR1L(i40e_compute_pci_to_hw_id(vsi, hw)),
+ offset_loaded, &stat_offset->rx_discards_other,
+ &rx_rxerr);
+
+ stat->rx_discards = rx_rdpc + rx_rxerr;
+}
+
+/**
* i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
* @vsi: the VSI to be updated
**/
@@ -680,6 +749,10 @@ void i40e_update_eth_stats(struct i40e_vsi *vsi)
I40E_GLV_BPTCL(stat_idx),
vsi->stat_offsets_loaded,
&oes->tx_broadcast, &es->tx_broadcast);
+
+ i40e_stats_update_rx_discards(vsi, hw, stat_idx,
+ vsi->stat_offsets_loaded, oes, es);
+
vsi->stat_offsets_loaded = true;
}
@@ -8542,6 +8615,11 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi,
return -EOPNOTSUPP;
}
+ if (!tc) {
+ dev_err(&pf->pdev->dev, "Unable to add filter because of invalid destination");
+ return -EINVAL;
+ }
+
if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
return -EBUSY;
@@ -10572,7 +10650,7 @@ static int i40e_reset(struct i40e_pf *pf)
**/
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
{
- int old_recovery_mode_bit = test_bit(__I40E_RECOVERY_MODE, pf->state);
+ const bool is_recovery_mode_reported = i40e_check_recovery_mode(pf);
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_hw *hw = &pf->hw;
i40e_status ret;
@@ -10580,13 +10658,11 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
int v;
if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
- i40e_check_recovery_mode(pf)) {
+ is_recovery_mode_reported)
i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev);
- }
if (test_bit(__I40E_DOWN, pf->state) &&
- !test_bit(__I40E_RECOVERY_MODE, pf->state) &&
- !old_recovery_mode_bit)
+ !test_bit(__I40E_RECOVERY_MODE, pf->state))
goto clear_recovery;
dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
@@ -10613,13 +10689,12 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
* accordingly with regard to resources initialization
* and deinitialization
*/
- if (test_bit(__I40E_RECOVERY_MODE, pf->state) ||
- old_recovery_mode_bit) {
+ if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
if (i40e_get_capabilities(pf,
i40e_aqc_opc_list_func_capabilities))
goto end_unlock;
- if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
+ if (is_recovery_mode_reported) {
/* we're staying in recovery mode so we'll reinitialize
* misc vector here
*/
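
i40e_stat_update64() follows the driver's usual offset scheme: the first raw value read becomes a baseline that is subtracted from later reads, so counters that are not cleared by a PF reset still report from zero; the existing 48-bit variant additionally handles the counter wrapping below the baseline. Both in a compact sketch, with the register reads replaced by plain parameters:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Report a hardware counter relative to the first value ever read. */
    static void stat_update64(uint64_t raw, bool offset_loaded,
                              uint64_t *offset, uint64_t *stat)
    {
        if (!offset_loaded || raw < *offset)
            *offset = raw;  /* (re)baseline on first read or HW reset */
        *stat = raw - *offset;
    }

    /* 48-bit counters can wrap below the saved offset; add back 2^48. */
    static void stat_update48(uint64_t raw, bool offset_loaded,
                              uint64_t *offset, uint64_t *stat)
    {
        if (!offset_loaded)
            *offset = raw;
        if (raw >= *offset)
            *stat = raw - *offset;
        else
            *stat = (raw + (1ULL << 48)) - *offset;
    }

    int main(void)
    {
        uint64_t off = 0, stat = 0;

        stat_update64(1000, false, &off, &stat); /* baseline: stat == 0 */
        stat_update64(1500, true, &off, &stat);  /* stat == 500 */
        printf("stat = %llu\n", (unsigned long long)stat);

        stat_update48(10, false, &off, &stat);   /* exercise the wrap path */
        stat_update48(5, true, &off, &stat);     /* wrapped below baseline */
        printf("stat48 = %llu\n", (unsigned long long)stat);
        return 0;
    }
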
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index 1908eed4fa5e..7339003aa17c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -211,6 +211,11 @@
#define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0
#define I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT 16
#define I40E_GLGEN_MSRWD_MDIRDDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT 0x001C0AB4 /* Reset: PCIR */
+#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT 0
+#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK I40E_MASK(0x1F, I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT 16
+#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK I40E_MASK(0xFF, I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT)
#define I40E_GLGEN_RSTAT 0x000B8188 /* Reset: POR */
#define I40E_GLGEN_RSTAT_DEVSTATE_SHIFT 0
#define I40E_GLGEN_RSTAT_DEVSTATE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_DEVSTATE_SHIFT)
@@ -643,6 +648,14 @@
#define I40E_VFQF_HKEY1_MAX_INDEX 12
#define I40E_VFQF_HLUT1(_i, _VF) (0x00220000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: CORER */
#define I40E_VFQF_HLUT1_MAX_INDEX 15
+#define I40E_GL_RXERR1H(_i) (0x00318004 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_RXERR1H_MAX_INDEX 143
+#define I40E_GL_RXERR1H_RXERR1H_SHIFT 0
+#define I40E_GL_RXERR1H_RXERR1H_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1H_RXERR1H_SHIFT)
+#define I40E_GL_RXERR1L(_i) (0x00318000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_RXERR1L_MAX_INDEX 143
+#define I40E_GL_RXERR1L_RXERR1L_SHIFT 0
+#define I40E_GL_RXERR1L_RXERR1L_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1L_RXERR1L_SHIFT)
#define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 36a4ca1ffb1a..7b3f30beb757 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -1172,6 +1172,7 @@ struct i40e_eth_stats {
u64 tx_broadcast; /* bptc */
u64 tx_discards; /* tdpc */
u64 tx_errors; /* tepc */
+ u64 rx_discards_other; /* rxerr1 */
};
/* Statistics collected per VEB per TC */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 2606e8f0f19b..86b0f21287dc 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -2147,6 +2147,10 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
/* VFs only use TC 0 */
vfres->vsi_res[0].qset_handle
= le16_to_cpu(vsi->info.qs_handle[0]);
+ if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO) && !vf->pf_set_mac) {
+ i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
+ eth_zero_addr(vf->default_lan_addr.addr);
+ }
ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
vf->default_lan_addr.addr);
}
@@ -2282,7 +2286,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
}
if (vf->adq_enabled) {
- for (i = 0; i < I40E_MAX_VF_VSI; i++)
+ for (i = 0; i < vf->num_tc; i++)
num_qps_all += vf->ch[i].num_qps;
if (num_qps_all != qci->num_queue_pairs) {
aq_ret = I40E_ERR_PARAM;
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 49aed3e506a6..0ea0361cd86b 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -64,7 +64,6 @@ struct iavf_vsi {
u16 id;
DECLARE_BITMAP(state, __IAVF_VSI_STATE_SIZE__);
int base_vector;
- u16 work_limit;
u16 qs_handle;
void *priv; /* client driver data reference. */
};
@@ -159,8 +158,12 @@ struct iavf_vlan {
struct iavf_vlan_filter {
struct list_head list;
struct iavf_vlan vlan;
- bool remove; /* filter needs to be removed */
- bool add; /* filter needs to be added */
+ struct {
+ u8 is_new_vlan:1; /* filter is new, wait for PF answer */
+ u8 remove:1; /* filter needs to be removed */
+ u8 add:1; /* filter needs to be added */
+ u8 padding:5;
+ };
};
#define IAVF_MAX_TRAFFIC_CLASS 4
@@ -461,6 +464,10 @@ static inline const char *iavf_state_str(enum iavf_state_t state)
return "__IAVF_INIT_VERSION_CHECK";
case __IAVF_INIT_GET_RESOURCES:
return "__IAVF_INIT_GET_RESOURCES";
+ case __IAVF_INIT_EXTENDED_CAPS:
+ return "__IAVF_INIT_EXTENDED_CAPS";
+ case __IAVF_INIT_CONFIG_ADAPTER:
+ return "__IAVF_INIT_CONFIG_ADAPTER";
case __IAVF_INIT_SW:
return "__IAVF_INIT_SW";
case __IAVF_INIT_FAILED:
@@ -520,6 +527,7 @@ int iavf_get_vf_config(struct iavf_adapter *adapter);
int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter);
int iavf_send_vf_offload_vlan_v2_msg(struct iavf_adapter *adapter);
void iavf_set_queue_vlan_tag_loc(struct iavf_adapter *adapter);
+u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter);
void iavf_irq_enable(struct iavf_adapter *adapter, bool flush);
void iavf_configure_queues(struct iavf_adapter *adapter);
void iavf_deconfigure_queues(struct iavf_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index 3bb56714beb0..e535d4c3da49 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -692,12 +692,8 @@ static int __iavf_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec, int queue)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
- struct iavf_vsi *vsi = &adapter->vsi;
struct iavf_ring *rx_ring, *tx_ring;
- ec->tx_max_coalesced_frames = vsi->work_limit;
- ec->rx_max_coalesced_frames = vsi->work_limit;
-
/* Rx and Tx usecs per queue value. If user doesn't specify the
* queue, return queue 0's value to represent.
*/
@@ -825,12 +821,8 @@ static int __iavf_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec, int queue)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
- struct iavf_vsi *vsi = &adapter->vsi;
int i;
- if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
- vsi->work_limit = ec->tx_max_coalesced_frames_irq;
-
if (ec->rx_coalesce_usecs == 0) {
if (ec->use_adaptive_rx_coalesce)
netif_info(adapter, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n");
@@ -1969,8 +1961,6 @@ static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir,
static const struct ethtool_ops iavf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
- ETHTOOL_COALESCE_MAX_FRAMES |
- ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
ETHTOOL_COALESCE_USE_ADAPTIVE,
.get_drvinfo = iavf_get_drvinfo,
.get_link = ethtool_op_get_link,
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 7dfcf78b57fb..2e2c153ce46a 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -843,7 +843,7 @@ static void iavf_restore_filters(struct iavf_adapter *adapter)
* iavf_get_num_vlans_added - get number of VLANs added
* @adapter: board private structure
*/
-static u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter)
+u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter)
{
return bitmap_weight(adapter->vsi.active_cvlans, VLAN_N_VID) +
bitmap_weight(adapter->vsi.active_svlans, VLAN_N_VID);
@@ -906,11 +906,6 @@ static int iavf_vlan_rx_add_vid(struct net_device *netdev,
if (!iavf_add_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto))))
return -ENOMEM;
- if (proto == cpu_to_be16(ETH_P_8021Q))
- set_bit(vid, adapter->vsi.active_cvlans);
- else
- set_bit(vid, adapter->vsi.active_svlans);
-
return 0;
}
@@ -984,7 +979,7 @@ struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
list_add_tail(&f->list, &adapter->mac_filter_list);
f->add = true;
f->is_new_mac = true;
- f->is_primary = false;
+ f->is_primary = ether_addr_equal(macaddr, adapter->hw.mac.addr);
adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
} else {
f->remove = false;
@@ -2245,7 +2240,6 @@ int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter)
adapter->vsi.back = adapter;
adapter->vsi.base_vector = 1;
- adapter->vsi.work_limit = IAVF_DEFAULT_IRQ_WORK;
vsi->netdev = adapter->netdev;
vsi->qs_handle = adapter->vsi_res->qset_handle;
if (adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
@@ -2956,6 +2950,9 @@ continue_reset:
adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
iavf_misc_irq_enable(adapter);
+ bitmap_clear(adapter->vsi.active_cvlans, 0, VLAN_N_VID);
+ bitmap_clear(adapter->vsi.active_svlans, 0, VLAN_N_VID);
+
mod_delayed_work(iavf_wq, &adapter->watchdog_task, 2);
/* We were running when the reset started, so we need to restore some
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index 978f651c6b09..06d18797d25a 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -194,7 +194,7 @@ static bool iavf_clean_tx_irq(struct iavf_vsi *vsi,
struct iavf_tx_buffer *tx_buf;
struct iavf_tx_desc *tx_desc;
unsigned int total_bytes = 0, total_packets = 0;
- unsigned int budget = vsi->work_limit;
+ unsigned int budget = IAVF_DEFAULT_IRQ_WORK;
tx_buf = &tx_ring->tx_bi[i];
tx_desc = IAVF_TX_DESC(tx_ring, i);
@@ -1285,11 +1285,10 @@ static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring,
{
struct iavf_rx_buffer *rx_buffer;
- if (!size)
- return NULL;
-
rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
prefetchw(rx_buffer->page);
+ if (!size)
+ return rx_buffer;
/* we are reusing so sync this buffer for CPU use */
dma_sync_single_range_for_cpu(rx_ring->dev,
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 782450d5c12f..1603e99bae4a 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -627,6 +627,33 @@ static void iavf_mac_add_reject(struct iavf_adapter *adapter)
}
/**
+ * iavf_vlan_add_reject
+ * @adapter: adapter structure
+ *
+ * Remove VLAN filters from list based on PF response.
+ **/
+static void iavf_vlan_add_reject(struct iavf_adapter *adapter)
+{
+ struct iavf_vlan_filter *f, *ftmp;
+
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+ list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
+ if (f->is_new_vlan) {
+ if (f->vlan.tpid == ETH_P_8021Q)
+ clear_bit(f->vlan.vid,
+ adapter->vsi.active_cvlans);
+ else
+ clear_bit(f->vlan.vid,
+ adapter->vsi.active_svlans);
+
+ list_del(&f->list);
+ kfree(f);
+ }
+ }
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+}
+
+/**
* iavf_add_vlans
* @adapter: adapter structure
*
@@ -683,6 +710,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
vvfl->vlan_id[i] = f->vlan.vid;
i++;
f->add = false;
+ f->is_new_vlan = true;
if (i == count)
break;
}
@@ -695,10 +723,18 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
kfree(vvfl);
} else {
+ u16 max_vlans = adapter->vlan_v2_caps.filtering.max_filters;
+ u16 current_vlans = iavf_get_num_vlans_added(adapter);
struct virtchnl_vlan_filter_list_v2 *vvfl_v2;
adapter->current_op = VIRTCHNL_OP_ADD_VLAN_V2;
+ if ((count + current_vlans) > max_vlans &&
+ current_vlans < max_vlans) {
+ count = max_vlans - iavf_get_num_vlans_added(adapter);
+ more = true;
+ }
+
len = sizeof(*vvfl_v2) + ((count - 1) *
sizeof(struct virtchnl_vlan_filter));
if (len > IAVF_MAX_AQ_BUF_SIZE) {
@@ -725,6 +761,9 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
&adapter->vlan_v2_caps.filtering.filtering_support;
struct virtchnl_vlan *vlan;
+ if (i == count)
+ break;
+
/* give priority over outer if it's enabled */
if (filtering_support->outer)
vlan = &vvfl_v2->filters[i].outer;
@@ -736,8 +775,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
i++;
f->add = false;
- if (i == count)
- break;
+ f->is_new_vlan = true;
}
}
@@ -2080,6 +2118,11 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
*/
iavf_netdev_features_vlan_strip_set(netdev, true);
break;
+ case VIRTCHNL_OP_ADD_VLAN_V2:
+ iavf_vlan_add_reject(adapter);
+ dev_warn(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n",
+ iavf_stat_str(&adapter->hw, v_retval));
+ break;
default:
dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n",
v_retval, iavf_stat_str(&adapter->hw, v_retval),
@@ -2332,6 +2375,24 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
spin_unlock_bh(&adapter->adv_rss_lock);
}
break;
+ case VIRTCHNL_OP_ADD_VLAN_V2: {
+ struct iavf_vlan_filter *f;
+
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+ list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+ if (f->is_new_vlan) {
+ f->is_new_vlan = false;
+ if (f->vlan.tpid == ETH_P_8021Q)
+ set_bit(f->vlan.vid,
+ adapter->vsi.active_cvlans);
+ else
+ set_bit(f->vlan.vid,
+ adapter->vsi.active_svlans);
+ }
+ }
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+ }
+ break;
case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
/* PF enabled vlan strip on this VF.
* Update netdev->features if needed to be in sync with ethtool.
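
iavf_add_vlans() now clamps each VIRTCHNL_OP_ADD_VLAN_V2 batch so the running filter total never exceeds the PF-advertised maximum, and flags that more requests remain. The clamp by itself (a sketch; names and numbers are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    /* Clamp a batch so current + count never exceeds the PF's limit. */
    static unsigned int clamp_batch(unsigned int count, unsigned int current,
                                    unsigned int max, bool *more)
    {
        *more = false;
        if (count + current > max && current < max) {
            count = max - current; /* send what still fits... */
            *more = true;          /* ...and come back for the rest */
        }
        return count;
    }

    int main(void)
    {
        bool more;
        unsigned int sent = clamp_batch(10, 28, 32, &more);

        printf("send %u now, more=%d\n", sent, more); /* send 4, more=1 */
        return 0;
    }
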
diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h
index 61dd2f18dee8..b41bc3dc1745 100644
--- a/drivers/net/ethernet/intel/ice/ice_devids.h
+++ b/drivers/net/ethernet/intel/ice/ice_devids.h
@@ -5,6 +5,7 @@
#define _ICE_DEVIDS_H_
/* Device IDs */
+#define ICE_DEV_ID_E822_SI_DFLT 0x1888
/* Intel(R) Ethernet Connection E823-L for backplane */
#define ICE_DEV_ID_E823L_BACKPLANE 0x124C
/* Intel(R) Ethernet Connection E823-L for SFP */
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index 3991d62473bf..3337314a7b35 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -814,6 +814,8 @@ void ice_devlink_destroy_vf_port(struct ice_vf *vf)
devlink_port_unregister(devlink_port);
}
+#define ICE_DEVLINK_READ_BLK_SIZE (1024 * 1024)
+
/**
* ice_devlink_nvm_snapshot - Capture a snapshot of the NVM flash contents
* @devlink: the devlink instance
@@ -840,8 +842,9 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink,
struct ice_pf *pf = devlink_priv(devlink);
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
- void *nvm_data;
- u32 nvm_size;
+ u8 *nvm_data, *tmp, i;
+ u32 nvm_size, left;
+ s8 num_blks;
int status;
nvm_size = hw->flash.flash_size;
@@ -849,26 +852,44 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink,
if (!nvm_data)
return -ENOMEM;
- status = ice_acquire_nvm(hw, ICE_RES_READ);
- if (status) {
- dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
- status, hw->adminq.sq_last_status);
- NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
- vfree(nvm_data);
- return status;
- }
- status = ice_read_flat_nvm(hw, 0, &nvm_size, nvm_data, false);
- if (status) {
- dev_dbg(dev, "ice_read_flat_nvm failed after reading %u bytes, err %d aq_err %d\n",
- nvm_size, status, hw->adminq.sq_last_status);
- NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents");
+ num_blks = DIV_ROUND_UP(nvm_size, ICE_DEVLINK_READ_BLK_SIZE);
+ tmp = nvm_data;
+ left = nvm_size;
+
+ /* Some systems take longer to read the NVM than others, which causes
+ * the FW to reclaim the NVM lock before the entire NVM has been read.
+ * Fix this by breaking the NVM reads into smaller chunks that will
+ * probably not take as long. This has some overhead since we are
+ * increasing the number of AQ commands, but it should always work.
+ */
+ for (i = 0; i < num_blks; i++) {
+ u32 read_sz = min_t(u32, ICE_DEVLINK_READ_BLK_SIZE, left);
+
+ status = ice_acquire_nvm(hw, ICE_RES_READ);
+ if (status) {
+ dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
+ status, hw->adminq.sq_last_status);
+ NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
+ vfree(nvm_data);
+ return -EIO;
+ }
+
+ status = ice_read_flat_nvm(hw, i * ICE_DEVLINK_READ_BLK_SIZE,
+ &read_sz, tmp, false);
+ if (status) {
+ dev_dbg(dev, "ice_read_flat_nvm failed after reading %u bytes, err %d aq_err %d\n",
+ read_sz, status, hw->adminq.sq_last_status);
+ NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents");
+ ice_release_nvm(hw);
+ vfree(nvm_data);
+ return -EIO;
+ }
ice_release_nvm(hw);
- vfree(nvm_data);
- return status;
- }
- ice_release_nvm(hw);
+ tmp += read_sz;
+ left -= read_sz;
+ }
*data = nvm_data;
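
The snapshot path above acquires and releases the NVM semaphore once per 1 MB block instead of holding it across the whole flash, so a slow read can no longer outlive the firmware's ownership timeout. The loop shape in isolation (a sketch reading an in-memory "flash" with a tiny block size; the lock acquire/release is implied by each bounded per-call read):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define BLK_SIZE 8u /* stands in for ICE_DEVLINK_READ_BLK_SIZE (1 MB) */

    static const char flash[] = "0123456789abcdefghijklmnop"; /* fake NVM */

    static int read_flash(uint32_t off, uint32_t len, char *dst)
    {
        memcpy(dst, flash + off, len); /* one bounded, lock-holding read */
        return 0;
    }

    int main(void)
    {
        uint32_t size = sizeof(flash) - 1, left = size;
        char buf[sizeof(flash)];
        char *tmp = buf;
        uint32_t i, blks = (size + BLK_SIZE - 1) / BLK_SIZE;

        for (i = 0; i < blks; i++) {
            uint32_t n = left < BLK_SIZE ? left : BLK_SIZE;

            /* acquire lock, read one block, release lock */
            if (read_flash(i * BLK_SIZE, n, tmp))
                return 1;
            tmp += n;
            left -= n;
        }
        buf[size] = '\0';
        printf("read %u bytes: %s\n", size, buf);
        return 0;
    }
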
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 1e71b70f0e52..70335f6e8524 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -2190,6 +2190,42 @@ ice_setup_autoneg(struct ice_port_info *p, struct ethtool_link_ksettings *ks,
}
/**
+ * ice_set_phy_type_from_speed - set phy_types based on speeds
+ * and advertised modes
+ * @ks: ethtool link ksettings struct
+ * @phy_type_low: pointer to the lower part of phy_type
+ * @phy_type_high: pointer to the higher part of phy_type
+ * @adv_link_speed: targeted link speeds bitmap
+ */
+static void
+ice_set_phy_type_from_speed(const struct ethtool_link_ksettings *ks,
+ u64 *phy_type_low, u64 *phy_type_high,
+ u16 adv_link_speed)
+{
+ /* Handle 1000M speed in a special way because ice_update_phy_type
+ * enables all link modes, but having mixed copper and optical
+ * standards is not supported.
+ */
+ adv_link_speed &= ~ICE_AQ_LINK_SPEED_1000MB;
+
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 1000baseT_Full))
+ *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_T |
+ ICE_PHY_TYPE_LOW_1G_SGMII;
+
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 1000baseKX_Full))
+ *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_KX;
+
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 1000baseX_Full))
+ *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_SX |
+ ICE_PHY_TYPE_LOW_1000BASE_LX;
+
+ ice_update_phy_type(phy_type_low, phy_type_high, adv_link_speed);
+}
+
+/**
* ice_set_link_ksettings - Set Speed and Duplex
* @netdev: network interface device structure
* @ks: ethtool ksettings
@@ -2320,7 +2356,8 @@ ice_set_link_ksettings(struct net_device *netdev,
adv_link_speed = curr_link_speed;
/* Convert the advertise link speeds to their corresponded PHY_TYPE */
- ice_update_phy_type(&phy_type_low, &phy_type_high, adv_link_speed);
+ ice_set_phy_type_from_speed(ks, &phy_type_low, &phy_type_high,
+ adv_link_speed);
if (!autoneg_changed && adv_link_speed == curr_link_speed) {
netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
@@ -3470,6 +3507,16 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
new_rx = ch->combined_count + ch->rx_count;
new_tx = ch->combined_count + ch->tx_count;
+ if (new_rx < vsi->tc_cfg.numtc) {
+ netdev_err(dev, "Cannot set less Rx channels, than Traffic Classes you have (%u)\n",
+ vsi->tc_cfg.numtc);
+ return -EINVAL;
+ }
+ if (new_tx < vsi->tc_cfg.numtc) {
+ netdev_err(dev, "Cannot set less Tx channels, than Traffic Classes you have (%u)\n",
+ vsi->tc_cfg.numtc);
+ return -EINVAL;
+ }
if (new_rx > ice_get_max_rxq(pf)) {
netdev_err(dev, "Maximum allowed Rx channels is %d\n",
ice_get_max_rxq(pf));
diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.c b/drivers/net/ethernet/intel/ice/ice_fw_update.c
index 665a344fb9c0..3dc5662d62a6 100644
--- a/drivers/net/ethernet/intel/ice/ice_fw_update.c
+++ b/drivers/net/ethernet/intel/ice/ice_fw_update.c
@@ -736,7 +736,87 @@ static int ice_finalize_update(struct pldmfw *context)
return 0;
}
-static const struct pldmfw_ops ice_fwu_ops = {
+struct ice_pldm_pci_record_id {
+ u32 vendor;
+ u32 device;
+ u32 subsystem_vendor;
+ u32 subsystem_device;
+};
+
+/**
+ * ice_op_pci_match_record - Check if a PCI device matches the record
+ * @context: PLDM fw update structure
+ * @record: list of records extracted from the PLDM image
+ *
+ * Determine if the PCI device associated with this device matches the record
+ * data provided.
+ *
+ * Searches the descriptor TLVs and extracts the relevant descriptor data into
+ * a pldm_pci_record_id. This is then compared against the PCI device ID
+ * information.
+ *
+ * Returns: true if the device matches the record, false otherwise.
+ */
+static bool
+ice_op_pci_match_record(struct pldmfw *context, struct pldmfw_record *record)
+{
+ struct pci_dev *pdev = to_pci_dev(context->dev);
+ struct ice_pldm_pci_record_id id = {
+ .vendor = PCI_ANY_ID,
+ .device = PCI_ANY_ID,
+ .subsystem_vendor = PCI_ANY_ID,
+ .subsystem_device = PCI_ANY_ID,
+ };
+ struct pldmfw_desc_tlv *desc;
+
+ list_for_each_entry(desc, &record->descs, entry) {
+ u16 value;
+ int *ptr;
+
+ switch (desc->type) {
+ case PLDM_DESC_ID_PCI_VENDOR_ID:
+ ptr = &id.vendor;
+ break;
+ case PLDM_DESC_ID_PCI_DEVICE_ID:
+ ptr = &id.device;
+ break;
+ case PLDM_DESC_ID_PCI_SUBVENDOR_ID:
+ ptr = &id.subsystem_vendor;
+ break;
+ case PLDM_DESC_ID_PCI_SUBDEV_ID:
+ ptr = &id.subsystem_device;
+ break;
+ default:
+ /* Skip unrelated TLVs */
+ continue;
+ }
+
+ value = get_unaligned_le16(desc->data);
+ /* A value of zero for one of the descriptors is sometimes
+ * used when the record should ignore this field when matching
+ * device. For example if the record applies to any subsystem
+ * device or vendor.
+ */
+ if (value)
+ *ptr = value;
+ else
+ *ptr = PCI_ANY_ID;
+ }
+
+ /* the E822 device can have a generic device ID, so check for that */
+ if ((id.vendor == PCI_ANY_ID || id.vendor == pdev->vendor) &&
+ (id.device == PCI_ANY_ID || id.device == pdev->device ||
+ id.device == ICE_DEV_ID_E822_SI_DFLT) &&
+ (id.subsystem_vendor == PCI_ANY_ID ||
+ id.subsystem_vendor == pdev->subsystem_vendor) &&
+ (id.subsystem_device == PCI_ANY_ID ||
+ id.subsystem_device == pdev->subsystem_device))
+ return true;
+
+ return false;
+}
+
+static const struct pldmfw_ops ice_fwu_ops_e810 = {
.match_record = &pldmfw_op_pci_match_record,
.send_package_data = &ice_send_package_data,
.send_component_table = &ice_send_component_table,
@@ -744,6 +824,14 @@ static const struct pldmfw_ops ice_fwu_ops = {
.finalize_update = &ice_finalize_update,
};
+static const struct pldmfw_ops ice_fwu_ops_e822 = {
+ .match_record = &ice_op_pci_match_record,
+ .send_package_data = &ice_send_package_data,
+ .send_component_table = &ice_send_component_table,
+ .flash_component = &ice_flash_component,
+ .finalize_update = &ice_finalize_update,
+};
+
/**
* ice_get_pending_updates - Check if the component has a pending update
* @pf: the PF driver structure
@@ -921,7 +1009,11 @@ int ice_devlink_flash_update(struct devlink *devlink,
memset(&priv, 0, sizeof(priv));
- priv.context.ops = &ice_fwu_ops;
+ /* the E822 device needs a slightly different ops table */
+ if (hw->mac_type == ICE_MAC_GENERIC)
+ priv.context.ops = &ice_fwu_ops_e822;
+ else
+ priv.context.ops = &ice_fwu_ops_e810;
priv.context.dev = dev;
priv.extack = extack;
priv.pf = pf;
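
ice_op_pci_match_record() treats a zero descriptor value as a wildcard (mapped to PCI_ANY_ID) and additionally accepts the generic E822 default device ID 0x1888. The matching rule reduced to its essentials (a sketch; the IDs are taken from this diff, the rest is illustrative):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ANY_ID 0xFFFFFFFFu /* wildcard, like PCI_ANY_ID */

    struct pci_id { uint32_t vendor, device; };

    static bool field_matches(uint32_t want, uint32_t have)
    {
        return want == ANY_ID || want == have;
    }

    static bool record_matches(const struct pci_id *rec,
                               const struct pci_id *dev,
                               uint32_t dflt_device)
    {
        return field_matches(rec->vendor, dev->vendor) &&
               (field_matches(rec->device, dev->device) ||
                rec->device == dflt_device); /* generic default also matches */
    }

    int main(void)
    {
        struct pci_id dev = { 0x8086, 0x124C }; /* E823-L backplane */
        struct pci_id rec = { 0x8086, 0x1888 }; /* record names the default */

        printf("match = %d\n", record_matches(&rec, &dev, 0x1888));
        return 0;
    }
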
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 454e01ae09b9..f7f9c973ec54 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -909,7 +909,7 @@ static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt)
* @vsi: the VSI being configured
* @ctxt: VSI context structure
*/
-static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
+static int ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
{
u16 offset = 0, qmap = 0, tx_count = 0, pow = 0;
u16 num_txq_per_tc, num_rxq_per_tc;
@@ -982,7 +982,18 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
else
vsi->num_rxq = num_rxq_per_tc;
+ if (vsi->num_rxq > vsi->alloc_rxq) {
+ dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u) than were allocated (%u)!\n",
+ vsi->num_rxq, vsi->alloc_rxq);
+ return -EINVAL;
+ }
+
vsi->num_txq = tx_count;
+ if (vsi->num_txq > vsi->alloc_txq) {
+		dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u) than were allocated (%u)!\n",
+ vsi->num_txq, vsi->alloc_txq);
+ return -EINVAL;
+ }
if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
@@ -1000,6 +1011,8 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
*/
ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq);
+
+ return 0;
}
/**
@@ -1187,7 +1200,10 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
if (vsi->type == ICE_VSI_CHNL) {
ice_chnl_vsi_setup_q_map(vsi, ctxt);
} else {
- ice_vsi_setup_q_map(vsi, ctxt);
+ ret = ice_vsi_setup_q_map(vsi, ctxt);
+ if (ret)
+ goto out;
+
if (!init_vsi) /* means VSI being updated */
			/* must indicate which sections of the VSI context are
			 * being modified
@@ -3464,7 +3480,7 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc)
*
* Prepares VSI tc_config to have queue configurations based on MQPRIO options.
*/
-static void
+static int
ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt,
u8 ena_tc)
{
@@ -3513,7 +3529,18 @@ ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt,
/* Set actual Tx/Rx queue pairs */
vsi->num_txq = offset + qcount_tx;
+ if (vsi->num_txq > vsi->alloc_txq) {
+		dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u) than were allocated (%u)!\n",
+ vsi->num_txq, vsi->alloc_txq);
+ return -EINVAL;
+ }
+
vsi->num_rxq = offset + qcount_rx;
+ if (vsi->num_rxq > vsi->alloc_rxq) {
+		dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u) than were allocated (%u)!\n",
+ vsi->num_rxq, vsi->alloc_rxq);
+ return -EINVAL;
+ }
/* Setup queue TC[0].qmap for given VSI context */
ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
@@ -3531,6 +3558,8 @@ ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt,
dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_rxq = %d\n", vsi->num_rxq);
dev_dbg(ice_pf_to_dev(vsi->back), "all_numtc %u, all_enatc: 0x%04x, tc_cfg.numtc %u\n",
vsi->all_numtc, vsi->all_enatc, vsi->tc_cfg.numtc);
+
+ return 0;
}
/**
@@ -3580,9 +3609,12 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
if (vsi->type == ICE_VSI_PF &&
test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
- ice_vsi_setup_q_map_mqprio(vsi, ctx, ena_tc);
+ ret = ice_vsi_setup_q_map_mqprio(vsi, ctx, ena_tc);
else
- ice_vsi_setup_q_map(vsi, ctx);
+ ret = ice_vsi_setup_q_map(vsi, ctx);
+
+ if (ret)
+ goto out;
	/* must indicate which sections of the VSI context are being modified */
ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index e1cae253412c..ff2eac2f8c64 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -5413,6 +5413,7 @@ static const struct pci_device_id ice_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT), 0 },
/* required last entry */
{ 0, }
};
@@ -5763,25 +5764,38 @@ static netdev_features_t
ice_fix_features(struct net_device *netdev, netdev_features_t features)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
- netdev_features_t supported_vlan_filtering;
- netdev_features_t requested_vlan_filtering;
- struct ice_vsi *vsi = np->vsi;
-
- requested_vlan_filtering = features & NETIF_VLAN_FILTERING_FEATURES;
-
- /* make sure supported_vlan_filtering works for both SVM and DVM */
- supported_vlan_filtering = NETIF_F_HW_VLAN_CTAG_FILTER;
- if (ice_is_dvm_ena(&vsi->back->hw))
- supported_vlan_filtering |= NETIF_F_HW_VLAN_STAG_FILTER;
-
- if (requested_vlan_filtering &&
- requested_vlan_filtering != supported_vlan_filtering) {
- if (requested_vlan_filtering & NETIF_F_HW_VLAN_CTAG_FILTER) {
- netdev_warn(netdev, "cannot support requested VLAN filtering settings, enabling all supported VLAN filtering settings\n");
- features |= supported_vlan_filtering;
+ netdev_features_t req_vlan_fltr, cur_vlan_fltr;
+ bool cur_ctag, cur_stag, req_ctag, req_stag;
+
+ cur_vlan_fltr = netdev->features & NETIF_VLAN_FILTERING_FEATURES;
+ cur_ctag = cur_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
+ cur_stag = cur_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
+
+ req_vlan_fltr = features & NETIF_VLAN_FILTERING_FEATURES;
+ req_ctag = req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
+ req_stag = req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
+
+ if (req_vlan_fltr != cur_vlan_fltr) {
+ if (ice_is_dvm_ena(&np->vsi->back->hw)) {
+ if (req_ctag && req_stag) {
+ features |= NETIF_VLAN_FILTERING_FEATURES;
+ } else if (!req_ctag && !req_stag) {
+ features &= ~NETIF_VLAN_FILTERING_FEATURES;
+ } else if ((!cur_ctag && req_ctag && !cur_stag) ||
+ (!cur_stag && req_stag && !cur_ctag)) {
+ features |= NETIF_VLAN_FILTERING_FEATURES;
+ netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been enabled for both types.\n");
+ } else if ((cur_ctag && !req_ctag && cur_stag) ||
+ (cur_stag && !req_stag && cur_ctag)) {
+ features &= ~NETIF_VLAN_FILTERING_FEATURES;
+ netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been disabled for both types.\n");
+ }
} else {
- netdev_warn(netdev, "cannot support requested VLAN filtering settings, clearing all supported VLAN filtering settings\n");
- features &= ~supported_vlan_filtering;
+ if (req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER)
+ netdev_warn(netdev, "cannot support requested 802.1ad filtering setting in SVM mode\n");
+
+ if (req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER)
+ features |= NETIF_F_HW_VLAN_CTAG_FILTER;
}
}
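
In DVM the reworked ice_fix_features() keeps 802.1Q (CTAG) and 802.1ad (STAG) filtering in lockstep: a request that newly enables one of the two turns on both, and a request that disables one turns off both. A simplified, self-contained sketch of that pairing rule with the netdev feature plumbing stripped away; function and variable names here are illustrative only:

#include <stdbool.h>
#include <stdio.h>

/* Resolve a requested (ctag, stag) pair so both end up equal, preferring
 * "on" when the request newly enables either of the two. */
static void fix_vlan_filters(bool cur_c, bool cur_s, bool *req_c, bool *req_s)
{
	if (*req_c == *req_s)
		return;                  /* already consistent */
	if ((!cur_c && *req_c) || (!cur_s && *req_s))
		*req_c = *req_s = true;  /* enabling one enables both */
	else
		*req_c = *req_s = false; /* disabling one disables both */
}

int main(void)
{
	bool c = true, s = false;          /* request: CTAG on, STAG off */

	fix_vlan_filters(false, false, &c, &s);
	printf("ctag=%d stag=%d\n", c, s); /* ctag=1 stag=1 */
	return 0;
}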
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index 662947c882e8..ef9344ef0d8e 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -2271,7 +2271,7 @@ static int
ice_ptp_init_tx_e822(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
{
tx->quad = port / ICE_PORTS_PER_QUAD;
- tx->quad_offset = tx->quad * INDEX_PER_PORT;
+ tx->quad_offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT;
tx->len = INDEX_PER_PORT;
return ice_ptp_alloc_tx_tracker(tx);
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
index afd048d69959..10e396abf130 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -49,6 +49,37 @@ struct ice_perout_channel {
* To allow multiple ports to access the shared register block independently,
* the blocks are split up so that indexes are assigned to each port based on
* hardware logical port number.
+ *
+ * The timestamp blocks are handled differently on E810- and E822-based
+ * devices. On E810, each port has its own block of timestamps, while on
+ * E822 the shared register block must be logically split into smaller
+ * per-port chunks to avoid collisions.
+ *
+ * Example for port 5 in E810:
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * |register|register|register|register|register|register|register|register|
+ * | block | block | block | block | block | block | block | block |
+ * | for | for | for | for | for | for | for | for |
+ * | port 0 | port 1 | port 2 | port 3 | port 4 | port 5 | port 6 | port 7 |
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * ^^
+ * ||
+ * |--- quad offset is always 0
+ * ---- quad number
+ *
+ * Example for port 5 in E822:
+ * +-----------------------------+-----------------------------+
+ * | register block for quad 0 | register block for quad 1 |
+ * |+------+------+------+------+|+------+------+------+------+|
+ * ||port 0|port 1|port 2|port 3|||port 0|port 1|port 2|port 3||
+ * |+------+------+------+------+|+------+------+------+------+|
+ * +-----------------------------+-------^---------------------+
+ * ^ |
+ * | --- quad offset*
+ * ---- quad number
+ *
+ * * PHY port 5 is port 1 in quad 1
+ *
*/
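
The two diagrams reduce to two lines of arithmetic, which is exactly what the ice_ptp.c fix above changes: the offset must come from the port's position within its quad, not from the quad number. A standalone sketch, assuming 4 ports per quad as in the comment and a hypothetical INDEX_PER_PORT of 16:

#include <stdio.h>

#define PORTS_PER_QUAD 4  /* E822; mirrors ICE_PORTS_PER_QUAD */
#define INDEX_PER_PORT 16 /* hypothetical tracker entries per port */

int main(void)
{
	int port = 5;

	/* E810: one timestamp block per port, so the offset inside it is 0 */
	printf("E810 port %d: block %d, offset 0\n", port, port);

	/* E822: ports share a quad; the offset selects the port's chunk */
	printf("E822 port %d: quad %d, offset %d\n", port,
	       port / PORTS_PER_QUAD,
	       (port % PORTS_PER_QUAD) * INDEX_PER_PORT);
	/* -> quad 1, offset 16: PHY port 5 is port 1 in quad 1 */
	return 0;
}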
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index 0a0c55fb8699..b803f2ab3cc7 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -524,6 +524,7 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
*/
fltr->rid = rule_added.rid;
fltr->rule_id = rule_added.rule_id;
+ fltr->dest_id = rule_added.vsi_handle;
exit:
kfree(list);
@@ -993,7 +994,9 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
n_proto_key = ntohs(match.key->n_proto);
n_proto_mask = ntohs(match.mask->n_proto);
- if (n_proto_key == ETH_P_ALL || n_proto_key == 0) {
+ if (n_proto_key == ETH_P_ALL || n_proto_key == 0 ||
+ fltr->tunnel_type == TNL_GTPU ||
+ fltr->tunnel_type == TNL_GTPC) {
n_proto_key = 0;
n_proto_mask = 0;
} else {
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index cd8e6b50968c..7adf9ddf129e 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -504,6 +504,11 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
}
if (ice_is_vf_disabled(vf)) {
+ vsi = ice_get_vf_vsi(vf);
+ if (WARN_ON(!vsi))
+ return -EINVAL;
+ ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
+ ice_vsi_stop_all_rx_rings(vsi);
dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
vf->vf_id);
return 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index 1d9b84c3937a..4547bc1f7cee 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -1569,35 +1569,27 @@ error_param:
*/
static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_vsi_queue_config_info *qci =
(struct virtchnl_vsi_queue_config_info *)msg;
struct virtchnl_queue_pair_info *qpi;
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
- int i, q_idx;
+ int i = -1, q_idx;
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
goto error_param;
- }
- if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id))
goto error_param;
- }
vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ if (!vsi)
goto error_param;
- }
if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -1610,7 +1602,6 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
!ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
!ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
!ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -1620,7 +1611,6 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
* for selected "vsi"
*/
if (q_idx >= vsi->alloc_txq || q_idx >= vsi->alloc_rxq) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -1630,14 +1620,13 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
vsi->tx_rings[i]->count = qpi->txq.ring_len;
/* Disable any existing queue first */
- if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx))
goto error_param;
- }
/* Configure a queue with the requested settings */
if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure TX queue %d\n",
+ vf->vf_id, i);
goto error_param;
}
}
@@ -1651,17 +1640,13 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
if (qpi->rxq.databuffer_size != 0 &&
(qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
- qpi->rxq.databuffer_size < 1024)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ qpi->rxq.databuffer_size < 1024))
goto error_param;
- }
vsi->rx_buf_len = qpi->rxq.databuffer_size;
vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
if (qpi->rxq.max_pkt_size > max_frame_size ||
- qpi->rxq.max_pkt_size < 64) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ qpi->rxq.max_pkt_size < 64)
goto error_param;
- }
vsi->max_frame = qpi->rxq.max_pkt_size;
/* add space for the port VLAN since the VF driver is
@@ -1672,16 +1657,30 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
vsi->max_frame += VLAN_HLEN;
if (ice_vsi_cfg_single_rxq(vsi, q_idx)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure RX queue %d\n",
+ vf->vf_id, i);
goto error_param;
}
}
}
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ VIRTCHNL_STATUS_SUCCESS, NULL, 0);
error_param:
+ /* disable whatever we can */
+ for (; i >= 0; i--) {
+ if (ice_vsi_ctrl_one_rx_ring(vsi, false, i, true))
+ dev_err(ice_pf_to_dev(pf), "VF-%d could not disable RX queue %d\n",
+ vf->vf_id, i);
+ if (ice_vf_vsi_dis_single_txq(vf, vsi, i))
+ dev_err(ice_pf_to_dev(pf), "VF-%d could not disable TX queue %d\n",
+ vf->vf_id, i);
+ }
+
/* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, v_ret,
- NULL, 0);
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ VIRTCHNL_STATUS_ERR_PARAM, NULL, 0);
}
/**
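
The reworked ice_vc_cfg_qs_msg() starts i at -1 and, on failure, walks it back down to 0 so every queue pair touched before the error is disabled again, including the partially configured one. The same unwind shape as a standalone sketch; setup_queue/teardown_queue are hypothetical stand-ins for the single-queue enable/disable helpers:

#include <stdio.h>

static int setup_queue(int i)     { return i == 3 ? -1 : 0; } /* fail at 3 */
static void teardown_queue(int i) { printf("teardown %d\n", i); }

static int configure_all(int n)
{
	int i = -1; /* -1 keeps the unwind loop a no-op if nothing ran */

	while (++i < n)
		if (setup_queue(i))
			goto err;
	return 0;
err:
	for (; i >= 0; i--) /* includes the partially configured index */
		teardown_queue(i);
	return -1;
}

int main(void)
{
	return configure_all(5) ? 1 : 0; /* tears down queues 3, 2, 1, 0 */
}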
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 68be2976f539..c5f04c40284b 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -4819,8 +4819,11 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
while (i != tx_ring->next_to_use) {
union e1000_adv_tx_desc *eop_desc, *tx_desc;
- /* Free all the Tx ring sk_buffs */
- dev_kfree_skb_any(tx_buffer->skb);
+ /* Free all the Tx ring sk_buffs or xdp frames */
+ if (tx_buffer->type == IGB_TYPE_SKB)
+ dev_kfree_skb_any(tx_buffer->skb);
+ else
+ xdp_return_frame(tx_buffer->xdpf);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -9898,11 +9901,10 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
struct e1000_hw *hw = &adapter->hw;
u32 dmac_thr;
u16 hwm;
+ u32 reg;
if (hw->mac.type > e1000_82580) {
if (adapter->flags & IGB_FLAG_DMAC) {
- u32 reg;
-
/* force threshold to 0. */
wr32(E1000_DMCTXTH, 0);
@@ -9935,7 +9937,6 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
/* Disable BMC-to-OS Watchdog Enable */
if (hw->mac.type != e1000_i354)
reg &= ~E1000_DMACR_DC_BMC2OSW_EN;
-
wr32(E1000_DMACR, reg);
/* no lower threshold to disable
@@ -9952,12 +9953,12 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
*/
wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
(IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);
+ }
- /* make low power state decision controlled
- * by DMA coal
- */
+ if (hw->mac.type >= e1000_i210 ||
+ (adapter->flags & IGB_FLAG_DMAC)) {
reg = rd32(E1000_PCIEMISC);
- reg &= ~E1000_PCIEMISC_LX_DECISION;
+ reg |= E1000_PCIEMISC_LX_DECISION;
wr32(E1000_PCIEMISC, reg);
} /* endif adapter->dmac is not disabled */
} else if (hw->mac.type == e1000_82580) {
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index ae17af44fe02..a5ebee7df4a8 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -6171,6 +6171,9 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg)
u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
u32 value = 0;
+ if (IGC_REMOVED(hw_addr))
+ return ~value;
+
value = readl(&hw_addr[reg]);
/* reads should not return all F's */
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
index e197a33d93a0..026c3b65fc37 100644
--- a/drivers/net/ethernet/intel/igc/igc_regs.h
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -306,7 +306,8 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg);
#define wr32(reg, val) \
do { \
u8 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \
- writel((val), &hw_addr[(reg)]); \
+ if (!IGC_REMOVED(hw_addr)) \
+ writel((val), &hw_addr[(reg)]); \
} while (0)
#define rd32(reg) (igc_rd32(hw, reg))
@@ -318,4 +319,6 @@ do { \
#define array_rd32(reg, offset) (igc_rd32(hw, (reg) + ((offset) << 2)))
+#define IGC_REMOVED(h) unlikely(!(h))
+
#endif
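
Both igc accessors now check the cached BAR pointer before touching it: after surprise removal the read path returns all-ones, which is what a dead PCIe function returns anyway, and the write path becomes a no-op instead of dereferencing a stale mapping. A userspace sketch of the same guard around a fake register window; REMOVED() here just checks for NULL, mirroring IGC_REMOVED:

#include <stdint.h>
#include <stdio.h>

#define REMOVED(hw) ((hw) == NULL) /* mirrors IGC_REMOVED(hw_addr) */

static uint32_t rd32(volatile uint32_t *hw, unsigned int reg)
{
	if (REMOVED(hw))
		return ~0u; /* all F's, like a removed PCIe function */
	return hw[reg];
}

static void wr32(volatile uint32_t *hw, unsigned int reg, uint32_t val)
{
	if (REMOVED(hw))
		return;     /* silently drop the write */
	hw[reg] = val;
}

int main(void)
{
	volatile uint32_t *hw = NULL; /* simulate surprise removal */

	wr32(hw, 0, 0x1);                /* no-op instead of a crash */
	printf("0x%08x\n", rd32(hw, 0)); /* 0xffffffff */
	return 0;
}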
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 921a4d977d65..8813b4dd6872 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -779,6 +779,7 @@ struct ixgbe_adapter {
#ifdef CONFIG_IXGBE_IPSEC
struct ixgbe_ipsec *ipsec;
#endif /* CONFIG_IXGBE_IPSEC */
+ spinlock_t vfs_lock;
};
static inline int ixgbe_determine_xdp_q_idx(int cpu)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 77c2e70b0860..55f91c9ff047 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -6403,6 +6403,9 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
/* n-tuple support exists, always init our spinlock */
spin_lock_init(&adapter->fdir_perfect_lock);
+	/* init spinlock to serialize access to VF resources */
+ spin_lock_init(&adapter->vfs_lock);
+
#ifdef CONFIG_IXGBE_DCB
ixgbe_init_dcb(adapter);
#endif
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 7f11c0a8e7a9..a1e69c734863 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -205,10 +205,13 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs)
int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
{
unsigned int num_vfs = adapter->num_vfs, vf;
+ unsigned long flags;
int rss;
+ spin_lock_irqsave(&adapter->vfs_lock, flags);
/* set num VFs to 0 to prevent access to vfinfo */
adapter->num_vfs = 0;
+ spin_unlock_irqrestore(&adapter->vfs_lock, flags);
/* put the reference to all of the vf devices */
for (vf = 0; vf < num_vfs; ++vf) {
@@ -1184,9 +1187,9 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter,
switch (xcast_mode) {
case IXGBEVF_XCAST_MODE_NONE:
- disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
+ disable = IXGBE_VMOLR_ROMPE |
IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
- enable = 0;
+ enable = IXGBE_VMOLR_BAM;
break;
case IXGBEVF_XCAST_MODE_MULTI:
disable = IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
@@ -1208,9 +1211,9 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter,
return -EPERM;
}
- disable = 0;
+ disable = IXGBE_VMOLR_VPE;
enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
- IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
+ IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE;
break;
default:
return -EOPNOTSUPP;
@@ -1355,8 +1358,10 @@ static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf)
void ixgbe_msg_task(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
+ unsigned long flags;
u32 vf;
+ spin_lock_irqsave(&adapter->vfs_lock, flags);
for (vf = 0; vf < adapter->num_vfs; vf++) {
/* process any reset requests */
if (!ixgbe_check_for_rst(hw, vf))
@@ -1370,6 +1375,7 @@ void ixgbe_msg_task(struct ixgbe_adapter *adapter)
if (!ixgbe_check_for_ack(hw, vf))
ixgbe_rcv_ack_from_vf(adapter, vf);
}
+ spin_unlock_irqrestore(&adapter->vfs_lock, flags);
}
static inline void ixgbe_ping_vf(struct ixgbe_adapter *adapter, int vf)
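
The new vfs_lock closes the race between ixgbe_disable_sriov() zeroing num_vfs (and then freeing per-VF state) and ixgbe_msg_task() still iterating over it: the count is only read or reset under the lock. A minimal pthread sketch of that invariant; all names are illustrative, and the kernel uses spin_lock_irqsave rather than a mutex:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t vfs_lock = PTHREAD_MUTEX_INITIALIZER;
static int num_vfs = 4;
static int vf_state[4];

static void msg_task(void)
{
	pthread_mutex_lock(&vfs_lock);
	for (int vf = 0; vf < num_vfs; vf++)
		vf_state[vf]++; /* safe: num_vfs cannot change under us */
	pthread_mutex_unlock(&vfs_lock);
}

static void disable_sriov(void)
{
	pthread_mutex_lock(&vfs_lock);
	num_vfs = 0;            /* iterators now see zero VFs */
	pthread_mutex_unlock(&vfs_lock);
	/* per-VF resources may be freed only after this point */
}

int main(void)
{
	msg_task();
	disable_sriov();
	msg_task();                      /* no-op, no use-after-free */
	printf("vf0=%d\n", vf_state[0]); /* vf0=1 */
	return 0;
}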
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h
index cc51149790ff..3d5d39a52fe6 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h
@@ -52,7 +52,7 @@
#define CN93_SDP_EPF_RINFO_SRN(val) ((val) & 0xFF)
#define CN93_SDP_EPF_RINFO_RPVF(val) (((val) >> 32) & 0xF)
-#define CN93_SDP_EPF_RINFO_NVFS(val) (((val) >> 48) && 0xFF)
+#define CN93_SDP_EPF_RINFO_NVFS(val) (((val) >> 48) & 0xFF)
/* SDP Function select */
#define CN93_SDP_FUNC_SEL_EPF_BIT_POS 8
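
The one-character CN93_SDP_EPF_RINFO_NVFS fix matters because && collapses its operands to 0 or 1: the old macro reported one VF whenever the field was nonzero instead of extracting the 8-bit count. A short demonstration with a hypothetical register value:

#include <stdint.h>
#include <stdio.h>

#define NVFS_BUGGY(val) (((val) >> 48) && 0xFF) /* logical AND: 0 or 1 */
#define NVFS_FIXED(val) (((val) >> 48) & 0xFF)  /* bitwise field mask */

int main(void)
{
	uint64_t rinfo = (uint64_t)12 << 48; /* hypothetical RINFO: 12 VFs */

	printf("buggy=%llu fixed=%llu\n",            /* buggy=1 fixed=12 */
	       (unsigned long long)NVFS_BUGGY(rinfo),
	       (unsigned long long)NVFS_FIXED(rinfo));
	return 0;
}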
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index bc614a4def9e..3f60a80e34c8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -1390,7 +1390,8 @@ static int otx2vf_get_link_ksettings(struct net_device *netdev,
static const struct ethtool_ops otx2vf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
- ETHTOOL_COALESCE_MAX_FRAMES,
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USE_ADAPTIVE,
.supported_ring_params = ETHTOOL_RING_USE_RX_BUF_LEN |
ETHTOOL_RING_USE_CQE_SIZE,
.get_link = otx2_get_link,
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flower.c b/drivers/net/ethernet/marvell/prestera/prestera_flower.c
index d43e503c644f..4d93ad6a284c 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_flower.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.c
@@ -167,12 +167,12 @@ static int prestera_flower_parse_meta(struct prestera_acl_rule *rule,
}
port = netdev_priv(ingress_dev);
- mask = htons(0x1FFF);
- key = htons(port->hw_id);
+ mask = htons(0x1FFF << 3);
+ key = htons(port->hw_id << 3);
rule_match_set(r_match->key, SYS_PORT, key);
rule_match_set(r_match->mask, SYS_PORT, mask);
- mask = htons(0x1FF);
+ mask = htons(0x3FF);
key = htons(port->dev_id);
rule_match_set(r_match->key, SYS_DEV, key);
rule_match_set(r_match->mask, SYS_DEV, mask);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router.c b/drivers/net/ethernet/marvell/prestera/prestera_router.c
index 3754d8aec76d..3c8116f16b4d 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_router.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_router.c
@@ -588,6 +588,7 @@ err_router_lib_init:
void prestera_router_fini(struct prestera_switch *sw)
{
+ unregister_fib_notifier(&init_net, &sw->router->fib_nb);
unregister_inetaddr_notifier(&sw->router->inetaddr_nb);
unregister_inetaddr_validator_notifier(&sw->router->inetaddr_valid_nb);
rhashtable_destroy(&sw->router->kern_fib_cache_ht);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index b3b3c079a0fa..59c9a10f83ba 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -899,6 +899,17 @@ static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
return true;
}
+static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask)
+{
+ unsigned int size = mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH);
+ unsigned long data;
+
+ data = __get_free_pages(gfp_mask | __GFP_COMP | __GFP_NOWARN,
+ get_order(size));
+
+ return (void *)data;
+}
+
/* the qdma core needs scratch memory to be setup */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
@@ -1467,7 +1478,10 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
goto release_desc;
/* alloc new buffer */
- new_data = napi_alloc_frag(ring->frag_size);
+ if (ring->frag_size <= PAGE_SIZE)
+ new_data = napi_alloc_frag(ring->frag_size);
+ else
+ new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
if (unlikely(!new_data)) {
netdev->stats.rx_dropped++;
goto release_desc;
@@ -1914,7 +1928,10 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
return -ENOMEM;
for (i = 0; i < rx_dma_size; i++) {
- ring->data[i] = netdev_alloc_frag(ring->frag_size);
+ if (ring->frag_size <= PAGE_SIZE)
+ ring->data[i] = netdev_alloc_frag(ring->frag_size);
+ else
+ ring->data[i] = mtk_max_lro_buf_alloc(GFP_KERNEL);
if (!ring->data[i])
return -ENOMEM;
}
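
The page_frag caches behind napi_alloc_frag()/netdev_alloc_frag() can only hand out chunks that fit in a page, so LRO-sized mtk buffers have to come straight from the page allocator at the right order. A userspace sketch of the size-to-order step, assuming 4 KiB pages and hypothetical sizes; the kernel's get_order() is the real helper mirrored here:

#include <stdio.h>

#define PAGE_SIZE 4096u /* assumption: 4 KiB pages */

/* Smallest n with (PAGE_SIZE << n) >= size; mirrors the kernel's get_order() */
static unsigned int order_for(unsigned int size)
{
	unsigned int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned int frag = 3072, lro = 24 * 1024; /* hypothetical sizes */

	printf("frag %u -> %s\n", frag,
	       frag <= PAGE_SIZE ? "page_frag cache" : "page allocator");
	printf("lro  %u -> order-%u pages\n", lro, order_for(lro)); /* order-3 */
	return 0;
}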
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
index 90e7dfd011c9..5d457bc9acc1 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -93,6 +93,9 @@ mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_i
};
struct net_device_path path = {};
+ if (!ctx.dev)
+ return -ENODEV;
+
memcpy(ctx.daddr, addr, sizeof(ctx.daddr));
if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED))
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
index 8f0cd3196aac..29be2fcafea3 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -651,7 +651,7 @@ mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
* WDMA RX.
*/
- BUG_ON(idx > ARRAY_SIZE(dev->tx_ring));
+ BUG_ON(idx >= ARRAY_SIZE(dev->tx_ring));
if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE))
return -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index ed5038d98ef6..6400a827173c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -2110,7 +2110,7 @@ static int mlx4_en_get_module_eeprom(struct net_device *dev,
en_err(priv,
"mlx4_get_module_info i(%d) offset(%d) bytes_to_read(%d) - FAILED (0x%x)\n",
i, offset, ee->len - i, ret);
- return 0;
+ return ret;
}
i += ret;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index 0eb9d74547f8..50422b56a64d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -579,17 +579,6 @@ static void *pci_get_other_drvdata(struct device *this, struct device *other)
return pci_get_drvdata(to_pci_dev(other));
}
-static int next_phys_dev(struct device *dev, const void *data)
-{
- struct mlx5_core_dev *mdev, *this = (struct mlx5_core_dev *)data;
-
- mdev = pci_get_other_drvdata(this->device, dev);
- if (!mdev)
- return 0;
-
- return _next_phys_dev(mdev, data);
-}
-
static int next_phys_dev_lag(struct device *dev, const void *data)
{
struct mlx5_core_dev *mdev, *this = (struct mlx5_core_dev *)data;
@@ -624,13 +613,6 @@ static struct mlx5_core_dev *mlx5_get_next_dev(struct mlx5_core_dev *dev,
}
/* Must be called with intf_mutex held */
-struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
-{
- lockdep_assert_held(&mlx5_intf_mutex);
- return mlx5_get_next_dev(dev, &next_phys_dev);
-}
-
-/* Must be called with intf_mutex held */
struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev)
{
lockdep_assert_held(&mlx5_intf_mutex);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index eae9aa9c0811..978a2bb8e122 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -675,6 +675,9 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work)
if (!tracer->owner)
return;
+ if (unlikely(!tracer->str_db.loaded))
+ goto arm;
+
block_count = tracer->buff.size / TRACER_BLOCK_SIZE_BYTE;
start_offset = tracer->buff.consumer_index * TRACER_BLOCK_SIZE_BYTE;
@@ -732,6 +735,7 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work)
&tmp_trace_block[TRACES_PER_BLOCK - 1]);
}
+arm:
mlx5_fw_tracer_arm(dev);
}
@@ -1136,8 +1140,7 @@ static int fw_tracer_event(struct notifier_block *nb, unsigned long action, void
queue_work(tracer->work_queue, &tracer->ownership_change_work);
break;
case MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE:
- if (likely(tracer->str_db.loaded))
- queue_work(tracer->work_queue, &tracer->handle_traces_work);
+ queue_work(tracer->work_queue, &tracer->handle_traces_work);
break;
default:
mlx5_core_dbg(dev, "FWTracer: Event with unrecognized subtype: sub_type %d\n",
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 68364484a435..3c1edfa33aa7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -565,7 +565,8 @@ static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
static u8 rq_end_pad_mode(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
bool lro_en = params->packet_merge.type == MLX5E_PACKET_MERGE_LRO;
- bool ro = MLX5_CAP_GEN(mdev, relaxed_ordering_write);
+ bool ro = pcie_relaxed_ordering_enabled(mdev->pdev) &&
+ MLX5_CAP_GEN(mdev, relaxed_ordering_write);
return ro && lro_en ?
MLX5_WQ_END_PAD_MODE_NONE : MLX5_WQ_END_PAD_MODE_ALIGN;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index 25f51f80a9b4..ba171c7f0a67 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -76,6 +76,7 @@ struct mlx5_tc_ct_priv {
struct mlx5_ct_fs *fs;
struct mlx5_ct_fs_ops *fs_ops;
spinlock_t ht_lock; /* protects ft entries */
+ struct workqueue_struct *wq;
struct mlx5_tc_ct_debugfs debugfs;
};
@@ -941,14 +942,11 @@ static void mlx5_tc_ct_entry_del_work(struct work_struct *work)
static void
__mlx5_tc_ct_entry_put(struct mlx5_ct_entry *entry)
{
- struct mlx5e_priv *priv;
-
if (!refcount_dec_and_test(&entry->refcnt))
return;
- priv = netdev_priv(entry->ct_priv->netdev);
INIT_WORK(&entry->work, mlx5_tc_ct_entry_del_work);
- queue_work(priv->wq, &entry->work);
+ queue_work(entry->ct_priv->wq, &entry->work);
}
static struct mlx5_ct_counter *
@@ -1759,19 +1757,16 @@ mlx5_tc_ct_flush_ft_entry(void *ptr, void *arg)
static void
mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
{
- struct mlx5e_priv *priv;
-
if (!refcount_dec_and_test(&ft->refcount))
return;
+ flush_workqueue(ct_priv->wq);
nf_flow_table_offload_del_cb(ft->nf_ft,
mlx5_tc_ct_block_flow_offload, ft);
rhashtable_remove_fast(&ct_priv->zone_ht, &ft->node, zone_params);
rhashtable_free_and_destroy(&ft->ct_entries_ht,
mlx5_tc_ct_flush_ft_entry,
ct_priv);
- priv = netdev_priv(ct_priv->netdev);
- flush_workqueue(priv->wq);
mlx5_tc_ct_free_pre_ct_tables(ft);
mapping_remove(ct_priv->zone_mapping, ft->zone_restore_id);
kfree(ft);
@@ -2176,6 +2171,12 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
if (rhashtable_init(&ct_priv->ct_tuples_nat_ht, &tuples_nat_ht_params))
goto err_ct_tuples_nat_ht;
+ ct_priv->wq = alloc_ordered_workqueue("mlx5e_ct_priv_wq", 0);
+ if (!ct_priv->wq) {
+ err = -ENOMEM;
+ goto err_wq;
+ }
+
err = mlx5_tc_ct_fs_init(ct_priv);
if (err)
goto err_init_fs;
@@ -2184,6 +2185,8 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
return ct_priv;
err_init_fs:
+ destroy_workqueue(ct_priv->wq);
+err_wq:
rhashtable_destroy(&ct_priv->ct_tuples_nat_ht);
err_ct_tuples_nat_ht:
rhashtable_destroy(&ct_priv->ct_tuples_ht);
@@ -2213,6 +2216,7 @@ mlx5_tc_ct_clean(struct mlx5_tc_ct_priv *ct_priv)
if (!ct_priv)
return;
+ destroy_workqueue(ct_priv->wq);
mlx5_ct_tc_remove_dbgfs(ct_priv);
chains = ct_priv->chains;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
index 0bb0633b7542..27483aa7be8a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
@@ -231,8 +231,7 @@ mlx5e_set_ktls_rx_priv_ctx(struct tls_context *tls_ctx,
struct mlx5e_ktls_offload_context_rx **ctx =
__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_RX);
- BUILD_BUG_ON(sizeof(struct mlx5e_ktls_offload_context_rx *) >
- TLS_OFFLOAD_CONTEXT_SIZE_RX);
+ BUILD_BUG_ON(sizeof(priv_rx) > TLS_DRIVER_STATE_SIZE_RX);
*ctx = priv_rx;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index 4b6f0d1ea59a..f239fb2e832f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -68,8 +68,7 @@ mlx5e_set_ktls_tx_priv_ctx(struct tls_context *tls_ctx,
struct mlx5e_ktls_offload_context_tx **ctx =
__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX);
- BUILD_BUG_ON(sizeof(struct mlx5e_ktls_offload_context_tx *) >
- TLS_OFFLOAD_CONTEXT_SIZE_TX);
+ BUILD_BUG_ON(sizeof(priv_tx) > TLS_DRIVER_STATE_SIZE_TX);
*ctx = priv_tx;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index 43a536cb81db..c0f409c195bf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -38,11 +38,12 @@
void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc)
{
+ bool ro_pci_enable = pcie_relaxed_ordering_enabled(mdev->pdev);
bool ro_write = MLX5_CAP_GEN(mdev, relaxed_ordering_write);
bool ro_read = MLX5_CAP_GEN(mdev, relaxed_ordering_read);
- MLX5_SET(mkc, mkc, relaxed_ordering_read, ro_read);
- MLX5_SET(mkc, mkc, relaxed_ordering_write, ro_write);
+ MLX5_SET(mkc, mkc, relaxed_ordering_read, ro_pci_enable && ro_read);
+ MLX5_SET(mkc, mkc, relaxed_ordering_write, ro_pci_enable && ro_write);
}
static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index eb90e79388f1..f797fd97d305 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -950,6 +950,13 @@ err_event_reg:
return err;
}
+static void mlx5e_cleanup_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
+{
+ mlx5e_rep_tc_netdevice_event_unregister(rpriv);
+ mlx5e_rep_bond_cleanup(rpriv);
+ mlx5e_rep_tc_cleanup(rpriv);
+}
+
static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
@@ -961,42 +968,36 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
return err;
}
- err = mlx5e_tc_ht_init(&rpriv->tc_ht);
- if (err)
- goto err_ht_init;
-
if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
err = mlx5e_init_uplink_rep_tx(rpriv);
if (err)
goto err_init_tx;
}
+ err = mlx5e_tc_ht_init(&rpriv->tc_ht);
+ if (err)
+ goto err_ht_init;
+
return 0;
-err_init_tx:
- mlx5e_tc_ht_cleanup(&rpriv->tc_ht);
err_ht_init:
+ if (rpriv->rep->vport == MLX5_VPORT_UPLINK)
+ mlx5e_cleanup_uplink_rep_tx(rpriv);
+err_init_tx:
mlx5e_destroy_tises(priv);
return err;
}
-static void mlx5e_cleanup_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
-{
- mlx5e_rep_tc_netdevice_event_unregister(rpriv);
- mlx5e_rep_bond_cleanup(rpriv);
- mlx5e_rep_tc_cleanup(rpriv);
-}
-
static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
- mlx5e_destroy_tises(priv);
+ mlx5e_tc_ht_cleanup(&rpriv->tc_ht);
if (rpriv->rep->vport == MLX5_VPORT_UPLINK)
mlx5e_cleanup_uplink_rep_tx(rpriv);
- mlx5e_tc_ht_cleanup(&rpriv->tc_ht);
+ mlx5e_destroy_tises(priv);
}
static void mlx5e_rep_enable(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 57fa0489eeb8..1e87bb2b7541 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -688,7 +688,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vnic_env)
u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
struct mlx5_core_dev *mdev = priv->mdev;
- if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
+ if (!mlx5e_stats_grp_vnic_env_num_stats(priv))
return;
MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 34bf11cdf90f..9ca2c8763237 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -3793,7 +3793,7 @@ static bool is_lag_dev(struct mlx5e_priv *priv,
static bool is_multiport_eligible(struct mlx5e_priv *priv, struct net_device *out_dev)
{
- if (mlx5e_eswitch_uplink_rep(out_dev) &&
+ if (same_hw_reps(priv, out_dev) &&
MLX5_CAP_PORT_SELECTION(priv->mdev, port_select_flow_table) &&
MLX5_CAP_GEN(priv->mdev, create_lag_when_not_master_up))
return true;
@@ -4529,13 +4529,6 @@ static int mlx5e_policer_validate(const struct flow_action *action,
return -EOPNOTSUPP;
}
- if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
- act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
- NL_SET_ERR_MSG_MOD(extack,
- "Offload not supported when conform action is not pipe or ok");
- return -EOPNOTSUPP;
- }
-
if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
!flow_action_is_last_entry(action, act)) {
NL_SET_ERR_MSG_MOD(extack,
@@ -4586,6 +4579,12 @@ static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
case FLOW_ACTION_POLICE:
+ if (act->police.notexceed.act_id != FLOW_ACTION_CONTINUE) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not continue");
+ return -EOPNOTSUPP;
+ }
+
err = mlx5e_policer_validate(flow_action, act, extack);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 50d14cec4894..9a7250be229f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -341,6 +341,26 @@ static void mlx5e_tx_check_stop(struct mlx5e_txqsq *sq)
}
}
+static void mlx5e_tx_flush(struct mlx5e_txqsq *sq)
+{
+ struct mlx5e_tx_wqe_info *wi;
+ struct mlx5e_tx_wqe *wqe;
+ u16 pi;
+
+	/* Must not be called when an MPWQE session is active but empty. */
+ mlx5e_tx_mpwqe_ensure_complete(sq);
+
+ pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
+ wi = &sq->db.wqe_info[pi];
+
+ *wi = (struct mlx5e_tx_wqe_info) {
+ .num_wqebbs = 1,
+ };
+
+ wqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
+ mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &wqe->ctrl);
+}
+
static inline void
mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
const struct mlx5e_tx_attr *attr,
@@ -459,6 +479,7 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
err_drop:
stats->dropped++;
dev_kfree_skb_any(skb);
+ mlx5e_tx_flush(sq);
}
static bool mlx5e_tx_skb_supports_mpwqe(struct sk_buff *skb, struct mlx5e_tx_attr *attr)
@@ -560,6 +581,13 @@ mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5_wqe_ctrl_seg *cseg;
struct mlx5e_xmit_data txd;
+ txd.data = skb->data;
+ txd.len = skb->len;
+
+ txd.dma_addr = dma_map_single(sq->pdev, txd.data, txd.len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(sq->pdev, txd.dma_addr)))
+ goto err_unmap;
+
if (!mlx5e_tx_mpwqe_session_is_active(sq)) {
mlx5e_tx_mpwqe_session_start(sq, eseg);
} else if (!mlx5e_tx_mpwqe_same_eseg(sq, eseg)) {
@@ -569,18 +597,9 @@ mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
sq->stats->xmit_more += xmit_more;
- txd.data = skb->data;
- txd.len = skb->len;
-
- txd.dma_addr = dma_map_single(sq->pdev, txd.data, txd.len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(sq->pdev, txd.dma_addr)))
- goto err_unmap;
mlx5e_dma_push(sq, txd.dma_addr, txd.len, MLX5E_DMA_MAP_SINGLE);
-
mlx5e_skb_fifo_push(&sq->db.skb_fifo, skb);
-
mlx5e_tx_mpwqe_add_dseg(sq, &txd);
-
mlx5e_tx_skb_update_hwts_flags(skb);
if (unlikely(mlx5e_tx_mpwqe_is_full(&sq->mpwqe, sq->max_sq_mpw_wqebbs))) {
@@ -602,6 +621,7 @@ err_unmap:
mlx5e_dma_unmap_wqe_err(sq, 1);
sq->stats->dropped++;
dev_kfree_skb_any(skb);
+ mlx5e_tx_flush(sq);
}
void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq)
@@ -1006,5 +1026,6 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
err_drop:
stats->dropped++;
dev_kfree_skb_any(skb);
+ mlx5e_tx_flush(sq);
}
#endif
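
On these error paths the driver may already have consumed a slot's worth of producer state, so mlx5e_tx_flush() fills the current position with a one-WQEBB NOP and rings the doorbell, keeping the queue's accounting consistent with what hardware will consume. A toy descriptor-ring model of that idea, not mlx5 code; the ring, NOP kind and producer counter are all stand-ins:

#include <stdio.h>

#define RING 8
enum { NOP, DATA };
struct desc { int kind; };

static struct desc ring[RING];
static unsigned int pc; /* producer counter shown to "hardware" */

static void post(int kind)
{
	ring[pc % RING] = (struct desc){ .kind = kind };
	pc++; /* a real driver would ring the doorbell here */
}

static void xmit(int fail)
{
	if (fail) {
		post(NOP); /* drop the packet but keep the slot accounted */
		return;
	}
	post(DATA);
}

int main(void)
{
	xmit(0);
	xmit(1); /* error path: slot 1 becomes a NOP, pc still advances */
	printf("pc=%u slot1=%s\n", pc, ring[1].kind == NOP ? "NOP" : "DATA");
	return 0;
}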
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
index 9d17206d1625..fabe49a35a5c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
@@ -11,6 +11,7 @@
#include "mlx5_core.h"
#include "eswitch.h"
#include "fs_core.h"
+#include "fs_ft_pool.h"
#include "esw/qos.h"
enum {
@@ -95,8 +96,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
if (!flow_group_in)
return -ENOMEM;
- table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
- ft_attr.max_fte = table_size;
+ ft_attr.max_fte = POOL_NEXT_SIZE;
ft_attr.prio = LEGACY_FDB_PRIO;
fdb = mlx5_create_flow_table(root_ns, &ft_attr);
if (IS_ERR(fdb)) {
@@ -105,6 +105,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
goto out;
}
esw->fdb_table.legacy.fdb = fdb;
+ table_size = fdb->max_fte;
/* Addresses group : Full match unicast/multicast addresses */
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 217cac29057f..2ce3728576d1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -2690,9 +2690,6 @@ static int mlx5_esw_offloads_devcom_event(int event,
switch (event) {
case ESW_OFFLOADS_DEVCOM_PAIR:
- if (mlx5_get_next_phys_dev(esw->dev) != peer_esw->dev)
- break;
-
if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
break;
@@ -2744,6 +2741,9 @@ static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
return;
+ if (!mlx5_is_lag_supported(esw->dev))
+ return;
+
mlx5_devcom_register_component(devcom,
MLX5_DEVCOM_ESW_OFFLOADS,
mlx5_esw_offloads_devcom_event,
@@ -2761,6 +2761,9 @@ static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
return;
+ if (!mlx5_is_lag_supported(esw->dev))
+ return;
+
mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
ESW_OFFLOADS_DEVCOM_UNPAIR, esw);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index fdcf7f529330..21e5c709b2d3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1574,9 +1574,22 @@ static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte,
return NULL;
}
-static bool check_conflicting_actions(u32 action1, u32 action2)
+static bool check_conflicting_actions_vlan(const struct mlx5_fs_vlan *vlan0,
+ const struct mlx5_fs_vlan *vlan1)
{
- u32 xored_actions = action1 ^ action2;
+ return vlan0->ethtype != vlan1->ethtype ||
+ vlan0->vid != vlan1->vid ||
+ vlan0->prio != vlan1->prio;
+}
+
+static bool check_conflicting_actions(const struct mlx5_flow_act *act1,
+ const struct mlx5_flow_act *act2)
+{
+ u32 action1 = act1->action;
+ u32 action2 = act2->action;
+ u32 xored_actions;
+
+ xored_actions = action1 ^ action2;
/* if one rule only wants to count, it's ok */
if (action1 == MLX5_FLOW_CONTEXT_ACTION_COUNT ||
@@ -1593,6 +1606,22 @@ static bool check_conflicting_actions(u32 action1, u32 action2)
MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2))
return true;
+ if (action1 & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT &&
+ act1->pkt_reformat != act2->pkt_reformat)
+ return true;
+
+ if (action1 & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
+ act1->modify_hdr != act2->modify_hdr)
+ return true;
+
+ if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH &&
+ check_conflicting_actions_vlan(&act1->vlan[0], &act2->vlan[0]))
+ return true;
+
+ if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 &&
+ check_conflicting_actions_vlan(&act1->vlan[1], &act2->vlan[1]))
+ return true;
+
return false;
}
@@ -1600,7 +1629,7 @@ static int check_conflicting_ftes(struct fs_fte *fte,
const struct mlx5_flow_context *flow_context,
const struct mlx5_flow_act *flow_act)
{
- if (check_conflicting_actions(flow_act->action, fte->action.action)) {
+ if (check_conflicting_actions(flow_act, &fte->action)) {
mlx5_core_warn(get_dev(&fte->node),
"Found two FTEs with conflicting actions\n");
return -EEXIST;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c
index 15e41dc84d53..b8feaf0f5c4c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c
@@ -44,7 +44,7 @@ static int port_sel_mode_show(struct seq_file *file, void *priv)
ldev = dev->priv.lag;
mutex_lock(&ldev->lock);
if (__mlx5_lag_is_active(ldev))
- mode = mlx5_get_str_port_sel_mode(ldev);
+ mode = mlx5_get_str_port_sel_mode(ldev->mode, ldev->mode_flags);
else
ret = -EINVAL;
mutex_unlock(&ldev->lock);
@@ -72,6 +72,7 @@ static int state_show(struct seq_file *file, void *priv)
static int flags_show(struct seq_file *file, void *priv)
{
struct mlx5_core_dev *dev = file->private;
+ bool fdb_sel_mode_native;
struct mlx5_lag *ldev;
bool shared_fdb;
bool lag_active;
@@ -79,14 +80,21 @@ static int flags_show(struct seq_file *file, void *priv)
ldev = dev->priv.lag;
mutex_lock(&ldev->lock);
lag_active = __mlx5_lag_is_active(ldev);
- if (lag_active)
- shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags);
+ if (!lag_active)
+ goto unlock;
+
+ shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags);
+ fdb_sel_mode_native = test_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE,
+ &ldev->mode_flags);
+unlock:
mutex_unlock(&ldev->lock);
if (!lag_active)
return -EINVAL;
seq_printf(file, "%s:%s\n", "shared_fdb", shared_fdb ? "on" : "off");
+ seq_printf(file, "%s:%s\n", "fdb_selection_mode",
+ fdb_sel_mode_native ? "native" : "affinity");
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index 552b6e26e701..5d41e19378e0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -68,14 +68,15 @@ static int get_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags)
static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 *ports, int mode,
unsigned long flags)
{
- bool shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags);
+ bool fdb_sel_mode = test_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE,
+ &flags);
int port_sel_mode = get_port_sel_mode(mode, flags);
u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {};
void *lag_ctx;
lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx);
MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG);
- MLX5_SET(lagc, lag_ctx, fdb_selection_mode, shared_fdb);
+ MLX5_SET(lagc, lag_ctx, fdb_selection_mode, fdb_sel_mode);
if (port_sel_mode == MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY) {
MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[0]);
MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[1]);
@@ -471,8 +472,13 @@ static int mlx5_lag_set_flags(struct mlx5_lag *ldev, enum mlx5_lag_mode mode,
bool roce_lag = mode == MLX5_LAG_MODE_ROCE;
*flags = 0;
- if (shared_fdb)
+ if (shared_fdb) {
set_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, flags);
+ set_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE, flags);
+ }
+
+ if (mode == MLX5_LAG_MODE_MPESW)
+ set_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE, flags);
if (roce_lag)
return mlx5_lag_set_port_sel_mode_roce(ldev, flags);
@@ -481,9 +487,9 @@ static int mlx5_lag_set_flags(struct mlx5_lag *ldev, enum mlx5_lag_mode mode,
return 0;
}
-char *mlx5_get_str_port_sel_mode(struct mlx5_lag *ldev)
+char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags)
{
- int port_sel_mode = get_port_sel_mode(ldev->mode, ldev->mode_flags);
+ int port_sel_mode = get_port_sel_mode(mode, flags);
switch (port_sel_mode) {
case MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY: return "queue_affinity";
@@ -507,7 +513,7 @@ static int mlx5_create_lag(struct mlx5_lag *ldev,
if (tracker)
mlx5_lag_print_mapping(dev0, ldev, tracker, flags);
mlx5_core_info(dev0, "shared_fdb:%d mode:%s\n",
- shared_fdb, mlx5_get_str_port_sel_mode(ldev));
+ shared_fdb, mlx5_get_str_port_sel_mode(mode, flags));
err = mlx5_cmd_create_lag(dev0, ldev->v2p_map, mode, flags);
if (err) {
@@ -783,7 +789,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
{
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
- struct lag_tracker tracker;
+ struct lag_tracker tracker = { };
bool do_bond, roce_lag;
int err;
int i;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
index 72f70fad4641..ce2ce8ccbd70 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
@@ -24,6 +24,7 @@ enum {
enum {
MLX5_LAG_MODE_FLAG_HASH_BASED,
MLX5_LAG_MODE_FLAG_SHARED_FDB,
+ MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE,
};
enum mlx5_lag_mode {
@@ -74,6 +75,16 @@ struct mlx5_lag {
struct lag_mpesw lag_mpesw;
};
+static inline bool mlx5_is_lag_supported(struct mlx5_core_dev *dev)
+{
+ if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
+ !MLX5_CAP_GEN(dev, lag_master) ||
+ MLX5_CAP_GEN(dev, num_lag_ports) < 2 ||
+ MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_MAX_PORTS)
+ return false;
+ return true;
+}
+
static inline struct mlx5_lag *
mlx5_lag_dev(struct mlx5_core_dev *dev)
{
@@ -104,7 +115,7 @@ bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev);
void mlx5_lag_del_mpesw_rule(struct mlx5_core_dev *dev);
int mlx5_lag_add_mpesw_rule(struct mlx5_core_dev *dev);
-char *mlx5_get_str_port_sel_mode(struct mlx5_lag *ldev);
+char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags);
void mlx5_infer_tx_enabled(struct lag_tracker *tracker, u8 num_ports,
u8 *ports, int *num_enabled);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
index ee4b25a50315..f643202b29c6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
@@ -41,7 +41,6 @@ void mlx5_lag_del_mpesw_rule(struct mlx5_core_dev *dev)
int mlx5_lag_add_mpesw_rule(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev = dev->priv.lag;
- bool shared_fdb;
int err = 0;
if (!ldev)
@@ -55,8 +54,8 @@ int mlx5_lag_add_mpesw_rule(struct mlx5_core_dev *dev)
err = -EINVAL;
goto out;
}
- shared_fdb = mlx5_shared_fdb_supported(ldev);
- err = mlx5_activate_lag(ldev, NULL, MLX5_LAG_MODE_MPESW, shared_fdb);
+
+ err = mlx5_activate_lag(ldev, NULL, MLX5_LAG_MODE_MPESW, false);
if (err)
mlx5_core_warn(dev, "Failed to create LAG in MPESW mode (%d)\n", err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 484cb1e4fc7f..9cc7afea2758 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -209,7 +209,6 @@ int mlx5_attach_device(struct mlx5_core_dev *dev);
void mlx5_detach_device(struct mlx5_core_dev *dev);
int mlx5_register_device(struct mlx5_core_dev *dev);
void mlx5_unregister_device(struct mlx5_core_dev *dev);
-struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev);
struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev);
void mlx5_dev_list_lock(void);
void mlx5_dev_list_unlock(void);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h
index a68d931090dd..15c8d4de8350 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h
@@ -8,8 +8,8 @@
#include "spectrum.h"
enum mlxsw_sp_counter_sub_pool_id {
- MLXSW_SP_COUNTER_SUB_POOL_FLOW,
MLXSW_SP_COUNTER_SUB_POOL_RIF,
+ MLXSW_SP_COUNTER_SUB_POOL_FLOW,
};
int mlxsw_sp_counter_alloc(struct mlxsw_sp *mlxsw_sp,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 9dbb573d53ea..ce33dbde124d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -4415,6 +4415,8 @@ static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
return 0;
err_nexthop_neigh_init:
+ list_del(&nh->router_list_node);
+ mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
return err;
}
@@ -5382,7 +5384,7 @@ static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
{
const struct fib_nh *nh = fib_info_nh(fi, 0);
- return nh->fib_nh_scope == RT_SCOPE_LINK ||
+ return nh->fib_nh_gw_family ||
mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, nh, NULL);
}
@@ -6740,6 +6742,7 @@ static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
const struct fib6_info *rt)
{
struct net_device *dev = rt->fib6_nh->fib_nh_dev;
+ int err;
nh->nhgi = nh_grp->nhgi;
nh->nh_weight = rt->fib6_nh->fib_nh_weight;
@@ -6755,7 +6758,16 @@ static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
return 0;
nh->ifindex = dev->ifindex;
- return mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
+ err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
+ if (err)
+ goto err_nexthop_type_init;
+
+ return 0;
+
+err_nexthop_type_init:
+ list_del(&nh->router_list_node);
+ mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
+ return err;
}
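
The fix adds the unwind steps that mlxsw_sp_nexthop6_init() itself performed before the failing call, mirroring the IPv4 path above: whatever a function set up before the error must be torn down, in reverse order, before returning. The idiom in miniature; the step/undo functions are hypothetical:

#include <stdio.h>

static int step_a(void)  { return 0; }
static void undo_a(void) { puts("undo a"); }
static int step_b(void)  { return -1; } /* fails, forcing the unwind */

static int init_nexthop(void)
{
	int err;

	err = step_a();
	if (err)
		return err;

	err = step_b();
	if (err)
		goto err_step_b; /* undo everything done before step_b */

	return 0;

err_step_b:
	undo_a();
	return err;
}

int main(void)
{
	return init_nexthop() ? 1 : 0; /* prints "undo a" */
}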
static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
@@ -10312,7 +10324,7 @@ static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp,
unsigned long *fields = config->fields;
u32 hash_fields;
- switch (net->ipv4.sysctl_fib_multipath_hash_policy) {
+ switch (READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_policy)) {
case 0:
mlxsw_sp_mp4_hash_outer_addr(config);
break;
@@ -10330,7 +10342,7 @@ static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_mp_hash_inner_l3(config);
break;
case 3:
- hash_fields = net->ipv4.sysctl_fib_multipath_hash_fields;
+ hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
/* Outer */
MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_NOT_TCP_NOT_UDP);
MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_TCP_UDP);
@@ -10511,13 +10523,14 @@ static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
struct net *net = mlxsw_sp_net(mlxsw_sp);
- bool usp = net->ipv4.sysctl_ip_fwd_update_priority;
char rgcr_pl[MLXSW_REG_RGCR_LEN];
u64 max_rifs;
+ bool usp;
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
return -EIO;
max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
+ usp = READ_ONCE(net->ipv4.sysctl_ip_fwd_update_priority);
mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
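
These sysctls are written with no lock the fast path shares, so the reads are wrapped in READ_ONCE() to get one coherent load rather than a possibly torn or repeated one. The closest portable analogue is a relaxed C11 atomic load, sketched here with a hypothetical policy variable:

#include <stdatomic.h>
#include <stdio.h>

/* Userspace stand-in for a sysctl that another thread may rewrite */
static _Atomic int hash_policy;

static void mp4_hash_init(void)
{
	/* one coherent load; never re-read mid-function */
	int policy = atomic_load_explicit(&hash_policy, memory_order_relaxed);

	switch (policy) {
	case 0:  puts("outer addresses");         break;
	case 1:  puts("outer addresses + ports"); break;
	default: puts("custom field set");        break;
	}
}

int main(void)
{
	atomic_store_explicit(&hash_policy, 1, memory_order_relaxed);
	mp4_hash_init(); /* prints "outer addresses + ports" */
	return 0;
}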
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c b/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c
index 005e56ea5da1..5893770bfd94 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c
@@ -75,6 +75,9 @@ static int __lan966x_mac_learn(struct lan966x *lan966x, int pgid,
unsigned int vid,
enum macaccess_entry_type type)
{
+ int ret;
+
+ spin_lock(&lan966x->mac_lock);
lan966x_mac_select(lan966x, mac, vid);
/* Issue a write command */
@@ -86,7 +89,10 @@ static int __lan966x_mac_learn(struct lan966x *lan966x, int pgid,
ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_LEARN),
lan966x, ANA_MACACCESS);
- return lan966x_mac_wait_for_completion(lan966x);
+ ret = lan966x_mac_wait_for_completion(lan966x);
+ spin_unlock(&lan966x->mac_lock);
+
+ return ret;
}
/* The mask of the front ports is encoded inside the mac parameter via a call
@@ -113,11 +119,13 @@ int lan966x_mac_learn(struct lan966x *lan966x, int port,
return __lan966x_mac_learn(lan966x, port, false, mac, vid, type);
}
-int lan966x_mac_forget(struct lan966x *lan966x,
- const unsigned char mac[ETH_ALEN],
- unsigned int vid,
- enum macaccess_entry_type type)
+static int lan966x_mac_forget_locked(struct lan966x *lan966x,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type)
{
+ lockdep_assert_held(&lan966x->mac_lock);
+
lan966x_mac_select(lan966x, mac, vid);
/* Issue a forget command */
@@ -128,6 +136,20 @@ int lan966x_mac_forget(struct lan966x *lan966x,
return lan966x_mac_wait_for_completion(lan966x);
}
+int lan966x_mac_forget(struct lan966x *lan966x,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type)
+{
+ int ret;
+
+ spin_lock(&lan966x->mac_lock);
+ ret = lan966x_mac_forget_locked(lan966x, mac, vid, type);
+ spin_unlock(&lan966x->mac_lock);
+
+ return ret;
+}
+
int lan966x_mac_cpu_learn(struct lan966x *lan966x, const char *addr, u16 vid)
{
return lan966x_mac_learn(lan966x, PGID_CPU, addr, vid, ENTRYTYPE_LOCKED);
@@ -161,7 +183,7 @@ static struct lan966x_mac_entry *lan966x_mac_alloc_entry(const unsigned char *ma
{
struct lan966x_mac_entry *mac_entry;
- mac_entry = kzalloc(sizeof(*mac_entry), GFP_KERNEL);
+ mac_entry = kzalloc(sizeof(*mac_entry), GFP_ATOMIC);
if (!mac_entry)
return NULL;
@@ -179,7 +201,6 @@ static struct lan966x_mac_entry *lan966x_mac_find_entry(struct lan966x *lan966x,
struct lan966x_mac_entry *res = NULL;
struct lan966x_mac_entry *mac_entry;
- spin_lock(&lan966x->mac_lock);
list_for_each_entry(mac_entry, &lan966x->mac_entries, list) {
if (mac_entry->vid == vid &&
ether_addr_equal(mac, mac_entry->mac) &&
@@ -188,7 +209,6 @@ static struct lan966x_mac_entry *lan966x_mac_find_entry(struct lan966x *lan966x,
break;
}
}
- spin_unlock(&lan966x->mac_lock);
return res;
}
@@ -231,8 +251,11 @@ int lan966x_mac_add_entry(struct lan966x *lan966x, struct lan966x_port *port,
{
struct lan966x_mac_entry *mac_entry;
- if (lan966x_mac_lookup(lan966x, addr, vid, ENTRYTYPE_NORMAL))
+ spin_lock(&lan966x->mac_lock);
+ if (lan966x_mac_lookup(lan966x, addr, vid, ENTRYTYPE_NORMAL)) {
+ spin_unlock(&lan966x->mac_lock);
return 0;
+ }
/* In case the entry already exists, don't add it again to SW,
* just update HW, but we need to look in the actual HW because
@@ -241,21 +264,25 @@ int lan966x_mac_add_entry(struct lan966x *lan966x, struct lan966x_port *port,
* add the entry but without the extern_learn flag.
*/
mac_entry = lan966x_mac_find_entry(lan966x, addr, vid, port->chip_port);
- if (mac_entry)
- return lan966x_mac_learn(lan966x, port->chip_port,
- addr, vid, ENTRYTYPE_LOCKED);
+ if (mac_entry) {
+ spin_unlock(&lan966x->mac_lock);
+ goto mac_learn;
+ }
mac_entry = lan966x_mac_alloc_entry(addr, vid, port->chip_port);
- if (!mac_entry)
+ if (!mac_entry) {
+ spin_unlock(&lan966x->mac_lock);
return -ENOMEM;
+ }
- spin_lock(&lan966x->mac_lock);
list_add_tail(&mac_entry->list, &lan966x->mac_entries);
spin_unlock(&lan966x->mac_lock);
- lan966x_mac_learn(lan966x, port->chip_port, addr, vid, ENTRYTYPE_LOCKED);
lan966x_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED, addr, vid, port->dev);
+mac_learn:
+ lan966x_mac_learn(lan966x, port->chip_port, addr, vid, ENTRYTYPE_LOCKED);
+
return 0;
}
@@ -269,8 +296,9 @@ int lan966x_mac_del_entry(struct lan966x *lan966x, const unsigned char *addr,
list) {
if (mac_entry->vid == vid &&
ether_addr_equal(addr, mac_entry->mac)) {
- lan966x_mac_forget(lan966x, mac_entry->mac, mac_entry->vid,
- ENTRYTYPE_LOCKED);
+ lan966x_mac_forget_locked(lan966x, mac_entry->mac,
+ mac_entry->vid,
+ ENTRYTYPE_LOCKED);
list_del(&mac_entry->list);
kfree(mac_entry);
@@ -288,8 +316,8 @@ void lan966x_mac_purge_entries(struct lan966x *lan966x)
spin_lock(&lan966x->mac_lock);
list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries,
list) {
- lan966x_mac_forget(lan966x, mac_entry->mac, mac_entry->vid,
- ENTRYTYPE_LOCKED);
+ lan966x_mac_forget_locked(lan966x, mac_entry->mac,
+ mac_entry->vid, ENTRYTYPE_LOCKED);
list_del(&mac_entry->list);
kfree(mac_entry);
@@ -325,10 +353,13 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
{
struct lan966x_mac_entry *mac_entry, *tmp;
unsigned char mac[ETH_ALEN] __aligned(2);
+ struct list_head mac_deleted_entries;
u32 dest_idx;
u32 column;
u16 vid;
+ INIT_LIST_HEAD(&mac_deleted_entries);
+
spin_lock(&lan966x->mac_lock);
list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries, list) {
bool found = false;
@@ -362,20 +393,26 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
}
if (!found) {
- /* Notify the bridge that the entry doesn't exist
- * anymore in the HW and remove the entry from the SW
- * list
- */
- lan966x_mac_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
- mac_entry->mac, mac_entry->vid,
- lan966x->ports[mac_entry->port_index]->dev);
-
list_del(&mac_entry->list);
- kfree(mac_entry);
+ /* Move the entry from the SW list to a tmp list so
+ * that it can be deleted after the lock is released
+ */
+ list_add_tail(&mac_entry->list, &mac_deleted_entries);
}
}
spin_unlock(&lan966x->mac_lock);
+ list_for_each_entry_safe(mac_entry, tmp, &mac_deleted_entries, list) {
+ /* Notify the bridge that the entry doesn't exist
+ * anymore in the HW
+ */
+ lan966x_mac_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
+ mac_entry->mac, mac_entry->vid,
+ lan966x->ports[mac_entry->port_index]->dev);
+ list_del(&mac_entry->list);
+ kfree(mac_entry);
+ }
+
/* Now go to the list of columns and see if any entry was not in the SW
* list, then that means that the entry is new so it needs to notify the
* bridge.
@@ -396,13 +433,20 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
if (WARN_ON(dest_idx >= lan966x->num_phys_ports))
continue;
+ spin_lock(&lan966x->mac_lock);
+ mac_entry = lan966x_mac_find_entry(lan966x, mac, vid, dest_idx);
+ if (mac_entry) {
+ spin_unlock(&lan966x->mac_lock);
+ continue;
+ }
+
mac_entry = lan966x_mac_alloc_entry(mac, vid, dest_idx);
- if (!mac_entry)
+ if (!mac_entry) {
+ spin_unlock(&lan966x->mac_lock);
return;
+ }
mac_entry->row = row;
-
- spin_lock(&lan966x->mac_lock);
list_add_tail(&mac_entry->list, &lan966x->mac_entries);
spin_unlock(&lan966x->mac_lock);
@@ -424,6 +468,7 @@ irqreturn_t lan966x_mac_irq_handler(struct lan966x *lan966x)
lan966x, ANA_MACTINDX);
while (1) {
+ spin_lock(&lan966x->mac_lock);
lan_rmw(ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_SYNC_GET_NEXT),
ANA_MACACCESS_MAC_TABLE_CMD,
lan966x, ANA_MACACCESS);
@@ -447,12 +492,15 @@ irqreturn_t lan966x_mac_irq_handler(struct lan966x *lan966x)
stop = false;
if (column == LAN966X_MAC_COLUMNS - 1 &&
- index == 0 && stop)
+ index == 0 && stop) {
+ spin_unlock(&lan966x->mac_lock);
break;
+ }
entry[column].mach = lan_rd(lan966x, ANA_MACHDATA);
entry[column].macl = lan_rd(lan966x, ANA_MACLDATA);
entry[column].maca = lan_rd(lan966x, ANA_MACACCESS);
+ spin_unlock(&lan966x->mac_lock);
/* Once all the columns are read process them */
if (column == LAN966X_MAC_COLUMNS - 1) {
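[Editor's note] The lan966x_mac_irq_process() change above illustrates a common locking pattern: stale entries are moved to a private list while the spinlock is held, and the bridge notifiers (which must not run under the lock) are invoked only after it is dropped. A minimal sketch, with hypothetical names:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct entry {
	struct list_head list;
};

static void purge_stale(spinlock_t *lock, struct list_head *entries,
			bool (*is_stale)(struct entry *))
{
	struct entry *e, *tmp;
	LIST_HEAD(deleted);

	/* Collect stale entries while holding the lock ... */
	spin_lock(lock);
	list_for_each_entry_safe(e, tmp, entries, list)
		if (is_stale(e))
			list_move_tail(&e->list, &deleted);
	spin_unlock(lock);

	/* ... then notify/free them with the lock released. */
	list_for_each_entry_safe(e, tmp, &deleted, list) {
		list_del(&e->list);
		kfree(e);
	}
}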
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index 5784c4161e5e..1d6e3b641b2e 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -994,7 +994,7 @@ static int lan966x_probe(struct platform_device *pdev)
struct fwnode_handle *ports, *portnp;
struct lan966x *lan966x;
u8 mac_addr[ETH_ALEN];
- int err, i;
+ int err;
lan966x = devm_kzalloc(&pdev->dev, sizeof(*lan966x), GFP_KERNEL);
if (!lan966x)
@@ -1025,11 +1025,7 @@ static int lan966x_probe(struct platform_device *pdev)
if (err)
return dev_err_probe(&pdev->dev, err, "Reset failed");
- i = 0;
- fwnode_for_each_available_child_node(ports, portnp)
- ++i;
-
- lan966x->num_phys_ports = i;
+ lan966x->num_phys_ports = NUM_PHYS_PORTS;
lan966x->ports = devm_kcalloc(&pdev->dev, lan966x->num_phys_ports,
sizeof(struct lan966x_port *),
GFP_KERNEL);
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
index 3b86ddddc756..2787055c1847 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
@@ -34,6 +34,7 @@
/* Reserved amount for (SRC, PRIO) at index 8*SRC + PRIO */
#define QSYS_Q_RSRV 95
+#define NUM_PHYS_PORTS 8
#define CPU_PORT 8
/* Reserved PGIDs */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
index 3429660cd2e5..5edc8b7176c8 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
@@ -396,6 +396,9 @@ static int sparx5_handle_port_mdb_add(struct net_device *dev,
u32 mact_entry;
int res, err;
+ if (!sparx5_netdevice_check(dev))
+ return -EOPNOTSUPP;
+
if (netif_is_bridge_master(v->obj.orig_dev)) {
sparx5_mact_learn(spx5, PGID_CPU, v->addr, v->vid);
return 0;
@@ -466,6 +469,9 @@ static int sparx5_handle_port_mdb_del(struct net_device *dev,
u32 mact_entry, res, pgid_entry[3];
int err;
+ if (!sparx5_netdevice_check(dev))
+ return -EOPNOTSUPP;
+
if (netif_is_bridge_master(v->obj.orig_dev)) {
sparx5_mact_forget(spx5, v->addr, v->vid);
return 0;
diff --git a/drivers/net/ethernet/mscc/ocelot_fdma.c b/drivers/net/ethernet/mscc/ocelot_fdma.c
index 083fddd263ec..8e3894cf5f7c 100644
--- a/drivers/net/ethernet/mscc/ocelot_fdma.c
+++ b/drivers/net/ethernet/mscc/ocelot_fdma.c
@@ -94,19 +94,18 @@ static void ocelot_fdma_activate_chan(struct ocelot *ocelot, dma_addr_t dma,
ocelot_fdma_writel(ocelot, MSCC_FDMA_CH_ACTIVATE, BIT(chan));
}
+static u32 ocelot_fdma_read_ch_safe(struct ocelot *ocelot)
+{
+ return ocelot_fdma_readl(ocelot, MSCC_FDMA_CH_SAFE);
+}
+
static int ocelot_fdma_wait_chan_safe(struct ocelot *ocelot, int chan)
{
- unsigned long timeout;
u32 safe;
- timeout = jiffies + usecs_to_jiffies(OCELOT_FDMA_CH_SAFE_TIMEOUT_US);
- do {
- safe = ocelot_fdma_readl(ocelot, MSCC_FDMA_CH_SAFE);
- if (safe & BIT(chan))
- return 0;
- } while (time_after(jiffies, timeout));
-
- return -ETIMEDOUT;
+ return readx_poll_timeout_atomic(ocelot_fdma_read_ch_safe, ocelot, safe,
+ safe & BIT(chan), 0,
+ OCELOT_FDMA_CH_SAFE_TIMEOUT_US);
}
static void ocelot_fdma_dcb_set_data(struct ocelot_fdma_dcb *dcb,
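[Editor's note] The removed open-coded loop above was also buggy: the polarity of the time_after() test was inverted, so before the deadline the register was polled only once. readx_poll_timeout_atomic() encodes the polling loop once and removes that class of bug. A sketch of its use, with a hypothetical register offset:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

static u32 read_ch_status(void __iomem *base)
{
	return readl(base + 0x10);	/* hypothetical status register */
}

/* Poll until the channel bit is set or 2000 us elapse; returns 0 or
 * -ETIMEDOUT. The _atomic variant never sleeps, so it is usable in
 * contexts where sleeping is forbidden.
 */
static int wait_chan_safe(void __iomem *base, int chan)
{
	u32 safe;

	return readx_poll_timeout_atomic(read_ch_status, base, safe,
					 safe & BIT(chan), 0, 2000);
}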
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 0147de405365..ffb6f6d05a07 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -474,7 +474,7 @@ nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun,
set_tun->ttl = ip4_dst_hoplimit(&rt->dst);
ip_rt_put(rt);
} else {
- set_tun->ttl = net->ipv4.sysctl_ip_default_ttl;
+ set_tun->ttl = READ_ONCE(net->ipv4.sysctl_ip_default_ttl);
}
}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
index 443a5d6eb57b..7c31a46195b2 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
@@ -507,6 +507,11 @@ nfp_fl_calc_key_layers_sz(struct nfp_fl_key_ls in_key_ls, uint16_t *map)
key_size += sizeof(struct nfp_flower_ipv6);
}
+ if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_QINQ) {
+ map[FLOW_PAY_QINQ] = key_size;
+ key_size += sizeof(struct nfp_flower_vlan);
+ }
+
if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_GRE) {
map[FLOW_PAY_GRE] = key_size;
if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6)
@@ -515,11 +520,6 @@ nfp_fl_calc_key_layers_sz(struct nfp_fl_key_ls in_key_ls, uint16_t *map)
key_size += sizeof(struct nfp_flower_ipv4_gre_tun);
}
- if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_QINQ) {
- map[FLOW_PAY_QINQ] = key_size;
- key_size += sizeof(struct nfp_flower_vlan);
- }
-
if ((in_key_ls.key_layer & NFP_FLOWER_LAYER_VXLAN) ||
(in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_GENEVE)) {
map[FLOW_PAY_UDP_TUN] = key_size;
@@ -758,6 +758,17 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
}
}
+ if (NFP_FLOWER_LAYER2_QINQ & key_layer.key_layer_two) {
+ offset = key_map[FLOW_PAY_QINQ];
+ key = kdata + offset;
+ msk = mdata + offset;
+ for (i = 0; i < _CT_TYPE_MAX; i++) {
+ nfp_flower_compile_vlan((struct nfp_flower_vlan *)key,
+ (struct nfp_flower_vlan *)msk,
+ rules[i]);
+ }
+ }
+
if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_GRE) {
offset = key_map[FLOW_PAY_GRE];
key = kdata + offset;
@@ -798,17 +809,6 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
}
}
- if (NFP_FLOWER_LAYER2_QINQ & key_layer.key_layer_two) {
- offset = key_map[FLOW_PAY_QINQ];
- key = kdata + offset;
- msk = mdata + offset;
- for (i = 0; i < _CT_TYPE_MAX; i++) {
- nfp_flower_compile_vlan((struct nfp_flower_vlan *)key,
- (struct nfp_flower_vlan *)msk,
- rules[i]);
- }
- }
-
if (key_layer.key_layer & NFP_FLOWER_LAYER_VXLAN ||
key_layer.key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
offset = key_map[FLOW_PAY_UDP_TUN];
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index 193a167a6762..e01430139b6d 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -625,6 +625,14 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
msk += sizeof(struct nfp_flower_ipv6);
}
+ if (NFP_FLOWER_LAYER2_QINQ & key_ls->key_layer_two) {
+ nfp_flower_compile_vlan((struct nfp_flower_vlan *)ext,
+ (struct nfp_flower_vlan *)msk,
+ rule);
+ ext += sizeof(struct nfp_flower_vlan);
+ msk += sizeof(struct nfp_flower_vlan);
+ }
+
if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GRE) {
if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
struct nfp_flower_ipv6_gre_tun *gre_match;
@@ -660,14 +668,6 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
}
}
- if (NFP_FLOWER_LAYER2_QINQ & key_ls->key_layer_two) {
- nfp_flower_compile_vlan((struct nfp_flower_vlan *)ext,
- (struct nfp_flower_vlan *)msk,
- rule);
- ext += sizeof(struct nfp_flower_vlan);
- msk += sizeof(struct nfp_flower_vlan);
- }
-
if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN ||
key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index 6bf3ec448e7e..97dcf8db7ed2 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -447,7 +447,8 @@ void nfp_tun_unlink_and_update_nn_entries(struct nfp_app *app,
static void
nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
- void *flow, struct neighbour *neigh, bool is_ipv6)
+ void *flow, struct neighbour *neigh, bool is_ipv6,
+ bool override)
{
bool neigh_invalid = !(neigh->nud_state & NUD_VALID) || neigh->dead;
size_t neigh_size = is_ipv6 ? sizeof(struct nfp_tun_neigh_v6) :
@@ -546,6 +547,13 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
if (nn_entry->flow)
list_del(&nn_entry->list_head);
kfree(nn_entry);
+ } else if (nn_entry && !neigh_invalid && override) {
+ mtype = is_ipv6 ? NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6 :
+ NFP_FLOWER_CMSG_TYPE_TUN_NEIGH;
+ nfp_tun_link_predt_entries(app, nn_entry);
+ nfp_flower_xmit_tun_conf(app, mtype, neigh_size,
+ nn_entry->payload,
+ GFP_ATOMIC);
}
spin_unlock_bh(&priv->predt_lock);
@@ -610,7 +618,7 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
dst_release(dst);
}
- nfp_tun_write_neigh(n->dev, app, &flow6, n, true);
+ nfp_tun_write_neigh(n->dev, app, &flow6, n, true, false);
#else
return NOTIFY_DONE;
#endif /* CONFIG_IPV6 */
@@ -633,7 +641,7 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
ip_rt_put(rt);
}
- nfp_tun_write_neigh(n->dev, app, &flow4, n, false);
+ nfp_tun_write_neigh(n->dev, app, &flow4, n, false, false);
}
#else
return NOTIFY_DONE;
@@ -676,7 +684,7 @@ void nfp_tunnel_request_route_v4(struct nfp_app *app, struct sk_buff *skb)
ip_rt_put(rt);
if (!n)
goto fail_rcu_unlock;
- nfp_tun_write_neigh(n->dev, app, &flow, n, false);
+ nfp_tun_write_neigh(n->dev, app, &flow, n, false, true);
neigh_release(n);
rcu_read_unlock();
return;
@@ -718,7 +726,7 @@ void nfp_tunnel_request_route_v6(struct nfp_app *app, struct sk_buff *skb)
if (!n)
goto fail_rcu_unlock;
- nfp_tun_write_neigh(n->dev, app, &flow, n, true);
+ nfp_tun_write_neigh(n->dev, app, &flow, n, true, true);
neigh_release(n);
rcu_read_unlock();
return;
diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
index e509d6dcba5c..805071d64a20 100644
--- a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
@@ -125,17 +125,18 @@ nfp_nfdk_tx_csum(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
static int
nfp_nfdk_tx_maybe_close_block(struct nfp_net_tx_ring *tx_ring,
- unsigned int nr_frags, struct sk_buff *skb)
+ struct sk_buff *skb)
{
unsigned int n_descs, wr_p, nop_slots;
const skb_frag_t *frag, *fend;
struct nfp_nfdk_tx_desc *txd;
+ unsigned int nr_frags;
unsigned int wr_idx;
int err;
recount_descs:
n_descs = nfp_nfdk_headlen_to_segs(skb_headlen(skb));
-
+ nr_frags = skb_shinfo(skb)->nr_frags;
frag = skb_shinfo(skb)->frags;
fend = frag + nr_frags;
for (; frag < fend; frag++)
@@ -281,10 +282,13 @@ netdev_tx_t nfp_nfdk_tx(struct sk_buff *skb, struct net_device *netdev)
if (unlikely((int)metadata < 0))
goto err_flush;
- nr_frags = skb_shinfo(skb)->nr_frags;
- if (nfp_nfdk_tx_maybe_close_block(tx_ring, nr_frags, skb))
+ if (nfp_nfdk_tx_maybe_close_block(tx_ring, skb))
goto err_flush;
+ /* nr_frags can change if nfp_nfdk_tx_maybe_close_block()
+ * linearizes the skb, so read it only afterwards
+ */
+ nr_frags = skb_shinfo(skb)->nr_frags;
/* DMA map all */
wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
txd = &tx_ring->ktxds[wr_idx];
@@ -310,7 +314,16 @@ netdev_tx_t nfp_nfdk_tx(struct sk_buff *skb, struct net_device *netdev)
/* FIELD_PREP() implicitly truncates to chunk */
dma_len -= 1;
- dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN_HEAD, dma_len) |
+
+ /* Pack as much data as possible into each descriptor, but the
+ * first descriptor must include the whole head because of a
+ * firmware limitation. dma_len & NFDK_DESC_TX_DMA_LEN_HEAD can
+ * be smaller than the head length, so clamp dma_len to the
+ * field maximum instead of letting FIELD_PREP() truncate it.
+ */
+ dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN_HEAD,
+ dma_len > NFDK_DESC_TX_DMA_LEN_HEAD ?
+ NFDK_DESC_TX_DMA_LEN_HEAD : dma_len) |
FIELD_PREP(NFDK_DESC_TX_TYPE_HEAD, type);
txd->dma_len_type = cpu_to_le16(dlen_type);
@@ -925,7 +938,9 @@ nfp_nfdk_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
/* FIELD_PREP() implicitly truncates to chunk */
dma_len -= 1;
- dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN_HEAD, dma_len) |
+ dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN_HEAD,
+ dma_len > NFDK_DESC_TX_DMA_LEN_HEAD ?
+ NFDK_DESC_TX_DMA_LEN_HEAD : dma_len) |
FIELD_PREP(NFDK_DESC_TX_TYPE_HEAD, type);
txd->dma_len_type = cpu_to_le16(dlen_type);
@@ -1303,7 +1318,7 @@ nfp_nfdk_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
skb_push(skb, 4));
}
- if (nfp_nfdk_tx_maybe_close_block(tx_ring, 0, skb))
+ if (nfp_nfdk_tx_maybe_close_block(tx_ring, skb))
goto err_free;
/* DMA map all */
@@ -1328,7 +1343,9 @@ nfp_nfdk_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
txbuf++;
dma_len -= 1;
- dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN_HEAD, dma_len) |
+ dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN_HEAD,
+ dma_len > NFDK_DESC_TX_DMA_LEN_HEAD ?
+ NFDK_DESC_TX_DMA_LEN_HEAD : dma_len) |
FIELD_PREP(NFDK_DESC_TX_TYPE_HEAD, type);
txd->dma_len_type = cpu_to_le16(dlen_type);
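[Editor's note] The three dlen_type hunks above guard against the same pitfall: FIELD_PREP() masks the value to the field width, so an over-long dma_len would be silently truncated rather than clamped, and the descriptor could claim less than the full head. A sketch of the clamping idiom with a hypothetical 12-bit field:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/minmax.h>
#include <linux/types.h>

#define DESC_DMA_LEN_HEAD	GENMASK(11, 0)	/* hypothetical field */

static u16 build_dlen_type(unsigned int dma_len, u16 type_bits)
{
	/* Clamp explicitly; FIELD_PREP() would otherwise just mask
	 * off the high bits of an oversized length.
	 */
	return FIELD_PREP(DESC_DMA_LEN_HEAD,
			  min_t(unsigned int, dma_len,
				FIELD_MAX(DESC_DMA_LEN_HEAD))) | type_bits;
}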
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c
index 54af30961351..6eeeb0fda91f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c
@@ -15,7 +15,7 @@
#include "nfp_net_sriov.h"
static int
-nfp_net_sriov_check(struct nfp_app *app, int vf, u16 cap, const char *msg)
+nfp_net_sriov_check(struct nfp_app *app, int vf, u16 cap, const char *msg, bool warn)
{
u16 cap_vf;
@@ -24,12 +24,14 @@ nfp_net_sriov_check(struct nfp_app *app, int vf, u16 cap, const char *msg)
cap_vf = readw(app->pf->vfcfg_tbl2 + NFP_NET_VF_CFG_MB_CAP);
if ((cap_vf & cap) != cap) {
- nfp_warn(app->pf->cpp, "ndo_set_vf_%s not supported\n", msg);
+ if (warn)
+ nfp_warn(app->pf->cpp, "ndo_set_vf_%s not supported\n", msg);
return -EOPNOTSUPP;
}
if (vf < 0 || vf >= app->pf->num_vfs) {
- nfp_warn(app->pf->cpp, "invalid VF id %d\n", vf);
+ if (warn)
+ nfp_warn(app->pf->cpp, "invalid VF id %d\n", vf);
return -EINVAL;
}
@@ -65,7 +67,7 @@ int nfp_app_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
unsigned int vf_offset;
int err;
- err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_MAC, "mac");
+ err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_MAC, "mac", true);
if (err)
return err;
@@ -101,7 +103,7 @@ int nfp_app_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
u32 vlan_tag;
int err;
- err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN, "vlan");
+ err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN, "vlan", true);
if (err)
return err;
@@ -115,7 +117,7 @@ int nfp_app_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
}
/* Check if fw supports or not */
- err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO, "vlan_proto");
+ err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO, "vlan_proto", true);
if (err)
is_proto_sup = false;
@@ -149,7 +151,7 @@ int nfp_app_set_vf_rate(struct net_device *netdev, int vf,
u32 vf_offset, ratevalue;
int err;
- err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_RATE, "rate");
+ err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_RATE, "rate", true);
if (err)
return err;
@@ -181,7 +183,7 @@ int nfp_app_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
int err;
err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_SPOOF,
- "spoofchk");
+ "spoofchk", true);
if (err)
return err;
@@ -205,7 +207,7 @@ int nfp_app_set_vf_trust(struct net_device *netdev, int vf, bool enable)
int err;
err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_TRUST,
- "trust");
+ "trust", true);
if (err)
return err;
@@ -230,7 +232,7 @@ int nfp_app_set_vf_link_state(struct net_device *netdev, int vf,
int err;
err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_LINK_STATE,
- "link_state");
+ "link_state", true);
if (err)
return err;
@@ -265,7 +267,7 @@ int nfp_app_get_vf_config(struct net_device *netdev, int vf,
u8 flags;
int err;
- err = nfp_net_sriov_check(app, vf, 0, "");
+ err = nfp_net_sriov_check(app, vf, 0, "", true);
if (err)
return err;
@@ -285,13 +287,13 @@ int nfp_app_get_vf_config(struct net_device *netdev, int vf,
ivi->vlan = FIELD_GET(NFP_NET_VF_CFG_VLAN_VID, vlan_tag);
ivi->qos = FIELD_GET(NFP_NET_VF_CFG_VLAN_QOS, vlan_tag);
- if (!nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO, "vlan_proto"))
+ if (!nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO, "vlan_proto", false))
ivi->vlan_proto = htons(FIELD_GET(NFP_NET_VF_CFG_VLAN_PROT, vlan_tag));
ivi->spoofchk = FIELD_GET(NFP_NET_VF_CFG_CTRL_SPOOF, flags);
ivi->trusted = FIELD_GET(NFP_NET_VF_CFG_CTRL_TRUST, flags);
ivi->linkstate = FIELD_GET(NFP_NET_VF_CFG_CTRL_LINK_STATE, flags);
- err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_RATE, "rate");
+ err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_RATE, "rate", false);
if (!err) {
rate = readl(app->pf->vfcfg_tbl2 + vf_offset +
NFP_NET_VF_CFG_RATE);
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 3098d6672192..1b7fdb4f056b 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -4190,7 +4190,6 @@ static void rtl8169_tso_csum_v1(struct sk_buff *skb, u32 *opts)
static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
struct sk_buff *skb, u32 *opts)
{
- u32 transport_offset = (u32)skb_transport_offset(skb);
struct skb_shared_info *shinfo = skb_shinfo(skb);
u32 mss = shinfo->gso_size;
@@ -4207,7 +4206,7 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
WARN_ON_ONCE(1);
}
- opts[0] |= transport_offset << GTTCPHO_SHIFT;
+ opts[0] |= skb_transport_offset(skb) << GTTCPHO_SHIFT;
opts[1] |= mss << TD1_MSS_SHIFT;
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
u8 ip_protocol;
@@ -4235,7 +4234,7 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
else
WARN_ON_ONCE(1);
- opts[1] |= transport_offset << TCPHO_SHIFT;
+ opts[1] |= skb_transport_offset(skb) << TCPHO_SHIFT;
} else {
unsigned int padto = rtl_quirk_packet_padto(tp, skb);
@@ -4402,14 +4401,13 @@ static netdev_features_t rtl8169_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features)
{
- int transport_offset = skb_transport_offset(skb);
struct rtl8169_private *tp = netdev_priv(dev);
if (skb_is_gso(skb)) {
if (tp->mac_version == RTL_GIGA_MAC_VER_34)
features = rtl8168evl_fix_tso(skb, features);
- if (transport_offset > GTTCPHO_MAX &&
+ if (skb_transport_offset(skb) > GTTCPHO_MAX &&
rtl_chip_supports_csum_v2(tp))
features &= ~NETIF_F_ALL_TSO;
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -4420,7 +4418,7 @@ static netdev_features_t rtl8169_features_check(struct sk_buff *skb,
if (rtl_quirk_packet_padto(tp, skb))
features &= ~NETIF_F_CSUM_MASK;
- if (transport_offset > TCPHO_MAX &&
+ if (skb_transport_offset(skb) > TCPHO_MAX &&
rtl_chip_supports_csum_v2(tp))
features &= ~NETIF_F_CSUM_MASK;
}
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 186cb28c03bd..8b62ce21aff3 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -1932,7 +1932,10 @@ static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
efx_update_sw_stats(efx, stats);
out:
+ /* releasing a DMA coherent buffer with BH disabled can panic */
+ spin_unlock_bh(&efx->stats_lock);
efx_nic_free_buffer(efx, &stats_buf);
+ spin_lock_bh(&efx->stats_lock);
return rc;
}
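[Editor's note] The ef10 fix above works around the rule that freeing a DMA-coherent buffer may sleep (and can panic with bottom halves disabled): the BH lock is dropped around the free and re-taken, so the caller still returns with it held. A minimal sketch of the shape, with hypothetical names; any state the lock protects must be re-validated after re-acquiring it.

#include <linux/spinlock.h>

/* Called with stats_lock held; returns with it held. */
static void free_dma_buf_locked(spinlock_t *stats_lock,
				void (*free_buf)(void *), void *buf)
{
	/* The buffer release is not safe with BH disabled, so drop
	 * the lock around it and re-take it afterwards.
	 */
	spin_unlock_bh(stats_lock);
	free_buf(buf);
	spin_lock_bh(stats_lock);
}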
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c
index 7f5aa4a8c451..92550c7e85ce 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.c
+++ b/drivers/net/ethernet/sfc/ef10_sriov.c
@@ -408,8 +408,9 @@ fail1:
static int efx_ef10_pci_sriov_disable(struct efx_nic *efx, bool force)
{
struct pci_dev *dev = efx->pci_dev;
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
unsigned int vfs_assigned = pci_vfs_assigned(dev);
- int rc = 0;
+ int i, rc = 0;
if (vfs_assigned && !force) {
netif_info(efx, drv, efx->net_dev, "VFs are assigned to guests; "
@@ -417,10 +418,13 @@ static int efx_ef10_pci_sriov_disable(struct efx_nic *efx, bool force)
return -EBUSY;
}
- if (!vfs_assigned)
+ if (!vfs_assigned) {
+ for (i = 0; i < efx->vf_count; i++)
+ nic_data->vf[i].pci_dev = NULL;
pci_disable_sriov(dev);
- else
+ } else {
rc = -EBUSY;
+ }
efx_ef10_sriov_free_vf_vswitching(efx);
efx->vf_count = 0;
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index a0654e88444c..0329caf63279 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -1515,14 +1515,14 @@ static void epic_remove_one(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
struct epic_private *ep = netdev_priv(dev);
+ unregister_netdev(dev);
dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, ep->tx_ring,
ep->tx_ring_dma);
dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, ep->rx_ring,
ep->rx_ring_dma);
- unregister_netdev(dev);
pci_iounmap(pdev, ep->ioaddr);
- pci_release_regions(pdev);
free_netdev(dev);
+ pci_release_regions(pdev);
pci_disable_device(pdev);
/* pci_power_off(pdev, -1); */
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index bc91fd867dcd..358fc26f8d1f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -361,6 +361,7 @@ bypass_clk_reset_gpio:
data->fix_mac_speed = tegra_eqos_fix_speed;
data->init = tegra_eqos_init;
data->bsp_priv = eqos;
+ data->sph_disable = 1;
err = tegra_eqos_init(pdev, eqos);
if (err < 0)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
index 9a6d819b84ae..378b4dd826bb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
@@ -273,7 +273,8 @@ static int ingenic_mac_probe(struct platform_device *pdev)
mac->tx_delay = tx_delay_ps * 1000;
} else {
dev_err(&pdev->dev, "Invalid TX clock delay: %dps\n", tx_delay_ps);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_remove_config_dt;
}
}
@@ -283,7 +284,8 @@ static int ingenic_mac_probe(struct platform_device *pdev)
mac->rx_delay = rx_delay_ps * 1000;
} else {
dev_err(&pdev->dev, "Invalid RX clock delay: %dps\n", rx_delay_ps);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_remove_config_dt;
}
}
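[Editor's note] Both ingenic fixes above close the same leak: an early return -EINVAL skipped the err_remove_config_dt unwind label. A minimal sketch of the goto-unwind convention, with hypothetical stand-in helpers:

#include <linux/errno.h>

static int acquire_config(void) { return 0; }	/* hypothetical stubs */
static void release_config(void) { }
static int delay_valid(int ps) { return ps >= 0; }

static int demo_probe(int tx_delay_ps)
{
	int ret;

	ret = acquire_config();
	if (ret)
		return ret;	/* nothing to unwind yet */

	if (!delay_valid(tx_delay_ps)) {
		ret = -EINVAL;
		goto err_remove_config;	/* unwind, don't just return */
	}

	return 0;

err_remove_config:
	release_config();
	return ret;
}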
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index f9f80933e0c9..3fe720c5dc9f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -298,6 +298,11 @@ static void get_arttime(struct mii_bus *mii, int intel_adhoc_addr,
*art_time = ns;
}
+static int stmmac_cross_ts_isr(struct stmmac_priv *priv)
+{
+ return (readl(priv->ioaddr + GMAC_INT_STATUS) & GMAC_INT_TSIE);
+}
+
static int intel_crosststamp(ktime_t *device,
struct system_counterval_t *system,
void *ctx)
@@ -313,8 +318,6 @@ static int intel_crosststamp(ktime_t *device,
u32 num_snapshot;
u32 gpio_value;
u32 acr_value;
- int ret;
- u32 v;
int i;
if (!boot_cpu_has(X86_FEATURE_ART))
@@ -328,6 +331,8 @@ static int intel_crosststamp(ktime_t *device,
if (priv->plat->ext_snapshot_en)
return -EBUSY;
+ priv->plat->int_snapshot_en = 1;
+
mutex_lock(&priv->aux_ts_lock);
/* Enable Internal snapshot trigger */
acr_value = readl(ptpaddr + PTP_ACR);
@@ -347,6 +352,7 @@ static int intel_crosststamp(ktime_t *device,
break;
default:
mutex_unlock(&priv->aux_ts_lock);
+ priv->plat->int_snapshot_en = 0;
return -EINVAL;
}
writel(acr_value, ptpaddr + PTP_ACR);
@@ -368,13 +374,12 @@ static int intel_crosststamp(ktime_t *device,
gpio_value |= GMAC_GPO1;
writel(gpio_value, ioaddr + GMAC_GPIO_STATUS);
- /* Poll for time sync operation done */
- ret = readl_poll_timeout(priv->ioaddr + GMAC_INT_STATUS, v,
- (v & GMAC_INT_TSIE), 100, 10000);
-
- if (ret == -ETIMEDOUT) {
- pr_err("%s: Wait for time sync operation timeout\n", __func__);
- return ret;
+ /* Time sync done indication - interrupt method */
+ if (!wait_event_interruptible_timeout(priv->tstamp_busy_wait,
+ stmmac_cross_ts_isr(priv),
+ HZ / 100)) {
+ priv->plat->int_snapshot_en = 0;
+ return -ETIMEDOUT;
}
num_snapshot = (readl(ioaddr + GMAC_TIMESTAMP_STATUS) &
@@ -392,6 +397,7 @@ static int intel_crosststamp(ktime_t *device,
}
system->cycles *= intel_priv->crossts_adj;
+ priv->plat->int_snapshot_en = 0;
return 0;
}
@@ -576,6 +582,7 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
plat->has_crossts = true;
plat->crosststamp = intel_crosststamp;
+ plat->int_snapshot_en = 0;
/* Setup MSI vector offset specific to Intel mGbE controller */
plat->msi_mac_vec = 29;
@@ -1072,13 +1079,11 @@ static int intel_eth_pci_probe(struct pci_dev *pdev,
ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
if (ret) {
- goto err_dvr_probe;
+ goto err_alloc_irq;
}
return 0;
-err_dvr_probe:
- pci_free_irq_vectors(pdev);
err_alloc_irq:
clk_disable_unprepare(plat->stmmac_clk);
clk_unregister_fixed_rate(plat->stmmac_clk);
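[Editor's note] The dwmac-intel change above replaces register polling with an interrupt-driven wait: the crosstimestamp path sleeps on a waitqueue and the timestamp ISR wakes it. A minimal sketch of the pairing, with hypothetical names; note that wait_event_interruptible_timeout() returns 0 on timeout and a negative value if interrupted by a signal.

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(ts_wq);
static bool ts_done;

/* Interrupt side: record the event and wake any sleeper. */
static void ts_irq(void)
{
	ts_done = true;
	wake_up(&ts_wq);
}

/* Sleeping side: wait up to 10 ms instead of busy-polling. */
static int ts_wait(void)
{
	long ret = wait_event_interruptible_timeout(ts_wq, ts_done, HZ / 100);

	if (!ret)
		return -ETIMEDOUT;
	return ret < 0 ? ret : 0;
}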
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
index 6ff88df58767..ca8ab290013c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
@@ -576,32 +576,7 @@ static int mediatek_dwmac_init(struct platform_device *pdev, void *priv)
}
}
- ret = clk_bulk_prepare_enable(variant->num_clks, plat->clks);
- if (ret) {
- dev_err(plat->dev, "failed to enable clks, err = %d\n", ret);
- return ret;
- }
-
- ret = clk_prepare_enable(plat->rmii_internal_clk);
- if (ret) {
- dev_err(plat->dev, "failed to enable rmii internal clk, err = %d\n", ret);
- goto err_clk;
- }
-
return 0;
-
-err_clk:
- clk_bulk_disable_unprepare(variant->num_clks, plat->clks);
- return ret;
-}
-
-static void mediatek_dwmac_exit(struct platform_device *pdev, void *priv)
-{
- struct mediatek_dwmac_plat_data *plat = priv;
- const struct mediatek_dwmac_variant *variant = plat->variant;
-
- clk_disable_unprepare(plat->rmii_internal_clk);
- clk_bulk_disable_unprepare(variant->num_clks, plat->clks);
}
static int mediatek_dwmac_clks_config(void *priv, bool enabled)
@@ -643,7 +618,6 @@ static int mediatek_dwmac_common_data(struct platform_device *pdev,
plat->addr64 = priv_plat->variant->dma_bit_mask;
plat->bsp_priv = priv_plat;
plat->init = mediatek_dwmac_init;
- plat->exit = mediatek_dwmac_exit;
plat->clks_config = mediatek_dwmac_clks_config;
if (priv_plat->variant->dwmac_fix_mac_speed)
plat->fix_mac_speed = priv_plat->variant->dwmac_fix_mac_speed;
@@ -712,13 +686,32 @@ static int mediatek_dwmac_probe(struct platform_device *pdev)
mediatek_dwmac_common_data(pdev, plat_dat, priv_plat);
mediatek_dwmac_init(pdev, priv_plat);
+ ret = mediatek_dwmac_clks_config(priv_plat, true);
+ if (ret)
+ return ret;
+
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret) {
stmmac_remove_config_dt(pdev, plat_dat);
- return ret;
+ goto err_drv_probe;
}
return 0;
+
+err_drv_probe:
+ mediatek_dwmac_clks_config(priv_plat, false);
+ return ret;
+}
+
+static int mediatek_dwmac_remove(struct platform_device *pdev)
+{
+ struct mediatek_dwmac_plat_data *priv_plat = get_stmmac_bsp_priv(&pdev->dev);
+ int ret;
+
+ ret = stmmac_pltfr_remove(pdev);
+ mediatek_dwmac_clks_config(priv_plat, false);
+
+ return ret;
}
static const struct of_device_id mediatek_dwmac_match[] = {
@@ -733,7 +726,7 @@ MODULE_DEVICE_TABLE(of, mediatek_dwmac_match);
static struct platform_driver mediatek_dwmac_driver = {
.probe = mediatek_dwmac_probe,
- .remove = stmmac_pltfr_remove,
+ .remove = mediatek_dwmac_remove,
.driver = {
.name = "dwmac-mediatek",
.pm = &stmmac_pltfr_pm_ops,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index 462ca7ed095a..71dad409f78b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -150,7 +150,8 @@
#define GMAC_PCS_IRQ_DEFAULT (GMAC_INT_RGSMIIS | GMAC_INT_PCS_LINK | \
GMAC_INT_PCS_ANE)
-#define GMAC_INT_DEFAULT_ENABLE (GMAC_INT_PMT_EN | GMAC_INT_LPI_EN)
+#define GMAC_INT_DEFAULT_ENABLE (GMAC_INT_PMT_EN | GMAC_INT_LPI_EN | \
+ GMAC_INT_TSIE)
enum dwmac4_irq_status {
time_stamp_irq = 0x00001000,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index fd41db65fe1d..d8f1fbc25bdd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -23,6 +23,7 @@
static void dwmac4_core_init(struct mac_device_info *hw,
struct net_device *dev)
{
+ struct stmmac_priv *priv = netdev_priv(dev);
void __iomem *ioaddr = hw->pcsr;
u32 value = readl(ioaddr + GMAC_CONFIG);
@@ -58,6 +59,9 @@ static void dwmac4_core_init(struct mac_device_info *hw,
value |= GMAC_INT_FPE_EN;
writel(value, ioaddr + GMAC_INT_EN);
+
+ if (GMAC_INT_DEFAULT_ENABLE & GMAC_INT_TSIE)
+ init_waitqueue_head(&priv->tstamp_busy_wait);
}
static void dwmac4_rx_queue_enable(struct mac_device_info *hw,
@@ -219,6 +223,9 @@ static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan)
if (queue == 0 || queue == 4) {
value &= ~MTL_RXQ_DMA_Q04MDMACH_MASK;
value |= MTL_RXQ_DMA_Q04MDMACH(chan);
+ } else if (queue > 4) {
+ value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue - 4);
+ value |= MTL_RXQ_DMA_QXMDMACH(chan, queue - 4);
} else {
value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue);
value |= MTL_RXQ_DMA_QXMDMACH(chan, queue);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 57970ae2178d..f9e83964aa7e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -266,6 +266,7 @@ struct stmmac_priv {
rwlock_t ptp_lock;
/* Protects auxiliary snapshot registers from concurrent access. */
struct mutex aux_ts_lock;
+ wait_queue_head_t tstamp_busy_wait;
void __iomem *mmcaddr;
void __iomem *ptpaddr;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index abfb3cd5958d..9c3055ee2608 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -803,14 +803,6 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev,
netdev_warn(priv->dev,
"Setting EEE tx-lpi is not supported\n");
- if (priv->hw->xpcs) {
- ret = xpcs_config_eee(priv->hw->xpcs,
- priv->plat->mult_fact_100ns,
- edata->eee_enabled);
- if (ret)
- return ret;
- }
-
if (!edata->eee_enabled)
stmmac_disable_eee_mode(priv);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index 92d32940aff0..764832f4dae1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -179,6 +179,11 @@ static void timestamp_interrupt(struct stmmac_priv *priv)
u64 ptp_time;
int i;
+ if (priv->plat->int_snapshot_en) {
+ wake_up(&priv->tstamp_busy_wait);
+ return;
+ }
+
tsync_int = readl(priv->ioaddr + GMAC_INT_STATUS) & GMAC_INT_TSIE;
if (!tsync_int)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index d1a7cf4567bc..c5f33630e771 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -834,19 +834,10 @@ int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
struct timespec64 now;
u32 sec_inc = 0;
u64 temp = 0;
- int ret;
if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
return -EOPNOTSUPP;
- ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
- if (ret < 0) {
- netdev_warn(priv->dev,
- "failed to enable PTP reference clock: %pe\n",
- ERR_PTR(ret));
- return ret;
- }
-
stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
priv->systime_flags = systime_flags;
@@ -3270,6 +3261,14 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
stmmac_mmc_setup(priv);
+ if (ptp_register) {
+ ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
+ if (ret < 0)
+ netdev_warn(priv->dev,
+ "failed to enable PTP reference clock: %pe\n",
+ ERR_PTR(ret));
+ }
+
ret = stmmac_init_ptp(priv);
if (ret == -EOPNOTSUPP)
netdev_info(priv->dev, "PTP not supported by HW\n");
@@ -7213,8 +7212,6 @@ int stmmac_dvr_remove(struct device *dev)
netdev_info(priv->dev, "%s: removing driver", __func__);
pm_runtime_get_sync(dev);
- pm_runtime_disable(dev);
- pm_runtime_put_noidle(dev);
stmmac_stop_all_dma(priv);
stmmac_mac_set(priv, priv->ioaddr, false);
@@ -7241,6 +7238,9 @@ int stmmac_dvr_remove(struct device *dev)
mutex_destroy(&priv->lock);
bitmap_free(priv->af_xdp_zc_qps);
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
+
return 0;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 11e1055e8260..9f5cac4000da 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -815,7 +815,13 @@ static int __maybe_unused stmmac_pltfr_noirq_resume(struct device *dev)
if (ret)
return ret;
- stmmac_init_tstamp_counter(priv, priv->systime_flags);
+ ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
+ if (ret < 0) {
+ netdev_warn(priv->dev,
+ "failed to enable PTP reference clock: %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
}
return 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index e45fb191d8e6..4d11980dcd64 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -175,11 +175,10 @@ static int stmmac_enable(struct ptp_clock_info *ptp,
struct stmmac_priv *priv =
container_of(ptp, struct stmmac_priv, ptp_clock_ops);
void __iomem *ptpaddr = priv->ptpaddr;
- void __iomem *ioaddr = priv->hw->pcsr;
struct stmmac_pps_cfg *cfg;
- u32 intr_value, acr_value;
int ret = -EOPNOTSUPP;
unsigned long flags;
+ u32 acr_value;
switch (rq->type) {
case PTP_CLK_REQ_PEROUT:
@@ -213,19 +212,10 @@ static int stmmac_enable(struct ptp_clock_info *ptp,
netdev_dbg(priv->dev, "Auxiliary Snapshot %d enabled.\n",
priv->plat->ext_snapshot_num >>
PTP_ACR_ATSEN_SHIFT);
- /* Enable Timestamp Interrupt */
- intr_value = readl(ioaddr + GMAC_INT_EN);
- intr_value |= GMAC_INT_TSIE;
- writel(intr_value, ioaddr + GMAC_INT_EN);
-
} else {
netdev_dbg(priv->dev, "Auxiliary Snapshot %d disabled.\n",
priv->plat->ext_snapshot_num >>
PTP_ACR_ATSEN_SHIFT);
- /* Disable Timestamp Interrupt */
- intr_value = readl(ioaddr + GMAC_INT_EN);
- intr_value &= ~GMAC_INT_TSIE;
- writel(intr_value, ioaddr + GMAC_INT_EN);
}
writel(acr_value, ptpaddr + PTP_ACR);
mutex_unlock(&priv->aux_ts_lock);
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 77e5dffb558f..8594ee839628 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -545,43 +545,24 @@ static int try_next_permutation(struct happy_meal *hp, void __iomem *tregs)
static void display_link_mode(struct happy_meal *hp, void __iomem *tregs)
{
- printk(KERN_INFO "%s: Link is up using ", hp->dev->name);
- if (hp->tcvr_type == external)
- printk("external ");
- else
- printk("internal ");
- printk("transceiver at ");
hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);
- if (hp->sw_lpa & (LPA_100HALF | LPA_100FULL)) {
- if (hp->sw_lpa & LPA_100FULL)
- printk("100Mb/s, Full Duplex.\n");
- else
- printk("100Mb/s, Half Duplex.\n");
- } else {
- if (hp->sw_lpa & LPA_10FULL)
- printk("10Mb/s, Full Duplex.\n");
- else
- printk("10Mb/s, Half Duplex.\n");
- }
+
+ netdev_info(hp->dev,
+ "Link is up using %s transceiver at %dMb/s, %s Duplex.\n",
+ hp->tcvr_type == external ? "external" : "internal",
+ hp->sw_lpa & (LPA_100HALF | LPA_100FULL) ? 100 : 10,
+ hp->sw_lpa & (LPA_100FULL | LPA_10FULL) ? "Full" : "Half");
}
static void display_forced_link_mode(struct happy_meal *hp, void __iomem *tregs)
{
- printk(KERN_INFO "%s: Link has been forced up using ", hp->dev->name);
- if (hp->tcvr_type == external)
- printk("external ");
- else
- printk("internal ");
- printk("transceiver at ");
hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
- if (hp->sw_bmcr & BMCR_SPEED100)
- printk("100Mb/s, ");
- else
- printk("10Mb/s, ");
- if (hp->sw_bmcr & BMCR_FULLDPLX)
- printk("Full Duplex.\n");
- else
- printk("Half Duplex.\n");
+
+ netdev_info(hp->dev,
+ "Link has been forced up using %s transceiver at %dMb/s, %s Duplex.\n",
+ hp->tcvr_type == external ? "external" : "internal",
+ hp->sw_bmcr & BMCR_SPEED100 ? 100 : 10,
+ hp->sw_bmcr & BMCR_FULLDPLX ? "Full" : "Half");
}
static int set_happy_link_modes(struct happy_meal *hp, void __iomem *tregs)
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index fb92d4c1547d..f4a6b590a1e3 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -2467,7 +2467,6 @@ static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common)
port->port_id, ret);
goto dl_port_unreg;
}
- devlink_port_type_eth_set(dl_port, port->ndev);
}
devlink_register(common->devlink);
return ret;
@@ -2511,6 +2510,7 @@ static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common)
static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
{
struct device *dev = common->dev;
+ struct devlink_port *dl_port;
struct am65_cpsw_port *port;
int ret = 0, i;
@@ -2527,6 +2527,10 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
return ret;
}
+ ret = am65_cpsw_nuss_register_devlink(common);
+ if (ret)
+ return ret;
+
for (i = 0; i < common->port_num; i++) {
port = &common->ports[i];
@@ -2539,25 +2543,24 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
i, ret);
goto err_cleanup_ndev;
}
+
+ dl_port = &port->devlink_port;
+ devlink_port_type_eth_set(dl_port, port->ndev);
}
ret = am65_cpsw_register_notifiers(common);
if (ret)
goto err_cleanup_ndev;
- ret = am65_cpsw_nuss_register_devlink(common);
- if (ret)
- goto clean_unregister_notifiers;
-
/* can't auto unregister ndev using devm_add_action() due to
* devres release sequence in DD core for DMA
*/
return 0;
-clean_unregister_notifiers:
- am65_cpsw_unregister_notifiers(common);
+
err_cleanup_ndev:
am65_cpsw_nuss_cleanup_ndev(common);
+ am65_cpsw_unregister_devlink(common);
return ret;
}
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index 4225efbeda3d..f2e2261b4b7d 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -547,6 +547,57 @@ static inline void axienet_iow(struct axienet_local *lp, off_t offset,
iowrite32(value, lp->regs + offset);
}
+/**
+ * axienet_dma_out32 - Memory mapped Axi DMA register write.
+ * @lp: Pointer to axienet local structure
+ * @reg: Address offset from the base address of the Axi DMA core
+ * @value: Value to be written into the Axi DMA register
+ *
+ * This function writes the desired value into the corresponding Axi DMA
+ * register.
+ */
+
+static inline void axienet_dma_out32(struct axienet_local *lp,
+ off_t reg, u32 value)
+{
+ iowrite32(value, lp->dma_regs + reg);
+}
+
+#if defined(CONFIG_64BIT) && defined(iowrite64)
+/**
+ * axienet_dma_out64 - Memory mapped Axi DMA register write.
+ * @lp: Pointer to axienet local structure
+ * @reg: Address offset from the base address of the Axi DMA core
+ * @value: Value to be written into the Axi DMA register
+ *
+ * This function writes the desired value into the corresponding Axi DMA
+ * register.
+ */
+static inline void axienet_dma_out64(struct axienet_local *lp,
+ off_t reg, u64 value)
+{
+ iowrite64(value, lp->dma_regs + reg);
+}
+
+static inline void axienet_dma_out_addr(struct axienet_local *lp, off_t reg,
+ dma_addr_t addr)
+{
+ if (lp->features & XAE_FEATURE_DMA_64BIT)
+ axienet_dma_out64(lp, reg, addr);
+ else
+ axienet_dma_out32(lp, reg, lower_32_bits(addr));
+}
+
+#else /* !(CONFIG_64BIT && iowrite64) */
+
+static inline void axienet_dma_out_addr(struct axienet_local *lp, off_t reg,
+ dma_addr_t addr)
+{
+ axienet_dma_out32(lp, reg, lower_32_bits(addr));
+}
+
+#endif /* CONFIG_64BIT && iowrite64 */
+
/* Function prototypes visible in xilinx_axienet_mdio.c for other files */
int axienet_mdio_enable(struct axienet_local *lp);
void axienet_mdio_disable(struct axienet_local *lp);
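[Editor's note] The helpers moved into the header above select the widest safe MMIO access at compile time: a single 64-bit write where iowrite64() exists, otherwise only the low word, which the new probe-time check in xilinx_axienet_main.c (below) makes safe by rejecting hardware that needs 64-bit DMA addressing on such builds. A condensed sketch of the same shape:

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/types.h>

static inline void dma_write_addr(void __iomem *regs, unsigned int reg,
				  dma_addr_t addr)
{
#if defined(CONFIG_64BIT) && defined(iowrite64)
	iowrite64(addr, regs + reg);	/* one atomic 64-bit store */
#else
	/* Low word only: probe has already rejected devices that
	 * need the upper 32 bits on this configuration.
	 */
	iowrite32(lower_32_bits(addr), regs + reg);
#endif
}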
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 93c9f305bba4..1760930ec0c4 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -133,30 +133,6 @@ static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
return ioread32(lp->dma_regs + reg);
}
-/**
- * axienet_dma_out32 - Memory mapped Axi DMA register write.
- * @lp: Pointer to axienet local structure
- * @reg: Address offset from the base address of the Axi DMA core
- * @value: Value to be written into the Axi DMA register
- *
- * This function writes the desired value into the corresponding Axi DMA
- * register.
- */
-static inline void axienet_dma_out32(struct axienet_local *lp,
- off_t reg, u32 value)
-{
- iowrite32(value, lp->dma_regs + reg);
-}
-
-static void axienet_dma_out_addr(struct axienet_local *lp, off_t reg,
- dma_addr_t addr)
-{
- axienet_dma_out32(lp, reg, lower_32_bits(addr));
-
- if (lp->features & XAE_FEATURE_DMA_64BIT)
- axienet_dma_out32(lp, reg + 4, upper_32_bits(addr));
-}
-
static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
struct axidma_bd *desc)
{
@@ -2061,6 +2037,11 @@ static int axienet_probe(struct platform_device *pdev)
iowrite32(0x0, desc);
}
}
+ if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
+ dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n");
+ ret = -EINVAL;
+ goto cleanup_clk;
+ }
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
if (ret) {
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 45c3c4a1101b..9fb567524220 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -99,6 +99,7 @@ struct sixpack {
unsigned int rx_count;
unsigned int rx_count_cooked;
+ spinlock_t rxlock;
int mtu; /* Our mtu (to spot changes!) */
int buffsize; /* Max buffers sizes */
@@ -565,6 +566,7 @@ static int sixpack_open(struct tty_struct *tty)
sp->dev = dev;
spin_lock_init(&sp->lock);
+ spin_lock_init(&sp->rxlock);
refcount_set(&sp->refcnt, 1);
init_completion(&sp->dead);
@@ -913,6 +915,7 @@ static void decode_std_command(struct sixpack *sp, unsigned char cmd)
sp->led_state = 0x60;
/* fill trailing bytes with zeroes */
sp->tty->ops->write(sp->tty, &sp->led_state, 1);
+ spin_lock_bh(&sp->rxlock);
rest = sp->rx_count;
if (rest != 0)
for (i = rest; i <= 3; i++)
@@ -930,6 +933,7 @@ static void decode_std_command(struct sixpack *sp, unsigned char cmd)
sp_bump(sp, 0);
}
sp->rx_count_cooked = 0;
+ spin_unlock_bh(&sp->rxlock);
}
break;
case SIXP_TX_URUN: printk(KERN_DEBUG "6pack: TX underrun\n");
@@ -959,8 +963,11 @@ sixpack_decode(struct sixpack *sp, const unsigned char *pre_rbuff, int count)
decode_prio_command(sp, inbyte);
else if ((inbyte & SIXP_STD_CMD_MASK) != 0)
decode_std_command(sp, inbyte);
- else if ((sp->status & SIXP_RX_DCD_MASK) == SIXP_RX_DCD_MASK)
+ else if ((sp->status & SIXP_RX_DCD_MASK) == SIXP_RX_DCD_MASK) {
+ spin_lock_bh(&sp->rxlock);
decode_data(sp, inbyte);
+ spin_unlock_bh(&sp->rxlock);
+ }
}
}
diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia_main.c
index a8db1a19011b..c7047f5d7a9b 100644
--- a/drivers/net/phy/aquantia_main.c
+++ b/drivers/net/phy/aquantia_main.c
@@ -34,6 +34,8 @@
#define MDIO_AN_VEND_PROV 0xc400
#define MDIO_AN_VEND_PROV_1000BASET_FULL BIT(15)
#define MDIO_AN_VEND_PROV_1000BASET_HALF BIT(14)
+#define MDIO_AN_VEND_PROV_5000BASET_FULL BIT(11)
+#define MDIO_AN_VEND_PROV_2500BASET_FULL BIT(10)
#define MDIO_AN_VEND_PROV_DOWNSHIFT_EN BIT(4)
#define MDIO_AN_VEND_PROV_DOWNSHIFT_MASK GENMASK(3, 0)
#define MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT 4
@@ -231,9 +233,20 @@ static int aqr_config_aneg(struct phy_device *phydev)
phydev->advertising))
reg |= MDIO_AN_VEND_PROV_1000BASET_HALF;
+ /* Handle the case when the 2.5G and 5G speeds are not advertised */
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ phydev->advertising))
+ reg |= MDIO_AN_VEND_PROV_2500BASET_FULL;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+ phydev->advertising))
+ reg |= MDIO_AN_VEND_PROV_5000BASET_FULL;
+
ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_VEND_PROV,
MDIO_AN_VEND_PROV_1000BASET_HALF |
- MDIO_AN_VEND_PROV_1000BASET_FULL, reg);
+ MDIO_AN_VEND_PROV_1000BASET_FULL |
+ MDIO_AN_VEND_PROV_2500BASET_FULL |
+ MDIO_AN_VEND_PROV_5000BASET_FULL, reg);
if (ret < 0)
return ret;
if (ret > 0)
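[Editor's note] The aquantia hunk above maps ethtool linkmode bits into a vendor autonegotiation register and then writes only the owned bits with phy_modify_mmd_changed(), which returns >0 when the register actually changed, so autonegotiation is restarted only when needed. A condensed sketch; the register offset and bit positions mirror the defines above but are renamed here:

#include <linux/bits.h>
#include <linux/mdio.h>
#include <linux/phy.h>

#define AN_VEND_PROV			0xc400
#define AN_VEND_PROV_2500BASET_FULL	BIT(10)
#define AN_VEND_PROV_5000BASET_FULL	BIT(11)

static int nbaset_config_aneg(struct phy_device *phydev)
{
	u16 reg = 0;

	if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
			      phydev->advertising))
		reg |= AN_VEND_PROV_2500BASET_FULL;
	if (linkmode_test_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
			      phydev->advertising))
		reg |= AN_VEND_PROV_5000BASET_FULL;

	/* Touch only the advertisement bits; others keep their value. */
	return phy_modify_mmd_changed(phydev, MDIO_MMD_AN, AN_VEND_PROV,
				      AN_VEND_PROV_2500BASET_FULL |
				      AN_VEND_PROV_5000BASET_FULL, reg);
}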
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 6a467e7817a6..59fe356942b5 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -2072,6 +2072,8 @@ static struct phy_driver at803x_driver[] = {
/* ATHEROS AR9331 */
PHY_ID_MATCH_EXACT(ATH9331_PHY_ID),
.name = "Qualcomm Atheros AR9331 built-in PHY",
+ .probe = at803x_probe,
+ .remove = at803x_remove,
.suspend = at803x_suspend,
.resume = at803x_resume,
.flags = PHY_POLL_CABLE_TEST,
@@ -2087,6 +2089,8 @@ static struct phy_driver at803x_driver[] = {
/* Qualcomm Atheros QCA9561 */
PHY_ID_MATCH_EXACT(QCA9561_PHY_ID),
.name = "Qualcomm Atheros QCA9561 built-in PHY",
+ .probe = at803x_probe,
+ .remove = at803x_remove,
.suspend = at803x_suspend,
.resume = at803x_resume,
.flags = PHY_POLL_CABLE_TEST,
@@ -2151,6 +2155,8 @@ static struct phy_driver at803x_driver[] = {
PHY_ID_MATCH_EXACT(QCA8081_PHY_ID),
.name = "Qualcomm QCA8081",
.flags = PHY_POLL_CABLE_TEST,
+ .probe = at803x_probe,
+ .remove = at803x_remove,
.config_intr = at803x_config_intr,
.handle_interrupt = at803x_handle_interrupt,
.get_tunable = at803x_get_tunable,
diff --git a/drivers/net/phy/ax88796b.c b/drivers/net/phy/ax88796b.c
index 457896337505..0f1e617a26c9 100644
--- a/drivers/net/phy/ax88796b.c
+++ b/drivers/net/phy/ax88796b.c
@@ -88,8 +88,10 @@ static void asix_ax88772a_link_change_notify(struct phy_device *phydev)
/* Reset PHY, otherwise MII_LPA will provide outdated information.
* This issue is reproducible only with some link partner PHYs
*/
- if (phydev->state == PHY_NOLINK && phydev->drv->soft_reset)
- phydev->drv->soft_reset(phydev);
+ if (phydev->state == PHY_NOLINK) {
+ phy_init_hw(phydev);
+ phy_start_aneg(phydev);
+ }
}
static struct phy_driver asix_driver[] = {
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index e6ad3a494d32..8549e0e356c9 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -229,9 +229,7 @@ static int dp83822_config_intr(struct phy_device *phydev)
if (misr_status < 0)
return misr_status;
- misr_status |= (DP83822_RX_ERR_HF_INT_EN |
- DP83822_FALSE_CARRIER_HF_INT_EN |
- DP83822_LINK_STAT_INT_EN |
+ misr_status |= (DP83822_LINK_STAT_INT_EN |
DP83822_ENERGY_DET_INT_EN |
DP83822_LINK_QUAL_INT_EN);
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 8561f2d4443b..13dafe7a29bd 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -137,6 +137,7 @@
#define DP83867_DOWNSHIFT_2_COUNT 2
#define DP83867_DOWNSHIFT_4_COUNT 4
#define DP83867_DOWNSHIFT_8_COUNT 8
+#define DP83867_SGMII_AUTONEG_EN BIT(7)
/* CFG3 bits */
#define DP83867_CFG3_INT_OE BIT(7)
@@ -855,6 +856,32 @@ static int dp83867_phy_reset(struct phy_device *phydev)
DP83867_PHYCR_FORCE_LINK_GOOD, 0);
}
+static void dp83867_link_change_notify(struct phy_device *phydev)
+{
+ /* The DP83867 PHY has a limitation where SGMII AN is only
+ * triggered once after the device boots up. Even after the
+ * PHY TPI goes down and up again, SGMII AN is not retriggered,
+ * so no new in-band message reaches the MAC-side SGMII.
+ * This can cause an issue during power up, when the PHY is up
+ * prior to the MAC. In that state, once the MAC-side SGMII is
+ * up, it would not receive a new in-band message from the TI
+ * PHY with the correct link status, speed and duplex info.
+ * As a SW workaround, retrigger SGMII auto-negotiation here
+ * whenever there is a link change.
+ */
+ if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
+ int val = 0;
+
+ val = phy_clear_bits(phydev, DP83867_CFG2,
+ DP83867_SGMII_AUTONEG_EN);
+ if (val < 0)
+ return;
+
+ phy_set_bits(phydev, DP83867_CFG2,
+ DP83867_SGMII_AUTONEG_EN);
+ }
+}
+
static struct phy_driver dp83867_driver[] = {
{
.phy_id = DP83867_PHY_ID,
@@ -879,6 +906,8 @@ static struct phy_driver dp83867_driver[] = {
.suspend = genphy_suspend,
.resume = genphy_resume,
+
+ .link_change_notify = dp83867_link_change_notify,
},
};
module_phy_driver(dp83867_driver);
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 58d602985877..8a2dbe849866 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -1046,7 +1046,6 @@ int __init mdio_bus_init(void)
return ret;
}
-EXPORT_SYMBOL_GPL(mdio_bus_init);
#if IS_ENABLED(CONFIG_PHYLIB)
void mdio_bus_exit(void)
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index ef62f357b76d..8d3ee3a6495b 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -31,6 +31,7 @@
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>
+#include <linux/suspend.h>
#include <net/netlink.h>
#include <net/genetlink.h>
#include <net/sock.h>
@@ -976,6 +977,28 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
struct phy_driver *drv = phydev->drv;
irqreturn_t ret;
+ /* Wakeup interrupts may occur during a system sleep transition.
+ * Postpone handling until the PHY has resumed.
+ */
+ if (IS_ENABLED(CONFIG_PM_SLEEP) && phydev->irq_suspended) {
+ struct net_device *netdev = phydev->attached_dev;
+
+ if (netdev) {
+ struct device *parent = netdev->dev.parent;
+
+ if (netdev->wol_enabled)
+ pm_system_wakeup();
+ else if (device_may_wakeup(&netdev->dev))
+ pm_wakeup_dev_event(&netdev->dev, 0, true);
+ else if (parent && device_may_wakeup(parent))
+ pm_wakeup_dev_event(parent, 0, true);
+ }
+
+ phydev->irq_rerun = 1;
+ disable_irq_nosync(irq);
+ return IRQ_HANDLED;
+ }
+
mutex_lock(&phydev->lock);
ret = drv->handle_interrupt(phydev);
mutex_unlock(&phydev->lock);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 431a8719c635..46acddd865a7 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -278,6 +278,15 @@ static __maybe_unused int mdio_bus_phy_suspend(struct device *dev)
if (phydev->mac_managed_pm)
return 0;
+ /* Wakeup interrupts may occur during the system sleep transition when
+ * the PHY is inaccessible. Set flag to postpone handling until the PHY
+ * has resumed. Wait for concurrent interrupt handler to complete.
+ */
+ if (phy_interrupt_is_valid(phydev)) {
+ phydev->irq_suspended = 1;
+ synchronize_irq(phydev->irq);
+ }
+
/* We must stop the state machine manually, otherwise it stops out of
* control, possibly with the phydev->lock held. Upon resume, netdev
* may call phy routines that try to grab the same lock, and that may
@@ -315,6 +324,20 @@ static __maybe_unused int mdio_bus_phy_resume(struct device *dev)
if (ret < 0)
return ret;
no_resume:
+ if (phy_interrupt_is_valid(phydev)) {
+ phydev->irq_suspended = 0;
+ synchronize_irq(phydev->irq);
+
+ /* Rerun interrupts which were postponed by phy_interrupt()
+ * because they occurred during the system sleep transition.
+ */
+ if (phydev->irq_rerun) {
+ phydev->irq_rerun = 0;
+ enable_irq(phydev->irq);
+ irq_wake_thread(phydev->irq, phydev);
+ }
+ }
+
if (phydev->attached_dev && phydev->adjust_link)
phy_start_machine(phydev);
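
The two phy_device.c hunks above pair with the phy_interrupt() change: suspend raises irq_suspended and waits out any running handler with synchronize_irq(); the handler then defers by masking the line and setting irq_rerun; resume clears the flag, unmasks the line and kicks the threaded handler. Below is a minimal sketch of the same pattern for a hypothetical driver (struct my_dev and the my_* functions are made up for illustration; disable_irq_nosync(), enable_irq(), synchronize_irq() and irq_wake_thread() are the real kernel APIs used in the patch):

	#include <linux/interrupt.h>

	struct my_dev {
		int irq;
		bool irq_suspended;	/* device inaccessible across sleep */
		bool irq_rerun;		/* an IRQ arrived while suspended */
	};

	static irqreturn_t my_irq_thread(int irq, void *data)
	{
		struct my_dev *md = data;

		if (md->irq_suspended) {
			/* Too early: remember the event and mask the line. */
			md->irq_rerun = true;
			disable_irq_nosync(irq);
			return IRQ_HANDLED;
		}
		/* ... normal handling, device is accessible here ... */
		return IRQ_HANDLED;
	}

	static void my_suspend(struct my_dev *md)
	{
		md->irq_suspended = true;
		synchronize_irq(md->irq);	/* wait out a running handler */
	}

	static void my_resume(struct my_dev *md)
	{
		md->irq_suspended = false;
		synchronize_irq(md->irq);
		if (md->irq_rerun) {
			md->irq_rerun = false;
			enable_irq(md->irq);
			/* rerun the postponed threaded handler */
			irq_wake_thread(md->irq, md);
		}
	}
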
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 9a5d5a10560f..e7b0e12cc75b 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -2516,7 +2516,7 @@ static int sfp_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, sfp);
- err = devm_add_action(sfp->dev, sfp_cleanup, sfp);
+ err = devm_add_action_or_reset(sfp->dev, sfp_cleanup, sfp);
if (err < 0)
return err;
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 1b54684b68a0..96d3c40932d8 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -110,7 +110,7 @@ static int smsc_phy_config_init(struct phy_device *phydev)
struct smsc_phy_priv *priv = phydev->priv;
int rc;
- if (!priv->energy_enable)
+ if (!priv->energy_enable || phydev->irq != PHY_POLL)
return 0;
rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
@@ -210,6 +210,8 @@ static int lan95xx_config_aneg_ext(struct phy_device *phydev)
* response on link pulses to detect presence of plugged Ethernet cable.
* The Energy Detect Power-Down mode is enabled again in the end of procedure to
* save approximately 220 mW of power if cable is unplugged.
+ * The workaround is only applicable to poll mode. Energy Detect Power-Down may
+ * not be used in interrupt mode, otherwise link change detection becomes
+ * unreliable.
*/
static int lan87xx_read_status(struct phy_device *phydev)
{
@@ -217,7 +219,7 @@ static int lan87xx_read_status(struct phy_device *phydev)
int err = genphy_read_status(phydev);
- if (!phydev->link && priv->energy_enable) {
+ if (!phydev->link && priv->energy_enable && phydev->irq == PHY_POLL) {
/* Disable EDPD to wake up PHY */
int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
if (rc < 0)
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 87a635aac008..259b2b84b2b3 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -273,6 +273,12 @@ static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
}
}
+static void tun_napi_enable(struct tun_file *tfile)
+{
+ if (tfile->napi_enabled)
+ napi_enable(&tfile->napi);
+}
+
static void tun_napi_disable(struct tun_file *tfile)
{
if (tfile->napi_enabled)
@@ -634,7 +640,8 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
tun = rtnl_dereference(tfile->tun);
if (tun && clean) {
- tun_napi_disable(tfile);
+ if (!tfile->detached)
+ tun_napi_disable(tfile);
tun_napi_del(tfile);
}
@@ -653,8 +660,10 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
if (clean) {
RCU_INIT_POINTER(tfile->tun, NULL);
sock_put(&tfile->sk);
- } else
+ } else {
tun_disable_queue(tun, tfile);
+ tun_napi_disable(tfile);
+ }
synchronize_net();
tun_flow_delete_by_queue(tun, tun->numqueues + 1);
@@ -727,6 +736,7 @@ static void tun_detach_all(struct net_device *dev)
sock_put(&tfile->sk);
}
list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
+ tun_napi_del(tfile);
tun_enable_queue(tfile);
tun_queue_purge(tfile);
xdp_rxq_info_unreg(&tfile->xdp_rxq);
@@ -807,6 +817,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
if (tfile->detached) {
tun_enable_queue(tfile);
+ tun_napi_enable(tfile);
} else {
sock_hold(&tfile->sk);
tun_napi_init(tun, tfile, napi, napi_frags);
diff --git a/drivers/net/usb/asix.h b/drivers/net/usb/asix.h
index 2c81236c6c7c..45d3cc5cc355 100644
--- a/drivers/net/usb/asix.h
+++ b/drivers/net/usb/asix.h
@@ -126,8 +126,7 @@
AX_MEDIUM_RE)
#define AX88772_MEDIUM_DEFAULT \
- (AX_MEDIUM_FD | AX_MEDIUM_RFC | \
- AX_MEDIUM_TFC | AX_MEDIUM_PS | \
+ (AX_MEDIUM_FD | AX_MEDIUM_PS | \
AX_MEDIUM_AC | AX_MEDIUM_RE)
/* AX88772 & AX88178 RX_CTL values */
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index 632fa6c1d5e3..b4a1b7abcfc9 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -431,6 +431,7 @@ void asix_adjust_link(struct net_device *netdev)
asix_write_medium_mode(dev, mode, 0);
phy_print_status(phydev);
+ usbnet_link_change(dev, phydev->link, 0);
}
int asix_write_gpio(struct usbnet *dev, u16 value, int sleep, int in_pm)
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 7a8c11a26eb5..ac2d400d1d6c 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -1472,6 +1472,42 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
* are bundled into this buffer and where we can find an array of
* per-packet metadata (which contains elements encoded into u16).
*/
+
+ /* SKB contents for current firmware:
+ * <packet 1> <padding>
+ * ...
+ * <packet N> <padding>
+ * <per-packet metadata entry 1> <dummy header>
+ * ...
+ * <per-packet metadata entry N> <dummy header>
+ * <padding2> <rx_hdr>
+ *
+ * where:
+ * <packet N> contains pkt_len bytes:
+ * 2 bytes of IP alignment pseudo header
+ * packet received
+ * <per-packet metadata entry N> contains 4 bytes:
+ * pkt_len and fields AX_RXHDR_*
+ * <padding> 0-7 bytes to terminate at an
+ * 8-byte boundary (64-bit).
+ * <padding2> 4 bytes to make rx_hdr terminate at an
+ * 8-byte boundary (64-bit)
+ * <dummy header> contains 4 bytes:
+ * pkt_len=0 and AX_RXHDR_DROP_ERR
+ * <rx_hdr> contains 4 bytes:
+ * pkt_cnt and hdr_off (offset of
+ * <per-packet metadata entry 1>)
+ *
+ * pkt_cnt is the number of entries in the per-packet metadata.
+ * In current firmware there are 2 entries per packet.
+ * The first points to the packet and the
+ * second is a dummy header.
+ * This was probably done to align fields to 64 bits and
+ * maintain compatibility with old firmware.
+ * This code assumes that <dummy header> and <padding2> are
+ * optional.
+ */
+
if (skb->len < 4)
return 0;
skb_trim(skb, skb->len - 4);
@@ -1485,51 +1521,66 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
/* Make sure that the bounds of the metadata array are inside the SKB
* (and in front of the counter at the end).
*/
- if (pkt_cnt * 2 + hdr_off > skb->len)
+ if (pkt_cnt * 4 + hdr_off > skb->len)
return 0;
pkt_hdr = (u32 *)(skb->data + hdr_off);
/* Packets must not overlap the metadata array */
skb_trim(skb, hdr_off);
- for (; ; pkt_cnt--, pkt_hdr++) {
+ for (; pkt_cnt > 0; pkt_cnt--, pkt_hdr++) {
+ u16 pkt_len_plus_padd;
u16 pkt_len;
le32_to_cpus(pkt_hdr);
pkt_len = (*pkt_hdr >> 16) & 0x1fff;
+ pkt_len_plus_padd = (pkt_len + 7) & 0xfff8;
- if (pkt_len > skb->len)
+ /* Skip dummy header used for alignment
+ */
+ if (pkt_len == 0)
+ continue;
+
+ if (pkt_len_plus_padd > skb->len)
return 0;
/* Check CRC or runt packet */
- if (((*pkt_hdr & (AX_RXHDR_CRC_ERR | AX_RXHDR_DROP_ERR)) == 0) &&
- pkt_len >= 2 + ETH_HLEN) {
- bool last = (pkt_cnt == 0);
-
- if (last) {
- ax_skb = skb;
- } else {
- ax_skb = skb_clone(skb, GFP_ATOMIC);
- if (!ax_skb)
- return 0;
- }
- ax_skb->len = pkt_len;
- /* Skip IP alignment pseudo header */
- skb_pull(ax_skb, 2);
- skb_set_tail_pointer(ax_skb, ax_skb->len);
- ax_skb->truesize = pkt_len + sizeof(struct sk_buff);
- ax88179_rx_checksum(ax_skb, pkt_hdr);
+ if ((*pkt_hdr & (AX_RXHDR_CRC_ERR | AX_RXHDR_DROP_ERR)) ||
+ pkt_len < 2 + ETH_HLEN) {
+ dev->net->stats.rx_errors++;
+ skb_pull(skb, pkt_len_plus_padd);
+ continue;
+ }
- if (last)
- return 1;
+ /* last packet */
+ if (pkt_len_plus_padd == skb->len) {
+ skb_trim(skb, pkt_len);
- usbnet_skb_return(dev, ax_skb);
+ /* Skip IP alignment pseudo header */
+ skb_pull(skb, 2);
+
+ skb->truesize = SKB_TRUESIZE(pkt_len_plus_padd);
+ ax88179_rx_checksum(skb, pkt_hdr);
+ return 1;
}
- /* Trim this packet away from the SKB */
- if (!skb_pull(skb, (pkt_len + 7) & 0xFFF8))
+ ax_skb = skb_clone(skb, GFP_ATOMIC);
+ if (!ax_skb)
return 0;
+ skb_trim(ax_skb, pkt_len);
+
+ /* Skip IP alignment pseudo header */
+ skb_pull(ax_skb, 2);
+
+ skb->truesize = pkt_len_plus_padd +
+ SKB_DATA_ALIGN(sizeof(struct sk_buff));
+ ax88179_rx_checksum(ax_skb, pkt_hdr);
+ usbnet_skb_return(dev, ax_skb);
+
+ skb_pull(skb, pkt_len_plus_padd);
}
+
+ return 0;
}
static struct sk_buff *
@@ -1750,7 +1801,7 @@ static const struct driver_info ax88179_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1763,7 +1814,7 @@ static const struct driver_info ax88178a_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1776,7 +1827,7 @@ static const struct driver_info cypress_GX3_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1789,7 +1840,7 @@ static const struct driver_info dlink_dub1312_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1802,7 +1853,7 @@ static const struct driver_info sitecom_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1815,7 +1866,7 @@ static const struct driver_info samsung_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1828,7 +1879,7 @@ static const struct driver_info lenovo_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1841,7 +1892,7 @@ static const struct driver_info belkin_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1854,7 +1905,7 @@ static const struct driver_info toshiba_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1867,7 +1918,7 @@ static const struct driver_info mct_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1880,7 +1931,7 @@ static const struct driver_info at_umc2000_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1893,7 +1944,7 @@ static const struct driver_info at_umc200_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@@ -1906,7 +1957,7 @@ static const struct driver_info at_umc2000sp_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
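
A note on the arithmetic in the rx_fixup rewrite above: each frame in the URB is advanced over by pkt_len rounded up to the next 8-byte boundary, pkt_len_plus_padd = (pkt_len + 7) & 0xfff8. The following standalone C program (the example lengths are made up) demonstrates the rounding; the bounds test pkt_len_plus_padd > skb->len then guarantees each skb_pull() stays inside the buffer:

	#include <stdint.h>
	#include <stdio.h>

	static uint16_t pad8(uint16_t pkt_len)
	{
		return (pkt_len + 7) & 0xfff8;	/* round up to 8-byte boundary */
	}

	int main(void)
	{
		uint16_t lens[] = { 0, 1, 60, 64, 1513 };	/* example values */
		size_t i;

		for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
			printf("pkt_len=%4u -> padded=%4u\n", lens[i], pad8(lens[i]));
		return 0;
	}
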
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index e7fe9c0f63a9..1a376ed45d7a 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -781,7 +781,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
intf->altsetting->desc.bInterfaceNumber, 1)) {
dev_err(dev, "Can't set altsetting 1.\n");
ret = -EIO;
- goto fail_mem;;
+ goto fail_mem;
}
netdev = alloc_etherdev(sizeof(struct catc));
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 7389d6ef8569..0f6efaabaa32 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -32,7 +32,7 @@
#define NETNEXT_VERSION "12"
/* Information for net */
-#define NET_VERSION "12"
+#define NET_VERSION "13"
#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -2156,7 +2156,7 @@ static inline void rtl_rx_vlan_tag(struct rx_desc *desc, struct sk_buff *skb)
}
static int r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc,
- struct sk_buff *skb, u32 len, u32 transport_offset)
+ struct sk_buff *skb, u32 len)
{
u32 mss = skb_shinfo(skb)->gso_size;
u32 opts1, opts2 = 0;
@@ -2167,6 +2167,8 @@ static int r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc,
opts1 = len | TX_FS | TX_LS;
if (mss) {
+ u32 transport_offset = (u32)skb_transport_offset(skb);
+
if (transport_offset > GTTCPHO_MAX) {
netif_warn(tp, tx_err, tp->netdev,
"Invalid transport offset 0x%x for TSO\n",
@@ -2197,6 +2199,7 @@ static int r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc,
opts1 |= transport_offset << GTTCPHO_SHIFT;
opts2 |= min(mss, MSS_MAX) << MSS_SHIFT;
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ u32 transport_offset = (u32)skb_transport_offset(skb);
u8 ip_protocol;
if (transport_offset > TCPHO_MAX) {
@@ -2260,7 +2263,6 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
struct tx_desc *tx_desc;
struct sk_buff *skb;
unsigned int len;
- u32 offset;
skb = __skb_dequeue(&skb_head);
if (!skb)
@@ -2276,9 +2278,7 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
tx_data = tx_agg_align(tx_data);
tx_desc = (struct tx_desc *)tx_data;
- offset = (u32)skb_transport_offset(skb);
-
- if (r8152_tx_csum(tp, tx_desc, skb, skb->len, offset)) {
+ if (r8152_tx_csum(tp, tx_desc, skb, skb->len)) {
r8152_csum_workaround(tp, skb, &skb_head);
continue;
}
@@ -2759,9 +2759,9 @@ rtl8152_features_check(struct sk_buff *skb, struct net_device *dev,
{
u32 mss = skb_shinfo(skb)->gso_size;
int max_offset = mss ? GTTCPHO_MAX : TCPHO_MAX;
- int offset = skb_transport_offset(skb);
- if ((mss || skb->ip_summed == CHECKSUM_PARTIAL) && offset > max_offset)
+ if ((mss || skb->ip_summed == CHECKSUM_PARTIAL) &&
+ skb_transport_offset(skb) > max_offset)
features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
else if ((skb->len + sizeof(struct tx_desc)) > agg_buf_sz)
features &= ~NETIF_F_GSO_MASK;
@@ -5917,7 +5917,8 @@ static void r8153_enter_oob(struct r8152 *tp)
wait_oob_link_list_ready(tp);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, mtu_to_size(tp->netdev->mtu));
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, 1522);
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_DEFAULT);
switch (tp->version) {
case RTL_VER_03:
@@ -5953,6 +5954,10 @@ static void r8153_enter_oob(struct r8152 *tp)
ocp_data |= NOW_IS_OOB | DIS_MCU_CLROOB;
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7);
+ ocp_data |= MCU_BORW_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
+
rxdy_gated_en(tp, false);
ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
@@ -6555,6 +6560,9 @@ static void rtl8156_down(struct r8152 *tp)
rtl_disable(tp);
rtl_reset_bmu(tp);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, 1522);
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_DEFAULT);
+
/* Clear teredo wake event. bit[15:8] is the teredo wakeup
* type. Set it to zero. bits[7:0] are the W1C bits about
* the events. Set them to all 1 to clear them.
@@ -6565,6 +6573,10 @@ static void rtl8156_down(struct r8152 *tp)
ocp_data |= NOW_IS_OOB;
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7);
+ ocp_data |= MCU_BORW_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
+
rtl_rx_vlan_en(tp, true);
rxdy_gated_en(tp, false);
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 1cb6dab3e2d0..78a92751ce4c 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -2004,7 +2004,7 @@ static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
cmd, reqtype, value, index, size);
if (size) {
- buf = kmalloc(size, GFP_KERNEL);
+ buf = kmalloc(size, GFP_NOIO);
if (!buf)
goto out;
}
@@ -2036,7 +2036,7 @@ static int __usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
cmd, reqtype, value, index, size);
if (data) {
- buf = kmemdup(data, size, GFP_KERNEL);
+ buf = kmemdup(data, size, GFP_NOIO);
if (!buf)
goto out;
} else {
@@ -2137,7 +2137,7 @@ static void usbnet_async_cmd_cb(struct urb *urb)
int usbnet_write_cmd_async(struct usbnet *dev, u8 cmd, u8 reqtype,
u16 value, u16 index, const void *data, u16 size)
{
- struct usb_ctrlrequest *req = NULL;
+ struct usb_ctrlrequest *req;
struct urb *urb;
int err = -ENOMEM;
void *buf = NULL;
@@ -2155,7 +2155,7 @@ int usbnet_write_cmd_async(struct usbnet *dev, u8 cmd, u8 reqtype,
if (!buf) {
netdev_err(dev->net, "Error allocating buffer"
" in %s!\n", __func__);
- goto fail_free;
+ goto fail_free_urb;
}
}
@@ -2179,14 +2179,21 @@ int usbnet_write_cmd_async(struct usbnet *dev, u8 cmd, u8 reqtype,
if (err < 0) {
netdev_err(dev->net, "Error submitting the control"
" message: status=%d\n", err);
- goto fail_free;
+ goto fail_free_all;
}
return 0;
+fail_free_all:
+ kfree(req);
fail_free_buf:
kfree(buf);
-fail_free:
- kfree(req);
+ /*
+ * Avoid a double free: the flag can be set only
+ * after filling the URB, so it must be cleared
+ * before the URB is freed.
+ */
+ urb->transfer_flags = 0;
+fail_free_urb:
usb_free_urb(urb);
fail:
return err;
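
The relabelled error path above unwinds in reverse allocation order and clears transfer_flags so usb_free_urb() cannot free the buffer a second time. A userspace sketch of the same reverse-order goto ladder, with made-up resource names standing in for req, buf and urb:

	#include <stdio.h>
	#include <stdlib.h>

	static int my_setup(int fail_submit)
	{
		char *urb, *buf, *req;
		int err = -1;

		urb = malloc(16);
		if (!urb)
			return err;
		buf = malloc(16);
		if (!buf)
			goto fail_free_urb;
		req = malloc(16);
		if (!req)
			goto fail_free_buf;

		if (fail_submit)		/* simulate a failed submission */
			goto fail_free_all;

		/* Success: ownership of urb/buf/req is handed off here. */
		return 0;

	fail_free_all:
		free(req);
	fail_free_buf:
		free(buf);
	fail_free_urb:
		free(urb);
		return err;
	}

	int main(void)
	{
		printf("my_setup(1) = %d\n", my_setup(1));
		return 0;
	}
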
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 466da01ba2e3..2cb833b3006a 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -312,6 +312,7 @@ static bool veth_skb_is_eligible_for_gro(const struct net_device *dev,
static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
+ struct netdev_queue *queue = NULL;
struct veth_rq *rq = NULL;
struct net_device *rcv;
int length = skb->len;
@@ -329,6 +330,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
rxq = skb_get_queue_mapping(skb);
if (rxq < rcv->real_num_rx_queues) {
rq = &rcv_priv->rq[rxq];
+ queue = netdev_get_tx_queue(dev, rxq);
/* The napi pointer is available when an XDP program is
* attached or when GRO is enabled
@@ -340,6 +342,8 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
skb_tx_timestamp(skb);
if (likely(veth_forward_skb(rcv, skb, rq, use_napi) == NET_RX_SUCCESS)) {
+ if (queue)
+ txq_trans_cond_update(queue);
if (!use_napi)
dev_lstats_add(dev, length);
} else {
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index db05b5e930be..356cf8dd4164 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2768,7 +2768,6 @@ static const struct ethtool_ops virtnet_ethtool_ops = {
static void virtnet_freeze_down(struct virtio_device *vdev)
{
struct virtnet_info *vi = vdev->priv;
- int i;
/* Make sure no work handler is accessing the device */
flush_work(&vi->config_work);
@@ -2776,14 +2775,8 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
netif_tx_lock_bh(vi->dev);
netif_device_detach(vi->dev);
netif_tx_unlock_bh(vi->dev);
- cancel_delayed_work_sync(&vi->refill);
-
- if (netif_running(vi->dev)) {
- for (i = 0; i < vi->max_queue_pairs; i++) {
- napi_disable(&vi->rq[i].napi);
- virtnet_napi_tx_disable(&vi->sq[i].napi);
- }
- }
+ if (netif_running(vi->dev))
+ virtnet_close(vi->dev);
}
static int init_vqs(struct virtnet_info *vi);
@@ -2791,7 +2784,7 @@ static int init_vqs(struct virtnet_info *vi);
static int virtnet_restore_up(struct virtio_device *vdev)
{
struct virtnet_info *vi = vdev->priv;
- int err, i;
+ int err;
err = init_vqs(vi);
if (err)
@@ -2800,15 +2793,9 @@ static int virtnet_restore_up(struct virtio_device *vdev)
virtio_device_ready(vdev);
if (netif_running(vi->dev)) {
- for (i = 0; i < vi->curr_queue_pairs; i++)
- if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
- schedule_delayed_work(&vi->refill, 0);
-
- for (i = 0; i < vi->max_queue_pairs; i++) {
- virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
- virtnet_napi_tx_enable(vi, vi->sq[i].vq,
- &vi->sq[i].napi);
- }
+ err = virtnet_open(vi->dev);
+ if (err)
+ return err;
}
netif_tx_lock_bh(vi->dev);
@@ -3655,14 +3642,20 @@ static int virtnet_probe(struct virtio_device *vdev)
if (vi->has_rss || vi->has_rss_hash_report)
virtnet_init_default_rss(vi);
- err = register_netdev(dev);
+ /* serialize netdev register + virtio_device_ready() with ndo_open() */
+ rtnl_lock();
+
+ err = register_netdevice(dev);
if (err) {
pr_debug("virtio_net: registering device failed\n");
+ rtnl_unlock();
goto free_failover;
}
virtio_device_ready(vdev);
+ rtnl_unlock();
+
err = virtnet_cpu_notif_add(vi);
if (err) {
pr_debug("virtio_net: registering cpu notifier failed\n");
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index 84d1c7054013..7b1dc19c565e 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -3822,7 +3822,8 @@ ath11k_wmi_obss_color_collision_event(struct ath11k_base *ab, struct sk_buff *sk
switch (ev->evt_type) {
case WMI_BSS_COLOR_COLLISION_DETECTION:
- ieee80211_obss_color_collision_notify(arvif->vif, ev->obss_color_bitmap);
+ ieee80211_obss_color_collision_notify(arvif->vif, ev->obss_color_bitmap,
+ GFP_KERNEL);
ath11k_dbg(ab, ATH11K_DBG_WMI,
"OBSS color collision detected vdev:%d, event:%d, bitmap:%08llx\n",
ev->vdev_id, ev->evt_type, ev->obss_color_bitmap);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 2f746eb64507..6f83af849f2e 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -4912,6 +4912,8 @@ static int hwsim_virtio_probe(struct virtio_device *vdev)
if (err)
return err;
+ virtio_device_ready(vdev);
+
err = fill_vq(hwsim_vqs[HWSIM_VQ_RX]);
if (err)
goto out_remove;
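
The hwsim fix above moves virtio_device_ready() ahead of fill_vq(), since buffers must not be posted to a virtqueue before the device is marked ready. A hedged sketch of that probe ordering for a hypothetical virtio driver (my_setup_vqs() is an assumed helper; virtio_device_ready(), virtqueue_add_inbuf(), virtqueue_kick() and virtio_reset_device() are real virtio APIs):

	#include <linux/slab.h>
	#include <linux/virtio.h>
	#include <linux/virtio_config.h>
	#include <linux/scatterlist.h>

	/* Hypothetical helper: find_vqs() wrapper returning the RX queue. */
	static int my_setup_vqs(struct virtio_device *vdev, struct virtqueue **rx_vq);

	static int my_virtio_probe(struct virtio_device *vdev)
	{
		struct virtqueue *rx_vq;
		struct scatterlist sg;
		void *buf;
		int err;

		err = my_setup_vqs(vdev, &rx_vq);
		if (err)
			return err;

		/* Mark the device ready BEFORE posting buffers to its queues. */
		virtio_device_ready(vdev);

		buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
		if (!buf) {
			err = -ENOMEM;
			goto out_del_vqs;
		}

		sg_init_one(&sg, buf, PAGE_SIZE);
		err = virtqueue_add_inbuf(rx_vq, &sg, 1, buf, GFP_KERNEL);
		if (err < 0) {
			kfree(buf);
			goto out_del_vqs;
		}
		virtqueue_kick(rx_vq);
		return 0;

	out_del_vqs:
		virtio_reset_device(vdev);	/* reset before tearing down vqs */
		vdev->config->del_vqs(vdev);
		return err;
	}
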
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index c02be4ac159e..7db627fc26be 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -1233,9 +1233,6 @@ struct rtw_chip_info {
const struct wiphy_wowlan_support *wowlan_stub;
const u8 max_sched_scan_ssids;
- /* for 8821c set channel */
- u32 ch_param[3];
-
/* coex paras */
u32 coex_para_ver;
u8 bt_desired_ver;
@@ -1937,6 +1934,9 @@ struct rtw_hal {
enum rtw_sar_bands sar_band;
struct rtw_sar sar;
+
+ /* for 8821c set channel */
+ u32 ch_param[3];
};
struct rtw_path_div {
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
index ffee39ea5df6..488a7ddd507c 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
@@ -125,6 +125,7 @@ static void rtw8821c_phy_bf_init(struct rtw_dev *rtwdev)
static void rtw8821c_phy_set_param(struct rtw_dev *rtwdev)
{
+ struct rtw_hal *hal = &rtwdev->hal;
u8 crystal_cap, val;
/* power on BB/RF domain */
@@ -159,9 +160,9 @@ static void rtw8821c_phy_set_param(struct rtw_dev *rtwdev)
/* post init after header files config */
rtw_write32_set(rtwdev, REG_RXPSEL, BIT_RX_PSEL_RST);
- rtwdev->chip->ch_param[0] = rtw_read32_mask(rtwdev, REG_TXSF2, MASKDWORD);
- rtwdev->chip->ch_param[1] = rtw_read32_mask(rtwdev, REG_TXSF6, MASKDWORD);
- rtwdev->chip->ch_param[2] = rtw_read32_mask(rtwdev, REG_TXFILTER, MASKDWORD);
+ hal->ch_param[0] = rtw_read32_mask(rtwdev, REG_TXSF2, MASKDWORD);
+ hal->ch_param[1] = rtw_read32_mask(rtwdev, REG_TXSF6, MASKDWORD);
+ hal->ch_param[2] = rtw_read32_mask(rtwdev, REG_TXFILTER, MASKDWORD);
rtw_phy_init(rtwdev);
rtwdev->dm_info.cck_pd_default = rtw_read8(rtwdev, REG_CSRATIO) & 0x1f;
@@ -351,6 +352,7 @@ static void rtw8821c_set_channel_rxdfir(struct rtw_dev *rtwdev, u8 bw)
static void rtw8821c_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
u8 primary_ch_idx)
{
+ struct rtw_hal *hal = &rtwdev->hal;
u32 val32;
if (channel <= 14) {
@@ -367,11 +369,11 @@ static void rtw8821c_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
rtw_write32_mask(rtwdev, REG_TXFILTER, MASKDWORD, 0x00003667);
} else {
rtw_write32_mask(rtwdev, REG_TXSF2, MASKDWORD,
- rtwdev->chip->ch_param[0]);
+ hal->ch_param[0]);
rtw_write32_mask(rtwdev, REG_TXSF6, MASKLWORD,
- rtwdev->chip->ch_param[1] & MASKLWORD);
+ hal->ch_param[1] & MASKLWORD);
rtw_write32_mask(rtwdev, REG_TXFILTER, MASKDWORD,
- rtwdev->chip->ch_param[2]);
+ hal->ch_param[2]);
}
} else if (channel > 35) {
rtw_write32_mask(rtwdev, REG_ENTXCCK, BIT(18), 0x1);
diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c
index dbac4c03d21a..a0335407be42 100644
--- a/drivers/net/xen-netback/rx.c
+++ b/drivers/net/xen-netback/rx.c
@@ -495,6 +495,7 @@ void xenvif_rx_action(struct xenvif_queue *queue)
queue->rx_copy.completed = &completed_skbs;
while (xenvif_rx_ring_slots_available(queue) &&
+ !skb_queue_empty(&queue->rx_queue) &&
work_done < RX_BATCH_SIZE) {
xenvif_rx_skb(queue);
work_done++;
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 8c0b9546d5a2..2409007f1fd9 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -66,6 +66,10 @@ module_param_named(max_queues, xennet_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
"Maximum number of queues per virtual interface");
+static bool __read_mostly xennet_trusted = true;
+module_param_named(trusted, xennet_trusted, bool, 0644);
+MODULE_PARM_DESC(trusted, "Is the backend trusted");
+
#define XENNET_TIMEOUT (5 * HZ)
static const struct ethtool_ops xennet_ethtool_ops;
@@ -173,6 +177,9 @@ struct netfront_info {
/* Is device behaving sane? */
bool broken;
+ /* Should skbs be bounced into a zeroed buffer? */
+ bool bounce;
+
atomic_t rx_gso_checksum_fixup;
};
@@ -271,7 +278,8 @@ static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
if (unlikely(!skb))
return NULL;
- page = page_pool_dev_alloc_pages(queue->page_pool);
+ page = page_pool_alloc_pages(queue->page_pool,
+ GFP_ATOMIC | __GFP_NOWARN | __GFP_ZERO);
if (unlikely(!page)) {
kfree_skb(skb);
return NULL;
@@ -665,6 +673,33 @@ static int xennet_xdp_xmit(struct net_device *dev, int n,
return nxmit;
}
+struct sk_buff *bounce_skb(const struct sk_buff *skb)
+{
+ unsigned int headerlen = skb_headroom(skb);
+ /* Align size to allocate full pages and avoid contiguous data leaks */
+ unsigned int size = ALIGN(skb_end_offset(skb) + skb->data_len,
+ XEN_PAGE_SIZE);
+ struct sk_buff *n = alloc_skb(size, GFP_ATOMIC | __GFP_ZERO);
+
+ if (!n)
+ return NULL;
+
+ if (!IS_ALIGNED((uintptr_t)n->head, XEN_PAGE_SIZE)) {
+ WARN_ONCE(1, "misaligned skb allocated\n");
+ kfree_skb(n);
+ return NULL;
+ }
+
+ /* Set the data pointer */
+ skb_reserve(n, headerlen);
+ /* Set the tail pointer and length */
+ skb_put(n, skb->len);
+
+ BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));
+
+ skb_copy_header(n, skb);
+ return n;
+}
#define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
@@ -718,9 +753,13 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
/* The first req should be at least ETH_HLEN size or the packet will be
* dropped by netback.
+ *
+ * If the backend is not trusted bounce all data to zeroed pages to
+ * avoid exposing contiguous data on the granted page not belonging to
+ * the skb.
*/
- if (unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
- nskb = skb_copy(skb, GFP_ATOMIC);
+ if (np->bounce || unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
+ nskb = bounce_skb(skb);
if (!nskb)
goto drop;
dev_consume_skb_any(skb);
@@ -1053,8 +1092,10 @@ static int xennet_get_responses(struct netfront_queue *queue,
}
}
rcu_read_unlock();
-next:
+
__skb_queue_tail(list, skb);
+
+next:
if (!(rx->flags & XEN_NETRXF_more_data))
break;
@@ -2214,6 +2255,10 @@ static int talk_to_netback(struct xenbus_device *dev,
info->netdev->irq = 0;
+ /* Check if backend is trusted. */
+ info->bounce = !xennet_trusted ||
+ !xenbus_read_unsigned(dev->nodename, "trusted", 1);
+
/* Check if backend supports multiple queues */
max_queues = xenbus_read_unsigned(info->xbdev->otherend,
"multi-queue-max-queues", 1);
@@ -2381,6 +2426,9 @@ static int xennet_connect(struct net_device *dev)
return err;
if (np->netback_has_xdp_headroom)
pr_info("backend supports XDP headroom\n");
+ if (np->bounce)
+ dev_info(&np->xbdev->dev,
+ "bouncing transmitted data to zeroed pages\n");
/* talk_to_netback() sets the correct number of queues */
num_queues = dev->real_num_tx_queues;
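
bounce_skb() above rounds the copy up to whole Xen pages so a granted page never carries data that does not belong to the skb. ALIGN() is plain round-up-to-a-power-of-two multiple; a standalone demonstration with an assumed 4096-byte XEN_PAGE_SIZE:

	#include <stdio.h>

	#define MY_PAGE_SIZE 4096u			/* assumed XEN_PAGE_SIZE */
	#define MY_ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))	/* a: power of two */

	int main(void)
	{
		unsigned int sizes[] = { 1, 1500, 4096, 4097 };	/* example sizes */
		size_t i;

		for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
			printf("size %5u -> aligned %5u\n",
			       sizes[i], MY_ALIGN(sizes[i], MY_PAGE_SIZE));
		return 0;
	}
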
diff --git a/drivers/nfc/nfcmrvl/i2c.c b/drivers/nfc/nfcmrvl/i2c.c
index ceef81d93ac9..01329b91d59d 100644
--- a/drivers/nfc/nfcmrvl/i2c.c
+++ b/drivers/nfc/nfcmrvl/i2c.c
@@ -167,9 +167,9 @@ static int nfcmrvl_i2c_parse_dt(struct device_node *node,
pdata->irq_polarity = IRQF_TRIGGER_RISING;
ret = irq_of_parse_and_map(node, 0);
- if (ret < 0) {
- pr_err("Unable to get irq, error: %d\n", ret);
- return ret;
+ if (!ret) {
+ pr_err("Unable to get irq\n");
+ return -EINVAL;
}
pdata->irq = ret;
diff --git a/drivers/nfc/nfcmrvl/spi.c b/drivers/nfc/nfcmrvl/spi.c
index a38e2fcdfd39..ad3359a4942c 100644
--- a/drivers/nfc/nfcmrvl/spi.c
+++ b/drivers/nfc/nfcmrvl/spi.c
@@ -115,9 +115,9 @@ static int nfcmrvl_spi_parse_dt(struct device_node *node,
}
ret = irq_of_parse_and_map(node, 0);
- if (ret < 0) {
- pr_err("Unable to get irq, error: %d\n", ret);
- return ret;
+ if (!ret) {
+ pr_err("Unable to get irq\n");
+ return -EINVAL;
}
pdata->irq = ret;
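
Both nfcmrvl fixes above hinge on the same API detail: irq_of_parse_and_map() returns an unsigned virq and signals failure with 0, never a negative errno, so the old ret < 0 tests could not fire. A minimal sketch of the correct check (my_parse_irq() is made up; irq_of_parse_and_map() is the real API):

	#include <linux/errno.h>
	#include <linux/of_irq.h>

	static int my_parse_irq(struct device_node *node, int *irq_out)
	{
		unsigned int virq = irq_of_parse_and_map(node, 0);

		if (!virq)		/* 0 means "no mapping", not a -errno */
			return -EINVAL;

		*irq_out = virq;
		return 0;
	}
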
diff --git a/drivers/nfc/nfcmrvl/usb.c b/drivers/nfc/nfcmrvl/usb.c
index a99aedff795d..ea7309453096 100644
--- a/drivers/nfc/nfcmrvl/usb.c
+++ b/drivers/nfc/nfcmrvl/usb.c
@@ -388,13 +388,25 @@ static void nfcmrvl_play_deferred(struct nfcmrvl_usb_drv_data *drv_data)
int err;
while ((urb = usb_get_from_anchor(&drv_data->deferred))) {
+ usb_anchor_urb(urb, &drv_data->tx_anchor);
+
err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err)
+ if (err) {
+ kfree(urb->setup_packet);
+ usb_unanchor_urb(urb);
+ usb_free_urb(urb);
break;
+ }
drv_data->tx_in_flight++;
+ usb_free_urb(urb);
+ }
+
+ /* Clean up the remaining deferred urbs. */
+ while ((urb = usb_get_from_anchor(&drv_data->deferred))) {
+ kfree(urb->setup_packet);
+ usb_free_urb(urb);
}
- usb_scuttle_anchored_urbs(&drv_data->deferred);
}
static int nfcmrvl_resume(struct usb_interface *intf)
diff --git a/drivers/nfc/nxp-nci/i2c.c b/drivers/nfc/nxp-nci/i2c.c
index 7e451c10985d..ae2ba08d8ac3 100644
--- a/drivers/nfc/nxp-nci/i2c.c
+++ b/drivers/nfc/nxp-nci/i2c.c
@@ -122,7 +122,9 @@ static int nxp_nci_i2c_fw_read(struct nxp_nci_i2c_phy *phy,
skb_put_data(*skb, &header, NXP_NCI_FW_HDR_LEN);
r = i2c_master_recv(client, skb_put(*skb, frame_len), frame_len);
- if (r != frame_len) {
+ if (r < 0) {
+ goto fw_read_exit_free_skb;
+ } else if (r != frame_len) {
nfc_err(&client->dev,
"Invalid frame length: %u (expected %zu)\n",
r, frame_len);
@@ -162,8 +164,13 @@ static int nxp_nci_i2c_nci_read(struct nxp_nci_i2c_phy *phy,
skb_put_data(*skb, (void *)&header, NCI_CTRL_HDR_SIZE);
+ if (!header.plen)
+ return 0;
+
r = i2c_master_recv(client, skb_put(*skb, header.plen), header.plen);
- if (r != header.plen) {
+ if (r < 0) {
+ goto nci_read_exit_free_skb;
+ } else if (r != header.plen) {
nfc_err(&client->dev,
"Invalid frame payload length: %u (expected %u)\n",
r, header.plen);
diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c
index 7e213f8ddc98..df8d27cf2956 100644
--- a/drivers/nfc/st21nfca/se.c
+++ b/drivers/nfc/st21nfca/se.c
@@ -300,6 +300,8 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
int r = 0;
struct device *dev = &hdev->ndev->dev;
struct nfc_evt_transaction *transaction;
+ u32 aid_len;
+ u8 params_len;
pr_debug("connectivity gate event: %x\n", event);
@@ -308,43 +310,48 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
r = nfc_se_connectivity(hdev->ndev, host);
break;
case ST21NFCA_EVT_TRANSACTION:
- /*
- * According to specification etsi 102 622
+ /* According to specification etsi 102 622
* 11.2.2.4 EVT_TRANSACTION Table 52
* Description Tag Length
* AID 81 5 to 16
* PARAMETERS 82 0 to 255
+ *
+ * The key differences are that the AID storage length is variably
+ * sized in the packet but fixed in nfc_evt_transaction, that aid_len
+ * is u8 in the packet but u32 in the structure, and that the tags in
+ * the packet are not included in nfc_evt_transaction.
+ *
+ * size in bytes: 1 1 5-16 1 1 0-255
+ * offset: 0 1 2 aid_len + 2 aid_len + 3 aid_len + 4
+ * member name: aid_tag(M) aid_len aid params_tag(M) params_len params
+ * example: 0x81 5-16 X 0x82 0-255 X
*/
- if (skb->len < NFC_MIN_AID_LENGTH + 2 &&
- skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG)
+ if (skb->len < 2 || skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG)
return -EPROTO;
- transaction = devm_kzalloc(dev, skb->len - 2, GFP_KERNEL);
- if (!transaction)
- return -ENOMEM;
-
- transaction->aid_len = skb->data[1];
+ aid_len = skb->data[1];
- /* Checking if the length of the AID is valid */
- if (transaction->aid_len > sizeof(transaction->aid))
- return -EINVAL;
+ if (skb->len < aid_len + 4 || aid_len > sizeof(transaction->aid))
+ return -EPROTO;
- memcpy(transaction->aid, &skb->data[2],
- transaction->aid_len);
+ params_len = skb->data[aid_len + 3];
- /* Check next byte is PARAMETERS tag (82) */
- if (skb->data[transaction->aid_len + 2] !=
- NFC_EVT_TRANSACTION_PARAMS_TAG)
+ /* Verify the PARAMETERS tag is (82), and finally check that there is enough
+ * space in the packet to read everything.
+ */
+ if ((skb->data[aid_len + 2] != NFC_EVT_TRANSACTION_PARAMS_TAG) ||
+ (skb->len < aid_len + 4 + params_len))
return -EPROTO;
- transaction->params_len = skb->data[transaction->aid_len + 3];
+ transaction = devm_kzalloc(dev, sizeof(*transaction) + params_len, GFP_KERNEL);
+ if (!transaction)
+ return -ENOMEM;
- /* Total size is allocated (skb->len - 2) minus fixed array members */
- if (transaction->params_len > ((skb->len - 2) - sizeof(struct nfc_evt_transaction)))
- return -EINVAL;
+ transaction->aid_len = aid_len;
+ transaction->params_len = params_len;
- memcpy(transaction->params, skb->data +
- transaction->aid_len + 4, transaction->params_len);
+ memcpy(transaction->aid, &skb->data[2], aid_len);
+ memcpy(transaction->params, &skb->data[aid_len + 4], params_len);
r = nfc_se_transaction(hdev->ndev, host, transaction);
break;
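
The rewritten EVT_TRANSACTION parser validates every length field against the remaining buffer before allocating or copying. The same check sequence can be exercised in ordinary C with fabricated event buffers (the tag values 0x81/0x82 come from ETSI TS 102 622; the buffers below are made up):

	#include <stdint.h>
	#include <stdio.h>

	#define AID_TAG		0x81	/* ETSI TS 102 622, EVT_TRANSACTION */
	#define PARAMS_TAG	0x82
	#define MAX_AID		16

	static int parse_transaction(const uint8_t *data, size_t len)
	{
		uint32_t aid_len;
		uint8_t params_len;

		if (len < 2 || data[0] != AID_TAG)
			return -1;
		aid_len = data[1];
		if (len < aid_len + 4 || aid_len > MAX_AID)
			return -1;
		if (data[aid_len + 2] != PARAMS_TAG)
			return -1;
		params_len = data[aid_len + 3];
		if (len < (size_t)aid_len + 4 + params_len)
			return -1;
		printf("aid_len=%u params_len=%u\n", aid_len, params_len);
		return 0;
	}

	int main(void)
	{
		uint8_t good[] = { 0x81, 3, 0xa0, 0xa1, 0xa2, 0x82, 2, 0x01, 0x02 };
		uint8_t bad[]  = { 0x81, 200 };	/* claims more AID than was sent */

		printf("good: %d\n", parse_transaction(good, sizeof(good)));
		printf("bad:  %d\n", parse_transaction(bad, sizeof(bad)));
		return 0;
	}
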
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index a4fc17db707c..b38d0355b0ac 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -176,8 +176,8 @@ static int nvdimm_clear_badblocks_region(struct device *dev, void *data)
ndr_end = nd_region->ndr_start + nd_region->ndr_size - 1;
/* make sure we are in the region */
- if (ctx->phys < nd_region->ndr_start
- || (ctx->phys + ctx->cleared) > ndr_end)
+ if (ctx->phys < nd_region->ndr_start ||
+ (ctx->phys + ctx->cleared - 1) > ndr_end)
return 0;
sector = (ctx->phys - nd_region->ndr_start) / 512;
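
The nvdimm fix subtracts one because ndr_end is the last valid byte of the region (inclusive), while phys + cleared is one past the last cleared byte. A small demonstration of the corrected inclusive-end check, with made-up addresses:

	#include <stdbool.h>
	#include <stdio.h>

	/* Inclusive-end range check, mirroring nvdimm_clear_badblocks_region(). */
	static bool clear_is_in_region(unsigned long long phys,
				       unsigned long long cleared,
				       unsigned long long ndr_start,
				       unsigned long long ndr_size)
	{
		unsigned long long ndr_end = ndr_start + ndr_size - 1;	/* inclusive */

		return phys >= ndr_start && (phys + cleared - 1) <= ndr_end;
	}

	int main(void)
	{
		/* Region [0x1000, 0x1fff]; clearing its last 512 bytes at 0x1e00
		 * is inside, while the old "phys + cleared > ndr_end" test would
		 * have wrongly rejected it.
		 */
		printf("%d\n", clear_is_in_region(0x1e00, 512, 0x1000, 4096));
		return 0;
	}
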
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 24165daee3c8..6a12a906a11e 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2546,6 +2546,20 @@ static const struct nvme_core_quirk_entry core_quirks[] = {
.vid = 0x1e0f,
.mn = "KCD6XVUL6T40",
.quirks = NVME_QUIRK_NO_APST,
+ },
+ {
+ /*
+ * The external Samsung X5 SSD fails initialization without a
+ * delay before checking if it is ready and has a whole set of
+ * other problems. To make this even more interesting, it
+ * shares the PCI ID with internal Samsung 970 Evo Plus that
+ * does not need or want these quirks.
+ */
+ .vid = 0x144d,
+ .mn = "Samsung Portable SSD X5",
+ .quirks = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
+ NVME_QUIRK_NO_DEEPEST_PS |
+ NVME_QUIRK_IGNORE_DEV_SUBNQN,
}
};
@@ -3285,8 +3299,8 @@ static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
* we have no UUID set
*/
if (uuid_is_null(&ids->uuid)) {
- printk_ratelimited(KERN_WARNING
- "No UUID available providing old NGUID\n");
+ dev_warn_ratelimited(dev,
+ "No UUID available providing old NGUID\n");
return sysfs_emit(buf, "%pU\n", ids->nguid);
}
return sysfs_emit(buf, "%pU\n", &ids->uuid);
@@ -3772,7 +3786,7 @@ static int nvme_add_ns_cdev(struct nvme_ns *ns)
}
static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
- unsigned nsid, struct nvme_ns_ids *ids)
+ unsigned nsid, struct nvme_ns_ids *ids, bool is_shared)
{
struct nvme_ns_head *head;
size_t size = sizeof(*head);
@@ -3796,6 +3810,7 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
head->subsys = ctrl->subsys;
head->ns_id = nsid;
head->ids = *ids;
+ head->shared = is_shared;
kref_init(&head->ref);
if (head->ids.csi) {
@@ -3863,6 +3878,7 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
if (ret) {
dev_err(ctrl->device,
"globally duplicate IDs for nsid %d\n", nsid);
+ nvme_print_device_info(ctrl);
return ret;
}
@@ -3876,12 +3892,11 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
nsid);
goto out_unlock;
}
- head = nvme_alloc_ns_head(ctrl, nsid, ids);
+ head = nvme_alloc_ns_head(ctrl, nsid, ids, is_shared);
if (IS_ERR(head)) {
ret = PTR_ERR(head);
goto out_unlock;
}
- head->shared = is_shared;
} else {
ret = -EINVAL;
if (!is_shared || !head->shared) {
@@ -4580,6 +4595,8 @@ void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
nvme_stop_failfast_work(ctrl);
flush_work(&ctrl->async_event_work);
cancel_work_sync(&ctrl->fw_act_work);
+ if (ctrl->ops->stop_ctrl)
+ ctrl->ops->stop_ctrl(ctrl);
}
EXPORT_SYMBOL_GPL(nvme_stop_ctrl);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 9b72b6ecf33c..5558f8812157 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -502,7 +502,9 @@ struct nvme_ctrl_ops {
void (*free_ctrl)(struct nvme_ctrl *ctrl);
void (*submit_async_event)(struct nvme_ctrl *ctrl);
void (*delete_ctrl)(struct nvme_ctrl *ctrl);
+ void (*stop_ctrl)(struct nvme_ctrl *ctrl);
int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
+ void (*print_device_info)(struct nvme_ctrl *ctrl);
};
/*
@@ -548,6 +550,33 @@ static inline struct request *nvme_cid_to_rq(struct blk_mq_tags *tags,
return blk_mq_tag_to_rq(tags, nvme_tag_from_cid(command_id));
}
+/*
+ * Return the length of the string without the space padding
+ */
+static inline int nvme_strlen(char *s, int len)
+{
+ while (s[len - 1] == ' ')
+ len--;
+ return len;
+}
+
+static inline void nvme_print_device_info(struct nvme_ctrl *ctrl)
+{
+ struct nvme_subsystem *subsys = ctrl->subsys;
+
+ if (ctrl->ops->print_device_info) {
+ ctrl->ops->print_device_info(ctrl);
+ return;
+ }
+
+ dev_err(ctrl->device,
+ "VID:%04x model:%.*s firmware:%.*s\n", subsys->vendor_id,
+ nvme_strlen(subsys->model, sizeof(subsys->model)),
+ subsys->model, nvme_strlen(subsys->firmware_rev,
+ sizeof(subsys->firmware_rev)),
+ subsys->firmware_rev);
+}
+
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
const char *dev_name);
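
nvme_strlen() exists because identify fields such as model and firmware_rev are fixed-width and space padded rather than NUL terminated. A standalone demonstration with a fabricated model field (the len > 0 guard is an extra defensive touch not present in the helper above):

	#include <stdio.h>
	#include <string.h>

	static int trimmed_len(const char *s, int len)
	{
		while (len > 0 && s[len - 1] == ' ')
			len--;
		return len;
	}

	int main(void)
	{
		char model[40];

		memset(model, ' ', sizeof(model));	/* space-padded field */
		memcpy(model, "Samsung Portable SSD X5", 23);
		printf("'%.*s'\n", trimmed_len(model, (int)sizeof(model)), model);
		return 0;
	}
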
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 48f4f6eb877b..58c72d55769a 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1334,6 +1334,14 @@ static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
dev_warn(dev->ctrl.device,
"controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n",
csts, result);
+
+ if (csts != ~0)
+ return;
+
+ dev_warn(dev->ctrl.device,
+ "Does your device have a faulty power saving mode enabled?\n");
+ dev_warn(dev->ctrl.device,
+ "Try \"nvme_core.default_ps_max_latency_us=0 pcie_aspm=off\" and report a bug\n");
}
static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
@@ -2682,8 +2690,13 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
struct pci_dev *pdev = to_pci_dev(dev->dev);
mutex_lock(&dev->shutdown_lock);
- if (pci_device_is_present(pdev) && pci_is_enabled(pdev)) {
- u32 csts = readl(dev->bar + NVME_REG_CSTS);
+ if (pci_is_enabled(pdev)) {
+ u32 csts;
+
+ if (pci_device_is_present(pdev))
+ csts = readl(dev->bar + NVME_REG_CSTS);
+ else
+ csts = ~0;
if (dev->ctrl.state == NVME_CTRL_LIVE ||
dev->ctrl.state == NVME_CTRL_RESETTING) {
@@ -2976,6 +2989,21 @@ static int nvme_pci_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
return snprintf(buf, size, "%s\n", dev_name(&pdev->dev));
}
+
+static void nvme_pci_print_device_info(struct nvme_ctrl *ctrl)
+{
+ struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev);
+ struct nvme_subsystem *subsys = ctrl->subsys;
+
+ dev_err(ctrl->device,
+ "VID:DID %04x:%04x model:%.*s firmware:%.*s\n",
+ pdev->vendor, pdev->device,
+ nvme_strlen(subsys->model, sizeof(subsys->model)),
+ subsys->model, nvme_strlen(subsys->firmware_rev,
+ sizeof(subsys->firmware_rev)),
+ subsys->firmware_rev);
+}
+
static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
.name = "pcie",
.module = THIS_MODULE,
@@ -2987,6 +3015,7 @@ static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
.free_ctrl = nvme_pci_free_ctrl,
.submit_async_event = nvme_pci_submit_async_event,
.get_address = nvme_pci_get_address,
+ .print_device_info = nvme_pci_print_device_info,
};
static int nvme_dev_map(struct nvme_dev *dev)
@@ -3421,7 +3450,8 @@ static const struct pci_device_id nvme_id_table[] = {
{ PCI_VDEVICE(REDHAT, 0x0010), /* Qemu emulated controller */
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */
- .driver_data = NVME_QUIRK_NO_NS_DESC_LIST, },
+ .driver_data = NVME_QUIRK_NO_NS_DESC_LIST |
+ NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */
.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
NVME_QUIRK_NO_NS_DESC_LIST, },
@@ -3437,22 +3467,40 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
NVME_QUIRK_DISABLE_WRITE_ZEROES|
NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ { PCI_DEVICE(0x1987, 0x5012), /* Phison E12 */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */
- .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN |
+ NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1b4b, 0x1092), /* Lexar 256 GB SSD */
.driver_data = NVME_QUIRK_NO_NS_DESC_LIST |
NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ { PCI_DEVICE(0x1cc1, 0x33f8), /* ADATA IM2P33F8ABR1 1 TB */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x10ec, 0x5762), /* ADATA SX6000LNP */
- .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN |
+ NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1cc1, 0x8201), /* ADATA SX8200PNP 512GB */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ { PCI_DEVICE(0x1344, 0x5407), /* Micron Technology Inc NVMe SSD */
+ .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN },
{ PCI_DEVICE(0x1c5c, 0x1504), /* SK Hynix PC400 */
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x1c5c, 0x174a), /* SK Hynix P31 SSD */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x15b7, 0x2001), /* Sandisk Skyhawk */
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_DEVICE(0x1d97, 0x2263), /* SPCC */
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x144d, 0xa80b), /* Samsung PM9B1 256G and 512G */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x144d, 0xa809), /* Samsung MZALQ256HBJD 256G */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x1cc4, 0x6303), /* UMIS RPJTJ512MGE1QDY 512G */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x1cc4, 0x6302), /* UMIS RPJTJ256MGE1QDY 256G */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_DEVICE(0x2646, 0x2262), /* KINGSTON SKC2000 NVMe SSD */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
{ PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */
@@ -3463,6 +3511,10 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1e4B, 0x1202), /* MAXIO MAP1202 */
.driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1cc1, 0x5350), /* ADATA XPG GAMMIX S50 */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1e49, 0x0041), /* ZHITAI TiPro7000 NVMe SSD */
+ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065),
@@ -3483,10 +3535,6 @@ static const struct pci_device_id nvme_id_table[] = {
NVME_QUIRK_128_BYTES_SQES |
NVME_QUIRK_SHARED_TAGS |
NVME_QUIRK_SKIP_CID_GEN },
- { PCI_DEVICE(0x144d, 0xa808), /* Samsung X5 */
- .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY|
- NVME_QUIRK_NO_DEEPEST_PS |
- NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
{ 0, }
};
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index f2a5e1ea508a..46c2dcf72f7e 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1048,6 +1048,14 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
}
}
+static void nvme_rdma_stop_ctrl(struct nvme_ctrl *nctrl)
+{
+ struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
+
+ cancel_work_sync(&ctrl->err_work);
+ cancel_delayed_work_sync(&ctrl->reconnect_work);
+}
+
static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
{
struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
@@ -2252,9 +2260,6 @@ static const struct blk_mq_ops nvme_rdma_admin_mq_ops = {
static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
{
- cancel_work_sync(&ctrl->err_work);
- cancel_delayed_work_sync(&ctrl->reconnect_work);
-
nvme_rdma_teardown_io_queues(ctrl, shutdown);
nvme_stop_admin_queue(&ctrl->ctrl);
if (shutdown)
@@ -2304,6 +2309,7 @@ static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
.submit_async_event = nvme_rdma_submit_async_event,
.delete_ctrl = nvme_rdma_delete_ctrl,
.get_address = nvmf_get_address,
+ .stop_ctrl = nvme_rdma_stop_ctrl,
};
/*
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index bb67538d241b..7a9e6ffa2342 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1180,8 +1180,7 @@ done:
} else if (ret < 0) {
dev_err(queue->ctrl->ctrl.device,
"failed to send request %d\n", ret);
- if (ret != -EPIPE && ret != -ECONNRESET)
- nvme_tcp_fail_request(queue->request);
+ nvme_tcp_fail_request(queue->request);
nvme_tcp_done_send_req(queue);
}
return ret;
@@ -2194,9 +2193,6 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work)
static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
{
- cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
- cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
-
nvme_tcp_teardown_io_queues(ctrl, shutdown);
nvme_stop_admin_queue(ctrl);
if (shutdown)
@@ -2236,6 +2232,12 @@ out_fail:
nvme_tcp_reconnect_or_remove(ctrl);
}
+static void nvme_tcp_stop_ctrl(struct nvme_ctrl *ctrl)
+{
+ cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
+ cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
+}
+
static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
{
struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
@@ -2557,6 +2559,7 @@ static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
.submit_async_event = nvme_tcp_submit_async_event,
.delete_ctrl = nvme_tcp_delete_ctrl,
.get_address = nvmf_get_address,
+ .stop_ctrl = nvme_tcp_stop_ctrl,
};
static bool
diff --git a/drivers/nvme/host/trace.h b/drivers/nvme/host/trace.h
index b5f85259461a..37c7f4c89f92 100644
--- a/drivers/nvme/host/trace.h
+++ b/drivers/nvme/host/trace.h
@@ -69,7 +69,7 @@ TRACE_EVENT(nvme_setup_cmd,
__entry->metadata = !!blk_integrity_rq(req);
__entry->fctype = cmd->fabrics.fctype;
__assign_disk_name(__entry->disk, req->q->disk);
- memcpy(__entry->cdw10, &cmd->common.cdw10,
+ memcpy(__entry->cdw10, &cmd->common.cdws,
sizeof(__entry->cdw10));
),
TP_printk("nvme%d: %sqid=%d, cmdid=%u, nsid=%u, flags=0x%x, meta=0x%x, cmd=(%s %s)",
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index e44b2988759e..ff77c3d2354f 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -773,11 +773,31 @@ static ssize_t nvmet_passthru_io_timeout_store(struct config_item *item,
}
CONFIGFS_ATTR(nvmet_passthru_, io_timeout);
+static ssize_t nvmet_passthru_clear_ids_show(struct config_item *item,
+ char *page)
+{
+ return sprintf(page, "%u\n", to_subsys(item->ci_parent)->clear_ids);
+}
+
+static ssize_t nvmet_passthru_clear_ids_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
+ unsigned int clear_ids;
+
+ if (kstrtouint(page, 0, &clear_ids))
+ return -EINVAL;
+ subsys->clear_ids = clear_ids;
+ return count;
+}
+CONFIGFS_ATTR(nvmet_passthru_, clear_ids);
+
static struct configfs_attribute *nvmet_passthru_attrs[] = {
&nvmet_passthru_attr_device_path,
&nvmet_passthru_attr_enable,
&nvmet_passthru_attr_admin_timeout,
&nvmet_passthru_attr_io_timeout,
+ &nvmet_passthru_attr_clear_ids,
NULL,
};
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 90e75324dae0..c27660a660d9 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -1374,6 +1374,12 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
ctrl->port = req->port;
ctrl->ops = req->ops;
+#ifdef CONFIG_NVME_TARGET_PASSTHRU
+ /* By default, set loop targets to clear IDS by default */
+ if (ctrl->port->disc_addr.trtype == NVMF_TRTYPE_LOOP)
+ subsys->clear_ids = 1;
+#endif
+
INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
INIT_LIST_HEAD(&ctrl->async_events);
INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 69818752a33a..2b3e5719f24e 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -249,6 +249,7 @@ struct nvmet_subsys {
struct config_group passthru_group;
unsigned int admin_timeout;
unsigned int io_timeout;
+ unsigned int clear_ids;
#endif /* CONFIG_NVME_TARGET_PASSTHRU */
#ifdef CONFIG_BLK_DEV_ZONED
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index b1f7efab3918..6f39a29828b1 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -30,6 +30,53 @@ void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl)
ctrl->cap &= ~(1ULL << 43);
}
+static u16 nvmet_passthru_override_id_descs(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ u16 status = NVME_SC_SUCCESS;
+ int pos, len;
+ bool csi_seen = false;
+ void *data;
+ u8 csi;
+
+ if (!ctrl->subsys->clear_ids)
+ return status;
+
+ data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
+ if (!data)
+ return NVME_SC_INTERNAL;
+
+ status = nvmet_copy_from_sgl(req, 0, data, NVME_IDENTIFY_DATA_SIZE);
+ if (status)
+ goto out_free;
+
+ for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
+ struct nvme_ns_id_desc *cur = data + pos;
+
+ if (cur->nidl == 0)
+ break;
+ if (cur->nidt == NVME_NIDT_CSI) {
+ memcpy(&csi, cur + 1, NVME_NIDT_CSI_LEN);
+ csi_seen = true;
+ break;
+ }
+ len = sizeof(struct nvme_ns_id_desc) + cur->nidl;
+ }
+
+ memset(data, 0, NVME_IDENTIFY_DATA_SIZE);
+ if (csi_seen) {
+ struct nvme_ns_id_desc *cur = data;
+
+ cur->nidt = NVME_NIDT_CSI;
+ cur->nidl = NVME_NIDT_CSI_LEN;
+ memcpy(cur + 1, &csi, NVME_NIDT_CSI_LEN);
+ }
+ status = nvmet_copy_to_sgl(req, 0, data, NVME_IDENTIFY_DATA_SIZE);
+out_free:
+ kfree(data);
+ return status;
+}
+
static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
@@ -152,6 +199,11 @@ static u16 nvmet_passthru_override_id_ns(struct nvmet_req *req)
*/
id->mc = 0;
+ if (req->sq->ctrl->subsys->clear_ids) {
+ memset(id->nguid, 0, NVME_NIDT_NGUID_LEN);
+ memset(id->eui64, 0, NVME_NIDT_EUI64_LEN);
+ }
+
status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
out_free:
@@ -176,6 +228,9 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
case NVME_ID_CNS_NS:
nvmet_passthru_override_id_ns(req);
break;
+ case NVME_ID_CNS_NS_DESC_LIST:
+ nvmet_passthru_override_id_descs(req);
+ break;
}
} else if (status < 0)
status = NVME_SC_INTERNAL;
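
nvmet_passthru_override_id_descs() above walks the 4 KiB identify payload as a list of TLV records: a 4-byte header (nidt type, nidl payload length, two reserved bytes) followed by nidl payload bytes, terminated by a zero length. A plain C sketch of that walk with a fabricated buffer (NIDT_CSI = 4 matches the NVMe spec value for the Command Set Identifier descriptor):

	#include <stdint.h>
	#include <stdio.h>

	#define NIDT_CSI 4	/* Command Set Identifier descriptor type */

	static void walk_id_descs(const uint8_t *data, size_t size)
	{
		size_t pos = 0;

		while (pos + 4 <= size) {
			uint8_t nidt = data[pos];	/* descriptor type */
			uint8_t nidl = data[pos + 1];	/* payload length */

			if (nidl == 0)	/* zero length terminates the list */
				break;
			if (pos + 4 + nidl > size)	/* truncated record */
				break;
			printf("type=%u len=%u\n", nidt, nidl);
			pos += 4 + nidl;	/* 4-byte header plus payload */
		}
	}

	int main(void)
	{
		uint8_t buf[64] = { 0 };

		buf[0] = NIDT_CSI;	/* one CSI descriptor ... */
		buf[1] = 1;		/* ... with a 1-byte payload */
		buf[4] = 0;		/* CSI 0: NVM command set */
		walk_id_descs(buf, sizeof(buf));
		return 0;
	}
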
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 2793554e622e..0a9542599ad1 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -405,7 +405,7 @@ err:
return NVME_SC_INTERNAL;
}
-static void nvmet_tcp_send_ddgst(struct ahash_request *hash,
+static void nvmet_tcp_calc_ddgst(struct ahash_request *hash,
struct nvmet_tcp_cmd *cmd)
{
ahash_request_set_crypt(hash, cmd->req.sg,
@@ -413,23 +413,6 @@ static void nvmet_tcp_send_ddgst(struct ahash_request *hash,
crypto_ahash_digest(hash);
}
-static void nvmet_tcp_recv_ddgst(struct ahash_request *hash,
- struct nvmet_tcp_cmd *cmd)
-{
- struct scatterlist sg;
- struct kvec *iov;
- int i;
-
- crypto_ahash_init(hash);
- for (i = 0, iov = cmd->iov; i < cmd->nr_mapped; i++, iov++) {
- sg_init_one(&sg, iov->iov_base, iov->iov_len);
- ahash_request_set_crypt(hash, &sg, NULL, iov->iov_len);
- crypto_ahash_update(hash);
- }
- ahash_request_set_crypt(hash, NULL, (void *)&cmd->exp_ddgst, 0);
- crypto_ahash_final(hash);
-}
-
static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
{
struct nvme_tcp_data_pdu *pdu = cmd->data_pdu;
@@ -454,7 +437,7 @@ static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
if (queue->data_digest) {
pdu->hdr.flags |= NVME_TCP_F_DDGST;
- nvmet_tcp_send_ddgst(queue->snd_hash, cmd);
+ nvmet_tcp_calc_ddgst(queue->snd_hash, cmd);
}
if (cmd->queue->hdr_digest) {
@@ -1137,7 +1120,7 @@ static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd)
{
struct nvmet_tcp_queue *queue = cmd->queue;
- nvmet_tcp_recv_ddgst(queue->rcv_hash, cmd);
+ nvmet_tcp_calc_ddgst(queue->rcv_hash, cmd);
queue->offset = 0;
queue->left = NVME_TCP_DIGEST_LENGTH;
queue->rcv_state = NVMET_TCP_RECV_DDGST;
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index f52960d2dfbe..bff144c97e66 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -32,7 +32,7 @@ config DEBUG_PINCTRL
Say Y here to add some extra checks and diagnostics to PINCTRL calls.
config PINCTRL_AMD
- tristate "AMD GPIO pin control"
+ bool "AMD GPIO pin control"
depends on HAS_IOMEM
depends on ACPI || COMPILE_TEST
select GPIOLIB
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
index c94e24aadf92..83d47ff1cea8 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
@@ -236,11 +236,11 @@ int aspeed_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function,
const struct aspeed_sig_expr **funcs;
const struct aspeed_sig_expr ***prios;
- pr_debug("Muxing pin %s for %s\n", pdesc->name, pfunc->name);
-
if (!pdesc)
return -EINVAL;
+ pr_debug("Muxing pin %s for %s\n", pdesc->name, pfunc->name);
+
prios = pdesc->prios;
if (!prios)
diff --git a/drivers/pinctrl/freescale/pinctrl-imx93.c b/drivers/pinctrl/freescale/pinctrl-imx93.c
index c0630f69e995..417e41b37a6f 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx93.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx93.c
@@ -239,6 +239,7 @@ static const struct pinctrl_pin_desc imx93_pinctrl_pads[] = {
static const struct imx_pinctrl_soc_info imx93_pinctrl_info = {
.pins = imx93_pinctrl_pads,
.npins = ARRAY_SIZE(imx93_pinctrl_pads),
+ .flags = ZERO_OFFSET_VALID,
.gpr_compatible = "fsl,imx93-iomuxc-gpr",
};
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
index a140b6bfbfaa..bcde042d29dc 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
@@ -102,7 +102,7 @@ struct armada_37xx_pinctrl {
struct device *dev;
struct gpio_chip gpio_chip;
struct irq_chip irq_chip;
- spinlock_t irq_lock;
+ raw_spinlock_t irq_lock;
struct pinctrl_desc pctl;
struct pinctrl_dev *pctl_dev;
struct armada_37xx_pin_group *groups;
@@ -523,9 +523,9 @@ static void armada_37xx_irq_ack(struct irq_data *d)
unsigned long flags;
armada_37xx_irq_update_reg(&reg, d);
- spin_lock_irqsave(&info->irq_lock, flags);
+ raw_spin_lock_irqsave(&info->irq_lock, flags);
writel(d->mask, info->base + reg);
- spin_unlock_irqrestore(&info->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&info->irq_lock, flags);
}
static void armada_37xx_irq_mask(struct irq_data *d)
@@ -536,10 +536,10 @@ static void armada_37xx_irq_mask(struct irq_data *d)
unsigned long flags;
armada_37xx_irq_update_reg(&reg, d);
- spin_lock_irqsave(&info->irq_lock, flags);
+ raw_spin_lock_irqsave(&info->irq_lock, flags);
val = readl(info->base + reg);
writel(val & ~d->mask, info->base + reg);
- spin_unlock_irqrestore(&info->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&info->irq_lock, flags);
}
static void armada_37xx_irq_unmask(struct irq_data *d)
@@ -550,10 +550,10 @@ static void armada_37xx_irq_unmask(struct irq_data *d)
unsigned long flags;
armada_37xx_irq_update_reg(&reg, d);
- spin_lock_irqsave(&info->irq_lock, flags);
+ raw_spin_lock_irqsave(&info->irq_lock, flags);
val = readl(info->base + reg);
writel(val | d->mask, info->base + reg);
- spin_unlock_irqrestore(&info->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&info->irq_lock, flags);
}
static int armada_37xx_irq_set_wake(struct irq_data *d, unsigned int on)
@@ -564,14 +564,14 @@ static int armada_37xx_irq_set_wake(struct irq_data *d, unsigned int on)
unsigned long flags;
armada_37xx_irq_update_reg(&reg, d);
- spin_lock_irqsave(&info->irq_lock, flags);
+ raw_spin_lock_irqsave(&info->irq_lock, flags);
val = readl(info->base + reg);
if (on)
val |= (BIT(d->hwirq % GPIO_PER_REG));
else
val &= ~(BIT(d->hwirq % GPIO_PER_REG));
writel(val, info->base + reg);
- spin_unlock_irqrestore(&info->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&info->irq_lock, flags);
return 0;
}
@@ -583,7 +583,7 @@ static int armada_37xx_irq_set_type(struct irq_data *d, unsigned int type)
u32 val, reg = IRQ_POL;
unsigned long flags;
- spin_lock_irqsave(&info->irq_lock, flags);
+ raw_spin_lock_irqsave(&info->irq_lock, flags);
armada_37xx_irq_update_reg(&reg, d);
val = readl(info->base + reg);
switch (type) {
@@ -607,11 +607,11 @@ static int armada_37xx_irq_set_type(struct irq_data *d, unsigned int type)
break;
}
default:
- spin_unlock_irqrestore(&info->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&info->irq_lock, flags);
return -EINVAL;
}
writel(val, info->base + reg);
- spin_unlock_irqrestore(&info->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&info->irq_lock, flags);
return 0;
}
@@ -626,7 +626,7 @@ static int armada_37xx_edge_both_irq_swap_pol(struct armada_37xx_pinctrl *info,
regmap_read(info->regmap, INPUT_VAL + 4*reg_idx, &l);
- spin_lock_irqsave(&info->irq_lock, flags);
+ raw_spin_lock_irqsave(&info->irq_lock, flags);
p = readl(info->base + IRQ_POL + 4 * reg_idx);
if ((p ^ l) & (1 << bit_num)) {
/*
@@ -647,7 +647,7 @@ static int armada_37xx_edge_both_irq_swap_pol(struct armada_37xx_pinctrl *info,
ret = -1;
}
- spin_unlock_irqrestore(&info->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&info->irq_lock, flags);
return ret;
}
@@ -664,11 +664,11 @@ static void armada_37xx_irq_handler(struct irq_desc *desc)
u32 status;
unsigned long flags;
- spin_lock_irqsave(&info->irq_lock, flags);
+ raw_spin_lock_irqsave(&info->irq_lock, flags);
status = readl_relaxed(info->base + IRQ_STATUS + 4 * i);
/* Manage only the interrupt that was enabled */
status &= readl_relaxed(info->base + IRQ_EN + 4 * i);
- spin_unlock_irqrestore(&info->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&info->irq_lock, flags);
while (status) {
u32 hwirq = ffs(status) - 1;
u32 virq = irq_find_mapping(d, hwirq +
@@ -695,12 +695,12 @@ static void armada_37xx_irq_handler(struct irq_desc *desc)
update_status:
/* Update status in case a new IRQ appears */
- spin_lock_irqsave(&info->irq_lock, flags);
+ raw_spin_lock_irqsave(&info->irq_lock, flags);
status = readl_relaxed(info->base +
IRQ_STATUS + 4 * i);
/* Manage only the interrupt that was enabled */
status &= readl_relaxed(info->base + IRQ_EN + 4 * i);
- spin_unlock_irqrestore(&info->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&info->irq_lock, flags);
}
}
chained_irq_exit(chip, desc);
@@ -731,7 +731,7 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev,
struct device *dev = &pdev->dev;
unsigned int i, nr_irq_parent;
- spin_lock_init(&info->irq_lock);
+ raw_spin_lock_init(&info->irq_lock);
nr_irq_parent = of_irq_count(np);
if (!nr_irq_parent) {
@@ -1107,25 +1107,40 @@ static const struct of_device_id armada_37xx_pinctrl_of_match[] = {
{ },
};
+static const struct regmap_config armada_37xx_pinctrl_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .use_raw_spinlock = true,
+};
+
static int __init armada_37xx_pinctrl_probe(struct platform_device *pdev)
{
struct armada_37xx_pinctrl *info;
struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
struct regmap *regmap;
+ void __iomem *base;
int ret;
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
+ if (IS_ERR(base)) {
+ dev_err(dev, "failed to ioremap base address: %pe\n", base);
+ return PTR_ERR(base);
+ }
+
+ regmap = devm_regmap_init_mmio(dev, base,
+ &armada_37xx_pinctrl_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "failed to create regmap: %pe\n", regmap);
+ return PTR_ERR(regmap);
+ }
+
info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
info->dev = dev;
-
- regmap = syscon_node_to_regmap(np);
- if (IS_ERR(regmap))
- return dev_err_probe(dev, PTR_ERR(regmap), "cannot get regmap\n");
info->regmap = regmap;
-
info->data = of_device_get_match_data(dev);
ret = armada_37xx_pinctrl_register(pdev, info);
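Two related changes land here: the irqchip lock becomes a raw_spinlock_t, because irq_chip callbacks run in hard-irq context even on PREEMPT_RT, where an ordinary spinlock_t can sleep; and the syscon regmap is replaced with a locally created MMIO regmap whose use_raw_spinlock flag makes regmap's internal locking follow the same rule. A hedged sketch of the locking pattern, with hypothetical names:

#include <linux/spinlock.h>
#include <linux/io.h>

/* Sketch: a register read-modify-write under a raw spinlock, safe to
 * call from irq_chip callbacks on PREEMPT_RT. demo_lock must be
 * raw_spin_lock_init()'d at probe time. */
static raw_spinlock_t demo_lock;

static void demo_set_bits(void __iomem *reg, u32 mask)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&demo_lock, flags);
        writel(readl(reg) | mask, reg);
        raw_spin_unlock_irqrestore(&demo_lock, flags);
}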
diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
index 5f4a8c5c6650..dfc8ea9f3843 100644
--- a/drivers/pinctrl/pinctrl-ocelot.c
+++ b/drivers/pinctrl/pinctrl-ocelot.c
@@ -29,19 +29,12 @@
#define ocelot_clrsetbits(addr, clear, set) \
writel((readl(addr) & ~(clear)) | (set), (addr))
-/* PINCONFIG bits (sparx5 only) */
enum {
PINCONF_BIAS,
PINCONF_SCHMITT,
PINCONF_DRIVE_STRENGTH,
};
-#define BIAS_PD_BIT BIT(4)
-#define BIAS_PU_BIT BIT(3)
-#define BIAS_BITS (BIAS_PD_BIT|BIAS_PU_BIT)
-#define SCHMITT_BIT BIT(2)
-#define DRIVE_BITS GENMASK(1, 0)
-
/* GPIO standard registers */
#define OCELOT_GPIO_OUT_SET 0x0
#define OCELOT_GPIO_OUT_CLR 0x4
@@ -321,6 +314,13 @@ struct ocelot_pin_caps {
unsigned char a_functions[OCELOT_FUNC_PER_PIN]; /* Additional functions */
};
+struct ocelot_pincfg_data {
+ u8 pd_bit;
+ u8 pu_bit;
+ u8 drive_bits;
+ u8 schmitt_bit;
+};
+
struct ocelot_pinctrl {
struct device *dev;
struct pinctrl_dev *pctl;
@@ -328,10 +328,16 @@ struct ocelot_pinctrl {
struct regmap *map;
struct regmap *pincfg;
struct pinctrl_desc *desc;
+ const struct ocelot_pincfg_data *pincfg_data;
struct ocelot_pmx_func func[FUNC_MAX];
u8 stride;
};
+struct ocelot_match_data {
+ struct pinctrl_desc desc;
+ struct ocelot_pincfg_data pincfg_data;
+};
+
#define LUTON_P(p, f0, f1) \
static struct ocelot_pin_caps luton_pin_##p = { \
.pin = p, \
@@ -1325,24 +1331,27 @@ static int ocelot_hw_get_value(struct ocelot_pinctrl *info,
int ret = -EOPNOTSUPP;
if (info->pincfg) {
+ const struct ocelot_pincfg_data *opd = info->pincfg_data;
u32 regcfg;
- ret = regmap_read(info->pincfg, pin, &regcfg);
+ ret = regmap_read(info->pincfg,
+ pin * regmap_get_reg_stride(info->pincfg),
+ &regcfg);
if (ret)
return ret;
ret = 0;
switch (reg) {
case PINCONF_BIAS:
- *val = regcfg & BIAS_BITS;
+ *val = regcfg & (opd->pd_bit | opd->pu_bit);
break;
case PINCONF_SCHMITT:
- *val = regcfg & SCHMITT_BIT;
+ *val = regcfg & opd->schmitt_bit;
break;
case PINCONF_DRIVE_STRENGTH:
- *val = regcfg & DRIVE_BITS;
+ *val = regcfg & opd->drive_bits;
break;
default:
@@ -1359,14 +1368,18 @@ static int ocelot_pincfg_clrsetbits(struct ocelot_pinctrl *info, u32 regaddr,
u32 val;
int ret;
- ret = regmap_read(info->pincfg, regaddr, &val);
+ ret = regmap_read(info->pincfg,
+ regaddr * regmap_get_reg_stride(info->pincfg),
+ &val);
if (ret)
return ret;
val &= ~clrbits;
val |= setbits;
- ret = regmap_write(info->pincfg, regaddr, val);
+ ret = regmap_write(info->pincfg,
+ regaddr * regmap_get_reg_stride(info->pincfg),
+ val);
return ret;
}
@@ -1379,23 +1392,27 @@ static int ocelot_hw_set_value(struct ocelot_pinctrl *info,
int ret = -EOPNOTSUPP;
if (info->pincfg) {
+ const struct ocelot_pincfg_data *opd = info->pincfg_data;
ret = 0;
switch (reg) {
case PINCONF_BIAS:
- ret = ocelot_pincfg_clrsetbits(info, pin, BIAS_BITS,
+ ret = ocelot_pincfg_clrsetbits(info, pin,
+ opd->pd_bit | opd->pu_bit,
val);
break;
case PINCONF_SCHMITT:
- ret = ocelot_pincfg_clrsetbits(info, pin, SCHMITT_BIT,
+ ret = ocelot_pincfg_clrsetbits(info, pin,
+ opd->schmitt_bit,
val);
break;
case PINCONF_DRIVE_STRENGTH:
if (val <= 3)
ret = ocelot_pincfg_clrsetbits(info, pin,
- DRIVE_BITS, val);
+ opd->drive_bits,
+ val);
else
ret = -EINVAL;
break;
@@ -1425,17 +1442,20 @@ static int ocelot_pinconf_get(struct pinctrl_dev *pctldev,
if (param == PIN_CONFIG_BIAS_DISABLE)
val = (val == 0);
else if (param == PIN_CONFIG_BIAS_PULL_DOWN)
- val = (val & BIAS_PD_BIT ? true : false);
+ val = !!(val & info->pincfg_data->pd_bit);
else /* PIN_CONFIG_BIAS_PULL_UP */
- val = (val & BIAS_PU_BIT ? true : false);
+ val = !!(val & info->pincfg_data->pu_bit);
break;
case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ if (!info->pincfg_data->schmitt_bit)
+ return -EOPNOTSUPP;
+
err = ocelot_hw_get_value(info, pin, PINCONF_SCHMITT, &val);
if (err)
return err;
- val = (val & SCHMITT_BIT ? true : false);
+ val = !!(val & info->pincfg_data->schmitt_bit);
break;
case PIN_CONFIG_DRIVE_STRENGTH:
@@ -1479,6 +1499,7 @@ static int ocelot_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
unsigned long *configs, unsigned int num_configs)
{
struct ocelot_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
+ const struct ocelot_pincfg_data *opd = info->pincfg_data;
u32 param, arg, p;
int cfg, err = 0;
@@ -1491,8 +1512,8 @@ static int ocelot_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
case PIN_CONFIG_BIAS_PULL_UP:
case PIN_CONFIG_BIAS_PULL_DOWN:
arg = (param == PIN_CONFIG_BIAS_DISABLE) ? 0 :
- (param == PIN_CONFIG_BIAS_PULL_UP) ? BIAS_PU_BIT :
- BIAS_PD_BIT;
+ (param == PIN_CONFIG_BIAS_PULL_UP) ?
+ opd->pu_bit : opd->pd_bit;
err = ocelot_hw_set_value(info, pin, PINCONF_BIAS, arg);
if (err)
@@ -1501,7 +1522,10 @@ static int ocelot_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
break;
case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
- arg = arg ? SCHMITT_BIT : 0;
+ if (!opd->schmitt_bit)
+ return -EOPNOTSUPP;
+
+ arg = arg ? opd->schmitt_bit : 0;
err = ocelot_hw_set_value(info, pin, PINCONF_SCHMITT,
arg);
if (err)
@@ -1562,69 +1586,94 @@ static const struct pinctrl_ops ocelot_pctl_ops = {
.dt_free_map = pinconf_generic_dt_free_map,
};
-static struct pinctrl_desc luton_desc = {
- .name = "luton-pinctrl",
- .pins = luton_pins,
- .npins = ARRAY_SIZE(luton_pins),
- .pctlops = &ocelot_pctl_ops,
- .pmxops = &ocelot_pmx_ops,
- .owner = THIS_MODULE,
+static struct ocelot_match_data luton_desc = {
+ .desc = {
+ .name = "luton-pinctrl",
+ .pins = luton_pins,
+ .npins = ARRAY_SIZE(luton_pins),
+ .pctlops = &ocelot_pctl_ops,
+ .pmxops = &ocelot_pmx_ops,
+ .owner = THIS_MODULE,
+ },
};
-static struct pinctrl_desc serval_desc = {
- .name = "serval-pinctrl",
- .pins = serval_pins,
- .npins = ARRAY_SIZE(serval_pins),
- .pctlops = &ocelot_pctl_ops,
- .pmxops = &ocelot_pmx_ops,
- .owner = THIS_MODULE,
+static struct ocelot_match_data serval_desc = {
+ .desc = {
+ .name = "serval-pinctrl",
+ .pins = serval_pins,
+ .npins = ARRAY_SIZE(serval_pins),
+ .pctlops = &ocelot_pctl_ops,
+ .pmxops = &ocelot_pmx_ops,
+ .owner = THIS_MODULE,
+ },
};
-static struct pinctrl_desc ocelot_desc = {
- .name = "ocelot-pinctrl",
- .pins = ocelot_pins,
- .npins = ARRAY_SIZE(ocelot_pins),
- .pctlops = &ocelot_pctl_ops,
- .pmxops = &ocelot_pmx_ops,
- .owner = THIS_MODULE,
+static struct ocelot_match_data ocelot_desc = {
+ .desc = {
+ .name = "ocelot-pinctrl",
+ .pins = ocelot_pins,
+ .npins = ARRAY_SIZE(ocelot_pins),
+ .pctlops = &ocelot_pctl_ops,
+ .pmxops = &ocelot_pmx_ops,
+ .owner = THIS_MODULE,
+ },
};
-static struct pinctrl_desc jaguar2_desc = {
- .name = "jaguar2-pinctrl",
- .pins = jaguar2_pins,
- .npins = ARRAY_SIZE(jaguar2_pins),
- .pctlops = &ocelot_pctl_ops,
- .pmxops = &ocelot_pmx_ops,
- .owner = THIS_MODULE,
+static struct ocelot_match_data jaguar2_desc = {
+ .desc = {
+ .name = "jaguar2-pinctrl",
+ .pins = jaguar2_pins,
+ .npins = ARRAY_SIZE(jaguar2_pins),
+ .pctlops = &ocelot_pctl_ops,
+ .pmxops = &ocelot_pmx_ops,
+ .owner = THIS_MODULE,
+ },
};
-static struct pinctrl_desc servalt_desc = {
- .name = "servalt-pinctrl",
- .pins = servalt_pins,
- .npins = ARRAY_SIZE(servalt_pins),
- .pctlops = &ocelot_pctl_ops,
- .pmxops = &ocelot_pmx_ops,
- .owner = THIS_MODULE,
+static struct ocelot_match_data servalt_desc = {
+ .desc = {
+ .name = "servalt-pinctrl",
+ .pins = servalt_pins,
+ .npins = ARRAY_SIZE(servalt_pins),
+ .pctlops = &ocelot_pctl_ops,
+ .pmxops = &ocelot_pmx_ops,
+ .owner = THIS_MODULE,
+ },
};
-static struct pinctrl_desc sparx5_desc = {
- .name = "sparx5-pinctrl",
- .pins = sparx5_pins,
- .npins = ARRAY_SIZE(sparx5_pins),
- .pctlops = &ocelot_pctl_ops,
- .pmxops = &ocelot_pmx_ops,
- .confops = &ocelot_confops,
- .owner = THIS_MODULE,
+static struct ocelot_match_data sparx5_desc = {
+ .desc = {
+ .name = "sparx5-pinctrl",
+ .pins = sparx5_pins,
+ .npins = ARRAY_SIZE(sparx5_pins),
+ .pctlops = &ocelot_pctl_ops,
+ .pmxops = &ocelot_pmx_ops,
+ .confops = &ocelot_confops,
+ .owner = THIS_MODULE,
+ },
+ .pincfg_data = {
+ .pd_bit = BIT(4),
+ .pu_bit = BIT(3),
+ .drive_bits = GENMASK(1, 0),
+ .schmitt_bit = BIT(2),
+ },
};
-static struct pinctrl_desc lan966x_desc = {
- .name = "lan966x-pinctrl",
- .pins = lan966x_pins,
- .npins = ARRAY_SIZE(lan966x_pins),
- .pctlops = &ocelot_pctl_ops,
- .pmxops = &lan966x_pmx_ops,
- .confops = &ocelot_confops,
- .owner = THIS_MODULE,
+static struct ocelot_match_data lan966x_desc = {
+ .desc = {
+ .name = "lan966x-pinctrl",
+ .pins = lan966x_pins,
+ .npins = ARRAY_SIZE(lan966x_pins),
+ .pctlops = &ocelot_pctl_ops,
+ .pmxops = &lan966x_pmx_ops,
+ .confops = &ocelot_confops,
+ .owner = THIS_MODULE,
+ },
+ .pincfg_data = {
+ .pd_bit = BIT(3),
+ .pu_bit = BIT(2),
+ .drive_bits = GENMASK(1, 0),
+ },
};
static int ocelot_create_group_func_map(struct device *dev,
@@ -1890,7 +1939,8 @@ static const struct of_device_id ocelot_pinctrl_of_match[] = {
{},
};
-static struct regmap *ocelot_pinctrl_create_pincfg(struct platform_device *pdev)
+static struct regmap *ocelot_pinctrl_create_pincfg(struct platform_device *pdev,
+ const struct ocelot_pinctrl *info)
{
void __iomem *base;
@@ -1898,7 +1948,7 @@ static struct regmap *ocelot_pinctrl_create_pincfg(struct platform_device *pdev)
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
- .max_register = 32,
+ .max_register = info->desc->npins * 4,
.name = "pincfg",
};
@@ -1913,6 +1963,7 @@ static struct regmap *ocelot_pinctrl_create_pincfg(struct platform_device *pdev)
static int ocelot_pinctrl_probe(struct platform_device *pdev)
{
+ const struct ocelot_match_data *data;
struct device *dev = &pdev->dev;
struct ocelot_pinctrl *info;
struct reset_control *reset;
@@ -1929,7 +1980,16 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev)
if (!info)
return -ENOMEM;
- info->desc = (struct pinctrl_desc *)device_get_match_data(dev);
+ data = device_get_match_data(dev);
+ if (!data)
+ return -EINVAL;
+
+ info->desc = devm_kmemdup(dev, &data->desc, sizeof(*info->desc),
+ GFP_KERNEL);
+ if (!info->desc)
+ return -ENOMEM;
+
+ info->pincfg_data = &data->pincfg_data;
reset = devm_reset_control_get_optional_shared(dev, "switch");
if (IS_ERR(reset))
@@ -1956,7 +2016,7 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev)
/* Pinconf registers */
if (info->desc->confops) {
- pincfg = ocelot_pinctrl_create_pincfg(pdev);
+ pincfg = ocelot_pinctrl_create_pincfg(pdev, info);
if (IS_ERR(pincfg))
dev_dbg(dev, "Failed to create pincfg regmap\n");
else
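The restructuring above bundles the per-SoC pinctrl_desc and the pinconf bit layout into one ocelot_match_data, fetched via device_get_match_data(); the desc is devm_kmemdup()'d because probe may patch it. A minimal sketch of the match-data pattern (identifiers are illustrative):

#include <linux/bits.h>
#include <linux/of.h>
#include <linux/property.h>

struct demo_match_data {
        u8 pd_bit;      /* pull-down bit position for this SoC */
        u8 pu_bit;      /* pull-up bit position for this SoC */
};

static const struct demo_match_data demo_soc_a = {
        .pd_bit = BIT(4),
        .pu_bit = BIT(3),
};

static const struct of_device_id demo_of_match[] = {
        { .compatible = "vendor,soc-a", .data = &demo_soc_a },
        { }
};

/* In probe:
 *     const struct demo_match_data *d = device_get_match_data(dev);
 *     if (!d)
 *             return -EINVAL;
 */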
diff --git a/drivers/pinctrl/ralink/pinctrl-ralink.c b/drivers/pinctrl/ralink/pinctrl-ralink.c
index 63429a287434..770862f45b3f 100644
--- a/drivers/pinctrl/ralink/pinctrl-ralink.c
+++ b/drivers/pinctrl/ralink/pinctrl-ralink.c
@@ -266,6 +266,8 @@ static int ralink_pinctrl_pins(struct ralink_priv *p)
p->func[i]->pin_count,
sizeof(int),
GFP_KERNEL);
+ if (!p->func[i]->pins)
+ return -ENOMEM;
for (j = 0; j < p->func[i]->pin_count; j++)
p->func[i]->pins[j] = p->func[i]->pin_first + j;
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index 57a33fb0f2d7..14bcca73238a 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -1338,16 +1338,18 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl, struct fwnode
bank->secure_control = pctl->match_data->secure_control;
spin_lock_init(&bank->lock);
- /* create irq hierarchical domain */
- bank->fwnode = fwnode;
+ if (pctl->domain) {
+ /* create irq hierarchical domain */
+ bank->fwnode = fwnode;
- bank->domain = irq_domain_create_hierarchy(pctl->domain, 0,
- STM32_GPIO_IRQ_LINE, bank->fwnode,
- &stm32_gpio_domain_ops, bank);
+ bank->domain = irq_domain_create_hierarchy(pctl->domain, 0, STM32_GPIO_IRQ_LINE,
+ bank->fwnode, &stm32_gpio_domain_ops,
+ bank);
- if (!bank->domain) {
- err = -ENODEV;
- goto err_clk;
+ if (!bank->domain) {
+ err = -ENODEV;
+ goto err_clk;
+ }
}
err = gpiochip_add_data(&bank->gpio_chip, bank);
@@ -1510,6 +1512,8 @@ int stm32_pctl_probe(struct platform_device *pdev)
pctl->domain = stm32_pctrl_get_irq_domain(pdev);
if (IS_ERR(pctl->domain))
return PTR_ERR(pctl->domain);
+ if (!pctl->domain)
+ dev_warn(dev, "pinctrl without interrupt support\n");
/* hwspinlock is optional */
hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
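With this change stm32_pctrl_get_irq_domain() may legitimately return NULL (no interrupt parent in the DT), so bank registration builds the hierarchical domain only when a parent exists and probe continues with a warning otherwise. A sketch of the guard, assuming the same "NULL means optional" convention:

#include <linux/irqdomain.h>

static const struct irq_domain_ops demo_domain_ops;

static int demo_add_bank_irq(struct irq_domain *parent,
                             struct fwnode_handle *fwnode, void *bank)
{
        struct irq_domain *d;

        if (!parent)            /* pinctrl without interrupt support */
                return 0;

        d = irq_domain_create_hierarchy(parent, 0, 16, fwnode,
                                        &demo_domain_ops, bank);
        return d ? 0 : -ENODEV;
}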
diff --git a/drivers/pinctrl/sunplus/sppctl.c b/drivers/pinctrl/sunplus/sppctl.c
index 3ba47040ac42..2b3335ab56c6 100644
--- a/drivers/pinctrl/sunplus/sppctl.c
+++ b/drivers/pinctrl/sunplus/sppctl.c
@@ -871,6 +871,9 @@ static int sppctl_dt_node_to_map(struct pinctrl_dev *pctldev, struct device_node
}
*map = kcalloc(*num_maps + nmG, sizeof(**map), GFP_KERNEL);
+ if (*map == NULL)
+ return -ENOMEM;
+
for (i = 0; i < (*num_maps); i++) {
dt_pin = be32_to_cpu(list[i]);
pin_num = FIELD_GET(GENMASK(31, 24), dt_pin);
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
index 4ada80317a3b..b5c1a8f363f3 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
@@ -158,26 +158,26 @@ static const struct sunxi_desc_pin sun8i_a83t_pins[] = {
SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 14),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand"), /* DQ6 */
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ6 */
SUNXI_FUNCTION(0x3, "mmc2")), /* D6 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 15),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand"), /* DQ7 */
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ7 */
SUNXI_FUNCTION(0x3, "mmc2")), /* D7 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 16),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand"), /* DQS */
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQS */
SUNXI_FUNCTION(0x3, "mmc2")), /* RST */
SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 17),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand")), /* CE2 */
+ SUNXI_FUNCTION(0x2, "nand0")), /* CE2 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 18),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand")), /* CE3 */
+ SUNXI_FUNCTION(0x2, "nand0")), /* CE3 */
/* Hole */
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 2),
SUNXI_FUNCTION(0x0, "gpio_in"),
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index d9327d7d56ee..dd928402af99 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -544,6 +544,8 @@ static int sunxi_pconf_set(struct pinctrl_dev *pctldev, unsigned pin,
struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
int i;
+ pin -= pctl->desc->pin_base;
+
for (i = 0; i < num_configs; i++) {
enum pin_config_param param;
unsigned long flags;
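The pinctrl core hands sunxi_pconf_set() a global pin number, while the driver's tables and registers are indexed from the controller's own base; subtracting pctl->desc->pin_base up front keeps every later lookup local. The same translation in isolation (hypothetical numbers):

/* Sketch: translate a global pin number into a controller-local index
 * before touching per-controller tables or registers. */
static unsigned int demo_local_pin(unsigned int pin, unsigned int pin_base)
{
        return pin - pin_base;  /* e.g. global pin 372, base 352 -> 20 */
}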
diff --git a/drivers/platform/mellanox/Kconfig b/drivers/platform/mellanox/Kconfig
index 72df4b8f4dd8..09c7829e95c4 100644
--- a/drivers/platform/mellanox/Kconfig
+++ b/drivers/platform/mellanox/Kconfig
@@ -85,7 +85,7 @@ config NVSW_SN2201
depends on I2C
depends on REGMAP_I2C
help
- This driver provides support for the Nvidia SN2201 platfom.
+ This driver provides support for the Nvidia SN2201 platform.
The SN2201 is a highly integrated one-rack-unit system with
L3 management switches. It has 48 x 1Gbps RJ45 + 4 x 100G QSFP28
ports in a compact 1RU form factor. The system also includes a
diff --git a/drivers/platform/mellanox/nvsw-sn2201.c b/drivers/platform/mellanox/nvsw-sn2201.c
index 0bcdc7c75007..7b9c107c17ce 100644
--- a/drivers/platform/mellanox/nvsw-sn2201.c
+++ b/drivers/platform/mellanox/nvsw-sn2201.c
@@ -326,7 +326,7 @@ static struct resource nvsw_sn2201_lpc_res[] = {
};
/* SN2201 I2C platform data. */
-struct mlxreg_core_hotplug_platform_data nvsw_sn2201_i2c_data = {
+static struct mlxreg_core_hotplug_platform_data nvsw_sn2201_i2c_data = {
.irq = NVSW_SN2201_CPLD_SYSIRQ,
};
@@ -890,6 +890,7 @@ nvsw_sn2201_create_static_devices(struct nvsw_sn2201 *nvsw_sn2201,
int size)
{
struct mlxreg_hotplug_device *dev = devs;
+ int ret;
int i;
/* Create I2C static devices. */
@@ -901,6 +902,7 @@ nvsw_sn2201_create_static_devices(struct nvsw_sn2201 *nvsw_sn2201,
dev->nr, dev->brdinfo->addr);
dev->adapter = NULL;
+ ret = PTR_ERR(dev->client);
goto fail_create_static_devices;
}
}
@@ -914,7 +916,7 @@ fail_create_static_devices:
dev->client = NULL;
dev->adapter = NULL;
}
- return IS_ERR(dev->client);
+ return ret;
}
static void nvsw_sn2201_destroy_static_devices(struct nvsw_sn2201 *nvsw_sn2201,
diff --git a/drivers/platform/mips/Kconfig b/drivers/platform/mips/Kconfig
index d421e1482395..6b51ad01f791 100644
--- a/drivers/platform/mips/Kconfig
+++ b/drivers/platform/mips/Kconfig
@@ -17,7 +17,7 @@ menuconfig MIPS_PLATFORM_DEVICES
if MIPS_PLATFORM_DEVICES
config CPU_HWMON
- tristate "Loongson-3 CPU HWMon Driver"
+ bool "Loongson-3 CPU HWMon Driver"
depends on MACH_LOONGSON64
select HWMON
default y
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index f08ad85683cb..bc4013e950ed 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -945,6 +945,8 @@ config PANASONIC_LAPTOP
tristate "Panasonic Laptop Extras"
depends on INPUT && ACPI
depends on BACKLIGHT_CLASS_DEVICE
+ depends on ACPI_VIDEO=n || ACPI_VIDEO
+ depends on SERIO_I8042 || SERIO_I8042 = n
select INPUT_SPARSEKMAP
help
This driver adds support for access to backlight control and hotkeys
diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c
index f11d18beac18..700eb19e8450 100644
--- a/drivers/platform/x86/amd-pmc.c
+++ b/drivers/platform/x86/amd-pmc.c
@@ -91,6 +91,8 @@
#define AMD_CPU_ID_PCO AMD_CPU_ID_RV
#define AMD_CPU_ID_CZN AMD_CPU_ID_RN
#define AMD_CPU_ID_YC 0x14B5
+#define AMD_CPU_ID_CB 0x14D8
+#define AMD_CPU_ID_PS 0x14E8
#define PMC_MSG_DELAY_MIN_US 50
#define RESPONSE_REGISTER_LOOP_MAX 20000
@@ -318,6 +320,8 @@ static int amd_pmc_idlemask_read(struct amd_pmc_dev *pdev, struct device *dev,
val = amd_pmc_reg_read(pdev, AMD_PMC_SCRATCH_REG_CZN);
break;
case AMD_CPU_ID_YC:
+ case AMD_CPU_ID_CB:
+ case AMD_CPU_ID_PS:
val = amd_pmc_reg_read(pdev, AMD_PMC_SCRATCH_REG_YC);
break;
default:
@@ -491,7 +495,8 @@ static void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev)
&amd_pmc_idlemask_fops);
/* Enable STB only when the module_param is set */
if (enable_stb) {
- if (dev->cpu_id == AMD_CPU_ID_YC)
+ if (dev->cpu_id == AMD_CPU_ID_YC || dev->cpu_id == AMD_CPU_ID_CB ||
+ dev->cpu_id == AMD_CPU_ID_PS)
debugfs_create_file("stb_read", 0644, dev->dbgfs_dir, dev,
&amd_pmc_stb_debugfs_fops_v2);
else
@@ -615,6 +620,8 @@ static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev)
return MSG_OS_HINT_PCO;
case AMD_CPU_ID_RN:
case AMD_CPU_ID_YC:
+ case AMD_CPU_ID_CB:
+ case AMD_CPU_ID_PS:
return MSG_OS_HINT_RN;
}
return -EINVAL;
@@ -735,6 +742,8 @@ static struct acpi_s2idle_dev_ops amd_pmc_s2idle_dev_ops = {
#endif
static const struct pci_device_id pmc_pci_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_CB) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_YC) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_CZN) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RN) },
@@ -877,7 +886,7 @@ static int amd_pmc_probe(struct platform_device *pdev)
mutex_init(&dev->lock);
- if (enable_stb && dev->cpu_id == AMD_CPU_ID_YC) {
+ if (enable_stb && (dev->cpu_id == AMD_CPU_ID_YC || dev->cpu_id == AMD_CPU_ID_CB)) {
err = amd_pmc_s2d_init(dev);
if (err)
return err;
@@ -915,6 +924,7 @@ static const struct acpi_device_id amd_pmc_acpi_ids[] = {
{"AMDI0005", 0},
{"AMDI0006", 0},
{"AMDI0007", 0},
+ {"AMDI0008", 0},
{"AMD0004", 0},
{"AMD0005", 0},
{ }
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index 57a07db659cb..478dd300b9c9 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -522,6 +522,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
{ KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
{ KE_KEY, 0x32, { KEY_MUTE } },
{ KE_KEY, 0x35, { KEY_SCREENLOCK } },
+ { KE_KEY, 0x38, { KEY_PROG3 } }, /* Armoury Crate */
{ KE_KEY, 0x40, { KEY_PREVIOUSSONG } },
{ KE_KEY, 0x41, { KEY_NEXTSONG } },
{ KE_KEY, 0x43, { KEY_STOPCD } }, /* Stop/Eject */
@@ -574,6 +575,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
{ KE_KEY, 0xA5, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + TV + HDMI */
{ KE_KEY, 0xA6, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + TV + HDMI */
{ KE_KEY, 0xA7, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + TV + HDMI */
+ { KE_KEY, 0xB3, { KEY_PROG4 } }, /* AURA */
{ KE_KEY, 0xB5, { KEY_CALC } },
{ KE_KEY, 0xC4, { KEY_KBDILLUMUP } },
{ KE_KEY, 0xC5, { KEY_KBDILLUMDOWN } },
diff --git a/drivers/platform/x86/barco-p50-gpio.c b/drivers/platform/x86/barco-p50-gpio.c
index 05534287bc26..8dd672339485 100644
--- a/drivers/platform/x86/barco-p50-gpio.c
+++ b/drivers/platform/x86/barco-p50-gpio.c
@@ -405,11 +405,14 @@ MODULE_DEVICE_TABLE(dmi, dmi_ids);
static int __init p50_module_init(void)
{
struct resource res = DEFINE_RES_IO(P50_GPIO_IO_PORT_BASE, P50_PORT_CMD + 1);
+ int ret;
if (!dmi_first_match(dmi_ids))
return -ENODEV;
- platform_driver_register(&p50_gpio_driver);
+ ret = platform_driver_register(&p50_gpio_driver);
+ if (ret)
+ return ret;
gpio_pdev = platform_device_register_simple(DRIVER_NAME, PLATFORM_DEVID_NONE, &res, 1);
if (IS_ERR(gpio_pdev)) {
diff --git a/drivers/platform/x86/gigabyte-wmi.c b/drivers/platform/x86/gigabyte-wmi.c
index 1ef606e3ef80..5e7e6659a849 100644
--- a/drivers/platform/x86/gigabyte-wmi.c
+++ b/drivers/platform/x86/gigabyte-wmi.c
@@ -140,6 +140,7 @@ static u8 gigabyte_wmi_detect_sensor_usability(struct wmi_device *wdev)
}}
static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M DS3H-CF"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M S2H V2"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE AX V2"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE"),
@@ -149,6 +150,7 @@ static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M AORUS PRO-P"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M DS3H"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B660 GAMING X DDR4"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B660I AORUS PRO DDR4"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z390 I AORUS PRO WIFI-CF"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z490 AORUS ELITE AC"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 AORUS ELITE"),
@@ -156,6 +158,7 @@ static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 GAMING X"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 I AORUS PRO WIFI"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 UD"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z690M AORUS ELITE AX DDR4"),
{ }
};
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 667f94bba905..bc7020e9df9e 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -38,6 +38,7 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
#define HPWMI_EVENT_GUID "95F24279-4D7B-4334-9387-ACCDC67EF61C"
#define HPWMI_BIOS_GUID "5FB7F034-2C63-45e9-BE91-3D44E2C707E4"
#define HP_OMEN_EC_THERMAL_PROFILE_OFFSET 0x95
+#define zero_if_sup(tmp) (zero_insize_support?0:sizeof(tmp)) // use when zero insize is required
/* DMI board names of devices that should use the omen specific path for
* thermal profiles.
@@ -88,6 +89,7 @@ enum hp_wmi_event_ids {
HPWMI_BACKLIT_KB_BRIGHTNESS = 0x0D,
HPWMI_PEAKSHIFT_PERIOD = 0x0F,
HPWMI_BATTERY_CHARGE_PERIOD = 0x10,
+ HPWMI_SANITIZATION_MODE = 0x17,
};
/*
@@ -220,6 +222,7 @@ static struct input_dev *hp_wmi_input_dev;
static struct platform_device *hp_wmi_platform_dev;
static struct platform_profile_handler platform_profile_handler;
static bool platform_profile_support;
+static bool zero_insize_support;
static struct rfkill *wifi_rfkill;
static struct rfkill *bluetooth_rfkill;
@@ -290,14 +293,16 @@ static int hp_wmi_perform_query(int query, enum hp_wmi_command command,
struct bios_return *bios_return;
union acpi_object *obj = NULL;
struct bios_args *args = NULL;
- int mid, actual_outsize, ret;
+ int mid, actual_insize, actual_outsize;
size_t bios_args_size;
+ int ret;
mid = encode_outsize_for_pvsz(outsize);
if (WARN_ON(mid < 0))
return mid;
- bios_args_size = struct_size(args, data, insize);
+ actual_insize = max(insize, 128);
+ bios_args_size = struct_size(args, data, actual_insize);
args = kmalloc(bios_args_size, GFP_KERNEL);
if (!args)
return -ENOMEM;
@@ -374,7 +379,7 @@ static int hp_wmi_read_int(int query)
int val = 0, ret;
ret = hp_wmi_perform_query(query, HPWMI_READ, &val,
- 0, sizeof(val));
+ zero_if_sup(val), sizeof(val));
if (ret)
return ret < 0 ? ret : -EINVAL;
@@ -410,7 +415,8 @@ static int hp_wmi_get_tablet_mode(void)
return -ENODEV;
ret = hp_wmi_perform_query(HPWMI_SYSTEM_DEVICE_MODE, HPWMI_READ,
- system_device_mode, 0, sizeof(system_device_mode));
+ system_device_mode, zero_if_sup(system_device_mode),
+ sizeof(system_device_mode));
if (ret < 0)
return ret;
@@ -497,7 +503,7 @@ static int hp_wmi_fan_speed_max_get(void)
int val = 0, ret;
ret = hp_wmi_perform_query(HPWMI_FAN_SPEED_MAX_GET_QUERY, HPWMI_GM,
- &val, 0, sizeof(val));
+ &val, zero_if_sup(val), sizeof(val));
if (ret)
return ret < 0 ? ret : -EINVAL;
@@ -509,7 +515,7 @@ static int __init hp_wmi_bios_2008_later(void)
{
int state = 0;
int ret = hp_wmi_perform_query(HPWMI_FEATURE_QUERY, HPWMI_READ, &state,
- 0, sizeof(state));
+ zero_if_sup(state), sizeof(state));
if (!ret)
return 1;
@@ -520,7 +526,7 @@ static int __init hp_wmi_bios_2009_later(void)
{
u8 state[128];
int ret = hp_wmi_perform_query(HPWMI_FEATURE2_QUERY, HPWMI_READ, &state,
- 0, sizeof(state));
+ zero_if_sup(state), sizeof(state));
if (!ret)
return 1;
@@ -598,7 +604,7 @@ static int hp_wmi_rfkill2_refresh(void)
int err, i;
err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, HPWMI_READ, &state,
- 0, sizeof(state));
+ zero_if_sup(state), sizeof(state));
if (err)
return err;
@@ -848,6 +854,8 @@ static void hp_wmi_notify(u32 value, void *context)
break;
case HPWMI_BATTERY_CHARGE_PERIOD:
break;
+ case HPWMI_SANITIZATION_MODE:
+ break;
default:
pr_info("Unknown event_id - %d - 0x%x\n", event_id, event_data);
break;
@@ -1007,7 +1015,7 @@ static int __init hp_wmi_rfkill2_setup(struct platform_device *device)
int err, i;
err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, HPWMI_READ, &state,
- 0, sizeof(state));
+ zero_if_sup(state), sizeof(state));
if (err)
return err < 0 ? err : -EINVAL;
@@ -1483,11 +1491,15 @@ static int __init hp_wmi_init(void)
{
int event_capable = wmi_has_guid(HPWMI_EVENT_GUID);
int bios_capable = wmi_has_guid(HPWMI_BIOS_GUID);
- int err;
+ int err, tmp = 0;
if (!bios_capable && !event_capable)
return -ENODEV;
+ if (hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, HPWMI_READ, &tmp,
+ sizeof(tmp), sizeof(tmp)) == HPWMI_RET_INVALID_PARAMETERS)
+ zero_insize_support = true;
+
if (event_capable) {
err = hp_wmi_input_setup();
if (err)
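Newer HP firmware rejects queries whose input size is zero, so init now probes once with a non-zero insize: if the firmware answers HPWMI_RET_INVALID_PARAMETERS, passing zero is known to work and zero_if_sup() resolves to 0; otherwise callers fall back to sizeof() of their buffer. A condensed sketch of the detection, with demo_perform_query() and the DEMO_* codes standing in as hypothetical names:

static int demo_perform_query(int query, void *buf, int insize, int outsize);

#define DEMO_HARDWARE_QUERY             0x4     /* hypothetical values */
#define DEMO_RET_INVALID_PARAMETERS     0x5

static bool zero_insize;        /* firmware accepts insize == 0 */

static void demo_detect_insize(void)
{
        int tmp = 0;

        /* Deliberately pass a non-zero insize; firmware that requires
         * zero rejects the call with INVALID_PARAMETERS. */
        if (demo_perform_query(DEMO_HARDWARE_QUERY, &tmp, sizeof(tmp),
                               sizeof(tmp)) == DEMO_RET_INVALID_PARAMETERS)
                zero_insize = true;
}

#define zero_if_sup(x) (zero_insize ? 0 : sizeof(x))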
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 3ccb7b71dfb1..abd0c81d62c4 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -152,6 +152,10 @@ static bool no_bt_rfkill;
module_param(no_bt_rfkill, bool, 0444);
MODULE_PARM_DESC(no_bt_rfkill, "No rfkill for bluetooth.");
+static bool allow_v4_dytc;
+module_param(allow_v4_dytc, bool, 0444);
+MODULE_PARM_DESC(allow_v4_dytc, "Enable DYTC version 4 platform-profile support.");
+
/*
* ACPI Helpers
*/
@@ -871,12 +875,18 @@ static void dytc_profile_refresh(struct ideapad_private *priv)
static const struct dmi_system_id ideapad_dytc_v4_allow_table[] = {
{
/* Ideapad 5 Pro 16ACH6 */
- .ident = "LENOVO 82L5",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "82L5")
}
},
+ {
+ /* Ideapad 5 15ITL05 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "IdeaPad 5 15ITL05")
+ }
+ },
{}
};
@@ -901,13 +911,16 @@ static int ideapad_dytc_profile_init(struct ideapad_private *priv)
dytc_version = (output >> DYTC_QUERY_REV_BIT) & 0xF;
- if (dytc_version < 5) {
- if (dytc_version < 4 || !dmi_check_system(ideapad_dytc_v4_allow_table)) {
- dev_info(&priv->platform_device->dev,
- "DYTC_VERSION is less than 4 or is not allowed: %d\n",
- dytc_version);
- return -ENODEV;
- }
+ if (dytc_version < 4) {
+ dev_info(&priv->platform_device->dev, "DYTC_VERSION < 4 is not supported\n");
+ return -ENODEV;
+ }
+
+ if (dytc_version < 5 &&
+ !(allow_v4_dytc || dmi_check_system(ideapad_dytc_v4_allow_table))) {
+ dev_info(&priv->platform_device->dev,
+ "DYTC_VERSION 4 support may not work. Pass ideapad_laptop.allow_v4_dytc=Y on the kernel commandline to enable\n");
+ return -ENODEV;
}
priv->dytc = kzalloc(sizeof(*priv->dytc), GFP_KERNEL);
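DYTC version 4 profile support is known-good only on a few machines, so the driver now gates it twice: a DMI allow-list for tested models, plus the allow_v4_dytc module parameter as a user override for untested ones. The combined gate, restated as a sketch rather than the exact driver flow:

#include <linux/dmi.h>

static bool demo_v4_allowed(int dytc_version, bool allow_v4_param,
                            const struct dmi_system_id *allow_table)
{
        if (dytc_version >= 5)
                return true;    /* v5+ is always supported */
        if (dytc_version < 4)
                return false;   /* too old, bail out */
        return allow_v4_param || dmi_check_system(allow_table);
}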
diff --git a/drivers/platform/x86/intel/atomisp2/led.c b/drivers/platform/x86/intel/atomisp2/led.c
index 5935dfca166f..10077a61d8c5 100644
--- a/drivers/platform/x86/intel/atomisp2/led.c
+++ b/drivers/platform/x86/intel/atomisp2/led.c
@@ -50,7 +50,8 @@ static const struct dmi_system_id atomisp2_led_systems[] __initconst = {
{
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TA"),
+ /* Non exact match to also match T100TAF */
+ DMI_MATCH(DMI_PRODUCT_NAME, "T100TA"),
},
.driver_data = &asus_t100ta_lookup,
},
diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c
index 216d31e3403d..79cff1fc675c 100644
--- a/drivers/platform/x86/intel/hid.c
+++ b/drivers/platform/x86/intel/hid.c
@@ -122,6 +122,12 @@ static const struct dmi_system_id dmi_vgbs_allow_list[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "HP Spectre x360 Convertible 15-df0xxx"),
},
},
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go"),
+ },
+ },
{ }
};
diff --git a/drivers/platform/x86/intel/ifs/Kconfig b/drivers/platform/x86/intel/ifs/Kconfig
index 7ce896434b8f..c341a27cc1a3 100644
--- a/drivers/platform/x86/intel/ifs/Kconfig
+++ b/drivers/platform/x86/intel/ifs/Kconfig
@@ -1,6 +1,9 @@
config INTEL_IFS
tristate "Intel In Field Scan"
depends on X86 && CPU_SUP_INTEL && 64BIT && SMP
+ # Discussion on the list has shown that the sysfs API needs a bit
+ # more work, mark this as broken for now
+ depends on BROKEN
select INTEL_IFS_DEVICE
help
Enable support for the In Field Scan capability in select
diff --git a/drivers/platform/x86/intel/pmc/core.c b/drivers/platform/x86/intel/pmc/core.c
index edaf22e5ae98..a1fe1e0dcf4a 100644
--- a/drivers/platform/x86/intel/pmc/core.c
+++ b/drivers/platform/x86/intel/pmc/core.c
@@ -1911,7 +1911,9 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = {
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, &icl_reg_map),
X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, &tgl_reg_map),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &tgl_reg_map),
+ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N, &tgl_reg_map),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &adl_reg_map),
+ X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, &tgl_reg_map),
{}
};
diff --git a/drivers/platform/x86/intel/pmt/crashlog.c b/drivers/platform/x86/intel/pmt/crashlog.c
index 34daf9df168b..ace1239bc0a0 100644
--- a/drivers/platform/x86/intel/pmt/crashlog.c
+++ b/drivers/platform/x86/intel/pmt/crashlog.c
@@ -282,7 +282,7 @@ static int pmt_crashlog_probe(struct auxiliary_device *auxdev,
auxiliary_set_drvdata(auxdev, priv);
for (i = 0; i < intel_vsec_dev->num_resources; i++) {
- struct intel_pmt_entry *entry = &priv->entry[i].entry;
+ struct intel_pmt_entry *entry = &priv->entry[priv->num_entries].entry;
ret = intel_pmt_dev_create(entry, &pmt_crashlog_ns, intel_vsec_dev, i);
if (ret < 0)
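intel_pmt_dev_create() can tell the caller to skip a disabled resource, so the output array has to be indexed by the count of entries actually created rather than by the loop counter, or a skipped slot leaves a hole that later teardown walks into. A sketch of the compaction pattern (types and helpers here are hypothetical):

struct demo_entry;
/* returns <0 on error, >0 if the resource should be skipped, 0 on success */
static int demo_create(struct demo_entry *e, int idx);

static int demo_fill(struct demo_entry *entries, int nres, int *num_entries)
{
        int i, ret;

        for (i = 0; i < nres; i++) {
                /* Write into the next unused slot, not slot i. */
                ret = demo_create(&entries[*num_entries], i);
                if (ret < 0)
                        return ret;
                if (ret)                /* resource skipped */
                        continue;
                (*num_entries)++;
        }
        return 0;
}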
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
index 37850d07987d..615e39cbbbf1 100644
--- a/drivers/platform/x86/panasonic-laptop.c
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -119,20 +119,22 @@
* - v0.1 start from toshiba_acpi driver written by John Belmonte
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/types.h>
+#include <linux/acpi.h>
#include <linux/backlight.h>
#include <linux/ctype.h>
-#include <linux/seq_file.h>
-#include <linux/uaccess.h>
-#include <linux/slab.h>
-#include <linux/acpi.h>
+#include <linux/i8042.h>
+#include <linux/init.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
-
+#include <linux/seq_file.h>
+#include <linux/serio.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <acpi/video.h>
MODULE_AUTHOR("Hiroshi Miura <miura@da-cha.org>");
MODULE_AUTHOR("David Bronaugh <dbronaugh@linuxboxen.org>");
@@ -241,6 +243,42 @@ struct pcc_acpi {
struct platform_device *platform;
};
+/*
+ * On some Panasonic models the volume up / down / mute keys send duplicate
+ * keypress events over the PS/2 kbd interface, filter these out.
+ */
+static bool panasonic_i8042_filter(unsigned char data, unsigned char str,
+ struct serio *port)
+{
+ static bool extended;
+
+ if (str & I8042_STR_AUXDATA)
+ return false;
+
+ if (data == 0xe0) {
+ extended = true;
+ return true;
+ } else if (extended) {
+ extended = false;
+
+ switch (data & 0x7f) {
+ case 0x20: /* e0 20 / e0 a0, Volume Mute press / release */
+ case 0x2e: /* e0 2e / e0 ae, Volume Down press / release */
+ case 0x30: /* e0 30 / e0 b0, Volume Up press / release */
+ return true;
+ default:
+ /*
+ * Report the previously filtered e0 before continuing
+ * with the next non-filtered byte.
+ */
+ serio_interrupt(port, 0xe0, 0);
+ return false;
+ }
+ }
+
+ return false;
+}
+
/* method access functions */
static int acpi_pcc_write_sset(struct pcc_acpi *pcc, int func, int val)
{
@@ -762,6 +800,8 @@ static void acpi_pcc_generate_keyinput(struct pcc_acpi *pcc)
struct input_dev *hotk_input_dev = pcc->input_dev;
int rc;
unsigned long long result;
+ unsigned int key;
+ unsigned int updown;
rc = acpi_evaluate_integer(pcc->handle, METHOD_HKEY_QUERY,
NULL, &result);
@@ -770,20 +810,27 @@ static void acpi_pcc_generate_keyinput(struct pcc_acpi *pcc)
return;
}
+ key = result & 0xf;
+ updown = result & 0x80; /* 0x80 == key down; 0x00 = key up */
+
/* hack: some firmware sends no key down for sleep / hibernate */
- if ((result & 0xf) == 0x7 || (result & 0xf) == 0xa) {
- if (result & 0x80)
+ if (key == 7 || key == 10) {
+ if (updown)
sleep_keydown_seen = 1;
if (!sleep_keydown_seen)
sparse_keymap_report_event(hotk_input_dev,
- result & 0xf, 0x80, false);
+ key, 0x80, false);
}
- if ((result & 0xf) == 0x7 || (result & 0xf) == 0x9 || (result & 0xf) == 0xa) {
- if (!sparse_keymap_report_event(hotk_input_dev,
- result & 0xf, result & 0x80, false))
- pr_err("Unknown hotkey event: 0x%04llx\n", result);
- }
+ /*
+ * Don't report brightness key-presses if they are also reported
+ * by the ACPI video bus.
+ */
+ if ((key == 1 || key == 2) && acpi_video_handles_brightness_key_presses())
+ return;
+
+ if (!sparse_keymap_report_event(hotk_input_dev, key, updown, false))
+ pr_err("Unknown hotkey event: 0x%04llx\n", result);
}
static void acpi_pcc_hotkey_notify(struct acpi_device *device, u32 event)
@@ -997,6 +1044,7 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device)
pcc->platform = NULL;
}
+ i8042_install_filter(panasonic_i8042_filter);
return 0;
out_platform:
@@ -1020,6 +1068,8 @@ static int acpi_pcc_hotkey_remove(struct acpi_device *device)
if (!device || !pcc)
return -EINVAL;
+ i8042_remove_filter(panasonic_i8042_filter);
+
if (pcc->platform) {
device_remove_file(&pcc->platform->dev, &dev_attr_cdpower);
platform_device_unregister(pcc->platform);
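The filter added above swallows the e0-prefixed make/break codes for the duplicated volume keys; the subtle part is remembering a consumed e0 and re-injecting it with serio_interrupt() when the next byte turns out not to be one of ours. The lifecycle side is simply pairing install and remove, as in this pass-through skeleton (a sketch, not the driver's filter):

#include <linux/i8042.h>
#include <linux/serio.h>

/* Sketch: an i8042 filter; returning true consumes the byte before
 * the keyboard driver sees it. */
static bool demo_i8042_filter(unsigned char data, unsigned char str,
                              struct serio *port)
{
        if (str & I8042_STR_AUXDATA)    /* mouse bytes: not ours */
                return false;
        return false;                   /* keep everything by default */
}

/* probe:  i8042_install_filter(demo_i8042_filter);
 * remove: i8042_remove_filter(demo_i8042_filter); */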
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index e6cb4a14cdd4..a8b383051528 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -4529,6 +4529,7 @@ static void thinkpad_acpi_amd_s2idle_restore(void)
iounmap(addr);
cleanup_resource:
release_resource(res);
+ kfree(res);
}
static struct acpi_s2idle_dev_ops thinkpad_acpi_s2idle_dev_ops = {
@@ -10299,21 +10300,15 @@ static struct ibm_struct proxsensor_driver_data = {
#define DYTC_DISABLE_CQL DYTC_SET_COMMAND(DYTC_FUNCTION_CQL, DYTC_MODE_MMC_BALANCE, 0)
#define DYTC_ENABLE_CQL DYTC_SET_COMMAND(DYTC_FUNCTION_CQL, DYTC_MODE_MMC_BALANCE, 1)
-enum dytc_profile_funcmode {
- DYTC_FUNCMODE_NONE = 0,
- DYTC_FUNCMODE_MMC,
- DYTC_FUNCMODE_PSC,
-};
-
-static enum dytc_profile_funcmode dytc_profile_available;
static enum platform_profile_option dytc_current_profile;
static atomic_t dytc_ignore_event = ATOMIC_INIT(0);
static DEFINE_MUTEX(dytc_mutex);
+static int dytc_capabilities;
static bool dytc_mmc_get_available;
static int convert_dytc_to_profile(int dytcmode, enum platform_profile_option *profile)
{
- if (dytc_profile_available == DYTC_FUNCMODE_MMC) {
+ if (dytc_capabilities & BIT(DYTC_FC_MMC)) {
switch (dytcmode) {
case DYTC_MODE_MMC_LOWPOWER:
*profile = PLATFORM_PROFILE_LOW_POWER;
@@ -10330,7 +10325,7 @@ static int convert_dytc_to_profile(int dytcmode, enum platform_profile_option *p
}
return 0;
}
- if (dytc_profile_available == DYTC_FUNCMODE_PSC) {
+ if (dytc_capabilities & BIT(DYTC_FC_PSC)) {
switch (dytcmode) {
case DYTC_MODE_PSC_LOWPOWER:
*profile = PLATFORM_PROFILE_LOW_POWER;
@@ -10352,21 +10347,21 @@ static int convert_profile_to_dytc(enum platform_profile_option profile, int *pe
{
switch (profile) {
case PLATFORM_PROFILE_LOW_POWER:
- if (dytc_profile_available == DYTC_FUNCMODE_MMC)
+ if (dytc_capabilities & BIT(DYTC_FC_MMC))
*perfmode = DYTC_MODE_MMC_LOWPOWER;
- else if (dytc_profile_available == DYTC_FUNCMODE_PSC)
+ else if (dytc_capabilities & BIT(DYTC_FC_PSC))
*perfmode = DYTC_MODE_PSC_LOWPOWER;
break;
case PLATFORM_PROFILE_BALANCED:
- if (dytc_profile_available == DYTC_FUNCMODE_MMC)
+ if (dytc_capabilities & BIT(DYTC_FC_MMC))
*perfmode = DYTC_MODE_MMC_BALANCE;
- else if (dytc_profile_available == DYTC_FUNCMODE_PSC)
+ else if (dytc_capabilities & BIT(DYTC_FC_PSC))
*perfmode = DYTC_MODE_PSC_BALANCE;
break;
case PLATFORM_PROFILE_PERFORMANCE:
- if (dytc_profile_available == DYTC_FUNCMODE_MMC)
+ if (dytc_capabilities & BIT(DYTC_FC_MMC))
*perfmode = DYTC_MODE_MMC_PERFORM;
- else if (dytc_profile_available == DYTC_FUNCMODE_PSC)
+ else if (dytc_capabilities & BIT(DYTC_FC_PSC))
*perfmode = DYTC_MODE_PSC_PERFORM;
break;
default: /* Unknown profile */
@@ -10445,7 +10440,7 @@ static int dytc_profile_set(struct platform_profile_handler *pprof,
if (err)
goto unlock;
- if (dytc_profile_available == DYTC_FUNCMODE_MMC) {
+ if (dytc_capabilities & BIT(DYTC_FC_MMC)) {
if (profile == PLATFORM_PROFILE_BALANCED) {
/*
* To get back to balanced mode we need to issue a reset command.
@@ -10464,7 +10459,7 @@ static int dytc_profile_set(struct platform_profile_handler *pprof,
goto unlock;
}
}
- if (dytc_profile_available == DYTC_FUNCMODE_PSC) {
+ if (dytc_capabilities & BIT(DYTC_FC_PSC)) {
err = dytc_command(DYTC_SET_COMMAND(DYTC_FUNCTION_PSC, perfmode, 1), &output);
if (err)
goto unlock;
@@ -10483,12 +10478,12 @@ static void dytc_profile_refresh(void)
int perfmode;
mutex_lock(&dytc_mutex);
- if (dytc_profile_available == DYTC_FUNCMODE_MMC) {
+ if (dytc_capabilities & BIT(DYTC_FC_MMC)) {
if (dytc_mmc_get_available)
err = dytc_command(DYTC_CMD_MMC_GET, &output);
else
err = dytc_cql_command(DYTC_CMD_GET, &output);
- } else if (dytc_profile_available == DYTC_FUNCMODE_PSC)
+ } else if (dytc_capabilities & BIT(DYTC_FC_PSC))
err = dytc_command(DYTC_CMD_GET, &output);
mutex_unlock(&dytc_mutex);
@@ -10517,7 +10512,6 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
set_bit(PLATFORM_PROFILE_BALANCED, dytc_profile.choices);
set_bit(PLATFORM_PROFILE_PERFORMANCE, dytc_profile.choices);
- dytc_profile_available = DYTC_FUNCMODE_NONE;
err = dytc_command(DYTC_CMD_QUERY, &output);
if (err)
return err;
@@ -10530,13 +10524,12 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
return -ENODEV;
/* Check what capabilities are supported */
- err = dytc_command(DYTC_CMD_FUNC_CAP, &output);
+ err = dytc_command(DYTC_CMD_FUNC_CAP, &dytc_capabilities);
if (err)
return err;
- if (output & BIT(DYTC_FC_MMC)) { /* MMC MODE */
- dytc_profile_available = DYTC_FUNCMODE_MMC;
-
+ if (dytc_capabilities & BIT(DYTC_FC_MMC)) { /* MMC MODE */
+ pr_debug("MMC is supported\n");
/*
* Check if MMC_GET functionality available
* Version > 6 and return success from MMC_GET command
@@ -10547,8 +10540,13 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
if (!err && ((output & DYTC_ERR_MASK) == DYTC_ERR_SUCCESS))
dytc_mmc_get_available = true;
}
- } else if (output & BIT(DYTC_FC_PSC)) { /* PSC MODE */
- dytc_profile_available = DYTC_FUNCMODE_PSC;
+ } else if (dytc_capabilities & BIT(DYTC_FC_PSC)) { /* PSC MODE */
+ /* Support for this only works on AMD platforms */
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
dbg_printk(TPACPI_DBG_INIT, "PSC not supported on Intel platforms\n");
+ return -ENODEV;
+ }
+ pr_debug("PSC is supported\n");
} else {
dbg_printk(TPACPI_DBG_INIT, "No DYTC support available\n");
return -ENODEV;
@@ -10574,7 +10572,6 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
static void dytc_profile_exit(void)
{
- dytc_profile_available = DYTC_FUNCMODE_NONE;
platform_profile_remove();
}
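Replacing the single-valued funcmode enum with the raw capabilities word lets the driver test MMC and PSC independently with BIT(); a platform advertising both no longer forces an either/or choice at init. In miniature (the bit numbers below are placeholders, not the driver's DYTC_FC_* values):

#include <linux/bits.h>

#define DEMO_FC_MMC     27      /* hypothetical capability bit numbers */
#define DEMO_FC_PSC     29

static int demo_caps;           /* raw word from the FUNC_CAP query */

static bool demo_has(int fc)
{
        return demo_caps & BIT(fc);
}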
diff --git a/drivers/platform/x86/x86-android-tablets.c b/drivers/platform/x86/x86-android-tablets.c
index f446be72e539..480375977435 100644
--- a/drivers/platform/x86/x86-android-tablets.c
+++ b/drivers/platform/x86/x86-android-tablets.c
@@ -27,8 +27,8 @@
#include <linux/pinctrl/machine.h>
#include <linux/platform_data/lp855x.h>
#include <linux/platform_device.h>
-#include <linux/pm.h>
#include <linux/power/bq24190_charger.h>
+#include <linux/reboot.h>
#include <linux/rmi.h>
#include <linux/serdev.h>
#include <linux/spi/spi.h>
@@ -889,6 +889,7 @@ static const struct pinctrl_map lenovo_yoga_tab2_830_1050_codec_pinctrl_map =
"INT33FC:02", "pmu_clk2_grp", "pmu_clk");
static struct pinctrl *lenovo_yoga_tab2_830_1050_codec_pinctrl;
+static struct sys_off_handler *lenovo_yoga_tab2_830_1050_sys_off_handler;
static int __init lenovo_yoga_tab2_830_1050_init_codec(void)
{
@@ -933,9 +934,11 @@ err_put_device:
* followed by a normal 3 second press to recover. Avoid this by doing an EFI
* poweroff instead.
*/
-static void lenovo_yoga_tab2_830_1050_power_off(void)
+static int lenovo_yoga_tab2_830_1050_power_off(struct sys_off_data *data)
{
efi.reset_system(EFI_RESET_SHUTDOWN, EFI_SUCCESS, 0, NULL);
+
+ return NOTIFY_DONE;
}
static int __init lenovo_yoga_tab2_830_1050_init(void)
@@ -950,13 +953,19 @@ static int __init lenovo_yoga_tab2_830_1050_init(void)
if (ret)
return ret;
- pm_power_off = lenovo_yoga_tab2_830_1050_power_off;
+ /* SYS_OFF_PRIO_FIRMWARE + 1 so that it runs before acpi_power_off */
+ lenovo_yoga_tab2_830_1050_sys_off_handler =
+ register_sys_off_handler(SYS_OFF_MODE_POWER_OFF, SYS_OFF_PRIO_FIRMWARE + 1,
+ lenovo_yoga_tab2_830_1050_power_off, NULL);
+ if (IS_ERR(lenovo_yoga_tab2_830_1050_sys_off_handler))
+ return PTR_ERR(lenovo_yoga_tab2_830_1050_sys_off_handler);
+
return 0;
}
static void lenovo_yoga_tab2_830_1050_exit(void)
{
- pm_power_off = NULL; /* Just turn poweroff into halt on module unload */
+ unregister_sys_off_handler(lenovo_yoga_tab2_830_1050_sys_off_handler);
if (lenovo_yoga_tab2_830_1050_codec_pinctrl) {
pinctrl_put(lenovo_yoga_tab2_830_1050_codec_pinctrl);
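Instead of hijacking the global pm_power_off pointer, the tablet code now registers a power-off handler at SYS_OFF_PRIO_FIRMWARE + 1 so it runs before acpi_power_off, and unregistering restores whatever was there before, something the bare pointer assignment could not guarantee. A sketch using the same API:

#include <linux/err.h>
#include <linux/reboot.h>

static struct sys_off_handler *demo_handler;

static int demo_power_off(struct sys_off_data *data)
{
        /* firmware-specific poweroff would go here */
        return NOTIFY_DONE;
}

static int demo_register(void)
{
        demo_handler = register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
                                                SYS_OFF_PRIO_FIRMWARE + 1,
                                                demo_power_off, NULL);
        return PTR_ERR_OR_ZERO(demo_handler);
}

/* teardown: unregister_sys_off_handler(demo_handler); */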
diff --git a/drivers/power/reset/arm-versatile-reboot.c b/drivers/power/reset/arm-versatile-reboot.c
index 08d0a07b58ef..c7624d7611a7 100644
--- a/drivers/power/reset/arm-versatile-reboot.c
+++ b/drivers/power/reset/arm-versatile-reboot.c
@@ -146,6 +146,7 @@ static int __init versatile_reboot_probe(void)
versatile_reboot_type = (enum versatile_reboot)reboot_id->data;
syscon_regmap = syscon_node_to_regmap(np);
+ of_node_put(np);
if (IS_ERR(syscon_regmap))
return PTR_ERR(syscon_regmap);
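syscon_node_to_regmap() takes its own reference on the node, so the caller's reference from the earlier lookup must be dropped as soon as the regmap is in hand; doing it before the IS_ERR() check covers both the success and error paths. The idiom, wrapped up as a sketch:

#include <linux/mfd/syscon.h>
#include <linux/of.h>

static struct regmap *demo_get_regmap(struct device_node *np)
{
        struct regmap *map = syscon_node_to_regmap(np);

        of_node_put(np);        /* balances the caller's of_find_*() get */
        return map;             /* may be an ERR_PTR; caller checks */
}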
diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c
index ec8a404d71b4..4339fa9ff009 100644
--- a/drivers/power/supply/ab8500_fg.c
+++ b/drivers/power/supply/ab8500_fg.c
@@ -3148,6 +3148,7 @@ static int ab8500_fg_probe(struct platform_device *pdev)
ret = ab8500_fg_init_hw_registers(di);
if (ret) {
dev_err(dev, "failed to initialize registers\n");
+ destroy_workqueue(di->fg_wq);
return ret;
}
@@ -3159,6 +3160,7 @@ static int ab8500_fg_probe(struct platform_device *pdev)
di->fg_psy = devm_power_supply_register(dev, &ab8500_fg_desc, &psy_cfg);
if (IS_ERR(di->fg_psy)) {
dev_err(dev, "failed to register FG psy\n");
+ destroy_workqueue(di->fg_wq);
return PTR_ERR(di->fg_psy);
}
@@ -3174,8 +3176,10 @@ static int ab8500_fg_probe(struct platform_device *pdev)
/* Register primary interrupt handlers */
for (i = 0; i < ARRAY_SIZE(ab8500_fg_irq); i++) {
irq = platform_get_irq_byname(pdev, ab8500_fg_irq[i].name);
- if (irq < 0)
+ if (irq < 0) {
+ destroy_workqueue(di->fg_wq);
return irq;
+ }
ret = devm_request_threaded_irq(dev, irq, NULL,
ab8500_fg_irq[i].isr,
@@ -3185,6 +3189,7 @@ static int ab8500_fg_probe(struct platform_device *pdev)
if (ret != 0) {
dev_err(dev, "failed to request %s IRQ %d: %d\n",
ab8500_fg_irq[i].name, irq, ret);
+ destroy_workqueue(di->fg_wq);
return ret;
}
dev_dbg(dev, "Requested %s IRQ %d: %d\n",
@@ -3200,6 +3205,7 @@ static int ab8500_fg_probe(struct platform_device *pdev)
ret = ab8500_fg_sysfs_init(di);
if (ret) {
dev_err(dev, "failed to create sysfs entry\n");
+ destroy_workqueue(di->fg_wq);
return ret;
}
@@ -3207,6 +3213,7 @@ static int ab8500_fg_probe(struct platform_device *pdev)
if (ret) {
dev_err(dev, "failed to create FG psy\n");
ab8500_fg_sysfs_exit(di);
+ destroy_workqueue(di->fg_wq);
return ret;
}
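alloc_workqueue() is not devm-managed, so every error return after it must destroy the queue; the fix above repeats destroy_workqueue() on each path. The conventional alternative is a single unwind label, sketched here under the assumption of one hypothetical setup step:

#include <linux/workqueue.h>

static int demo_init_hw(void);  /* hypothetical setup step */

static int demo_probe(void)
{
        struct workqueue_struct *wq;
        int ret;

        wq = alloc_workqueue("demo_fg", WQ_MEM_RECLAIM, 0);
        if (!wq)
                return -ENOMEM;

        ret = demo_init_hw();
        if (ret)
                goto err_wq;

        return 0;

err_wq:
        destroy_workqueue(wq);  /* single unwind point */
        return ret;
}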
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index fad5890c899e..470253c337c7 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -846,17 +846,17 @@ int power_supply_temp2resist_simple(struct power_supply_resistance_temp_table *t
{
int i, high, low;
- /* Break loop at table_len - 1 because that is the highest index */
- for (i = 0; i < table_len - 1; i++)
+ for (i = 0; i < table_len; i++)
if (temp > table[i].temp)
break;
/* The library function will deal with high == low */
- if ((i == 0) || (i == (table_len - 1)))
- high = i;
+ if (i == 0)
+ high = low = i;
+ else if (i == table_len)
+ high = low = i - 1;
else
- high = i - 1;
- low = i;
+ high = (low = i) - 1;
return fixp_linear_interpolate(table[low].temp,
table[low].resistance,
@@ -958,17 +958,17 @@ int power_supply_ocv2cap_simple(struct power_supply_battery_ocv_table *table,
{
int i, high, low;
- /* Break loop at table_len - 1 because that is the highest index */
- for (i = 0; i < table_len - 1; i++)
+ for (i = 0; i < table_len; i++)
if (ocv > table[i].ocv)
break;
/* The library function will deal with high == low */
- if ((i == 0) || (i == (table_len - 1)))
- high = i - 1;
+ if (i == 0)
+ high = low = i;
+ else if (i == table_len)
+ high = low = i - 1;
else
- high = i; /* i.e. i == 0 */
- low = i;
+ high = (low = i) - 1;
return fixp_linear_interpolate(table[low].ocv,
table[low].capacity,
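Both lookup helpers share one shape: scan a descending table for the first entry strictly below the input, then clamp: before the first entry take index 0 for both ends, past the last take table_len - 1, otherwise interpolate between i - 1 and i. fixp_linear_interpolate() already tolerates high == low, so the clamped cases degenerate to returning the end value. The same logic in isolation, with a hypothetical table type:

#include <linux/fixp-arith.h>

struct demo_pt { int x, y; };

/* Sketch: clamped linear interpolation over a table sorted by
 * descending x, mirroring the fixed helpers above. */
static int demo_interp(const struct demo_pt *t, int len, int x)
{
        int i, lo, hi;

        for (i = 0; i < len; i++)
                if (x > t[i].x)
                        break;

        if (i == 0)
                hi = lo = 0;            /* above the table: clamp */
        else if (i == len)
                hi = lo = len - 1;      /* below the table: clamp */
        else {
                lo = i;
                hi = i - 1;
        }

        return fixp_linear_interpolate(t[lo].x, t[lo].y,
                                       t[hi].x, t[hi].y, x);
}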
diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c
index 7dff94a2eb7e..ef6e47d025ca 100644
--- a/drivers/regulator/qcom_smd-regulator.c
+++ b/drivers/regulator/qcom_smd-regulator.c
@@ -723,19 +723,19 @@ static const struct regulator_desc pms405_pldo600 = {
static const struct regulator_desc mp5496_smpa2 = {
.linear_ranges = (struct linear_range[]) {
- REGULATOR_LINEAR_RANGE(725000, 0, 27, 12500),
+ REGULATOR_LINEAR_RANGE(600000, 0, 127, 12500),
},
.n_linear_ranges = 1,
- .n_voltages = 28,
+ .n_voltages = 128,
.ops = &rpm_mp5496_ops,
};
static const struct regulator_desc mp5496_ldoa2 = {
.linear_ranges = (struct linear_range[]) {
- REGULATOR_LINEAR_RANGE(1800000, 0, 60, 25000),
+ REGULATOR_LINEAR_RANGE(800000, 0, 127, 25000),
},
.n_linear_ranges = 1,
- .n_voltages = 61,
+ .n_voltages = 128,
.ops = &rpm_mp5496_ops,
};
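REGULATOR_LINEAR_RANGE(min_uV, min_sel, max_sel, step_uV) must agree with n_voltages = max_sel + 1. The corrected SMPA2 range therefore spans 600000 + 127 * 12500 = 2187500 uV across 128 selectors, where the old 28-selector table topped out at 725000 + 27 * 12500 = 1062500 uV. The selector-to-voltage relation, spelled out as a sketch:

/* Sketch: voltage for a selector in a single linear range,
 * using the corrected mp5496 SMPA2 numbers from above. */
static int demo_sel_to_uV(unsigned int sel)
{
        /* 600 mV base, 12.5 mV steps, selectors 0..127
         * -> 600000..2187500 uV */
        return 600000 + sel * 12500;
}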
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index cb2491761958..ae1d6ee382a5 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -60,7 +60,7 @@ static LIST_HEAD(sclp_reg_list);
/* List of queued requests. */
static LIST_HEAD(sclp_req_queue);
-/* Data for read and and init requests. */
+/* Data for read and init requests. */
static struct sclp_req sclp_read_req;
static struct sclp_req sclp_init_req;
static void *sclp_read_sccb;
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 1b85f21c151b..95cfdeab5baf 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -1438,7 +1438,7 @@ static int __verify_queue_reservations(struct device_driver *drv, void *data)
if (ap_drv->in_use) {
rc = ap_drv->in_use(ap_perms.apm, newaqm);
if (rc)
- return -EBUSY;
+ rc = -EBUSY;
}
/* release the driver's module */
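__verify_queue_reservations() runs as a per-driver iterator callback, where returning non-zero both stops the iteration and propagates the value; the old early return therefore skipped the module release that follows (the "/* release the driver's module */" context above). Storing rc instead lets the function fall through to drop the reference and then report the error. The shape of the fix, sketched with a hypothetical check:

#include <linux/device.h>
#include <linux/module.h>

static int demo_in_use(struct device_driver *drv, void *data);

static int demo_check_drv(struct device_driver *drv, void *data)
{
        int rc = 0;

        if (!try_module_get(drv->owner))
                return 0;

        if (demo_in_use(drv, data))
                rc = -EBUSY;            /* record, don't return yet */

        module_put(drv->owner);         /* always release the reference */
        return rc;                      /* nonzero stops bus_for_each_drv() */
}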
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index 51bef226d33f..aa96f67dd0b1 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -1138,8 +1138,13 @@ static void virtio_ccw_int_handler(struct ccw_device *cdev,
vcdev->err = -EIO;
}
virtio_ccw_check_activity(vcdev, activity);
- /* Interrupts are disabled here */
+#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
+ /*
+ * Paired with virtio_ccw_synchronize_cbs() and interrupts are
+ * disabled here.
+ */
read_lock(&vcdev->irq_lock);
+#endif
for_each_set_bit(i, indicators(vcdev),
sizeof(*indicators(vcdev)) * BITS_PER_BYTE) {
/* The bit clear must happen before the vring kick. */
@@ -1148,7 +1153,9 @@ static void virtio_ccw_int_handler(struct ccw_device *cdev,
vq = virtio_ccw_vq_by_ind(vcdev, i);
vring_interrupt(0, vq);
}
+#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
read_unlock(&vcdev->irq_lock);
+#endif
if (test_bit(0, indicators2(vcdev))) {
virtio_config_changed(&vcdev->vdev);
clear_bit(0, indicators2(vcdev));
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 7d819fc0395e..eb86afb21aab 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -2782,6 +2782,7 @@ static int slave_configure_v3_hw(struct scsi_device *sdev)
struct hisi_hba *hisi_hba = shost_priv(shost);
struct device *dev = hisi_hba->dev;
int ret = sas_slave_configure(sdev);
+ unsigned int max_sectors;
if (ret)
return ret;
@@ -2799,6 +2800,12 @@ static int slave_configure_v3_hw(struct scsi_device *sdev)
}
}
+ /* Set according to IOMMU IOVA caching limit */
+ max_sectors = min_t(size_t, queue_max_hw_sectors(sdev->request_queue),
+ (PAGE_SIZE * 32) >> SECTOR_SHIFT);
+
+ blk_queue_max_hw_sectors(sdev->request_queue, max_sectors);
+
return 0;
}
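The clamp keeps each request within the IOMMU's IOVA rcache so DMA mapping stays fast: PAGE_SIZE * 32 is 128 KiB on 4K-page systems, i.e. (4096 * 32) >> 9 = 256 sectors, and min_t() only ever lowers the queue's existing limit. Restated as a sketch:

#include <linux/blkdev.h>

/* Sketch: cap a queue's max_hw_sectors at 128 KiB (256 x 512-byte
 * sectors on 4K pages) without ever raising the existing limit. */
static void demo_clamp_sectors(struct request_queue *q)
{
        unsigned int max = min_t(size_t, queue_max_hw_sectors(q),
                                 (PAGE_SIZE * 32) >> SECTOR_SHIFT);

        blk_queue_max_hw_sectors(q, max);
}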
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index d0eab5700dc5..00684e11976b 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -160,8 +160,8 @@ static void ibmvfc_npiv_logout(struct ibmvfc_host *);
static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *);
static void ibmvfc_tgt_move_login(struct ibmvfc_target *);
-static void ibmvfc_release_sub_crqs(struct ibmvfc_host *);
-static void ibmvfc_init_sub_crqs(struct ibmvfc_host *);
+static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *);
+static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *);
static const char *unknown_error = "unknown error";
@@ -917,7 +917,7 @@ static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
struct vio_dev *vdev = to_vio_dev(vhost->dev);
unsigned long flags;
- ibmvfc_release_sub_crqs(vhost);
+ ibmvfc_dereg_sub_crqs(vhost);
/* Re-enable the CRQ */
do {
@@ -936,7 +936,7 @@ static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
spin_unlock(vhost->crq.q_lock);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
- ibmvfc_init_sub_crqs(vhost);
+ ibmvfc_reg_sub_crqs(vhost);
return rc;
}
@@ -955,7 +955,7 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
struct vio_dev *vdev = to_vio_dev(vhost->dev);
struct ibmvfc_queue *crq = &vhost->crq;
- ibmvfc_release_sub_crqs(vhost);
+ ibmvfc_dereg_sub_crqs(vhost);
/* Close the CRQ */
do {
@@ -988,7 +988,7 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
spin_unlock(vhost->crq.q_lock);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
- ibmvfc_init_sub_crqs(vhost);
+ ibmvfc_reg_sub_crqs(vhost);
return rc;
}
@@ -5682,6 +5682,8 @@ static int ibmvfc_alloc_queue(struct ibmvfc_host *vhost,
queue->cur = 0;
queue->fmt = fmt;
queue->size = PAGE_SIZE / fmt_size;
+
+ queue->vhost = vhost;
return 0;
}
@@ -5757,9 +5759,6 @@ static int ibmvfc_register_scsi_channel(struct ibmvfc_host *vhost,
ENTER;
- if (ibmvfc_alloc_queue(vhost, scrq, IBMVFC_SUB_CRQ_FMT))
- return -ENOMEM;
-
rc = h_reg_sub_crq(vdev->unit_address, scrq->msg_token, PAGE_SIZE,
&scrq->cookie, &scrq->hw_irq);
@@ -5790,7 +5789,6 @@ static int ibmvfc_register_scsi_channel(struct ibmvfc_host *vhost,
}
scrq->hwq_id = index;
- scrq->vhost = vhost;
LEAVE;
return 0;
@@ -5800,7 +5798,6 @@ irq_failed:
rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address, scrq->cookie);
} while (rtas_busy_delay(rc));
reg_failed:
- ibmvfc_free_queue(vhost, scrq);
LEAVE;
return rc;
}
@@ -5826,12 +5823,50 @@ static void ibmvfc_deregister_scsi_channel(struct ibmvfc_host *vhost, int index)
if (rc)
dev_err(dev, "Failed to free sub-crq[%d]: rc=%ld\n", index, rc);
- ibmvfc_free_queue(vhost, scrq);
+ /* Clean out the queue */
+ memset(scrq->msgs.crq, 0, PAGE_SIZE);
+ scrq->cur = 0;
+
+ LEAVE;
+}
+
+static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *vhost)
+{
+ int i, j;
+
+ ENTER;
+ if (!vhost->mq_enabled || !vhost->scsi_scrqs.scrqs)
+ return;
+
+ for (i = 0; i < nr_scsi_hw_queues; i++) {
+ if (ibmvfc_register_scsi_channel(vhost, i)) {
+ for (j = i; j > 0; j--)
+ ibmvfc_deregister_scsi_channel(vhost, j - 1);
+ vhost->do_enquiry = 0;
+ return;
+ }
+ }
+
+ LEAVE;
+}
+
+static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *vhost)
+{
+ int i;
+
+ ENTER;
+ if (!vhost->mq_enabled || !vhost->scsi_scrqs.scrqs)
+ return;
+
+ for (i = 0; i < nr_scsi_hw_queues; i++)
+ ibmvfc_deregister_scsi_channel(vhost, i);
+
LEAVE;
}
static void ibmvfc_init_sub_crqs(struct ibmvfc_host *vhost)
{
+ struct ibmvfc_queue *scrq;
int i, j;
ENTER;
@@ -5847,30 +5882,41 @@ static void ibmvfc_init_sub_crqs(struct ibmvfc_host *vhost)
}
for (i = 0; i < nr_scsi_hw_queues; i++) {
- if (ibmvfc_register_scsi_channel(vhost, i)) {
- for (j = i; j > 0; j--)
- ibmvfc_deregister_scsi_channel(vhost, j - 1);
+ scrq = &vhost->scsi_scrqs.scrqs[i];
+ if (ibmvfc_alloc_queue(vhost, scrq, IBMVFC_SUB_CRQ_FMT)) {
+ for (j = i; j > 0; j--) {
+ scrq = &vhost->scsi_scrqs.scrqs[j - 1];
+ ibmvfc_free_queue(vhost, scrq);
+ }
kfree(vhost->scsi_scrqs.scrqs);
vhost->scsi_scrqs.scrqs = NULL;
vhost->scsi_scrqs.active_queues = 0;
vhost->do_enquiry = 0;
- break;
+ vhost->mq_enabled = 0;
+ return;
}
}
+ ibmvfc_reg_sub_crqs(vhost);
+
LEAVE;
}
static void ibmvfc_release_sub_crqs(struct ibmvfc_host *vhost)
{
+ struct ibmvfc_queue *scrq;
int i;
ENTER;
if (!vhost->scsi_scrqs.scrqs)
return;
- for (i = 0; i < nr_scsi_hw_queues; i++)
- ibmvfc_deregister_scsi_channel(vhost, i);
+ ibmvfc_dereg_sub_crqs(vhost);
+
+ for (i = 0; i < nr_scsi_hw_queues; i++) {
+ scrq = &vhost->scsi_scrqs.scrqs[i];
+ ibmvfc_free_queue(vhost, scrq);
+ }
kfree(vhost->scsi_scrqs.scrqs);
vhost->scsi_scrqs.scrqs = NULL;
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index 3718406e0988..c39a245f43d0 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -789,6 +789,7 @@ struct ibmvfc_queue {
spinlock_t _lock;
spinlock_t *q_lock;
+ struct ibmvfc_host *vhost;
struct ibmvfc_event_pool evt_pool;
struct list_head sent;
struct list_head free;
@@ -797,7 +798,6 @@ struct ibmvfc_queue {
union ibmvfc_iu cancel_rsp;
/* Sub-CRQ fields */
- struct ibmvfc_host *vhost;
unsigned long cookie;
unsigned long vios_cookie;
unsigned long hw_irq;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 256ec6d08c16..9d01a3e3c26a 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -9795,7 +9795,7 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
GFP_KERNEL);
if (!ioa_cfg->hrrq[i].host_rrq) {
- while (--i > 0)
+ while (--i >= 0)
dma_free_coherent(&pdev->dev,
sizeof(u32) * ioa_cfg->hrrq[i].size,
ioa_cfg->hrrq[i].host_rrq,
@@ -10068,7 +10068,7 @@ static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg->vectors_info[i].desc,
&ioa_cfg->hrrq[i]);
if (rc) {
- while (--i >= 0)
+ while (--i > 0)
free_irq(pci_irq_vector(pdev, i),
&ioa_cfg->hrrq[i]);
return rc;
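The two ipr hunks above are the same unwind idiom with opposite off-by-one fixes: the dma_free_coherent() loop must also release index 0 (its allocation loop started at 0), while the free_irq() loop must skip index 0 (that vector is requested separately). A generic sketch of the pattern, with acquire()/release() as placeholders rather than driver functions:

/* Sketch: unwind after acquire(i) fails; indices 0..i-1 were acquired. */
static int acquire_all(int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (acquire(i))
			goto unwind;
	}
	return 0;

unwind:
	while (--i >= 0)	/* ">= 0": index 0 must be released too */
		release(i);
	return -1;
}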
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index b1be0dd0337a..f5d74958b664 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -420,8 +420,6 @@ int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, uint32_t,
uint32_t);
void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *);
-void lpfc_sli4_abort_fcp_cmpl(struct lpfc_hba *h, struct lpfc_iocbq *i,
- struct lpfc_wcqe_complete *w);
void lpfc_sli_free_hbq(struct lpfc_hba *, struct hbq_dmabuf *);
@@ -630,7 +628,7 @@ void lpfc_nvmet_invalidate_host(struct lpfc_hba *phba,
struct lpfc_nodelist *ndlp);
void lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba,
struct lpfc_iocbq *cmdiocb,
- struct lpfc_wcqe_complete *abts_cmpl);
+ struct lpfc_iocbq *rspiocb);
void lpfc_create_multixri_pools(struct lpfc_hba *phba);
void lpfc_create_destroy_pools(struct lpfc_hba *phba);
void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 9d36b20fb878..13dfe285493d 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -197,7 +197,7 @@ lpfc_ct_reject_event(struct lpfc_nodelist *ndlp,
memset(bpl, 0, sizeof(struct ulp_bde64));
bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
- bpl->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
+ bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
bpl->tus.f.bdeSize = (LPFC_CT_PREAMBLE - 4);
bpl->tus.w = le32_to_cpu(bpl->tus.w);
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 07f9a6e61e10..3fababb7c181 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -2998,10 +2998,7 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
ndlp->nlp_DID, ulp_status,
ulp_word4);
- /* Call NLP_EVT_DEVICE_RM if link is down or LOGO is aborted */
if (lpfc_error_lost_link(ulp_status, ulp_word4)) {
- lpfc_disc_state_machine(vport, ndlp, cmdiocb,
- NLP_EVT_DEVICE_RM);
skip_recovery = 1;
goto out;
}
@@ -3021,18 +3018,10 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
spin_unlock_irq(&ndlp->lock);
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_DEVICE_RM);
- lpfc_els_free_iocb(phba, cmdiocb);
- lpfc_nlp_put(ndlp);
-
- /* Presume the node was released. */
- return;
+ goto out_rsrc_free;
}
out:
- /* Driver is done with the IO. */
- lpfc_els_free_iocb(phba, cmdiocb);
- lpfc_nlp_put(ndlp);
-
/* At this point, the LOGO processing is complete. NOTE: For a
* pt2pt topology, we are assuming the NPortID will only change
* on link up processing. For a LOGO / PLOGI initiated by the
@@ -3059,6 +3048,10 @@ out:
ndlp->nlp_DID, ulp_status,
ulp_word4, tmo,
vport->num_disc_nodes);
+
+ lpfc_els_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
+
lpfc_disc_start(vport);
return;
}
@@ -3075,6 +3068,10 @@ out:
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_DEVICE_RM);
}
+out_rsrc_free:
+ /* Driver is done with the I/O. */
+ lpfc_els_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
}
/**
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 8511369d2cf8..f024415731ac 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -4487,6 +4487,9 @@ struct wqe_common {
#define wqe_sup_SHIFT 6
#define wqe_sup_MASK 0x00000001
#define wqe_sup_WORD word11
+#define wqe_ffrq_SHIFT 6
+#define wqe_ffrq_MASK 0x00000001
+#define wqe_ffrq_WORD word11
#define wqe_wqec_SHIFT 7
#define wqe_wqec_MASK 0x00000001
#define wqe_wqec_WORD word11
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 93b94c64518d..750dd1e9f2cc 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -12188,7 +12188,7 @@ lpfc_sli_enable_msi(struct lpfc_hba *phba)
rc = pci_enable_msi(phba->pcidev);
if (!rc)
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "0462 PCI enable MSI mode success.\n");
+ "0012 PCI enable MSI mode success.\n");
else {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0471 PCI enable MSI mode failed (%d)\n", rc);
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 639f86635127..b86ff9fcdf0c 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -834,7 +834,8 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_nvmet_invalidate_host(phba, ndlp);
if (ndlp->nlp_DID == Fabric_DID) {
- if (vport->port_state <= LPFC_FDISC)
+ if (vport->port_state <= LPFC_FDISC ||
+ vport->fc_flag & FC_PT2PT)
goto out;
lpfc_linkdown_port(vport);
spin_lock_irq(shost->host_lock);
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 335e90633933..cd10ee6482fc 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -1065,25 +1065,37 @@ lpfc_nvme_io_cmd_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
nCmd->rcv_rsplen = wcqe->parameter;
nCmd->status = 0;
+ /* Get the NVME cmd details for this unique error. */
+ cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr;
+ ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr;
+
/* Check if this is really an ERSP */
if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN) {
lpfc_ncmd->status = IOSTAT_SUCCESS;
lpfc_ncmd->result = 0;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
- "6084 NVME Completion ERSP: "
- "xri %x placed x%x\n",
- lpfc_ncmd->cur_iocbq.sli4_xritag,
- wcqe->total_data_placed);
+ "6084 NVME FCP_ERR ERSP: "
+ "xri %x placed x%x opcode x%x cmd_id "
+ "x%x cqe_status x%x\n",
+ lpfc_ncmd->cur_iocbq.sli4_xritag,
+ wcqe->total_data_placed,
+ cp->sqe.common.opcode,
+ cp->sqe.common.command_id,
+ ep->cqe.status);
break;
}
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6081 NVME Completion Protocol Error: "
"xri %x status x%x result x%x "
- "placed x%x\n",
+ "placed x%x opcode x%x cmd_id x%x, "
+ "cqe_status x%x\n",
lpfc_ncmd->cur_iocbq.sli4_xritag,
lpfc_ncmd->status, lpfc_ncmd->result,
- wcqe->total_data_placed);
+ wcqe->total_data_placed,
+ cp->sqe.common.opcode,
+ cp->sqe.common.command_id,
+ ep->cqe.status);
break;
case IOSTAT_LOCAL_REJECT:
/* Let fall through to set command final state. */
@@ -1195,7 +1207,8 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
{
struct lpfc_hba *phba = vport->phba;
struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
- struct lpfc_iocbq *pwqeq = &(lpfc_ncmd->cur_iocbq);
+ struct nvme_common_command *sqe;
+ struct lpfc_iocbq *pwqeq = &lpfc_ncmd->cur_iocbq;
union lpfc_wqe128 *wqe = &pwqeq->wqe;
uint32_t req_len;
@@ -1252,8 +1265,14 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
cstat->control_requests++;
}
- if (pnode->nlp_nvme_info & NLP_NVME_NSLER)
+ if (pnode->nlp_nvme_info & NLP_NVME_NSLER) {
bf_set(wqe_erp, &wqe->generic.wqe_com, 1);
+ sqe = &((struct nvme_fc_cmd_iu *)
+ nCmd->cmdaddr)->sqe.common;
+ if (sqe->opcode == nvme_admin_async_event)
+ bf_set(wqe_ffrq, &wqe->generic.wqe_com, 1);
+ }
+
/*
* Finish initializing those WQE fields that are independent
* of the nvme_cmnd request_buffer
@@ -1787,7 +1806,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
* lpfc_nvme_abort_fcreq_cmpl - Complete an NVME FCP abort request.
* @phba: Pointer to HBA context object
* @cmdiocb: Pointer to command iocb object.
- * @abts_cmpl: Pointer to wcqe complete object.
+ * @rspiocb: Pointer to response iocb object.
*
* This is the callback function for any NVME FCP IO that was aborted.
*
@@ -1796,8 +1815,10 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
**/
void
lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
- struct lpfc_wcqe_complete *abts_cmpl)
+ struct lpfc_iocbq *rspiocb)
{
+ struct lpfc_wcqe_complete *abts_cmpl = &rspiocb->wcqe_cmpl;
+
lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
"6145 ABORT_XRI_CN completing on rpi x%x "
"original iotag x%x, abort cmd iotag x%x "
@@ -1840,6 +1861,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_nvme_fcpreq_priv *freqpriv;
unsigned long flags;
int ret_val;
+ struct nvme_fc_cmd_iu *cp;
/* Validate pointers. LLDD fault handling with transport does
* have timing races.
@@ -1963,10 +1985,16 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
return;
}
+ /*
+ * Get Command Id from cmd to plug into response. This
+ * code is not needed in the next NVME Transport drop.
+ */
+ cp = (struct nvme_fc_cmd_iu *)lpfc_nbuf->nvmeCmd->cmdaddr;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
"6138 Transport Abort NVME Request Issued for "
- "ox_id x%x\n",
- nvmereq_wqe->sli4_xritag);
+ "ox_id x%x nvme opcode x%x nvme cmd_id x%x\n",
+ nvmereq_wqe->sli4_xritag, cp->sqe.common.opcode,
+ cp->sqe.common.command_id);
return;
out_unlock:
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index d43968203248..ba5e4016262e 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -6062,6 +6062,9 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
int status;
u32 logit = LOG_FCP;
+ if (!rport)
+ return FAILED;
+
rdata = rport->dd_data;
if (!rdata || !rdata->pnode) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
@@ -6140,6 +6143,9 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
unsigned long flags;
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
+ if (!rport)
+ return FAILED;
+
rdata = rport->dd_data;
if (!rdata || !rdata->pnode) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 6ed696c4602a..80ac3a051c19 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1930,7 +1930,7 @@ lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total)
sync_buf = __lpfc_sli_get_iocbq(phba);
if (!sync_buf) {
lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
- "6213 No available WQEs for CMF_SYNC_WQE\n");
+ "6244 No available WQEs for CMF_SYNC_WQE\n");
ret_val = ENOMEM;
goto out_unlock;
}
@@ -3805,7 +3805,7 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
set_job_ulpword4(cmdiocbp,
IOERR_ABORT_REQUESTED);
/*
- * For SLI4, irsiocb contains
+ * For SLI4, irspiocb contains
* NO_XRI in sli_xritag, it
* shall not affect releasing
* sgl (xri) process.
@@ -3823,7 +3823,7 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
}
}
- (cmdiocbp->cmd_cmpl) (phba, cmdiocbp, saveq);
+ cmdiocbp->cmd_cmpl(phba, cmdiocbp, saveq);
} else
lpfc_sli_release_iocbq(phba, cmdiocbp);
} else {
@@ -4063,8 +4063,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
if (cmdiocbq->cmd_cmpl) {
spin_unlock_irqrestore(&phba->hbalock, iflag);
- (cmdiocbq->cmd_cmpl)(phba, cmdiocbq,
- &rspiocbq);
+ cmdiocbq->cmd_cmpl(phba, cmdiocbq, &rspiocbq);
spin_lock_irqsave(&phba->hbalock, iflag);
}
break;
@@ -10288,7 +10287,7 @@ __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
* @flag: Flag indicating if this command can be put into txq.
*
* __lpfc_sli_issue_fcp_io_s3 is wrapper function to invoke lockless func to
- * send an iocb command to an HBA with SLI-4 interface spec.
+ * send an iocb command to an HBA with SLI-3 interface spec.
*
* This function takes the hbalock before invoking the lockless version.
* The function will return success after it successfully submit the wqe to
@@ -12740,7 +12739,7 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
cmdiocbq->cmd_cmpl = cmdiocbq->wait_cmd_cmpl;
cmdiocbq->wait_cmd_cmpl = NULL;
if (cmdiocbq->cmd_cmpl)
- (cmdiocbq->cmd_cmpl)(phba, cmdiocbq, NULL);
+ cmdiocbq->cmd_cmpl(phba, cmdiocbq, NULL);
else
lpfc_sli_release_iocbq(phba, cmdiocbq);
return;
@@ -12754,9 +12753,9 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
/* Set the exchange busy flag for task management commands */
if ((cmdiocbq->cmd_flag & LPFC_IO_FCP) &&
- !(cmdiocbq->cmd_flag & LPFC_IO_LIBDFC)) {
+ !(cmdiocbq->cmd_flag & LPFC_IO_LIBDFC)) {
lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
- cur_iocbq);
+ cur_iocbq);
if (rspiocbq && (rspiocbq->cmd_flag & LPFC_EXCHANGE_BUSY))
lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
else
@@ -13896,7 +13895,7 @@ void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
* @irspiocbq: Pointer to work-queue completion queue entry.
*
* This routine handles an ELS work-queue completion event and construct
- * a pseudo response ELS IODBQ from the SLI4 ELS WCQE for the common
+ * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
* discovery engine to handle.
*
* Return: Pointer to the receive IOCBQ, NULL otherwise.
@@ -13940,7 +13939,7 @@ lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba,
if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
spin_lock_irqsave(&phba->hbalock, iflags);
- cmdiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY;
+ irspiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY;
spin_unlock_irqrestore(&phba->hbalock, iflags);
}
@@ -14799,7 +14798,7 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
/* Pass the cmd_iocb and the wcqe to the upper layer */
memcpy(&cmdiocbq->wcqe_cmpl, wcqe,
sizeof(struct lpfc_wcqe_complete));
- (cmdiocbq->cmd_cmpl)(phba, cmdiocbq, cmdiocbq);
+ cmdiocbq->cmd_cmpl(phba, cmdiocbq, cmdiocbq);
} else {
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"0375 FCP cmdiocb not callback function "
@@ -18956,7 +18955,7 @@ lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
/* Free iocb created in lpfc_prep_seq */
list_for_each_entry_safe(curr_iocb, next_iocb,
- &iocbq->list, list) {
+ &iocbq->list, list) {
list_del_init(&curr_iocb->list);
lpfc_sli_release_iocbq(phba, curr_iocb);
}
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 4fab79ed58ed..2ab6f7db64d8 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "14.2.0.3"
+#define LPFC_DRIVER_VERSION "14.2.0.4"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index c95360a3c186..0917b05059b4 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -3195,6 +3195,9 @@ static int megasas_map_queues(struct Scsi_Host *shost)
qoff += map->nr_queues;
offset += map->nr_queues;
+ /* we never use the READ queue, so report zero READ queues to blk-mq */
+ shost->tag_set.map[HCTX_TYPE_READ].nr_queues = 0;
+
/* Setup Poll hctx */
map = &shost->tag_set.map[HCTX_TYPE_POLL];
map->nr_queues = instance->iopoll_q_count;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 37d46ae5c61d..9a1ae52bb621 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -5369,6 +5369,7 @@ static int _base_assign_fw_reported_qd(struct MPT3SAS_ADAPTER *ioc)
Mpi2ConfigReply_t mpi_reply;
Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
Mpi26PCIeIOUnitPage1_t pcie_iounit_pg1;
+ u16 depth;
int sz;
int rc = 0;
@@ -5380,7 +5381,7 @@ static int _base_assign_fw_reported_qd(struct MPT3SAS_ADAPTER *ioc)
goto out;
/* sas iounit page 1 */
sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData);
- sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
+ sas_iounit_pg1 = kzalloc(sizeof(Mpi2SasIOUnitPage1_t), GFP_KERNEL);
if (!sas_iounit_pg1) {
pr_err("%s: failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
@@ -5393,16 +5394,16 @@ static int _base_assign_fw_reported_qd(struct MPT3SAS_ADAPTER *ioc)
ioc->name, __FILE__, __LINE__, __func__);
goto out;
}
- ioc->max_wideport_qd =
- (le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth)) ?
- le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth) :
- MPT3SAS_SAS_QUEUE_DEPTH;
- ioc->max_narrowport_qd =
- (le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth)) ?
- le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth) :
- MPT3SAS_SAS_QUEUE_DEPTH;
- ioc->max_sata_qd = (sas_iounit_pg1->SATAMaxQDepth) ?
- sas_iounit_pg1->SATAMaxQDepth : MPT3SAS_SATA_QUEUE_DEPTH;
+
+ depth = le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth);
+ ioc->max_wideport_qd = (depth ? depth : MPT3SAS_SAS_QUEUE_DEPTH);
+
+ depth = le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth);
+ ioc->max_narrowport_qd = (depth ? depth : MPT3SAS_SAS_QUEUE_DEPTH);
+
+ depth = sas_iounit_pg1->SATAMaxQDepth;
+ ioc->max_sata_qd = (depth ? depth : MPT3SAS_SATA_QUEUE_DEPTH);
+
/* pcie iounit page 1 */
rc = mpt3sas_config_get_pcie_iounit_pg1(ioc, &mpi_reply,
&pcie_iounit_pg1, sizeof(Mpi26PCIeIOUnitPage1_t));
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index f7466a895d3b..991eb01bb1e0 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -3145,15 +3145,6 @@ void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i)
if (!phy->phy_attached)
return;
- if (sas_phy->phy) {
- struct sas_phy *sphy = sas_phy->phy;
- sphy->negotiated_linkrate = sas_phy->linkrate;
- sphy->minimum_linkrate = phy->minimum_linkrate;
- sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
- sphy->maximum_linkrate = phy->maximum_linkrate;
- sphy->maximum_linkrate_hw = phy->maximum_linkrate;
- }
-
if (phy->phy_type & PORT_TYPE_SAS) {
struct sas_identify_frame *id;
id = (struct sas_identify_frame *)phy->frame_rcvd;
@@ -3177,26 +3168,22 @@ void pm8001_get_lrate_mode(struct pm8001_phy *phy, u8 link_rate)
switch (link_rate) {
case PHY_SPEED_120:
phy->sas_phy.linkrate = SAS_LINK_RATE_12_0_GBPS;
- phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_12_0_GBPS;
break;
case PHY_SPEED_60:
phy->sas_phy.linkrate = SAS_LINK_RATE_6_0_GBPS;
- phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_6_0_GBPS;
break;
case PHY_SPEED_30:
phy->sas_phy.linkrate = SAS_LINK_RATE_3_0_GBPS;
- phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_3_0_GBPS;
break;
case PHY_SPEED_15:
phy->sas_phy.linkrate = SAS_LINK_RATE_1_5_GBPS;
- phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_1_5_GBPS;
break;
}
sas_phy->negotiated_linkrate = phy->sas_phy.linkrate;
- sas_phy->maximum_linkrate_hw = SAS_LINK_RATE_6_0_GBPS;
+ sas_phy->maximum_linkrate_hw = phy->maximum_linkrate;
sas_phy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
- sas_phy->maximum_linkrate = SAS_LINK_RATE_6_0_GBPS;
- sas_phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
+ sas_phy->maximum_linkrate = phy->maximum_linkrate;
+ sas_phy->minimum_linkrate = phy->minimum_linkrate;
}
/**
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 9b04f1a6a67d..01f2f41928eb 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -143,6 +143,8 @@ static void pm8001_phy_init(struct pm8001_hba_info *pm8001_ha, int phy_id)
struct asd_sas_phy *sas_phy = &phy->sas_phy;
phy->phy_state = PHY_LINK_DISABLE;
phy->pm8001_ha = pm8001_ha;
+ phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
+ phy->maximum_linkrate = SAS_LINK_RATE_6_0_GBPS;
sas_phy->enabled = (phy_id < pm8001_ha->chip->n_phy) ? 1 : 0;
sas_phy->class = SAS;
sas_phy->iproto = SAS_PROTOCOL_ALL;
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 01c5e8ff4cc5..303cd05fec50 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -3723,8 +3723,12 @@ static int mpi_phy_stop_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_dbg(pm8001_ha, MSG, "phy:0x%x status:0x%x\n",
phyid, status);
if (status == PHY_STOP_SUCCESS ||
- status == PHY_STOP_ERR_DEVICE_ATTACHED)
+ status == PHY_STOP_ERR_DEVICE_ATTACHED) {
phy->phy_state = PHY_LINK_DISABLE;
+ phy->sas_phy.phy->negotiated_linkrate = SAS_PHY_DISABLED;
+ phy->sas_phy.linkrate = SAS_PHY_DISABLED;
+ }
+
return 0;
}
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index bfce60183a6e..836ddc476764 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -4031,7 +4031,7 @@ pmcraid_register_interrupt_handler(struct pmcraid_instance *pinstance)
return 0;
out_unwind:
- while (--i > 0)
+ while (--i >= 0)
free_irq(pci_irq_vector(pdev, i), &pinstance->hrrq_vector[i]);
pci_free_irq_vectors(pdev);
return rc;
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 1f423f723d06..b8a76b89f85a 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -2826,6 +2826,24 @@ static void zbc_open_zone(struct sdebug_dev_info *devip,
}
}
+static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
+ struct sdeb_zone_state *zsp)
+{
+ switch (zsp->z_cond) {
+ case ZC2_IMPLICIT_OPEN:
+ devip->nr_imp_open--;
+ break;
+ case ZC3_EXPLICIT_OPEN:
+ devip->nr_exp_open--;
+ break;
+ default:
+ WARN_ONCE(true, "Invalid zone %llu condition %x\n",
+ zsp->z_start, zsp->z_cond);
+ break;
+ }
+ zsp->z_cond = ZC5_FULL;
+}
+
static void zbc_inc_wp(struct sdebug_dev_info *devip,
unsigned long long lba, unsigned int num)
{
@@ -2838,7 +2856,7 @@ static void zbc_inc_wp(struct sdebug_dev_info *devip,
if (zsp->z_type == ZBC_ZTYPE_SWR) {
zsp->z_wp += num;
if (zsp->z_wp >= zend)
- zsp->z_cond = ZC5_FULL;
+ zbc_set_zone_full(devip, zsp);
return;
}
@@ -2857,7 +2875,7 @@ static void zbc_inc_wp(struct sdebug_dev_info *devip,
n = num;
}
if (zsp->z_wp >= zend)
- zsp->z_cond = ZC5_FULL;
+ zbc_set_zone_full(devip, zsp);
num -= n;
lba += n;
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 2c0dd64159b0..5d21f07456c6 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -212,7 +212,12 @@ iscsi_create_endpoint(int dd_size)
return NULL;
mutex_lock(&iscsi_ep_idr_mutex);
- id = idr_alloc(&iscsi_ep_idr, ep, 0, -1, GFP_NOIO);
+
+ /*
+ * First endpoint id should be 1 to comply with user space
+ * applications (iscsid).
+ */
+ id = idr_alloc(&iscsi_ep_idr, ep, 1, -1, GFP_NOIO);
if (id < 0) {
mutex_unlock(&iscsi_ep_idr_mutex);
printk(KERN_ERR "Could not allocate endpoint ID. Error %d.\n",
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 895b56c8f25e..a1a2ac09066f 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -3072,7 +3072,7 @@ static void sd_read_cpr(struct scsi_disk *sdkp)
goto out;
/* We must have at least a 64B header and one 32B range descriptor */
- vpd_len = get_unaligned_be16(&buffer[2]) + 3;
+ vpd_len = get_unaligned_be16(&buffer[2]) + 4;
if (vpd_len > buf_len || vpd_len < 64 + 32 || (vpd_len & 31)) {
sd_printk(KERN_ERR, sdkp,
"Invalid Concurrent Positioning Ranges VPD page\n");
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index ca3530982e52..fe000da11332 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1844,7 +1844,7 @@ static struct scsi_host_template scsi_driver = {
.cmd_per_lun = 2048,
.this_id = -1,
/* Ensure there are no gaps in presented sgls */
- .virt_boundary_mask = PAGE_SIZE-1,
+ .virt_boundary_mask = HV_HYP_PAGE_SIZE - 1,
.no_write_same = 1,
.track_queue_depth = 1,
.change_queue_depth = storvsc_change_queue_depth,
@@ -1895,6 +1895,7 @@ static int storvsc_probe(struct hv_device *device,
int target = 0;
struct storvsc_device *stor_device;
int max_sub_channels = 0;
+ u32 max_xfer_bytes;
/*
* We support sub-channels for storage on SCSI and FC controllers.
@@ -1968,12 +1969,28 @@ static int storvsc_probe(struct hv_device *device,
}
/* max cmd length */
host->max_cmd_len = STORVSC_MAX_CMD_LEN;
-
/*
- * set the table size based on the info we got
- * from the host.
+ * Any reasonable Hyper-V configuration should provide a
+ * max_transfer_bytes value aligned to HV_HYP_PAGE_SIZE; rounding
+ * it down protects against any weird value.
+ */
+ max_xfer_bytes = round_down(stor_device->max_transfer_bytes, HV_HYP_PAGE_SIZE);
+ /* max_hw_sectors_kb */
+ host->max_sectors = max_xfer_bytes >> 9;
+ /*
+ * There are two requirements for Hyper-V storvsc sgl segments,
+ * on which the calculation of max segments below is based:
+ *
+ * 1. Except for the first and last sgl segment, all sgl segments
+ * should be aligned to HV_HYP_PAGE_SIZE, which also means the
+ * maximum number of segments in a sgl can be calculated by
+ * dividing the total max transfer length by HV_HYP_PAGE_SIZE.
+ *
+ * 2. Except for the first and last, each entry in the SGL must
+ * have an offset that is a multiple of HV_HYP_PAGE_SIZE.
*/
- host->sg_tablesize = (stor_device->max_transfer_bytes >> PAGE_SHIFT);
+ host->sg_tablesize = (max_xfer_bytes >> HV_HYP_PAGE_SHIFT) + 1;
/*
* For non-IDE disks, the host supports multiple channels.
* Set the number of HW queues we are supporting.
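Working the segment math above through with assumed values (HV_HYP_PAGE_SIZE of 4096, HV_HYP_PAGE_SHIFT of 12): a 512 KiB max transfer rounds to 128 hypervisor pages, plus one extra segment because an unaligned buffer can straddle one more page boundary, so sg_tablesize becomes 129. A sketch:

/* Sketch of the sg_tablesize calculation, 4 KiB hypervisor pages assumed. */
static unsigned int sketch_sg_tablesize(unsigned long max_transfer_bytes)
{
	unsigned long max_xfer = max_transfer_bytes & ~4095UL; /* round_down */

	return (max_xfer >> 12) + 1; /* +1: unaligned first/last page */
}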
diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h
index 51a82f7803d3..9d16cf925483 100644
--- a/drivers/scsi/vmw_pvscsi.h
+++ b/drivers/scsi/vmw_pvscsi.h
@@ -331,8 +331,8 @@ struct PVSCSIRingReqDesc {
u8 tag;
u8 bus;
u8 target;
- u8 vcpuHint;
- u8 unused[59];
+ u16 vcpuHint;
+ u8 unused[58];
} __packed;
/*
diff --git a/drivers/soc/atmel/soc.c b/drivers/soc/atmel/soc.c
index b2d365ae0282..dae8a2e0f745 100644
--- a/drivers/soc/atmel/soc.c
+++ b/drivers/soc/atmel/soc.c
@@ -91,14 +91,14 @@ static const struct at91_soc socs[] __initconst = {
AT91_SOC(SAM9X60_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
AT91_CIDR_VERSION_MASK, SAM9X60_EXID_MATCH,
"sam9x60", "sam9x60"),
- AT91_SOC(SAM9X60_CIDR_MATCH, SAM9X60_D5M_EXID_MATCH,
- AT91_CIDR_VERSION_MASK, SAM9X60_EXID_MATCH,
+ AT91_SOC(SAM9X60_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAM9X60_D5M_EXID_MATCH,
"sam9x60 64MiB DDR2 SiP", "sam9x60"),
- AT91_SOC(SAM9X60_CIDR_MATCH, SAM9X60_D1G_EXID_MATCH,
- AT91_CIDR_VERSION_MASK, SAM9X60_EXID_MATCH,
+ AT91_SOC(SAM9X60_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAM9X60_D1G_EXID_MATCH,
"sam9x60 128MiB DDR2 SiP", "sam9x60"),
- AT91_SOC(SAM9X60_CIDR_MATCH, SAM9X60_D6K_EXID_MATCH,
- AT91_CIDR_VERSION_MASK, SAM9X60_EXID_MATCH,
+ AT91_SOC(SAM9X60_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAM9X60_D6K_EXID_MATCH,
"sam9x60 8MiB SDRAM SiP", "sam9x60"),
#endif
#ifdef CONFIG_SOC_SAMA5
diff --git a/drivers/soc/bcm/brcmstb/pm/pm-arm.c b/drivers/soc/bcm/brcmstb/pm/pm-arm.c
index 3cbb165d6e30..70ad0f3dce28 100644
--- a/drivers/soc/bcm/brcmstb/pm/pm-arm.c
+++ b/drivers/soc/bcm/brcmstb/pm/pm-arm.c
@@ -783,6 +783,7 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
}
ret = brcmstb_init_sram(dn);
+ of_node_put(dn);
if (ret) {
pr_err("error setting up SRAM for PM\n");
return ret;
diff --git a/drivers/soc/imx/imx8m-blk-ctrl.c b/drivers/soc/imx/imx8m-blk-ctrl.c
index 7f49385ed2f8..7ebc28709e94 100644
--- a/drivers/soc/imx/imx8m-blk-ctrl.c
+++ b/drivers/soc/imx/imx8m-blk-ctrl.c
@@ -667,7 +667,7 @@ static const struct imx8m_blk_ctrl_domain_data imx8mp_media_blk_ctl_domain_data[
},
[IMX8MP_MEDIABLK_PD_LCDIF_2] = {
.name = "mediablk-lcdif-2",
- .clk_names = (const char *[]){ "disp1", "apb", "axi", },
+ .clk_names = (const char *[]){ "disp2", "apb", "axi", },
.num_clks = 3,
.gpc_name = "lcdif2",
.rst_mask = BIT(11) | BIT(12) | BIT(24),
diff --git a/drivers/soc/ixp4xx/ixp4xx-npe.c b/drivers/soc/ixp4xx/ixp4xx-npe.c
index 613935cb6a48..58240e320c13 100644
--- a/drivers/soc/ixp4xx/ixp4xx-npe.c
+++ b/drivers/soc/ixp4xx/ixp4xx-npe.c
@@ -758,7 +758,7 @@ static const struct of_device_id ixp4xx_npe_of_match[] = {
static struct platform_driver ixp4xx_npe_driver = {
.driver = {
.name = "ixp4xx-npe",
- .of_match_table = of_match_ptr(ixp4xx_npe_of_match),
+ .of_match_table = ixp4xx_npe_of_match,
},
.probe = ixp4xx_npe_probe,
.remove = ixp4xx_npe_remove,
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
index 3e95835653ea..4f163d62942c 100644
--- a/drivers/soc/qcom/smem.c
+++ b/drivers/soc/qcom/smem.c
@@ -926,7 +926,7 @@ qcom_smem_enumerate_partitions(struct qcom_smem *smem, u16 local_host)
struct smem_partition_header *header;
struct smem_ptable_entry *entry;
struct smem_ptable *ptable;
- unsigned int remote_host;
+ u16 remote_host;
u16 host0, host1;
int i;
@@ -951,12 +951,12 @@ qcom_smem_enumerate_partitions(struct qcom_smem *smem, u16 local_host)
continue;
if (remote_host >= SMEM_HOST_COUNT) {
- dev_err(smem->dev, "bad host %hu\n", remote_host);
+ dev_err(smem->dev, "bad host %u\n", remote_host);
return -EINVAL;
}
if (smem->partitions[remote_host].virt_base) {
- dev_err(smem->dev, "duplicate host %hu\n", remote_host);
+ dev_err(smem->dev, "duplicate host %u\n", remote_host);
return -EINVAL;
}
diff --git a/drivers/spi/spi-amd.c b/drivers/spi/spi-amd.c
index cba6a4486c24..efdcbe6c4c26 100644
--- a/drivers/spi/spi-amd.c
+++ b/drivers/spi/spi-amd.c
@@ -33,6 +33,7 @@
#define AMD_SPI_RX_COUNT_REG 0x4B
#define AMD_SPI_STATUS_REG 0x4C
+#define AMD_SPI_FIFO_SIZE 70
#define AMD_SPI_MEM_SIZE 200
/* M_CMD OP codes for SPI */
@@ -270,6 +271,11 @@ static int amd_spi_master_transfer(struct spi_master *master,
return 0;
}
+static size_t amd_spi_max_transfer_size(struct spi_device *spi)
+{
+ return AMD_SPI_FIFO_SIZE;
+}
+
static int amd_spi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -302,6 +308,8 @@ static int amd_spi_probe(struct platform_device *pdev)
master->flags = SPI_MASTER_HALF_DUPLEX;
master->setup = amd_spi_master_setup;
master->transfer_one_message = amd_spi_master_transfer;
+ master->max_transfer_size = amd_spi_max_transfer_size;
+ master->max_message_size = amd_spi_max_transfer_size;
/* Register the controller with SPI framework */
err = devm_spi_register_master(dev, master);
diff --git a/drivers/spi/spi-aspeed-smc.c b/drivers/spi/spi-aspeed-smc.c
index 496f3e1e9079..3e891bf22470 100644
--- a/drivers/spi/spi-aspeed-smc.c
+++ b/drivers/spi/spi-aspeed-smc.c
@@ -558,6 +558,14 @@ static int aspeed_spi_dirmap_create(struct spi_mem_dirmap_desc *desc)
u32 ctl_val;
int ret = 0;
+ dev_dbg(aspi->dev,
+ "CE%d %s dirmap [ 0x%.8llx - 0x%.8llx ] OP %#x mode:%d.%d.%d.%d naddr:%#x ndummies:%#x\n",
+ chip->cs, op->data.dir == SPI_MEM_DATA_IN ? "read" : "write",
+ desc->info.offset, desc->info.offset + desc->info.length,
+ op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
+ op->dummy.buswidth, op->data.buswidth,
+ op->addr.nbytes, op->dummy.nbytes);
+
chip->clk_freq = desc->mem->spi->max_speed_hz;
/* Only for reads */
@@ -574,9 +582,11 @@ static int aspeed_spi_dirmap_create(struct spi_mem_dirmap_desc *desc)
ctl_val = readl(chip->ctl) & ~CTRL_IO_CMD_MASK;
ctl_val |= aspeed_spi_get_io_mode(op) |
op->cmd.opcode << CTRL_COMMAND_SHIFT |
- CTRL_IO_DUMMY_SET(op->dummy.nbytes / op->dummy.buswidth) |
CTRL_IO_MODE_READ;
+ if (op->dummy.nbytes)
+ ctl_val |= CTRL_IO_DUMMY_SET(op->dummy.nbytes / op->dummy.buswidth);
+
/* Tune 4BYTE address mode */
if (op->addr.nbytes) {
u32 addr_mode = readl(aspi->regs + CE_CTRL_REG);
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index 775c0bf2f923..0933948d7df3 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -1138,10 +1138,14 @@ static void bcm2835_spi_handle_err(struct spi_controller *ctlr,
struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
/* if an error occurred and we have an active dma, then terminate */
- dmaengine_terminate_sync(ctlr->dma_tx);
- bs->tx_dma_active = false;
- dmaengine_terminate_sync(ctlr->dma_rx);
- bs->rx_dma_active = false;
+ if (ctlr->dma_tx) {
+ dmaengine_terminate_sync(ctlr->dma_tx);
+ bs->tx_dma_active = false;
+ }
+ if (ctlr->dma_rx) {
+ dmaengine_terminate_sync(ctlr->dma_rx);
+ bs->rx_dma_active = false;
+ }
bcm2835_spi_undo_prologue(bs);
/* and reset */
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index 2b9fc8449a62..72b1a5a2298c 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -1578,8 +1578,7 @@ static int cqspi_probe(struct platform_device *pdev)
ret = cqspi_of_get_pdata(cqspi);
if (ret) {
dev_err(dev, "Cannot get mandatory OF data.\n");
- ret = -ENODEV;
- goto probe_master_put;
+ return -ENODEV;
}
/* Obtain QSPI clock. */
@@ -1587,7 +1586,7 @@ static int cqspi_probe(struct platform_device *pdev)
if (IS_ERR(cqspi->clk)) {
dev_err(dev, "Cannot claim QSPI clock.\n");
ret = PTR_ERR(cqspi->clk);
- goto probe_master_put;
+ return ret;
}
/* Obtain and remap controller address. */
@@ -1596,7 +1595,7 @@ static int cqspi_probe(struct platform_device *pdev)
if (IS_ERR(cqspi->iobase)) {
dev_err(dev, "Cannot remap controller address.\n");
ret = PTR_ERR(cqspi->iobase);
- goto probe_master_put;
+ return ret;
}
/* Obtain and remap AHB address. */
@@ -1605,7 +1604,7 @@ static int cqspi_probe(struct platform_device *pdev)
if (IS_ERR(cqspi->ahb_base)) {
dev_err(dev, "Cannot remap AHB address.\n");
ret = PTR_ERR(cqspi->ahb_base);
- goto probe_master_put;
+ return ret;
}
cqspi->mmap_phys_base = (dma_addr_t)res_ahb->start;
cqspi->ahb_size = resource_size(res_ahb);
@@ -1614,15 +1613,13 @@ static int cqspi_probe(struct platform_device *pdev)
/* Obtain IRQ line. */
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- ret = -ENXIO;
- goto probe_master_put;
- }
+ if (irq < 0)
+ return -ENXIO;
pm_runtime_enable(dev);
ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
- goto probe_master_put;
+ return ret;
ret = clk_prepare_enable(cqspi->clk);
if (ret) {
@@ -1716,8 +1713,6 @@ probe_reset_failed:
probe_clk_failed:
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
-probe_master_put:
- spi_master_put(master);
return ret;
}
diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
index a23d4f6329f5..6a7f7df1e776 100644
--- a/drivers/spi/spi-cadence.c
+++ b/drivers/spi/spi-cadence.c
@@ -69,6 +69,7 @@
#define CDNS_SPI_BAUD_DIV_SHIFT 3 /* Baud rate divisor shift in CR */
#define CDNS_SPI_SS_SHIFT 10 /* Slave Select field shift in CR */
#define CDNS_SPI_SS0 0x1 /* Slave Select zero */
+#define CDNS_SPI_NOSS 0xF /* No Slave select */
/*
* SPI Interrupt Registers bit Masks
@@ -92,9 +93,6 @@
#define CDNS_SPI_ER_ENABLE 0x00000001 /* SPI Enable Bit Mask */
#define CDNS_SPI_ER_DISABLE 0x0 /* SPI Disable Bit Mask */
-/* SPI FIFO depth in bytes */
-#define CDNS_SPI_FIFO_DEPTH 128
-
/* Default number of chip select lines */
#define CDNS_SPI_DEFAULT_NUM_CS 4
@@ -110,6 +108,7 @@
* @rx_bytes: Number of bytes requested
* @dev_busy: Device busy flag
* @is_decoded_cs: Flag for decoder property set or not
+ * @tx_fifo_depth: Depth of the TX FIFO
*/
struct cdns_spi {
void __iomem *regs;
@@ -123,6 +122,7 @@ struct cdns_spi {
int rx_bytes;
u8 dev_busy;
u32 is_decoded_cs;
+ unsigned int tx_fifo_depth;
};
/* Macros for the SPI controller read/write */
@@ -304,7 +304,7 @@ static void cdns_spi_fill_tx_fifo(struct cdns_spi *xspi)
{
unsigned long trans_cnt = 0;
- while ((trans_cnt < CDNS_SPI_FIFO_DEPTH) &&
+ while ((trans_cnt < xspi->tx_fifo_depth) &&
(xspi->tx_bytes > 0)) {
/* When xspi in busy condition, bytes may send failed,
@@ -450,20 +450,43 @@ static int cdns_prepare_transfer_hardware(struct spi_master *master)
* @master: Pointer to the spi_master structure which provides
* information about the controller.
*
- * This function disables the SPI master controller.
+ * This function disables the SPI master controller when no slave is selected.
*
* Return: 0 always
*/
static int cdns_unprepare_transfer_hardware(struct spi_master *master)
{
struct cdns_spi *xspi = spi_master_get_devdata(master);
+ u32 ctrl_reg;
- cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_DISABLE);
+ /* Disable the SPI if slave is deselected */
+ ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR);
+ ctrl_reg = (ctrl_reg & CDNS_SPI_CR_SSCTRL) >> CDNS_SPI_SS_SHIFT;
+ if (ctrl_reg == CDNS_SPI_NOSS)
+ cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_DISABLE);
return 0;
}
/**
+ * cdns_spi_detect_fifo_depth - Detect the FIFO depth of the hardware
+ * @xspi: Pointer to the cdns_spi structure
+ *
+ * The depth of the TX FIFO is a synthesis configuration parameter of the SPI
+ * IP. The FIFO threshold register is sized so that its maximum value can be the
+ * FIFO size - 1. This is used to detect the size of the FIFO.
+ */
+static void cdns_spi_detect_fifo_depth(struct cdns_spi *xspi)
+{
+ /* The MSBs will get truncated, giving us the size of the FIFO */
+ cdns_spi_write(xspi, CDNS_SPI_THLD, 0xffff);
+ xspi->tx_fifo_depth = cdns_spi_read(xspi, CDNS_SPI_THLD) + 1;
+
+ /* Reset to default */
+ cdns_spi_write(xspi, CDNS_SPI_THLD, 0x1);
+}
+
+/**
* cdns_spi_probe - Probe method for the SPI driver
* @pdev: Pointer to the platform_device structure
*
@@ -535,6 +558,8 @@ static int cdns_spi_probe(struct platform_device *pdev)
if (ret < 0)
xspi->is_decoded_cs = 0;
+ cdns_spi_detect_fifo_depth(xspi);
+
/* SPI controller initializations */
cdns_spi_init_hw(xspi);
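The FIFO-depth probe added above generalizes to any threshold register whose writable field is exactly wide enough to hold depth - 1: write all-ones, and the truncated read-back is depth - 1. A standalone sketch (plain 32-bit accessors assumed in place of the driver's helpers):

/* Sketch: writing 0xffff truncates to the threshold field width,
 * whose maximum value is the FIFO depth minus one. */
static unsigned int sketch_fifo_depth(void __iomem *thld_reg)
{
	writel(0xffff, thld_reg);
	return readl(thld_reg) + 1;
}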
diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c
index e8de4f5017cd..0c79193d9697 100644
--- a/drivers/spi/spi-mem.c
+++ b/drivers/spi/spi-mem.c
@@ -808,7 +808,7 @@ int spi_mem_poll_status(struct spi_mem *mem,
op->data.dir != SPI_MEM_DATA_IN)
return -EINVAL;
- if (ctlr->mem_ops && ctlr->mem_ops->poll_status) {
+ if (ctlr->mem_ops && ctlr->mem_ops->poll_status && !mem->spi->cs_gpiod) {
ret = spi_mem_access_start(mem);
if (ret)
return ret;
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index a08215eb9e14..79242dc5272d 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -381,15 +381,18 @@ static int rockchip_spi_prepare_irq(struct rockchip_spi *rs,
rs->tx_left = rs->tx ? xfer->len / rs->n_bytes : 0;
rs->rx_left = xfer->len / rs->n_bytes;
- if (rs->cs_inactive)
- writel_relaxed(INT_RF_FULL | INT_CS_INACTIVE, rs->regs + ROCKCHIP_SPI_IMR);
- else
- writel_relaxed(INT_RF_FULL, rs->regs + ROCKCHIP_SPI_IMR);
+ writel_relaxed(0xffffffff, rs->regs + ROCKCHIP_SPI_ICR);
+
spi_enable_chip(rs, true);
if (rs->tx_left)
rockchip_spi_pio_writer(rs);
+ if (rs->cs_inactive)
+ writel_relaxed(INT_RF_FULL | INT_CS_INACTIVE, rs->regs + ROCKCHIP_SPI_IMR);
+ else
+ writel_relaxed(INT_RF_FULL, rs->regs + ROCKCHIP_SPI_IMR);
+
/* 1 means the transfer is in progress */
return 1;
}
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 7a014eeec2d0..411b1307b7fd 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -613,6 +613,10 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
rspi->dma_callbacked, HZ);
if (ret > 0 && rspi->dma_callbacked) {
ret = 0;
+ if (tx)
+ dmaengine_synchronize(rspi->ctlr->dma_tx);
+ if (rx)
+ dmaengine_synchronize(rspi->ctlr->dma_rx);
} else {
if (!ret) {
dev_err(&rspi->ctlr->dev, "DMA timeout\n");
diff --git a/drivers/staging/olpc_dcon/Kconfig b/drivers/staging/olpc_dcon/Kconfig
index d1a0dea09ef0..d0ba34cc32f7 100644
--- a/drivers/staging/olpc_dcon/Kconfig
+++ b/drivers/staging/olpc_dcon/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
config FB_OLPC_DCON
tristate "One Laptop Per Child Display CONtroller support"
- depends on OLPC && FB
+ depends on OLPC && FB && BROKEN
depends on I2C
depends on GPIO_CS5535 && ACPI
select BACKLIGHT_CLASS_DEVICE
diff --git a/drivers/staging/r8188eu/core/rtw_xmit.c b/drivers/staging/r8188eu/core/rtw_xmit.c
index 3d8e9dea7651..7135d89caac1 100644
--- a/drivers/staging/r8188eu/core/rtw_xmit.c
+++ b/drivers/staging/r8188eu/core/rtw_xmit.c
@@ -178,8 +178,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxmitpriv->free_xmit_extbuf_cnt = num_xmit_extbuf;
- res = rtw_alloc_hwxmits(padapter);
- if (res) {
+ if (rtw_alloc_hwxmits(padapter)) {
res = _FAIL;
goto exit;
}
@@ -1483,19 +1482,10 @@ int rtw_alloc_hwxmits(struct adapter *padapter)
hwxmits = pxmitpriv->hwxmits;
- if (pxmitpriv->hwxmit_entry == 5) {
- hwxmits[0] .sta_queue = &pxmitpriv->bm_pending;
- hwxmits[1] .sta_queue = &pxmitpriv->vo_pending;
- hwxmits[2] .sta_queue = &pxmitpriv->vi_pending;
- hwxmits[3] .sta_queue = &pxmitpriv->bk_pending;
- hwxmits[4] .sta_queue = &pxmitpriv->be_pending;
- } else if (pxmitpriv->hwxmit_entry == 4) {
- hwxmits[0] .sta_queue = &pxmitpriv->vo_pending;
- hwxmits[1] .sta_queue = &pxmitpriv->vi_pending;
- hwxmits[2] .sta_queue = &pxmitpriv->be_pending;
- hwxmits[3] .sta_queue = &pxmitpriv->bk_pending;
- } else {
- }
+ hwxmits[0].sta_queue = &pxmitpriv->vo_pending;
+ hwxmits[1].sta_queue = &pxmitpriv->vi_pending;
+ hwxmits[2].sta_queue = &pxmitpriv->be_pending;
+ hwxmits[3].sta_queue = &pxmitpriv->bk_pending;
return 0;
}
diff --git a/drivers/staging/r8188eu/os_dep/ioctl_linux.c b/drivers/staging/r8188eu/os_dep/ioctl_linux.c
index 1b09462ca908..8dd280e2739a 100644
--- a/drivers/staging/r8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/r8188eu/os_dep/ioctl_linux.c
@@ -403,7 +403,7 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
if (wep_key_len > 0) {
wep_key_len = wep_key_len <= 5 ? 5 : 13;
- wep_total_len = wep_key_len + FIELD_OFFSET(struct ndis_802_11_wep, KeyMaterial);
+ wep_total_len = wep_key_len + sizeof(*pwep);
pwep = kzalloc(wep_total_len, GFP_KERNEL);
if (!pwep)
goto exit;
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
index ece97e37ac91..30374a820496 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
@@ -90,7 +90,8 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
if (wep_key_len > 0) {
wep_key_len = wep_key_len <= 5 ? 5 : 13;
wep_total_len = wep_key_len + FIELD_OFFSET(struct ndis_802_11_wep, key_material);
- pwep = kzalloc(wep_total_len, GFP_KERNEL);
+ /* Allocate a full structure to avoid potentially running off the end. */
+ pwep = kzalloc(sizeof(*pwep), GFP_KERNEL);
if (!pwep) {
ret = -ENOMEM;
goto exit;
@@ -582,7 +583,8 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
if (wep_key_len > 0) {
wep_key_len = wep_key_len <= 5 ? 5 : 13;
wep_total_len = wep_key_len + FIELD_OFFSET(struct ndis_802_11_wep, key_material);
- pwep = kzalloc(wep_total_len, GFP_KERNEL);
+ /* Allocate a full structure to avoid potentially running off the end. */
+ pwep = kzalloc(sizeof(*pwep), GFP_KERNEL);
if (!pwep)
goto exit;
diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
index 33844526c797..02fdef7a16c8 100644
--- a/drivers/staging/wlan-ng/hfa384x_usb.c
+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
@@ -2632,7 +2632,7 @@ static void hfa384x_usbctlx_reaper_task(struct work_struct *work)
*/
static void hfa384x_usbctlx_completion_task(struct work_struct *work)
{
- struct hfa384x *hw = container_of(work, struct hfa384x, reaper_bh);
+ struct hfa384x *hw = container_of(work, struct hfa384x, completion_bh);
struct hfa384x_usbctlx *ctlx, *temp;
unsigned long flags;
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index e68f1cc8ef98..6c8d8b051bfd 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -448,6 +448,9 @@ fd_execute_write_same(struct se_cmd *cmd)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
+ if (!cmd->t_data_nents)
+ return TCM_INVALID_CDB_FIELD;
+
if (cmd->t_data_nents > 1 ||
cmd->t_data_sg[0].length != cmd->se_dev->dev_attrib.block_size) {
pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 378c80313a0f..1ed9381751e6 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -494,6 +494,10 @@ iblock_execute_write_same(struct se_cmd *cmd)
" backends not supported\n");
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
+
+ if (!cmd->t_data_nents)
+ return TCM_INVALID_CDB_FIELD;
+
sg = &cmd->t_data_sg[0];
if (cmd->t_data_nents > 1 ||
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index ca1b2312d6e7..f6132836eb38 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -312,6 +312,12 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char flags, struct sbc_ops *op
pr_warn("WRITE SAME with ANCHOR not supported\n");
return TCM_INVALID_CDB_FIELD;
}
+
+ if (flags & 0x01) {
+ pr_warn("WRITE SAME with NDOB not supported\n");
+ return TCM_INVALID_CDB_FIELD;
+ }
+
/*
* Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
* translated into block discard requests within backend code.
diff --git a/drivers/tee/optee/optee_smc.h b/drivers/tee/optee/optee_smc.h
index c60896cf71cb..73b5e7760d10 100644
--- a/drivers/tee/optee/optee_smc.h
+++ b/drivers/tee/optee/optee_smc.h
@@ -189,7 +189,7 @@ struct optee_smc_call_get_os_revision_result {
* Have config return register usage:
* a0 OPTEE_SMC_RETURN_OK
* a1 Physical address of start of SHM
- * a2 Size of of SHM
+ * a2 Size of SHM
* a3 Cache settings of memory, as defined by the
* OPTEE_SMC_SHM_* values above
* a4-7 Preserved
diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c
index 385cb0aee610..a1c1fa1a9c28 100644
--- a/drivers/tee/optee/smc_abi.c
+++ b/drivers/tee/optee/smc_abi.c
@@ -884,8 +884,8 @@ static int optee_smc_do_call_with_arg(struct tee_context *ctx,
rpc_arg_offs = OPTEE_MSG_GET_ARG_SIZE(arg->num_params);
rpc_arg = tee_shm_get_va(shm, offs + rpc_arg_offs);
- if (IS_ERR(arg))
- return PTR_ERR(arg);
+ if (IS_ERR(rpc_arg))
+ return PTR_ERR(rpc_arg);
}
if (rpc_arg && tee_shm_is_dynamic(shm)) {
diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
index af0f7c603fa4..98da206cd761 100644
--- a/drivers/tee/tee_core.c
+++ b/drivers/tee/tee_core.c
@@ -1073,7 +1073,7 @@ EXPORT_SYMBOL_GPL(tee_device_unregister);
/**
* tee_get_drvdata() - Return driver_data pointer
* @teedev: Device containing the driver_data pointer
- * @returns the driver_data pointer supplied to tee_register().
+ * @returns the driver_data pointer supplied to tee_device_alloc().
*/
void *tee_get_drvdata(struct tee_device *teedev)
{
diff --git a/drivers/thermal/intel/intel_tcc_cooling.c b/drivers/thermal/intel/intel_tcc_cooling.c
index cd80c7db4073..a9596e7562ea 100644
--- a/drivers/thermal/intel/intel_tcc_cooling.c
+++ b/drivers/thermal/intel/intel_tcc_cooling.c
@@ -81,6 +81,7 @@ static const struct x86_cpu_id tcc_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, NULL),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, NULL),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, NULL),
{}
};
diff --git a/drivers/tty/goldfish.c b/drivers/tty/goldfish.c
index c7968aecd870..d02de3f0326f 100644
--- a/drivers/tty/goldfish.c
+++ b/drivers/tty/goldfish.c
@@ -426,7 +426,7 @@ static int goldfish_tty_remove(struct platform_device *pdev)
tty_unregister_device(goldfish_tty_driver, qtty->console.index);
iounmap(qtty->base);
qtty->base = NULL;
- free_irq(qtty->irq, pdev);
+ free_irq(qtty->irq, qtty);
tty_port_destroy(&qtty->port);
goldfish_tty_current_line_count--;
if (goldfish_tty_current_line_count == 0)
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 137eebdcfda9..fd4d24f61c46 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -455,7 +455,7 @@ static void gsm_hex_dump_bytes(const char *fname, const u8 *data,
return;
}
- prefix = kasprintf(GFP_KERNEL, "%s: ", fname);
+ prefix = kasprintf(GFP_ATOMIC, "%s: ", fname);
if (!prefix)
return;
print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, 16, 1, data, len,
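GFP_ATOMIC is needed above because gsm_hex_dump_bytes() can be reached with spinlocks held, where a GFP_KERNEL allocation may sleep. A minimal sketch of the safe pattern (helper name illustrative):

/* Sketch: GFP_ATOMIC never sleeps, so it is safe in atomic context,
 * but it can fail; callers must tolerate a NULL return. */
static char *sketch_make_prefix(const char *fname)
{
	return kasprintf(GFP_ATOMIC, "%s: ", fname);
}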
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index 74bfabe5b453..752dab3356d7 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -111,21 +111,11 @@ static void pty_unthrottle(struct tty_struct *tty)
static int pty_write(struct tty_struct *tty, const unsigned char *buf, int c)
{
struct tty_struct *to = tty->link;
- unsigned long flags;
- if (tty->flow.stopped)
+ if (tty->flow.stopped || !c)
return 0;
- if (c > 0) {
- spin_lock_irqsave(&to->port->lock, flags);
- /* Stuff the data into the input queue of the other end */
- c = tty_insert_flip_string(to->port, buf, c);
- spin_unlock_irqrestore(&to->port->lock, flags);
- /* And shovel */
- if (c)
- tty_flip_buffer_push(to->port);
- }
- return c;
+ return tty_insert_flip_string_and_push_buffer(to->port, buf, c);
}
/**
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index cfbd2de0ca6e..3f56dbc9432b 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -23,6 +23,7 @@
#include <linux/sysrq.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/tty.h>
#include <linux/ratelimit.h>
#include <linux/tty_flip.h>
@@ -559,6 +560,9 @@ serial8250_register_ports(struct uart_driver *drv, struct device *dev)
up->port.dev = dev;
+ if (uart_console_enabled(&up->port))
+ pm_runtime_get_sync(up->port.dev);
+
serial8250_apply_quirks(up);
uart_add_one_port(drv, &up->port);
}
diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
index 7133fceed35e..a8dba4a0a8fb 100644
--- a/drivers/tty/serial/8250/8250_dma.c
+++ b/drivers/tty/serial/8250/8250_dma.c
@@ -106,10 +106,10 @@ int serial8250_tx_dma(struct uart_8250_port *p)
UART_XMIT_SIZE, DMA_TO_DEVICE);
dma_async_issue_pending(dma->txchan);
- if (dma->tx_err) {
+ serial8250_clear_THRI(p);
+ if (dma->tx_err)
dma->tx_err = 0;
- serial8250_clear_THRI(p);
- }
+
return 0;
err:
dma->tx_err = 1;
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index f57bbd32ef11..bb6aca07ab56 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -47,7 +47,7 @@
#define RZN1_UART_xDMACR_DMA_EN BIT(0)
#define RZN1_UART_xDMACR_1_WORD_BURST (0 << 1)
#define RZN1_UART_xDMACR_4_WORD_BURST (1 << 1)
-#define RZN1_UART_xDMACR_8_WORD_BURST (3 << 1)
+#define RZN1_UART_xDMACR_8_WORD_BURST (2 << 1)
#define RZN1_UART_xDMACR_BLK_SZ(x) ((x) << 3)
/* Quirks */
@@ -773,18 +773,18 @@ static const struct of_device_id dw8250_of_match[] = {
MODULE_DEVICE_TABLE(of, dw8250_of_match);
static const struct acpi_device_id dw8250_acpi_match[] = {
- { "INT33C4", 0 },
- { "INT33C5", 0 },
- { "INT3434", 0 },
- { "INT3435", 0 },
- { "80860F0A", 0 },
- { "8086228A", 0 },
- { "APMC0D08", 0},
- { "AMD0020", 0 },
- { "AMDI0020", 0 },
- { "AMDI0022", 0 },
- { "BRCM2032", 0 },
- { "HISI0031", 0 },
+ { "80860F0A", (kernel_ulong_t)&dw8250_dw_apb },
+ { "8086228A", (kernel_ulong_t)&dw8250_dw_apb },
+ { "AMD0020", (kernel_ulong_t)&dw8250_dw_apb },
+ { "AMDI0020", (kernel_ulong_t)&dw8250_dw_apb },
+ { "AMDI0022", (kernel_ulong_t)&dw8250_dw_apb },
+ { "APMC0D08", (kernel_ulong_t)&dw8250_dw_apb},
+ { "BRCM2032", (kernel_ulong_t)&dw8250_dw_apb },
+ { "HISI0031", (kernel_ulong_t)&dw8250_dw_apb },
+ { "INT33C4", (kernel_ulong_t)&dw8250_dw_apb },
+ { "INT33C5", (kernel_ulong_t)&dw8250_dw_apb },
+ { "INT3434", (kernel_ulong_t)&dw8250_dw_apb },
+ { "INT3435", (kernel_ulong_t)&dw8250_dw_apb },
{ },
};
MODULE_DEVICE_TABLE(acpi, dw8250_acpi_match);
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 78b6dedc43e6..3c36a06a20b0 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -1517,6 +1517,8 @@ static inline void __stop_tx(struct uart_8250_port *p)
unsigned char lsr = serial_in(p, UART_LSR);
u64 stop_delay = 0;
+ p->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
+
if (!(lsr & UART_LSR_THRE))
return;
/*
@@ -1947,7 +1949,7 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
if ((status & UART_LSR_THRE) && (up->ier & UART_IER_THRI)) {
if (!up->dma || up->dma->tx_err)
serial8250_tx_chars(up);
- else
+ else if (!up->dma->tx_running)
__stop_tx(up);
}
@@ -2973,8 +2975,10 @@ static int serial8250_request_std_resource(struct uart_8250_port *up)
case UPIO_MEM32BE:
case UPIO_MEM16:
case UPIO_MEM:
- if (!port->mapbase)
+ if (!port->mapbase) {
+ ret = -EINVAL;
break;
+ }
if (!request_mem_region(port->mapbase, size, "serial")) {
ret = -EBUSY;
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 97ef41cb2721..16a21422ddce 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -1367,6 +1367,15 @@ static void pl011_stop_rx(struct uart_port *port)
pl011_dma_rx_stop(uap);
}
+static void pl011_throttle_rx(struct uart_port *port)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ pl011_stop_rx(port);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
static void pl011_enable_ms(struct uart_port *port)
{
struct uart_amba_port *uap =
@@ -1788,9 +1797,10 @@ static int pl011_allocate_irq(struct uart_amba_port *uap)
*/
static void pl011_enable_interrupts(struct uart_amba_port *uap)
{
+ unsigned long flags;
unsigned int i;
- spin_lock_irq(&uap->port.lock);
+ spin_lock_irqsave(&uap->port.lock, flags);
/* Clear out any spuriously appearing RX interrupts */
pl011_write(UART011_RTIS | UART011_RXIS, uap, REG_ICR);
@@ -1812,7 +1822,14 @@ static void pl011_enable_interrupts(struct uart_amba_port *uap)
if (!pl011_dma_rx_running(uap))
uap->im |= UART011_RXIM;
pl011_write(uap->im, uap, REG_IMSC);
- spin_unlock_irq(&uap->port.lock);
+ spin_unlock_irqrestore(&uap->port.lock, flags);
+}
+
+static void pl011_unthrottle_rx(struct uart_port *port)
+{
+ struct uart_amba_port *uap = container_of(port, struct uart_amba_port, port);
+
+ pl011_enable_interrupts(uap);
}
static int pl011_startup(struct uart_port *port)
@@ -2225,6 +2242,8 @@ static const struct uart_ops amba_pl011_pops = {
.stop_tx = pl011_stop_tx,
.start_tx = pl011_start_tx,
.stop_rx = pl011_stop_rx,
+ .throttle = pl011_throttle_rx,
+ .unthrottle = pl011_unthrottle_rx,
.enable_ms = pl011_enable_ms,
.break_ctl = pl011_break_ctl,
.startup = pl011_startup,
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index 0429c2a54290..93489fe334d0 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -470,14 +470,14 @@ static void mvebu_uart_shutdown(struct uart_port *port)
}
}
-static int mvebu_uart_baud_rate_set(struct uart_port *port, unsigned int baud)
+static unsigned int mvebu_uart_baud_rate_set(struct uart_port *port, unsigned int baud)
{
unsigned int d_divisor, m_divisor;
unsigned long flags;
u32 brdv, osamp;
if (!port->uartclk)
- return -EOPNOTSUPP;
+ return 0;
/*
* The baudrate is derived from the UART clock thanks to divisors:
@@ -548,7 +548,7 @@ static int mvebu_uart_baud_rate_set(struct uart_port *port, unsigned int baud)
(m_divisor << 16) | (m_divisor << 24);
writel(osamp, port->membase + UART_OSAMP);
- return 0;
+ return DIV_ROUND_CLOSEST(port->uartclk, d_divisor * m_divisor);
}
static void mvebu_uart_set_termios(struct uart_port *port,
@@ -587,15 +587,11 @@ static void mvebu_uart_set_termios(struct uart_port *port,
max_baud = port->uartclk / 80;
baud = uart_get_baud_rate(port, termios, old, min_baud, max_baud);
- if (mvebu_uart_baud_rate_set(port, baud)) {
- /* No clock available, baudrate cannot be changed */
- if (old)
- baud = uart_get_baud_rate(port, old, NULL,
- min_baud, max_baud);
- } else {
- tty_termios_encode_baud_rate(termios, baud, baud);
- uart_update_timeout(port, termios->c_cflag, baud);
- }
+ baud = mvebu_uart_baud_rate_set(port, baud);
+
+ /* In case baudrate cannot be changed, report the old value */
+ if (baud == 0 && old)
+ baud = tty_termios_baud_rate(old);
/* Only the following flag changes are supported */
if (old) {
@@ -606,6 +602,11 @@ static void mvebu_uart_set_termios(struct uart_port *port,
termios->c_cflag |= CS8;
}
+ if (baud != 0) {
+ tty_termios_encode_baud_rate(termios, baud, baud);
+ uart_update_timeout(port, termios->c_cflag, baud);
+ }
+
spin_unlock_irqrestore(&port->lock, flags);
}
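
With the change above, mvebu_uart_baud_rate_set() reports the baud rate actually programmed rather than the requested one. The following userspace sketch shows the rounding involved, with a hypothetical clock and divisor pair (the real d_divisor/m_divisor search is not part of this hunk).

#include <stdio.h>

/* Userspace analogue of the kernel's DIV_ROUND_CLOSEST() for positive values. */
#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

int main(void)
{
	/* Hypothetical numbers: 25 MHz UART clock, divisors d = 16, m = 13. */
	unsigned int uartclk = 25000000, d_divisor = 16, m_divisor = 13;
	unsigned int baud = DIV_ROUND_CLOSEST(uartclk, d_divisor * m_divisor);

	/* 25000000 / 208 = 120192.3..., so 120192 is what set_termios reports. */
	printf("actual baud = %u\n", baud);
	return 0;
}
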
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index 4733a233bd0c..f8f950641ad9 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -1306,6 +1306,7 @@ static const struct uart_ops qcom_geni_console_pops = {
.stop_tx = qcom_geni_serial_stop_tx,
.start_tx = qcom_geni_serial_start_tx,
.stop_rx = qcom_geni_serial_stop_rx,
+ .start_rx = qcom_geni_serial_start_rx,
.set_termios = qcom_geni_serial_set_termios,
.startup = qcom_geni_serial_startup,
.request_port = qcom_geni_serial_request_port,
diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c
index d5ca904def34..1afe47b62ad5 100644
--- a/drivers/tty/serial/samsung_tty.c
+++ b/drivers/tty/serial/samsung_tty.c
@@ -377,8 +377,7 @@ static void enable_tx_dma(struct s3c24xx_uart_port *ourport)
/* Enable tx dma mode */
ucon = rd_regl(port, S3C2410_UCON);
ucon &= ~(S3C64XX_UCON_TXBURST_MASK | S3C64XX_UCON_TXMODE_MASK);
- ucon |= (dma_get_cache_alignment() >= 16) ?
- S3C64XX_UCON_TXBURST_16 : S3C64XX_UCON_TXBURST_1;
+ ucon |= S3C64XX_UCON_TXBURST_1;
ucon |= S3C64XX_UCON_TXMODE_DMA;
wr_regl(port, S3C2410_UCON, ucon);
@@ -674,7 +673,7 @@ static void enable_rx_dma(struct s3c24xx_uart_port *ourport)
S3C64XX_UCON_DMASUS_EN |
S3C64XX_UCON_TIMEOUT_EN |
S3C64XX_UCON_RXMODE_MASK);
- ucon |= S3C64XX_UCON_RXBURST_16 |
+ ucon |= S3C64XX_UCON_RXBURST_1 |
0xf << S3C64XX_UCON_TIMEOUT_SHIFT |
S3C64XX_UCON_EMPTYINT_EN |
S3C64XX_UCON_TIMEOUT_EN |
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 9a85b41caa0a..3dc926d6c00a 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -1941,11 +1941,6 @@ static int uart_proc_show(struct seq_file *m, void *v)
}
#endif
-static inline bool uart_console_enabled(struct uart_port *port)
-{
- return uart_console(port) && (port->cons->flags & CON_ENABLED);
-}
-
static void uart_port_spin_lock_init(struct uart_port *port)
{
spin_lock_init(&port->lock);
@@ -2214,11 +2209,12 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
/*
* Nothing to do if the console is not suspending
* except stop_rx to prevent any asynchronous data
- * over RX line. Re-start_rx, when required, is
- * done by set_termios in resume sequence
+ * over the RX line. However, ensure that we will be
+ * able to re-start RX later.
*/
if (!console_suspend_enabled && uart_console(uport)) {
- uport->ops->stop_rx(uport);
+ if (uport->ops->start_rx)
+ uport->ops->stop_rx(uport);
goto unlock;
}
@@ -2310,6 +2306,8 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
if (console_suspend_enabled)
uart_change_pm(state, UART_PM_STATE_ON);
uport->ops->set_termios(uport, &termios, NULL);
+ if (!console_suspend_enabled && uport->ops->start_rx)
+ uport->ops->start_rx(uport);
if (console_suspend_enabled)
console_start(uport->cons);
}
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index b7b44f4050d4..0973b03eeeaa 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -72,6 +72,8 @@ static void stm32_usart_config_reg_rs485(u32 *cr1, u32 *cr3, u32 delay_ADE,
*cr3 |= USART_CR3_DEM;
over8 = *cr1 & USART_CR1_OVER8;
+ *cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
+
if (over8)
rs485_deat_dedt = delay_ADE * baud * 8;
else
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 18e623325887..d2b2720db6ca 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -581,7 +581,6 @@ void __handle_sysrq(int key, bool check_mask)
rcu_sysrq_start();
rcu_read_lock();
- printk_prefer_direct_enter();
/*
* Raise the apparent loglevel to maximum so that the sysrq header
* is shown to provide the user with positive feedback. We do not
@@ -623,7 +622,6 @@ void __handle_sysrq(int key, bool check_mask)
pr_cont("\n");
console_loglevel = orig_log_level;
}
- printk_prefer_direct_exit();
rcu_read_unlock();
rcu_sysrq_end();
diff --git a/drivers/tty/tty.h b/drivers/tty/tty.h
index b710c5ef89ab..f310a8274df1 100644
--- a/drivers/tty/tty.h
+++ b/drivers/tty/tty.h
@@ -111,4 +111,7 @@ static inline void tty_audit_tiocsti(struct tty_struct *tty, char ch)
ssize_t redirected_tty_write(struct kiocb *, struct iov_iter *);
+int tty_insert_flip_string_and_push_buffer(struct tty_port *port,
+ const unsigned char *chars, size_t cnt);
+
#endif
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index bfa431a8e690..595d8b49c745 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -532,6 +532,15 @@ static void flush_to_ldisc(struct work_struct *work)
}
+static inline void tty_flip_buffer_commit(struct tty_buffer *tail)
+{
+ /*
+ * Paired w/ acquire in flush_to_ldisc(); ensures flush_to_ldisc() sees
+ * buffer data.
+ */
+ smp_store_release(&tail->commit, tail->used);
+}
+
/**
* tty_flip_buffer_push - push terminal buffers
* @port: tty port to push
@@ -546,16 +555,43 @@ void tty_flip_buffer_push(struct tty_port *port)
{
struct tty_bufhead *buf = &port->buf;
- /*
- * Paired w/ acquire in flush_to_ldisc(); ensures flush_to_ldisc() sees
- * buffer data.
- */
- smp_store_release(&buf->tail->commit, buf->tail->used);
+ tty_flip_buffer_commit(buf->tail);
queue_work(system_unbound_wq, &buf->work);
}
EXPORT_SYMBOL(tty_flip_buffer_push);
/**
+ * tty_insert_flip_string_and_push_buffer - add characters to the tty buffer and
+ * push
+ * @port: tty port
+ * @chars: characters
+ * @size: size
+ *
+ * The function combines tty_insert_flip_string() and tty_flip_buffer_push(),
+ * except that it also properly holds the @port->lock.
+ *
+ * To be used only internally (by pty currently).
+ *
+ * Returns: the number added.
+ */
+int tty_insert_flip_string_and_push_buffer(struct tty_port *port,
+ const unsigned char *chars, size_t size)
+{
+ struct tty_bufhead *buf = &port->buf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ size = tty_insert_flip_string(port, chars, size);
+ if (size)
+ tty_flip_buffer_commit(buf->tail);
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ queue_work(system_unbound_wq, &buf->work);
+
+ return size;
+}
+
+/**
* tty_buffer_init - prepare a tty buffer structure
* @port: tty port to initialise
*
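
tty_flip_buffer_commit() factors out a store-release that pairs with an acquire in flush_to_ldisc(). Below is a minimal userspace analogue of that pairing using C11 atomics, with pthreads standing in for the kernel's workqueue; it is a sketch of the ordering guarantee, not the kernel primitive.

#include <stdatomic.h>
#include <stdio.h>
#include <pthread.h>

static char data[16];
static atomic_int commit;	/* plays the role of tail->commit */

static void *producer(void *arg)
{
	(void)arg;
	data[0] = 'x';	/* fill the buffer first */
	atomic_store_explicit(&commit, 1, memory_order_release);
	return NULL;
}

static void *consumer(void *arg)
{
	(void)arg;
	/* Like flush_to_ldisc(): only read data after acquiring the commit. */
	while (!atomic_load_explicit(&commit, memory_order_acquire))
		;
	printf("consumer sees '%c'\n", data[0]);
	return NULL;
}

int main(void)
{
	pthread_t p, c;

	pthread_create(&c, NULL, consumer, NULL);
	pthread_create(&p, NULL, producer, NULL);
	pthread_join(p, NULL);
	pthread_join(c, NULL);
	return 0;
}
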
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index f8c87c4d7399..dfc1f4b445f3 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -855,7 +855,7 @@ static void delete_char(struct vc_data *vc, unsigned int nr)
unsigned short *p = (unsigned short *) vc->vc_pos;
vc_uniscr_delete(vc, nr);
- scr_memcpyw(p, p + nr, (vc->vc_cols - vc->state.x - nr) * 2);
+ scr_memmovew(p, p + nr, (vc->vc_cols - vc->state.x - nr) * 2);
scr_memsetw(p + vc->vc_cols - vc->state.x - nr, vc->vc_video_erase_char,
nr * 2);
vc->vc_need_wrap = 0;
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 01fb4bad86be..c7b337480e3e 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -748,17 +748,28 @@ static enum utp_ocs ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
}
/**
- * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
+ * ufshcd_utrl_clear() - Clear requests from the controller request list.
* @hba: per adapter instance
- * @pos: position of the bit to be cleared
+ * @mask: mask with one bit set for each request to be cleared
*/
-static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
+static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 mask)
{
if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
- ufshcd_writel(hba, (1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
- else
- ufshcd_writel(hba, ~(1 << pos),
- REG_UTP_TRANSFER_REQ_LIST_CLEAR);
+ mask = ~mask;
+ /*
+ * From the UFSHCI specification: "UTP Transfer Request List CLear
+ * Register (UTRLCLR): This field is bit significant. Each bit
+ * corresponds to a slot in the UTP Transfer Request List, where bit 0
+ * corresponds to request slot 0. A bit in this field is set to ‘0’
+ * by host software to indicate to the host controller that a transfer
+ * request slot is cleared. The host controller
+ * shall free up any resources associated to the request slot
+ * immediately, and shall set the associated bit in UTRLDBR to ‘0’. The
+ * host software indicates no change to request slots by setting the
+ * associated bits in this field to ‘1’. Bits in this field shall only
+ * be set ‘1’ or ‘0’ by host software when UTRLRSR is set to ‘1’."
+ */
+ ufshcd_writel(hba, ~mask, REG_UTP_TRANSFER_REQ_LIST_CLEAR);
}
/**
@@ -2863,27 +2874,26 @@ static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
return ufshcd_compose_devman_upiu(hba, lrbp);
}
-static int
-ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
+/*
+ * Clear all the requests from the controller for which a bit has been set in
+ * @mask and wait until the controller confirms that these requests have been
+ * cleared.
+ */
+static int ufshcd_clear_cmds(struct ufs_hba *hba, u32 mask)
{
- int err = 0;
unsigned long flags;
- u32 mask = 1 << tag;
/* clear outstanding transaction before retry */
spin_lock_irqsave(hba->host->host_lock, flags);
- ufshcd_utrl_clear(hba, tag);
+ ufshcd_utrl_clear(hba, mask);
spin_unlock_irqrestore(hba->host->host_lock, flags);
/*
* wait for h/w to clear corresponding bit in door-bell.
* max. wait is 1 sec.
*/
- err = ufshcd_wait_for_register(hba,
- REG_UTP_TRANSFER_REQ_DOOR_BELL,
- mask, ~mask, 1000, 1000);
-
- return err;
+ return ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
+ mask, ~mask, 1000, 1000);
}
static int
@@ -2963,7 +2973,7 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
err = -ETIMEDOUT;
dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
__func__, lrbp->task_tag);
- if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
+ if (!ufshcd_clear_cmds(hba, 1U << lrbp->task_tag))
/* successfully cleared the command, retry if needed */
err = -EAGAIN;
/*
@@ -5728,7 +5738,7 @@ int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable)
}
hba->dev_info.wb_enabled = enable;
- dev_info(hba->dev, "%s Write Booster %s\n",
+ dev_dbg(hba->dev, "%s Write Booster %s\n",
__func__, enable ? "enabled" : "disabled");
return ret;
@@ -6958,14 +6968,14 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
}
/**
- * ufshcd_eh_device_reset_handler - device reset handler registered to
- * scsi layer.
+ * ufshcd_eh_device_reset_handler() - Reset a single logical unit.
* @cmd: SCSI command pointer
*
* Returns SUCCESS/FAILED
*/
static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
+ unsigned long flags, pending_reqs = 0, not_cleared = 0;
struct Scsi_Host *host;
struct ufs_hba *hba;
u32 pos;
@@ -6984,14 +6994,24 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
}
/* clear the commands that were pending for corresponding LUN */
- for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
- if (hba->lrb[pos].lun == lun) {
- err = ufshcd_clear_cmd(hba, pos);
- if (err)
- break;
- __ufshcd_transfer_req_compl(hba, 1U << pos);
- }
+ spin_lock_irqsave(&hba->outstanding_lock, flags);
+ for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs)
+ if (hba->lrb[pos].lun == lun)
+ __set_bit(pos, &pending_reqs);
+ hba->outstanding_reqs &= ~pending_reqs;
+ spin_unlock_irqrestore(&hba->outstanding_lock, flags);
+
+ if (ufshcd_clear_cmds(hba, pending_reqs) < 0) {
+ spin_lock_irqsave(&hba->outstanding_lock, flags);
+ not_cleared = pending_reqs &
+ ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ hba->outstanding_reqs |= not_cleared;
+ spin_unlock_irqrestore(&hba->outstanding_lock, flags);
+
+ dev_err(hba->dev, "%s: failed to clear requests %#lx\n",
+ __func__, not_cleared);
}
+ __ufshcd_transfer_req_compl(hba, pending_reqs & ~not_cleared);
out:
hba->req_abort_count = 0;
@@ -7088,7 +7108,7 @@ static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
goto out;
}
- err = ufshcd_clear_cmd(hba, tag);
+ err = ufshcd_clear_cmds(hba, 1U << tag);
if (err)
dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
__func__, tag, err);
@@ -7233,7 +7253,7 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
hba->silence_err_logs = false;
/* scale up clocks to max frequency before full reinitialization */
- ufshcd_set_clk_freq(hba, true);
+ ufshcd_scale_clks(hba, true);
err = ufshcd_hba_enable(hba);
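
The mask handling in ufshcd_utrl_clear() above is easy to misread because of the double inversion. The sketch below assumes only what the patch and the quoted spec text state: a compliant controller clears the slots whose bits are written as 0, while controllers with the BROKEN_REQ_LIST_CLR quirk expect the inverted convention.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static uint32_t utrlclr_value(uint32_t mask, bool broken_clr_quirk)
{
	if (broken_clr_quirk)
		mask = ~mask;	/* quirky controllers use 1 = clear */
	return ~mask;		/* spec-compliant: 0 = clear, 1 = no change */
}

int main(void)
{
	uint32_t mask = (1u << 3) | (1u << 0);	/* clear request slots 0 and 3 */

	printf("spec:  0x%08x\n", (unsigned)utrlclr_value(mask, false)); /* 0xfffffff6 */
	printf("quirk: 0x%08x\n", (unsigned)utrlclr_value(mask, true));  /* 0x00000009 */
	return 0;
}
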
diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c
index e45c3d6e1536..794e413800ae 100644
--- a/drivers/usb/cdns3/cdnsp-ring.c
+++ b/drivers/usb/cdns3/cdnsp-ring.c
@@ -1941,13 +1941,16 @@ int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq)
}
if (enqd_len + trb_buff_len >= full_len) {
- if (need_zero_pkt)
- zero_len_trb = !zero_len_trb;
-
- field &= ~TRB_CHAIN;
- field |= TRB_IOC;
- more_trbs_coming = false;
- preq->td.last_trb = ring->enqueue;
+ if (need_zero_pkt && !zero_len_trb) {
+ zero_len_trb = true;
+ } else {
+ zero_len_trb = false;
+ field &= ~TRB_CHAIN;
+ field |= TRB_IOC;
+ more_trbs_coming = false;
+ need_zero_pkt = false;
+ preq->td.last_trb = ring->enqueue;
+ }
}
/* Only set interrupt on short packet for OUT endpoints. */
@@ -1962,7 +1965,7 @@ int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq)
length_field = TRB_LEN(trb_buff_len) | TRB_TD_SIZE(remainder) |
TRB_INTR_TARGET(0);
- cdnsp_queue_trb(pdev, ring, more_trbs_coming | zero_len_trb,
+ cdnsp_queue_trb(pdev, ring, more_trbs_coming,
lower_32_bits(send_addr),
upper_32_bits(send_addr),
length_field,
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index dc6c96e04bcf..3b8bf6daf7d0 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1048,6 +1048,9 @@ isr_setup_status_complete(struct usb_ep *ep, struct usb_request *req)
struct ci_hdrc *ci = req->context;
unsigned long flags;
+ if (req->status < 0)
+ return;
+
if (ci->setaddr) {
hw_usb_set_address(ci, ci->address);
ci->setaddr = false;
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index f63a27d11fac..3f107a06817d 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -5190,7 +5190,7 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
retval = -EINVAL;
- goto error1;
+ goto error2;
}
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index e027c0420dc3..573421984948 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1644,13 +1644,8 @@ static struct extcon_dev *dwc3_get_extcon(struct dwc3 *dwc)
* This device property is for kernel internal use only and
* is expected to be set by the glue code.
*/
- if (device_property_read_string(dev, "linux,extcon-name", &name) == 0) {
- edev = extcon_get_extcon_dev(name);
- if (!edev)
- return ERR_PTR(-EPROBE_DEFER);
-
- return edev;
- }
+ if (device_property_read_string(dev, "linux,extcon-name", &name) == 0)
+ return extcon_get_extcon_dev(name);
/*
* Try to get an extcon device from the USB PHY controller's "port"
diff --git a/drivers/usb/dwc3/dwc3-am62.c b/drivers/usb/dwc3/dwc3-am62.c
index fea7aca35dc8..173cf3579c55 100644
--- a/drivers/usb/dwc3/dwc3-am62.c
+++ b/drivers/usb/dwc3/dwc3-am62.c
@@ -195,8 +195,7 @@ static int dwc3_ti_probe(struct platform_device *pdev)
if (i == ARRAY_SIZE(dwc3_ti_rate_table)) {
dev_err(dev, "unsupported usb2_refclk rate: %lu KHz\n", rate);
- ret = -EINVAL;
- goto err_clk_disable;
+ return -EINVAL;
}
data->rate_code = i;
@@ -204,7 +203,7 @@ static int dwc3_ti_probe(struct platform_device *pdev)
/* Read the syscon property and set the rate code */
ret = phy_syscon_pll_refclk(data);
if (ret)
- goto err_clk_disable;
+ return ret;
/* VBUS divider select */
data->vbus_divider = device_property_read_bool(dev, "ti,vbus-divider");
@@ -245,8 +244,6 @@ err_pm_disable:
clk_disable_unprepare(data->usb2_refclk);
pm_runtime_disable(dev);
pm_runtime_set_suspended(dev);
-err_clk_disable:
- clk_put(data->usb2_refclk);
return ret;
}
@@ -276,7 +273,6 @@ static int dwc3_ti_remove(struct platform_device *pdev)
pm_runtime_disable(dev);
pm_runtime_set_suspended(dev);
- clk_put(data->usb2_refclk);
platform_set_drvdata(pdev, NULL);
return 0;
}
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index ba51de7dd760..6b018048fe2e 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -127,6 +127,7 @@ static const struct property_entry dwc3_pci_intel_phy_charger_detect_properties[
PROPERTY_ENTRY_STRING("dr_mode", "peripheral"),
PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"),
PROPERTY_ENTRY_BOOL("linux,phy_charger_detect"),
+ PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
{}
};
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 00427d108ab9..0d89dfa6eef5 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2976,6 +2976,7 @@ static int dwc3_gadget_init_in_endpoint(struct dwc3_ep *dep)
struct dwc3 *dwc = dep->dwc;
u32 mdwidth;
int size;
+ int maxpacket;
mdwidth = dwc3_mdwidth(dwc);
@@ -2988,21 +2989,24 @@ static int dwc3_gadget_init_in_endpoint(struct dwc3_ep *dep)
else
size = DWC31_GTXFIFOSIZ_TXFDEP(size);
- /* FIFO Depth is in MDWDITH bytes. Multiply */
- size *= mdwidth;
-
/*
- * To meet performance requirement, a minimum TxFIFO size of 3x
- * MaxPacketSize is recommended for endpoints that support burst and a
- * minimum TxFIFO size of 2x MaxPacketSize for endpoints that don't
- * support burst. Use those numbers and we can calculate the max packet
- * limit as below.
+ * maxpacket size is determined as part of the following, after assuming
+ * a mult value of one maxpacket:
+ * DWC3 revision 280A and prior:
+ * fifo_size = mult * (max_packet / mdwidth) + 1;
+ * maxpacket = mdwidth * (fifo_size - 1);
+ *
+ * DWC3 revision 290A and onwards:
+ * fifo_size = mult * ((max_packet + mdwidth)/mdwidth + 1) + 1
+ * maxpacket = mdwidth * ((fifo_size - 1) - 1) - mdwidth;
*/
- if (dwc->maximum_speed >= USB_SPEED_SUPER)
- size /= 3;
+ if (DWC3_VER_IS_PRIOR(DWC3, 290A))
+ maxpacket = mdwidth * (size - 1);
else
- size /= 2;
+ maxpacket = mdwidth * ((size - 1) - 1) - mdwidth;
+ /* Functionally, space for one max packet is sufficient */
+ size = min_t(int, maxpacket, 1024);
usb_ep_set_maxpacket_limit(&dep->endpoint, size);
dep->endpoint.max_streams = 16;
@@ -4245,7 +4249,6 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
}
evt->count = 0;
- evt->flags &= ~DWC3_EVENT_PENDING;
ret = IRQ_HANDLED;
/* Unmask interrupt */
@@ -4257,6 +4260,9 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval);
}
+ /* Keep the clearing of DWC3_EVENT_PENDING at the end */
+ evt->flags &= ~DWC3_EVENT_PENDING;
+
return ret;
}
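
The maxpacket derivation above inverts the controller's fifo_size formula. Here is a worked userspace example with hypothetical mdwidth and GTXFIFOSIZ values; the numbers are made up, and only the arithmetic mirrors the patch.

#include <stdio.h>

#define min(a, b)	((a) < (b) ? (a) : (b))

int main(void)
{
	int mdwidth = 8;	/* bus width in bytes, hypothetical */
	int size = 131;		/* GTXFIFOSIZ depth in MDWIDTH words, hypothetical */
	int prior_to_290a = 1;	/* pretend DWC3_VER_IS_PRIOR(DWC3, 290A) */
	int maxpacket;

	if (prior_to_290a)
		maxpacket = mdwidth * (size - 1);		  /* 8 * 130 = 1040 */
	else
		maxpacket = mdwidth * ((size - 1) - 1) - mdwidth; /* 8 * 129 - 8 = 1024 */

	/* Space for one max packet is functionally sufficient; cap at 1024. */
	printf("maxpacket limit = %d\n", min(maxpacket, 1024));
	return 0;
}
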
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 4585ee3a444a..e0fa4b186ec6 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -122,8 +122,6 @@ struct ffs_ep {
struct usb_endpoint_descriptor *descs[3];
u8 num;
-
- int status; /* P: epfile->mutex */
};
struct ffs_epfile {
@@ -227,6 +225,9 @@ struct ffs_io_data {
bool use_sg;
struct ffs_data *ffs;
+
+ int status;
+ struct completion done;
};
struct ffs_desc_helper {
@@ -707,12 +708,15 @@ static const struct file_operations ffs_ep0_operations = {
static void ffs_epfile_io_complete(struct usb_ep *_ep, struct usb_request *req)
{
+ struct ffs_io_data *io_data = req->context;
+
ENTER();
- if (req->context) {
- struct ffs_ep *ep = _ep->driver_data;
- ep->status = req->status ? req->status : req->actual;
- complete(req->context);
- }
+ if (req->status)
+ io_data->status = req->status;
+ else
+ io_data->status = req->actual;
+
+ complete(&io_data->done);
}
static ssize_t ffs_copy_to_iter(void *data, int data_len, struct iov_iter *iter)
@@ -1050,7 +1054,6 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
WARN(1, "%s: data_len == -EINVAL\n", __func__);
ret = -EINVAL;
} else if (!io_data->aio) {
- DECLARE_COMPLETION_ONSTACK(done);
bool interrupted = false;
req = ep->req;
@@ -1066,7 +1069,8 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
io_data->buf = data;
- req->context = &done;
+ init_completion(&io_data->done);
+ req->context = io_data;
req->complete = ffs_epfile_io_complete;
ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
@@ -1075,7 +1079,12 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
spin_unlock_irq(&epfile->ffs->eps_lock);
- if (wait_for_completion_interruptible(&done)) {
+ if (wait_for_completion_interruptible(&io_data->done)) {
+ spin_lock_irq(&epfile->ffs->eps_lock);
+ if (epfile->ep != ep) {
+ ret = -ESHUTDOWN;
+ goto error_lock;
+ }
/*
* To avoid race condition with ffs_epfile_io_complete,
* dequeue the request first then check
@@ -1083,17 +1092,18 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
* condition with req->complete callback.
*/
usb_ep_dequeue(ep->ep, req);
- wait_for_completion(&done);
- interrupted = ep->status < 0;
+ spin_unlock_irq(&epfile->ffs->eps_lock);
+ wait_for_completion(&io_data->done);
+ interrupted = io_data->status < 0;
}
if (interrupted)
ret = -EINTR;
- else if (io_data->read && ep->status > 0)
- ret = __ffs_epfile_read_data(epfile, data, ep->status,
+ else if (io_data->read && io_data->status > 0)
+ ret = __ffs_epfile_read_data(epfile, data, io_data->status,
&io_data->data);
else
- ret = ep->status;
+ ret = io_data->status;
goto error_mutex;
} else if (!(req = usb_ep_alloc_request(ep->ep, GFP_ATOMIC))) {
ret = -ENOMEM;
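
The f_fs change above moves the completion and status out of shared endpoint state into the per-I/O ffs_io_data, so the callback always runs against a context with the right lifetime. A minimal userspace sketch of that pattern follows, with a POSIX semaphore standing in for struct completion; all names are illustrative.

#include <stdio.h>
#include <pthread.h>
#include <semaphore.h>

struct io_data {
	int status;	/* written once by the completer */
	sem_t done;	/* embedded completion, same lifetime as the I/O */
};

static void io_complete(struct io_data *io, int status)
{
	io->status = status;	/* publish the result first */
	sem_post(&io->done);	/* then wake the submitter */
}

static void *completer(void *arg)
{
	io_complete(arg, 512);	/* e.g. 512 bytes transferred */
	return NULL;
}

int main(void)
{
	struct io_data io = { .status = 0 };
	pthread_t t;

	sem_init(&io.done, 0, 0);
	pthread_create(&t, NULL, completer, &io);
	sem_wait(&io.done);	/* wait_for_completion() analogue */
	printf("status = %d\n", io.status);
	pthread_join(t, NULL);
	sem_destroy(&io.done);
	return 0;
}
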
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 6f5d45ef2e39..f51694f29de9 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -775,9 +775,13 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g,
dev->qmult = qmult;
snprintf(net->name, sizeof(net->name), "%s%%d", netname);
- if (get_ether_addr(dev_addr, addr))
+ if (get_ether_addr(dev_addr, addr)) {
+ net->addr_assign_type = NET_ADDR_RANDOM;
dev_warn(&g->dev,
"using random %s ethernet address\n", "self");
+ } else {
+ net->addr_assign_type = NET_ADDR_SET;
+ }
eth_hw_addr_set(net, addr);
if (get_ether_addr(host_addr, dev->host_mac))
dev_warn(&g->dev,
@@ -844,6 +848,10 @@ struct net_device *gether_setup_name_default(const char *netname)
eth_random_addr(dev->dev_mac);
pr_warn("using random %s ethernet address\n", "self");
+
+ /* by default we always have a random MAC address */
+ net->addr_assign_type = NET_ADDR_RANDOM;
+
eth_random_addr(dev->host_mac);
pr_warn("using random %s ethernet address\n", "host");
@@ -871,7 +879,6 @@ int gether_register_netdev(struct net_device *net)
dev = netdev_priv(net);
g = dev->gadget;
- net->addr_assign_type = NET_ADDR_RANDOM;
eth_hw_addr_set(net, dev->dev_mac);
status = register_netdev(net);
@@ -912,6 +919,7 @@ int gether_set_dev_addr(struct net_device *net, const char *dev_addr)
if (get_ether_addr(dev_addr, new_addr))
return -EINVAL;
memcpy(dev->dev_mac, new_addr, ETH_ALEN);
+ net->addr_assign_type = NET_ADDR_SET;
return 0;
}
EXPORT_SYMBOL_GPL(gether_set_dev_addr);
diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c
index e5a6b6e36b3d..4303a3283ba0 100644
--- a/drivers/usb/gadget/function/uvc_configfs.c
+++ b/drivers/usb/gadget/function/uvc_configfs.c
@@ -2371,6 +2371,7 @@ static ssize_t f_uvc_opts_string_##cname##_store(struct config_item *item,\
const char *page, size_t len) \
{ \
struct f_uvc_opts *opts = to_f_uvc_opts(item); \
+ int size = min(sizeof(opts->aname), len + 1); \
int ret = 0; \
\
mutex_lock(&opts->lock); \
@@ -2379,8 +2380,9 @@ static ssize_t f_uvc_opts_string_##cname##_store(struct config_item *item,\
goto end; \
} \
\
- ret = snprintf(opts->aname, min(sizeof(opts->aname), len), \
- "%s", page); \
+ ret = strscpy(opts->aname, page, size); \
+ if (ret == -E2BIG) \
+ ret = size - 1; \
\
end: \
mutex_unlock(&opts->lock); \
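
The switch from snprintf() to strscpy() above relies on strscpy() NUL-terminating the destination and reporting truncation as -E2BIG rather than as the would-be length (snprintf's behaviour). Below is a userspace model of those semantics; my_strscpy is a simplified stand-in, not the kernel implementation.

#include <stdio.h>
#include <string.h>

#define E2BIG	7

static long my_strscpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size == 0)
		return -E2BIG;
	if (len >= size) {
		memcpy(dst, src, size - 1);
		dst[size - 1] = '\0';
		return -E2BIG;		/* truncated, but terminated */
	}
	memcpy(dst, src, len + 1);
	return (long)len;		/* bytes copied, NUL excluded */
}

int main(void)
{
	char buf[8];
	long ret = my_strscpy(buf, "configfs-string", sizeof(buf));

	if (ret == -E2BIG)
		ret = sizeof(buf) - 1;	/* same fix-up as the store handler */
	printf("stored \"%s\", ret = %ld\n", buf, ret);
	return 0;
}
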
diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c
index a9bb4553db84..d42bb3346745 100644
--- a/drivers/usb/gadget/function/uvc_video.c
+++ b/drivers/usb/gadget/function/uvc_video.c
@@ -424,6 +424,9 @@ static void uvcg_video_pump(struct work_struct *work)
uvcg_queue_cancel(queue, 0);
break;
}
+
+ /* Endpoint now owns the request */
+ req = NULL;
video->req_int_count++;
}
diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c
index 241740024c50..2acece16b890 100644
--- a/drivers/usb/gadget/legacy/raw_gadget.c
+++ b/drivers/usb/gadget/legacy/raw_gadget.c
@@ -11,6 +11,7 @@
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
+#include <linux/idr.h>
#include <linux/kref.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
@@ -36,6 +37,9 @@ MODULE_LICENSE("GPL");
/*----------------------------------------------------------------------*/
+static DEFINE_IDA(driver_id_numbers);
+#define DRIVER_DRIVER_NAME_LENGTH_MAX 32
+
#define RAW_EVENT_QUEUE_SIZE 16
struct raw_event_queue {
@@ -161,6 +165,9 @@ struct raw_dev {
/* Reference to misc device: */
struct device *dev;
+ /* Make driver names unique */
+ int driver_id_number;
+
/* Protected by lock: */
enum dev_state state;
bool gadget_registered;
@@ -189,6 +196,7 @@ static struct raw_dev *dev_new(void)
spin_lock_init(&dev->lock);
init_completion(&dev->ep0_done);
raw_event_queue_init(&dev->queue);
+ dev->driver_id_number = -1;
return dev;
}
@@ -199,6 +207,9 @@ static void dev_free(struct kref *kref)
kfree(dev->udc_name);
kfree(dev->driver.udc_name);
+ kfree(dev->driver.driver.name);
+ if (dev->driver_id_number >= 0)
+ ida_free(&driver_id_numbers, dev->driver_id_number);
if (dev->req) {
if (dev->ep0_urb_queued)
usb_ep_dequeue(dev->gadget->ep0, dev->req);
@@ -419,9 +430,11 @@ out_put:
static int raw_ioctl_init(struct raw_dev *dev, unsigned long value)
{
int ret = 0;
+ int driver_id_number;
struct usb_raw_init arg;
char *udc_driver_name;
char *udc_device_name;
+ char *driver_driver_name;
unsigned long flags;
if (copy_from_user(&arg, (void __user *)value, sizeof(arg)))
@@ -440,36 +453,43 @@ static int raw_ioctl_init(struct raw_dev *dev, unsigned long value)
return -EINVAL;
}
+ driver_id_number = ida_alloc(&driver_id_numbers, GFP_KERNEL);
+ if (driver_id_number < 0)
+ return driver_id_number;
+
+ driver_driver_name = kmalloc(DRIVER_DRIVER_NAME_LENGTH_MAX, GFP_KERNEL);
+ if (!driver_driver_name) {
+ ret = -ENOMEM;
+ goto out_free_driver_id_number;
+ }
+ snprintf(driver_driver_name, DRIVER_DRIVER_NAME_LENGTH_MAX,
+ DRIVER_NAME ".%d", driver_id_number);
+
udc_driver_name = kmalloc(UDC_NAME_LENGTH_MAX, GFP_KERNEL);
- if (!udc_driver_name)
- return -ENOMEM;
+ if (!udc_driver_name) {
+ ret = -ENOMEM;
+ goto out_free_driver_driver_name;
+ }
ret = strscpy(udc_driver_name, &arg.driver_name[0],
UDC_NAME_LENGTH_MAX);
- if (ret < 0) {
- kfree(udc_driver_name);
- return ret;
- }
+ if (ret < 0)
+ goto out_free_udc_driver_name;
ret = 0;
udc_device_name = kmalloc(UDC_NAME_LENGTH_MAX, GFP_KERNEL);
if (!udc_device_name) {
- kfree(udc_driver_name);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out_free_udc_driver_name;
}
ret = strscpy(udc_device_name, &arg.device_name[0],
UDC_NAME_LENGTH_MAX);
- if (ret < 0) {
- kfree(udc_driver_name);
- kfree(udc_device_name);
- return ret;
- }
+ if (ret < 0)
+ goto out_free_udc_device_name;
ret = 0;
spin_lock_irqsave(&dev->lock, flags);
if (dev->state != STATE_DEV_OPENED) {
dev_dbg(dev->dev, "fail, device is not opened\n");
- kfree(udc_driver_name);
- kfree(udc_device_name);
ret = -EINVAL;
goto out_unlock;
}
@@ -484,14 +504,25 @@ static int raw_ioctl_init(struct raw_dev *dev, unsigned long value)
dev->driver.suspend = gadget_suspend;
dev->driver.resume = gadget_resume;
dev->driver.reset = gadget_reset;
- dev->driver.driver.name = DRIVER_NAME;
+ dev->driver.driver.name = driver_driver_name;
dev->driver.udc_name = udc_device_name;
dev->driver.match_existing_only = 1;
+ dev->driver_id_number = driver_id_number;
dev->state = STATE_DEV_INITIALIZED;
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return ret;
out_unlock:
spin_unlock_irqrestore(&dev->lock, flags);
+out_free_udc_device_name:
+ kfree(udc_device_name);
+out_free_udc_driver_name:
+ kfree(udc_driver_name);
+out_free_driver_driver_name:
+ kfree(driver_driver_name);
+out_free_driver_id_number:
+ ida_free(&driver_id_numbers, driver_id_number);
return ret;
}
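
The raw_gadget change above derives a unique driver name from an allocated ID and returns the ID on teardown. A hedged userspace sketch of that scheme follows, with a trivial bitmap allocator standing in for the kernel IDA; the names are hypothetical.

#include <stdio.h>
#include <string.h>

#define NAME_MAX_LEN	32
#define MAX_IDS		32

static unsigned long used_ids;	/* bit i set => id i allocated */

static int id_alloc(void)
{
	for (int i = 0; i < MAX_IDS; i++) {
		if (!(used_ids & (1UL << i))) {
			used_ids |= 1UL << i;
			return i;
		}
	}
	return -1;
}

static void id_free(int id)
{
	used_ids &= ~(1UL << id);
}

int main(void)
{
	char name[NAME_MAX_LEN];
	int id = id_alloc();

	if (id < 0)
		return 1;
	snprintf(name, sizeof(name), "raw-gadget.%d", id);
	printf("driver name: %s\n", name);	/* raw-gadget.0 */
	id_free(id);
	return 0;
}
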
diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c
index 6117ae8e7242..cea10cdb83ae 100644
--- a/drivers/usb/gadget/udc/lpc32xx_udc.c
+++ b/drivers/usb/gadget/udc/lpc32xx_udc.c
@@ -3016,6 +3016,7 @@ static int lpc32xx_udc_probe(struct platform_device *pdev)
}
udc->isp1301_i2c_client = isp1301_get_client(isp1301_node);
+ of_node_put(isp1301_node);
if (!udc->isp1301_i2c_client) {
return -EPROBE_DEFER;
}
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 385be30baad3..896c0d107f72 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -76,14 +76,9 @@ static int fsl_ehci_drv_probe(struct platform_device *pdev)
return -ENODEV;
}
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res) {
- dev_err(&pdev->dev,
- "Found HC with no IRQ. Check %s setup!\n",
- dev_name(&pdev->dev));
- return -ENODEV;
- }
- irq = res->start;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
hcd = __usb_create_hcd(&fsl_ehci_hc_driver, pdev->dev.parent,
&pdev->dev, dev_name(&pdev->dev), NULL);
diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
index 44a7e58a26e3..e5df17522892 100644
--- a/drivers/usb/host/fsl-mph-dr-of.c
+++ b/drivers/usb/host/fsl-mph-dr-of.c
@@ -112,6 +112,9 @@ static struct platform_device *fsl_usb2_device_register(
goto error;
}
+ pdev->dev.of_node = ofdev->dev.of_node;
+ pdev->dev.of_node_reused = true;
+
retval = platform_device_add(pdev);
if (retval)
goto error;
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index c54f2bc23d3f..0fdc014c9401 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -652,7 +652,7 @@ struct xhci_hub *xhci_get_rhub(struct usb_hcd *hcd)
* It will release and re-acquire the lock while calling ACPI
* method.
*/
-static void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd,
+void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd,
u16 index, bool on, unsigned long *flags)
__must_hold(&xhci->lock)
{
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index fac9492a8bda..dce6c0ec8d34 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -61,6 +61,8 @@
#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI 0x461e
#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_XHCI 0x464e
#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI 0x51ed
+#define PCI_DEVICE_ID_INTEL_RAPTOR_LAKE_XHCI 0xa71e
+#define PCI_DEVICE_ID_INTEL_METEOR_LAKE_XHCI 0x7ec0
#define PCI_DEVICE_ID_AMD_RENOIR_XHCI 0x1639
#define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9
@@ -269,7 +271,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
pdev->device == PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_XHCI ||
- pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI))
+ pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_RAPTOR_LAKE_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_METEOR_LAKE_XHCI))
xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index f0ab63138016..65858f607437 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -611,15 +611,37 @@ static int xhci_init(struct usb_hcd *hcd)
static int xhci_run_finished(struct xhci_hcd *xhci)
{
+ unsigned long flags;
+ u32 temp;
+
+ /*
+ * Enable interrupts before starting the host (xhci 4.2 and 5.5.2).
+ * Protect the short window before host is running with a lock
+ */
+ spin_lock_irqsave(&xhci->lock, flags);
+
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Enable interrupts");
+ temp = readl(&xhci->op_regs->command);
+ temp |= (CMD_EIE);
+ writel(temp, &xhci->op_regs->command);
+
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Enable primary interrupter");
+ temp = readl(&xhci->ir_set->irq_pending);
+ writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending);
+
if (xhci_start(xhci)) {
xhci_halt(xhci);
+ spin_unlock_irqrestore(&xhci->lock, flags);
return -ENODEV;
}
+
xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
if (xhci->quirks & XHCI_NEC_HOST)
xhci_ring_cmd_db(xhci);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+
return 0;
}
@@ -668,19 +690,6 @@ int xhci_run(struct usb_hcd *hcd)
temp |= (xhci->imod_interval / 250) & ER_IRQ_INTERVAL_MASK;
writel(temp, &xhci->ir_set->irq_control);
- /* Set the HCD state before we enable the irqs */
- temp = readl(&xhci->op_regs->command);
- temp |= (CMD_EIE);
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "// Enable interrupts, cmd = 0x%x.", temp);
- writel(temp, &xhci->op_regs->command);
-
- temp = readl(&xhci->ir_set->irq_pending);
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "// Enabling event ring interrupter %p by writing 0x%x to irq_pending",
- xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
- writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending);
-
if (xhci->quirks & XHCI_NEC_HOST) {
struct xhci_command *command;
@@ -782,6 +791,8 @@ static void xhci_stop(struct usb_hcd *hcd)
void xhci_shutdown(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ unsigned long flags;
+ int i;
if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev));
@@ -797,12 +808,21 @@ void xhci_shutdown(struct usb_hcd *hcd)
del_timer_sync(&xhci->shared_hcd->rh_timer);
}
- spin_lock_irq(&xhci->lock);
+ spin_lock_irqsave(&xhci->lock, flags);
xhci_halt(xhci);
+
+ /* Power off USB2 ports */
+ for (i = 0; i < xhci->usb2_rhub.num_ports; i++)
+ xhci_set_port_power(xhci, xhci->main_hcd, i, false, &flags);
+
+ /* Power off USB3 ports */
+ for (i = 0; i < xhci->usb3_rhub.num_ports; i++)
+ xhci_set_port_power(xhci, xhci->shared_hcd, i, false, &flags);
+
/* Workaround for spurious wakeups at shutdown with HSW */
if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
- spin_unlock_irq(&xhci->lock);
+ spin_unlock_irqrestore(&xhci->lock, flags);
xhci_cleanup_msix(xhci);
@@ -1107,7 +1127,6 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
{
u32 command, temp = 0;
struct usb_hcd *hcd = xhci_to_hcd(xhci);
- struct usb_hcd *secondary_hcd;
int retval = 0;
bool comp_timer_running = false;
bool pending_portevent = false;
@@ -1214,23 +1233,19 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
* first with the primary HCD, and then with the secondary HCD.
* If we don't do the same, the host will never be started.
*/
- if (!usb_hcd_is_primary_hcd(hcd))
- secondary_hcd = hcd;
- else
- secondary_hcd = xhci->shared_hcd;
-
xhci_dbg(xhci, "Initialize the xhci_hcd\n");
- retval = xhci_init(hcd->primary_hcd);
+ retval = xhci_init(hcd);
if (retval)
return retval;
comp_timer_running = true;
xhci_dbg(xhci, "Start the primary HCD\n");
- retval = xhci_run(hcd->primary_hcd);
- if (!retval && secondary_hcd) {
+ retval = xhci_run(hcd);
+ if (!retval && xhci->shared_hcd) {
xhci_dbg(xhci, "Start the secondary HCD\n");
- retval = xhci_run(secondary_hcd);
+ retval = xhci_run(xhci->shared_hcd);
}
+
hcd->state = HC_STATE_SUSPENDED;
if (xhci->shared_hcd)
xhci->shared_hcd->state = HC_STATE_SUSPENDED;
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 0bd76c94a4b1..28aaf031f9a8 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -2196,6 +2196,8 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
int xhci_hub_status_data(struct usb_hcd *hcd, char *buf);
int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1);
struct xhci_hub *xhci_get_rhub(struct usb_hcd *hcd);
+void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd, u16 index,
+ bool on, unsigned long *flags);
void xhci_hc_died(struct xhci_hcd *xhci);
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index b440d338a895..d5a3986dfee7 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1023,6 +1023,9 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_DISPLAY_PID) },
{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_LITE_PID) },
{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ANALOG_PID) },
+ /* Belimo Automation devices */
+ { USB_DEVICE(FTDI_VID, BELIMO_ZTH_PID) },
+ { USB_DEVICE(FTDI_VID, BELIMO_ZIP_PID) },
/* ICP DAS I-756xU devices */
{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7560U_PID) },
{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index d1a9564697a4..4e92c165c86b 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -1569,6 +1569,12 @@
#define CHETCO_SEASMART_ANALOG_PID 0xA5AF /* SeaSmart Analog Adapter */
/*
+ * Belimo Automation
+ */
+#define BELIMO_ZTH_PID 0x8050
+#define BELIMO_ZIP_PID 0xC811
+
+/*
* Unjo AB
*/
#define UNJO_VID 0x22B7
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index a7b3c15957ba..feba2a8d1233 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -166,6 +166,7 @@ static const struct usb_device_id edgeport_2port_id_table[] = {
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_8S) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416B) },
+ { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_E5805A) },
{ }
};
@@ -204,6 +205,7 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_8S) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416B) },
+ { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_E5805A) },
{ }
};
diff --git a/drivers/usb/serial/io_usbvend.h b/drivers/usb/serial/io_usbvend.h
index 52cbc353051f..9a6f742ad3ab 100644
--- a/drivers/usb/serial/io_usbvend.h
+++ b/drivers/usb/serial/io_usbvend.h
@@ -212,6 +212,7 @@
//
// Definitions for other product IDs
#define ION_DEVICE_ID_MT4X56USB 0x1403 // OEM device
+#define ION_DEVICE_ID_E5805A 0x1A01 // OEM device (rebranded Edgeport/4)
#define GENERATION_ID_FROM_USB_PRODUCT_ID(ProductId) \
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index e60425bbf537..de59fa919540 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -252,10 +252,12 @@ static void option_instat_callback(struct urb *urb);
#define QUECTEL_PRODUCT_EG95 0x0195
#define QUECTEL_PRODUCT_BG96 0x0296
#define QUECTEL_PRODUCT_EP06 0x0306
+#define QUECTEL_PRODUCT_EM05G 0x030a
#define QUECTEL_PRODUCT_EM12 0x0512
#define QUECTEL_PRODUCT_RM500Q 0x0800
#define QUECTEL_PRODUCT_EC200S_CN 0x6002
#define QUECTEL_PRODUCT_EC200T 0x6026
+#define QUECTEL_PRODUCT_RM500K 0x7001
#define CMOTECH_VENDOR_ID 0x16d8
#define CMOTECH_PRODUCT_6001 0x6001
@@ -432,6 +434,8 @@ static void option_instat_callback(struct urb *urb);
#define CINTERION_PRODUCT_CLS8 0x00b0
#define CINTERION_PRODUCT_MV31_MBIM 0x00b3
#define CINTERION_PRODUCT_MV31_RMNET 0x00b7
+#define CINTERION_PRODUCT_MV31_2_MBIM 0x00b8
+#define CINTERION_PRODUCT_MV31_2_RMNET 0x00b9
#define CINTERION_PRODUCT_MV32_WA 0x00f1
#define CINTERION_PRODUCT_MV32_WB 0x00f2
@@ -1132,6 +1136,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) },
+ { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G, 0xff),
+ .driver_info = RSVD(6) | ZLP },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
@@ -1145,6 +1151,7 @@ static const struct usb_device_id option_ids[] = {
.driver_info = ZLP },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
@@ -1277,6 +1284,7 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1231, 0xff), /* Telit LE910Cx (RNDIS) */
.driver_info = NCTRL(2) | RSVD(3) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x1250, 0xff, 0x00, 0x00) }, /* Telit LE910Cx (rmnet) */
{ USB_DEVICE(TELIT_VENDOR_ID, 0x1260),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
{ USB_DEVICE(TELIT_VENDOR_ID, 0x1261),
@@ -1979,6 +1987,10 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(3)},
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_RMNET, 0xff),
.driver_info = RSVD(0)},
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_2_MBIM, 0xff),
+ .driver_info = RSVD(3)},
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_2_RMNET, 0xff),
+ .driver_info = RSVD(0)},
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WA, 0xff),
.driver_info = RSVD(3)},
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WB, 0xff),
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 3506c47e1eef..40b1ab3d284d 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -436,22 +436,27 @@ static int pl2303_detect_type(struct usb_serial *serial)
break;
case 0x200:
switch (bcdDevice) {
- case 0x100:
+ case 0x100: /* GC */
case 0x105:
+ return TYPE_HXN;
+ case 0x300: /* GT / TA */
+ if (pl2303_supports_hx_status(serial))
+ return TYPE_TA;
+ fallthrough;
case 0x305:
+ case 0x400: /* GL */
case 0x405:
+ return TYPE_HXN;
+ case 0x500: /* GE / TB */
+ if (pl2303_supports_hx_status(serial))
+ return TYPE_TB;
+ fallthrough;
+ case 0x505:
+ case 0x600: /* GS */
case 0x605:
- /*
- * Assume it's an HXN-type if the device doesn't
- * support the old read request value.
- */
- if (!pl2303_supports_hx_status(serial))
- return TYPE_HXN;
- break;
- case 0x300:
- return TYPE_TA;
- case 0x500:
- return TYPE_TB;
+ case 0x700: /* GR */
+ case 0x705:
+ return TYPE_HXN;
}
break;
}
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index ee0e520707dd..c4724750c81a 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -1718,6 +1718,7 @@ void typec_set_pwr_opmode(struct typec_port *port,
partner->usb_pd = 1;
sysfs_notify(&partner_dev->kobj, NULL,
"supports_usb_power_delivery");
+ kobject_uevent(&partner_dev->kobj, KOBJ_CHANGE);
}
put_device(partner_dev);
}
diff --git a/drivers/usb/typec/tcpm/Kconfig b/drivers/usb/typec/tcpm/Kconfig
index 557f392fe24d..073fd2ea5e0b 100644
--- a/drivers/usb/typec/tcpm/Kconfig
+++ b/drivers/usb/typec/tcpm/Kconfig
@@ -56,7 +56,6 @@ config TYPEC_WCOVE
tristate "Intel WhiskeyCove PMIC USB Type-C PHY driver"
depends on ACPI
depends on MFD_INTEL_PMC_BXT
- depends on INTEL_SOC_PMIC
depends on BXT_WC_PMIC_OPREGION
help
This driver adds support for USB Type-C on Intel Broxton platforms
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index b7a955479156..e85c1d71f4ed 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -107,7 +107,7 @@ struct mlx5_vdpa_virtqueue {
/* Resources for implementing the notification channel from the device
* to the driver. fwqp is the firmware end of an RC connection; the
- * other end is vqqp used by the driver. cq is is where completions are
+ * other end is vqqp used by the driver. cq is where completions are
* reported.
*/
struct mlx5_vdpa_cq cq;
@@ -1814,12 +1814,13 @@ static virtio_net_ctrl_ack handle_ctrl_vlan(struct mlx5_vdpa_dev *mvdev, u8 cmd)
id = mlx5vdpa16_to_cpu(mvdev, vlan);
mac_vlan_del(ndev, ndev->config.mac, id, true);
+ status = VIRTIO_NET_OK;
break;
default:
- break;
-}
+ break;
+ }
-return status;
+ return status;
}
static void mlx5_cvq_kick_handler(struct work_struct *work)
@@ -1961,6 +1962,8 @@ static void mlx5_vdpa_set_vq_cb(struct vdpa_device *vdev, u16 idx, struct vdpa_c
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
ndev->event_cbs[idx] = *cb;
+ if (is_ctrl_vq_idx(mvdev, idx))
+ mvdev->cvq.event_cb = *cb;
}
static void mlx5_cvq_notify(struct vringh *vring)
@@ -2173,7 +2176,6 @@ static int verify_driver_features(struct mlx5_vdpa_dev *mvdev, u64 features)
static int setup_virtqueues(struct mlx5_vdpa_dev *mvdev)
{
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
- struct mlx5_control_vq *cvq = &mvdev->cvq;
int err;
int i;
@@ -2183,16 +2185,6 @@ static int setup_virtqueues(struct mlx5_vdpa_dev *mvdev)
goto err_vq;
}
- if (mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)) {
- err = vringh_init_iotlb(&cvq->vring, mvdev->actual_features,
- MLX5_CVQ_MAX_ENT, false,
- (struct vring_desc *)(uintptr_t)cvq->desc_addr,
- (struct vring_avail *)(uintptr_t)cvq->driver_addr,
- (struct vring_used *)(uintptr_t)cvq->device_addr);
- if (err)
- goto err_vq;
- }
-
return 0;
err_vq:
@@ -2465,6 +2457,21 @@ static void clear_vqs_ready(struct mlx5_vdpa_net *ndev)
ndev->mvdev.cvq.ready = false;
}
+static int setup_cvq_vring(struct mlx5_vdpa_dev *mvdev)
+{
+ struct mlx5_control_vq *cvq = &mvdev->cvq;
+ int err = 0;
+
+ if (mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))
+ err = vringh_init_iotlb(&cvq->vring, mvdev->actual_features,
+ MLX5_CVQ_MAX_ENT, false,
+ (struct vring_desc *)(uintptr_t)cvq->desc_addr,
+ (struct vring_avail *)(uintptr_t)cvq->driver_addr,
+ (struct vring_used *)(uintptr_t)cvq->device_addr);
+
+ return err;
+}
+
static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
@@ -2477,6 +2484,11 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
if ((status ^ ndev->mvdev.status) & VIRTIO_CONFIG_S_DRIVER_OK) {
if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
+ err = setup_cvq_vring(mvdev);
+ if (err) {
+ mlx5_vdpa_warn(mvdev, "failed to setup control VQ vring\n");
+ goto err_setup;
+ }
err = setup_driver(mvdev);
if (err) {
mlx5_vdpa_warn(mvdev, "failed to setup driver\n");
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index d503848b3b6e..3bc27de58f46 100644
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -1345,9 +1345,9 @@ static int vduse_create_dev(struct vduse_dev_config *config,
dev->minor = ret;
dev->msg_timeout = VDUSE_MSG_DEFAULT_TIMEOUT;
- dev->dev = device_create(vduse_class, NULL,
- MKDEV(MAJOR(vduse_major), dev->minor),
- dev, "%s", config->name);
+ dev->dev = device_create_with_groups(vduse_class, NULL,
+ MKDEV(MAJOR(vduse_major), dev->minor),
+ dev, vduse_dev_groups, "%s", config->name);
if (IS_ERR(dev->dev)) {
ret = PTR_ERR(dev->dev);
goto err_dev;
@@ -1476,16 +1476,12 @@ static char *vduse_devnode(struct device *dev, umode_t *mode)
return kasprintf(GFP_KERNEL, "vduse/%s", dev_name(dev));
}
-static void vduse_mgmtdev_release(struct device *dev)
-{
-}
-
-static struct device vduse_mgmtdev = {
- .init_name = "vduse",
- .release = vduse_mgmtdev_release,
+struct vduse_mgmt_dev {
+ struct vdpa_mgmt_dev mgmt_dev;
+ struct device dev;
};
-static struct vdpa_mgmt_dev mgmt_dev;
+static struct vduse_mgmt_dev *vduse_mgmt;
static int vduse_dev_init_vdpa(struct vduse_dev *dev, const char *name)
{
@@ -1510,7 +1506,7 @@ static int vduse_dev_init_vdpa(struct vduse_dev *dev, const char *name)
}
set_dma_ops(&vdev->vdpa.dev, &vduse_dev_dma_ops);
vdev->vdpa.dma_dev = &vdev->vdpa.dev;
- vdev->vdpa.mdev = &mgmt_dev;
+ vdev->vdpa.mdev = &vduse_mgmt->mgmt_dev;
return 0;
}
@@ -1556,34 +1552,52 @@ static struct virtio_device_id id_table[] = {
{ 0 },
};
-static struct vdpa_mgmt_dev mgmt_dev = {
- .device = &vduse_mgmtdev,
- .id_table = id_table,
- .ops = &vdpa_dev_mgmtdev_ops,
-};
+static void vduse_mgmtdev_release(struct device *dev)
+{
+ struct vduse_mgmt_dev *mgmt_dev;
+
+ mgmt_dev = container_of(dev, struct vduse_mgmt_dev, dev);
+ kfree(mgmt_dev);
+}
static int vduse_mgmtdev_init(void)
{
int ret;
- ret = device_register(&vduse_mgmtdev);
- if (ret)
+ vduse_mgmt = kzalloc(sizeof(*vduse_mgmt), GFP_KERNEL);
+ if (!vduse_mgmt)
+ return -ENOMEM;
+
+ ret = dev_set_name(&vduse_mgmt->dev, "vduse");
+ if (ret) {
+ kfree(vduse_mgmt);
return ret;
+ }
- ret = vdpa_mgmtdev_register(&mgmt_dev);
+ vduse_mgmt->dev.release = vduse_mgmtdev_release;
+
+ ret = device_register(&vduse_mgmt->dev);
if (ret)
- goto err;
+ goto dev_reg_err;
- return 0;
-err:
- device_unregister(&vduse_mgmtdev);
+ vduse_mgmt->mgmt_dev.id_table = id_table;
+ vduse_mgmt->mgmt_dev.ops = &vdpa_dev_mgmtdev_ops;
+ vduse_mgmt->mgmt_dev.device = &vduse_mgmt->dev;
+ ret = vdpa_mgmtdev_register(&vduse_mgmt->mgmt_dev);
+ if (ret)
+ device_unregister(&vduse_mgmt->dev);
+
+ return ret;
+
+dev_reg_err:
+ put_device(&vduse_mgmt->dev);
return ret;
}
static void vduse_mgmtdev_exit(void)
{
- vdpa_mgmtdev_unregister(&mgmt_dev);
- device_unregister(&vduse_mgmtdev);
+ vdpa_mgmtdev_unregister(&vduse_mgmt->mgmt_dev);
+ device_unregister(&vduse_mgmt->dev);
}
static int vduse_init(void)
@@ -1596,7 +1610,6 @@ static int vduse_init(void)
return PTR_ERR(vduse_class);
vduse_class->devnode = vduse_devnode;
- vduse_class->dev_groups = vduse_dev_groups;
ret = alloc_chrdev_region(&vduse_major, 0, VDUSE_DEV_MAX, "vduse");
if (ret)
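
The vduse rework above embeds the device in a heap-allocated wrapper so the release callback can recover and free it. Below is a minimal userspace sketch of that container_of() release pattern; struct device and the registration calls are simplified stand-ins.

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct device {
	void (*release)(struct device *dev);
};

struct vduse_mgmt_dev {
	int id;			/* stand-in for the vdpa_mgmt_dev part */
	struct device dev;	/* embedded device */
};

static void vduse_mgmtdev_release(struct device *dev)
{
	struct vduse_mgmt_dev *mgmt_dev =
		container_of(dev, struct vduse_mgmt_dev, dev);

	printf("releasing mgmt dev %d\n", mgmt_dev->id);
	free(mgmt_dev);		/* the wrapper owns the allocation */
}

int main(void)
{
	struct vduse_mgmt_dev *m = calloc(1, sizeof(*m));

	if (!m)
		return 1;
	m->id = 42;
	m->dev.release = vduse_mgmtdev_release;

	/* put_device() analogue: last reference dropped -> release runs */
	m->dev.release(&m->dev);
	return 0;
}
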
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 61e71c1154be..e60b06f2ac22 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -549,6 +549,16 @@ static struct vfio_group *vfio_group_find_or_alloc(struct device *dev)
if (!iommu_group)
return ERR_PTR(-EINVAL);
+ /*
+ * VFIO always sets IOMMU_CACHE because we offer no way for userspace to
+ * restore cache coherency. It has to be checked here because it is only
+ * valid for cases where we are using iommu groups.
+ */
+ if (!iommu_capable(dev->bus, IOMMU_CAP_CACHE_COHERENCY)) {
+ iommu_group_put(iommu_group);
+ return ERR_PTR(-EINVAL);
+ }
+
group = vfio_group_get_from_iommu(iommu_group);
if (!group)
group = vfio_create_group(iommu_group, VFIO_IOMMU);
@@ -601,13 +611,6 @@ static int __vfio_register_dev(struct vfio_device *device,
int vfio_register_group_dev(struct vfio_device *device)
{
- /*
- * VFIO always sets IOMMU_CACHE because we offer no way for userspace to
- * restore cache coherency.
- */
- if (!iommu_capable(device->dev->bus, IOMMU_CAP_CACHE_COHERENCY))
- return -EINVAL;
-
return __vfio_register_dev(device,
vfio_group_find_or_alloc(device->dev));
}
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index 935a1d0ddb97..23dcbfdfa13b 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -499,6 +499,8 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
ops->set_vq_ready(vdpa, idx, s.num);
return 0;
case VHOST_VDPA_GET_VRING_GROUP:
+ if (!ops->get_vq_group)
+ return -EOPNOTSUPP;
s.index = idx;
s.num = ops->get_vq_group(vdpa, idx);
if (s.num >= vdpa->ngroups)
@@ -1207,7 +1209,7 @@ static int vhost_vdpa_release(struct inode *inode, struct file *filep)
vhost_dev_stop(&v->vdev);
vhost_vdpa_free_domain(v);
vhost_vdpa_config_put(v);
- vhost_dev_cleanup(&v->vdev);
+ vhost_vdpa_cleanup(v);
mutex_unlock(&d->mutex);
atomic_dec(&v->opened);
diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
index 14e2043d7685..eab55accf381 100644
--- a/drivers/vhost/vringh.c
+++ b/drivers/vhost/vringh.c
@@ -292,7 +292,7 @@ __vringh_iov(struct vringh *vrh, u16 i,
int (*copy)(const struct vringh *vrh,
void *dst, const void *src, size_t len))
{
- int err, count = 0, up_next, desc_max;
+ int err, count = 0, indirect_count = 0, up_next, desc_max;
struct vring_desc desc, *descs;
struct vringh_range range = { -1ULL, 0 }, slowrange;
bool slow = false;
@@ -349,7 +349,12 @@ __vringh_iov(struct vringh *vrh, u16 i,
continue;
}
- if (count++ == vrh->vring.num) {
+ if (up_next == -1)
+ count++;
+ else
+ indirect_count++;
+
+ if (count > vrh->vring.num || indirect_count > desc_max) {
vringh_bad("Descriptor loop in %p", descs);
err = -ELOOP;
goto fail;
@@ -411,6 +416,7 @@ __vringh_iov(struct vringh *vrh, u16 i,
i = return_from_indirect(vrh, &up_next,
&descs, &desc_max);
slow = false;
+ indirect_count = 0;
} else
break;
}
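With this change the loop guard keeps two independent counters: count bounds the walk over the main ring (limit vring.num), while indirect_count bounds the walk inside a single indirect descriptor table (limit desc_max) and is reset when return_from_indirect() is taken. A standalone sketch of the bounding logic, with the actual descriptor walk stubbed out:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative only: mirrors __vringh_iov()'s loop guards. */
static bool desc_chain_ok(int direct_steps, int indirect_steps,
			  int ring_num, int indirect_max)
{
	int count = 0, indirect_count = 0;

	/* Walk the direct chain; its limit is the ring size. */
	for (int i = 0; i < direct_steps; i++)
		if (++count > ring_num)
			return false;	/* -ELOOP in the kernel */

	/* Walk one indirect table; its limit is the table size. */
	for (int i = 0; i < indirect_steps; i++)
		if (++indirect_count > indirect_max)
			return false;

	indirect_count = 0;	/* reset on return_from_indirect() */
	return true;
}

int main(void)
{
	printf("%d\n", desc_chain_ok(4, 100, 256, 64));	/* 0: indirect loop */
	printf("%d\n", desc_chain_ok(4, 8, 256, 64));	/* 1: sane chain */
	return 0;
}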
diff --git a/drivers/video/console/sticore.c b/drivers/video/console/sticore.c
index fa23bf0247b0..bd4dc97d4d34 100644
--- a/drivers/video/console/sticore.c
+++ b/drivers/video/console/sticore.c
@@ -1148,6 +1148,7 @@ int sti_call(const struct sti_struct *sti, unsigned long func,
return ret;
}
+#if defined(CONFIG_FB_STI)
/* check if given fb_info is the primary device */
int fb_is_primary_device(struct fb_info *info)
{
@@ -1163,6 +1164,7 @@ int fb_is_primary_device(struct fb_info *info)
return (sti->info == info);
}
EXPORT_SYMBOL(fb_is_primary_device);
+#endif
MODULE_AUTHOR("Philipp Rumpf, Helge Deller, Thomas Bogendoerfer");
MODULE_DESCRIPTION("Core STI driver for HP's NGLE series graphics cards in HP PARISC machines");
diff --git a/drivers/video/fbdev/au1100fb.c b/drivers/video/fbdev/au1100fb.c
index 52f731a61482..519313b8bb00 100644
--- a/drivers/video/fbdev/au1100fb.c
+++ b/drivers/video/fbdev/au1100fb.c
@@ -560,8 +560,7 @@ int au1100fb_drv_suspend(struct platform_device *dev, pm_message_t state)
/* Blank the LCD */
au1100fb_fb_blank(VESA_POWERDOWN, &fbdev->info);
- if (fbdev->lcdclk)
- clk_disable(fbdev->lcdclk);
+ clk_disable(fbdev->lcdclk);
memcpy(&fbregs, fbdev->regs, sizeof(struct au1100fb_regs));
@@ -577,8 +576,7 @@ int au1100fb_drv_resume(struct platform_device *dev)
memcpy(fbdev->regs, &fbregs, sizeof(struct au1100fb_regs));
- if (fbdev->lcdclk)
- clk_enable(fbdev->lcdclk);
+ clk_enable(fbdev->lcdclk);
/* Unblank the LCD */
au1100fb_fb_blank(VESA_NO_BLANKING, &fbdev->info);
diff --git a/drivers/video/fbdev/cirrusfb.c b/drivers/video/fbdev/cirrusfb.c
index 3d47c347b897..51e072c03e1c 100644
--- a/drivers/video/fbdev/cirrusfb.c
+++ b/drivers/video/fbdev/cirrusfb.c
@@ -2184,12 +2184,6 @@ static struct pci_driver cirrusfb_pci_driver = {
.id_table = cirrusfb_pci_table,
.probe = cirrusfb_pci_register,
.remove = cirrusfb_pci_unregister,
-#ifdef CONFIG_PM
-#if 0
- .suspend = cirrusfb_pci_suspend,
- .resume = cirrusfb_pci_resume,
-#endif
-#endif
};
#endif /* CONFIG_PCI */
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index c4e91715ef00..1a9aa12cf886 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -2469,6 +2469,11 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font,
if (charcount != 256 && charcount != 512)
return -EINVAL;
+ /* font bigger than screen resolution ? */
+ if (w > FBCON_SWAP(info->var.rotate, info->var.xres, info->var.yres) ||
+ h > FBCON_SWAP(info->var.rotate, info->var.yres, info->var.xres))
+ return -EINVAL;
+
/* Make sure drawing engine can handle the font */
if (!(info->pixmap.blit_x & (1 << (font->width - 1))) ||
!(info->pixmap.blit_y & (1 << (font->height - 1))))
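FBCON_SWAP() selects xres or yres depending on the console rotation, so the same font can be legal on an unrotated display and illegal once the console is rotated by 90 degrees. A simplified demo of that swap (assuming rotate values 0-3, with odd values meaning a 90/270-degree rotation):

#include <stdio.h>

/* Simplified stand-in for the kernel's FBCON_SWAP() macro. */
#define FBCON_SWAP(rot, a, b)	(((rot) & 1) ? (b) : (a))

int main(void)
{
	unsigned xres = 1920, yres = 1080, fw = 16, fh = 1200;

	for (unsigned rot = 0; rot < 2; rot++) {
		unsigned w_lim = FBCON_SWAP(rot, xres, yres);
		unsigned h_lim = FBCON_SWAP(rot, yres, xres);

		printf("rotate=%u: %ux%u font fits=%d\n", rot, fw, fh,
		       fw <= w_lim && fh <= h_lim);	/* 0, then 1 */
	}
	return 0;
}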
@@ -2731,6 +2736,34 @@ void fbcon_update_vcs(struct fb_info *info, bool all)
}
EXPORT_SYMBOL(fbcon_update_vcs);
+/* let fbcon check if it supports a new screen resolution */
+int fbcon_modechange_possible(struct fb_info *info, struct fb_var_screeninfo *var)
+{
+ struct fbcon_ops *ops = info->fbcon_par;
+ struct vc_data *vc;
+ unsigned int i;
+
+ WARN_CONSOLE_UNLOCKED();
+
+ if (!ops)
+ return 0;
+
+ /* prevent setting a screen size which is smaller than font size */
+ for (i = first_fb_vc; i <= last_fb_vc; i++) {
+ vc = vc_cons[i].d;
+ if (!vc || vc->vc_mode != KD_TEXT ||
+ fbcon_info_from_console(i) != info)
+ continue;
+
+ if (vc->vc_font.width > FBCON_SWAP(var->rotate, var->xres, var->yres) ||
+ vc->vc_font.height > FBCON_SWAP(var->rotate, var->yres, var->xres))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fbcon_modechange_possible);
+
int fbcon_mode_deleted(struct fb_info *info,
struct fb_videomode *mode)
{
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index afa2863670f3..7ee6eb2fa715 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -19,6 +19,7 @@
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/slab.h>
+#include <linux/sysfb.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/vt.h>
@@ -510,7 +511,7 @@ static int fb_show_logo_line(struct fb_info *info, int rotate,
while (n && (n * (logo->width + 8) - 8 > xres))
--n;
- image.dx = (xres - n * (logo->width + 8) - 8) / 2;
+ image.dx = (xres - (n * (logo->width + 8) - 8)) / 2;
image.dy = y ?: (yres - logo->height) / 2;
} else {
image.dx = 0;
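The added parentheses fix an operator-precedence slip: n logos separated by 8-pixel gaps occupy n * (width + 8) - 8 pixels, and that whole row width must be subtracted from xres before halving. A quick check with assumed values xres = 1024, width = 80, n = 8:

#include <stdio.h>

int main(void)
{
	unsigned xres = 1024, w = 80, n = 8;
	unsigned row = n * (w + 8) - 8;			/* 696 px of logos */

	printf("old dx=%u new dx=%u\n",
	       (xres - n * (w + 8) - 8) / 2,		/* 156: off-center */
	       (xres - row) / 2);			/* 164: centered  */
	printf("margins with new dx: left=%u right=%u\n",
	       (xres - row) / 2,
	       xres - row - (xres - row) / 2);		/* 164 and 164 */
	return 0;
}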
@@ -1016,6 +1017,16 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
if (ret)
return ret;
+ /* verify that virtual resolution >= physical resolution */
+ if (var->xres_virtual < var->xres ||
+ var->yres_virtual < var->yres) {
+ pr_warn("WARNING: fbcon: Driver '%s' missed to adjust virtual screen size (%ux%u vs. %ux%u)\n",
+ info->fix.id,
+ var->xres_virtual, var->yres_virtual,
+ var->xres, var->yres);
+ return -EINVAL;
+ }
+
if ((var->activate & FB_ACTIVATE_MASK) != FB_ACTIVATE_NOW)
return 0;
@@ -1106,7 +1117,9 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
return -EFAULT;
console_lock();
lock_fb_info(info);
- ret = fb_set_var(info, &var);
+ ret = fbcon_modechange_possible(info, &var);
+ if (!ret)
+ ret = fb_set_var(info, &var);
if (!ret)
fbcon_update_vcs(info, var.activate & FB_ACTIVATE_ALL);
unlock_fb_info(info);
@@ -1752,6 +1765,17 @@ int remove_conflicting_framebuffers(struct apertures_struct *a,
do_free = true;
}
+ /*
+ * If a driver asked to unregister a platform device registered by
+ * sysfb, then can be assumed that this is a driver for a display
+ * that is set up by the system firmware and has a generic driver.
+ *
+ * Drivers for devices that don't have a generic driver will never
+ * ask for this, so let's assume that a real driver for the display
+ * was already probed and prevent sysfb from registering devices later.
+ */
+ sysfb_disable();
+
mutex_lock(&registration_lock);
do_remove_conflicting_framebuffers(a, name, primary);
mutex_unlock(&registration_lock);
diff --git a/drivers/video/fbdev/intelfb/intelfbdrv.c b/drivers/video/fbdev/intelfb/intelfbdrv.c
index a9579964eaba..5647fca8c49a 100644
--- a/drivers/video/fbdev/intelfb/intelfbdrv.c
+++ b/drivers/video/fbdev/intelfb/intelfbdrv.c
@@ -472,7 +472,7 @@ static int intelfb_pci_register(struct pci_dev *pdev,
struct fb_info *info;
struct intelfb_info *dinfo;
int i, err, dvo;
- int aperture_size, stolen_size;
+ int aperture_size, stolen_size = 0;
struct agp_kern_info gtt_info;
int agp_memtype;
const char *s;
@@ -571,7 +571,7 @@ static int intelfb_pci_register(struct pci_dev *pdev,
return -ENODEV;
}
- if (intelfbhw_get_memory(pdev, &aperture_size,&stolen_size)) {
+ if (intelfbhw_get_memory(pdev, &aperture_size, &stolen_size)) {
cleanup(dinfo);
return -ENODEV;
}
diff --git a/drivers/video/fbdev/intelfb/intelfbhw.c b/drivers/video/fbdev/intelfb/intelfbhw.c
index 57aff7450bce..2086e06532ee 100644
--- a/drivers/video/fbdev/intelfb/intelfbhw.c
+++ b/drivers/video/fbdev/intelfb/intelfbhw.c
@@ -201,13 +201,11 @@ int intelfbhw_get_memory(struct pci_dev *pdev, int *aperture_size,
case PCI_DEVICE_ID_INTEL_945GME:
case PCI_DEVICE_ID_INTEL_965G:
case PCI_DEVICE_ID_INTEL_965GM:
- /* 915, 945 and 965 chipsets support a 256MB aperture.
- Aperture size is determined by inspected the
- base address of the aperture. */
- if (pci_resource_start(pdev, 2) & 0x08000000)
- *aperture_size = MB(128);
- else
- *aperture_size = MB(256);
+ /*
+ * 915, 945 and 965 chipsets support 64MB, 128MB or 256MB
+ * aperture. Determine size from PCI resource length.
+ */
+ *aperture_size = pci_resource_len(pdev, 2);
break;
default:
if ((tmp & INTEL_GMCH_MEM_MASK) == INTEL_GMCH_MEM_64M)
diff --git a/drivers/video/fbdev/omap/sossi.c b/drivers/video/fbdev/omap/sossi.c
index c90eb8ca58af..66aff6cd1df0 100644
--- a/drivers/video/fbdev/omap/sossi.c
+++ b/drivers/video/fbdev/omap/sossi.c
@@ -359,7 +359,7 @@ static void sossi_set_bits_per_cycle(int bpc)
int bus_pick_count, bus_pick_width;
/*
- * We set explicitly the the bus_pick_count as well, although
* We explicitly set the bus_pick_count as well, although
* with remapping/reordering disabled it will be calculated by HW
* as (32 / bus_pick_width).
*/
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c
index 6fbfeb01b315..170463a7e1f4 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c
@@ -143,7 +143,7 @@ int hdmi_phy_configure(struct hdmi_phy_data *phy, unsigned long hfbitclk,
/*
* In OMAP5+, the HFBITCLK must be divided by 2 before issuing the
* HDMI_PHYPWRCMD_LDOON command.
- */
+ */
if (phy_feat->bist_ctrl)
REG_FLD_MOD(phy->base, HDMI_TXPHY_BIST_CONTROL, 1, 11, 11);
diff --git a/drivers/video/fbdev/pxa3xx-gcu.c b/drivers/video/fbdev/pxa3xx-gcu.c
index 043cc8f9ef1c..c3cd1e1cc01b 100644
--- a/drivers/video/fbdev/pxa3xx-gcu.c
+++ b/drivers/video/fbdev/pxa3xx-gcu.c
@@ -381,7 +381,7 @@ pxa3xx_gcu_write(struct file *file, const char *buff,
struct pxa3xx_gcu_batch *buffer;
struct pxa3xx_gcu_priv *priv = to_pxa3xx_gcu_priv(file);
- int words = count / 4;
+ size_t words = count / 4;
/* Does not need to be atomic. There's a lock in user space,
* but anyhow, this is just for statistics. */
diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c
index 2c198561c338..f96ce8801be4 100644
--- a/drivers/video/fbdev/simplefb.c
+++ b/drivers/video/fbdev/simplefb.c
@@ -237,8 +237,7 @@ static int simplefb_clocks_get(struct simplefb_par *par,
if (IS_ERR(clock)) {
if (PTR_ERR(clock) == -EPROBE_DEFER) {
while (--i >= 0) {
- if (par->clks[i])
- clk_put(par->clks[i]);
+ clk_put(par->clks[i]);
}
kfree(par->clks);
return -EPROBE_DEFER;
diff --git a/drivers/video/fbdev/skeletonfb.c b/drivers/video/fbdev/skeletonfb.c
index bcacfb6934fa..d119b1d08007 100644
--- a/drivers/video/fbdev/skeletonfb.c
+++ b/drivers/video/fbdev/skeletonfb.c
@@ -96,7 +96,7 @@ static const struct fb_fix_screeninfo xxxfb_fix = {
/*
* Modern graphical hardware not only supports pipelines but some
- * also support multiple monitors where each display can have its
+ * also support multiple monitors where each display can have
* its own unique data. In this case each display could be
* represented by a separate framebuffer device thus a separate
* struct fb_info. Now the struct xxx_par represents the graphics
@@ -838,9 +838,9 @@ static void xxxfb_remove(struct pci_dev *dev)
*
* See Documentation/driver-api/pm/devices.rst for more information
*/
-static int xxxfb_suspend(struct pci_dev *dev, pm_message_t msg)
+static int xxxfb_suspend(struct device *dev)
{
- struct fb_info *info = pci_get_drvdata(dev);
+ struct fb_info *info = dev_get_drvdata(dev);
struct xxxfb_par *par = info->par;
/* suspend here */
@@ -853,9 +853,9 @@ static int xxxfb_suspend(struct pci_dev *dev, pm_message_t msg)
*
* See Documentation/driver-api/pm/devices.rst for more information
*/
-static int xxxfb_resume(struct pci_dev *dev)
+static int xxxfb_resume(struct device *dev)
{
- struct fb_info *info = pci_get_drvdata(dev);
+ struct fb_info *info = dev_get_drvdata(dev);
struct xxxfb_par *par = info->par;
/* resume here */
@@ -873,14 +873,15 @@ static const struct pci_device_id xxxfb_id_table[] = {
{ 0, }
};
+static SIMPLE_DEV_PM_OPS(xxxfb_pm_ops, xxxfb_suspend, xxxfb_resume);
+
/* For PCI drivers */
static struct pci_driver xxxfb_driver = {
.name = "xxxfb",
.id_table = xxxfb_id_table,
.probe = xxxfb_probe,
.remove = xxxfb_remove,
- .suspend = xxxfb_suspend, /* optional but recommended */
- .resume = xxxfb_resume, /* optional but recommended */
+ .driver.pm = xxxfb_pm_ops, /* optional but recommended */
};
MODULE_DEVICE_TABLE(pci, xxxfb_id_table);
diff --git a/drivers/virt/coco/sev-guest/sev-guest.c b/drivers/virt/coco/sev-guest/sev-guest.c
index 90ce16b6e05f..f422f9c58ba7 100644
--- a/drivers/virt/coco/sev-guest/sev-guest.c
+++ b/drivers/virt/coco/sev-guest/sev-guest.c
@@ -632,16 +632,19 @@ static int __init sev_guest_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct snp_guest_dev *snp_dev;
struct miscdevice *misc;
+ void __iomem *mapping;
int ret;
if (!dev->platform_data)
return -ENODEV;
data = (struct sev_guest_platform_data *)dev->platform_data;
- layout = (__force void *)ioremap_encrypted(data->secrets_gpa, PAGE_SIZE);
- if (!layout)
+ mapping = ioremap_encrypted(data->secrets_gpa, PAGE_SIZE);
+ if (!mapping)
return -ENODEV;
+ layout = (__force void *)mapping;
+
ret = -ENOMEM;
snp_dev = devm_kzalloc(&pdev->dev, sizeof(struct snp_guest_dev), GFP_KERNEL);
if (!snp_dev)
@@ -706,7 +709,7 @@ e_free_response:
e_free_request:
free_shared_pages(snp_dev->request, sizeof(struct snp_guest_msg));
e_unmap:
- iounmap(layout);
+ iounmap(mapping);
return ret;
}
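The fix keeps the void __iomem * cookie returned by ioremap_encrypted() and hands exactly that cookie to iounmap() on the error path, rather than the __force-cast plain pointer, so sparse address-space checking stays intact. A userspace analogy of the keep-the-original-cookie rule, using mmap()/munmap():

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4096;
	void *map = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (map == MAP_FAILED)
		return 1;

	char *layout = (char *)map;	/* derived view, used for access */
	strcpy(layout, "secrets page");

	/* Tear down with the original cookie, not the derived pointer --
	 * the same discipline the sev-guest fix applies to the __iomem
	 * pointer from ioremap_encrypted(). */
	return munmap(map, len);
}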
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index b5adf6abd241..e1556d2a355a 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -6,12 +6,6 @@ config VIRTIO
bus, such as CONFIG_VIRTIO_PCI, CONFIG_VIRTIO_MMIO, CONFIG_RPMSG
or CONFIG_S390_GUEST.
-config ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS
- bool
- help
- This option is selected if the architecture may need to enforce
- VIRTIO_F_ACCESS_PLATFORM
-
config VIRTIO_PCI_LIB
tristate
help
@@ -35,6 +29,19 @@ menuconfig VIRTIO_MENU
if VIRTIO_MENU
+config VIRTIO_HARDEN_NOTIFICATION
+ bool "Harden virtio notification"
+ help
+ Enable this to harden the device notifications and suppress
+ those that happen at a time when notifications are illegal.
+
+ Experimental: Note that several drivers still have bugs that
+ may cause crashes or hangs when correct handling of
+ notifications is enforced; depending on the subset of
+ drivers and devices you use, this may or may not work.
+
+ If unsure, say N.
+
config VIRTIO_PCI
tristate "PCI driver for virtio devices"
depends on PCI
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index ef04a96942bf..7deeed30d1f3 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -5,6 +5,7 @@
#include <linux/module.h>
#include <linux/idr.h>
#include <linux/of.h>
+#include <linux/platform-feature.h>
#include <uapi/linux/virtio_ids.h>
/* Unique numbering for virtio devices. */
@@ -170,12 +171,10 @@ EXPORT_SYMBOL_GPL(virtio_add_status);
static int virtio_features_ok(struct virtio_device *dev)
{
unsigned int status;
- int ret;
might_sleep();
- ret = arch_has_restricted_virtio_memory_access();
- if (ret) {
+ if (platform_has(PLATFORM_VIRTIO_RESTRICTED_MEM_ACCESS)) {
if (!virtio_has_feature(dev, VIRTIO_F_VERSION_1)) {
dev_warn(&dev->dev,
"device must provide VIRTIO_F_VERSION_1\n");
@@ -220,6 +219,7 @@ static int virtio_features_ok(struct virtio_device *dev)
* */
void virtio_reset_device(struct virtio_device *dev)
{
+#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
/*
* The below virtio_synchronize_cbs() guarantees that any
* interrupt for this line arriving after
@@ -228,6 +228,7 @@ void virtio_reset_device(struct virtio_device *dev)
*/
virtio_break_device(dev);
virtio_synchronize_cbs(dev);
+#endif
dev->config->reset(dev);
}
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index f9a36bc7ac27..083ff1eb743d 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -62,6 +62,7 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/virtio.h>
@@ -255,7 +256,7 @@ static void vm_set_status(struct virtio_device *vdev, u8 status)
/*
* Per memory-barriers.txt, wmb() is not needed to guarantee
- * that the the cache coherent memory writes have completed
+ * that the cache coherent memory writes have completed
* before writing to the MMIO region.
*/
writel(status, vm_dev->base + VIRTIO_MMIO_STATUS);
@@ -556,6 +557,28 @@ static const struct virtio_config_ops virtio_mmio_config_ops = {
.synchronize_cbs = vm_synchronize_cbs,
};
+#ifdef CONFIG_PM_SLEEP
+static int virtio_mmio_freeze(struct device *dev)
+{
+ struct virtio_mmio_device *vm_dev = dev_get_drvdata(dev);
+
+ return virtio_device_freeze(&vm_dev->vdev);
+}
+
+static int virtio_mmio_restore(struct device *dev)
+{
+ struct virtio_mmio_device *vm_dev = dev_get_drvdata(dev);
+
+ if (vm_dev->version == 1)
+ writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);
+
+ return virtio_device_restore(&vm_dev->vdev);
+}
+
+static const struct dev_pm_ops virtio_mmio_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(virtio_mmio_freeze, virtio_mmio_restore)
+};
+#endif
static void virtio_mmio_release_dev(struct device *_d)
{
@@ -701,6 +724,7 @@ static int vm_cmdline_set(const char *device,
if (!vm_cmdline_parent_registered) {
err = device_register(&vm_cmdline_parent);
if (err) {
+ put_device(&vm_cmdline_parent);
pr_err("Failed to register parent device!\n");
return err;
}
@@ -798,6 +822,9 @@ static struct platform_driver virtio_mmio_driver = {
.name = "virtio-mmio",
.of_match_table = virtio_mmio_match,
.acpi_match_table = ACPI_PTR(virtio_mmio_acpi_match),
+#ifdef CONFIG_PM_SLEEP
+ .pm = &virtio_mmio_pm_ops,
+#endif
},
};
diff --git a/drivers/virtio/virtio_pci_modern_dev.c b/drivers/virtio/virtio_pci_modern_dev.c
index a0fa14f28a7f..fa2a9445bb18 100644
--- a/drivers/virtio/virtio_pci_modern_dev.c
+++ b/drivers/virtio/virtio_pci_modern_dev.c
@@ -220,8 +220,6 @@ int vp_modern_probe(struct virtio_pci_modern_device *mdev)
check_offsets();
- mdev->pci_dev = pci_dev;
-
/* We only own devices >= 0x1000 and <= 0x107f: leave the rest. */
if (pci_dev->device < 0x1000 || pci_dev->device > 0x107f)
return -ENODEV;
@@ -469,7 +467,7 @@ void vp_modern_set_status(struct virtio_pci_modern_device *mdev,
/*
* Per memory-barriers.txt, wmb() is not needed to guarantee
- * that the the cache coherent memory writes have completed
+ * that the cache coherent memory writes have completed
* before writing to the MMIO region.
*/
vp_iowrite8(status, &cfg->device_status);
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 13a7348cedff..643ca779fcc6 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -111,7 +111,12 @@ struct vring_virtqueue {
/* Number we've added since last sync. */
unsigned int num_added;
- /* Last used index we've seen. */
+ /* Last used index we've seen.
+ * For the split ring, this just contains the last used index.
+ * For the packed ring:
+ * bits up to VRING_PACKED_EVENT_F_WRAP_CTR contain the last used index;
+ * bits from VRING_PACKED_EVENT_F_WRAP_CTR contain the used wrap counter.
+ */
u16 last_used_idx;
/* Hint for event idx: already triggered no need to disable. */
@@ -154,9 +159,6 @@ struct vring_virtqueue {
/* Driver ring wrap counter. */
bool avail_wrap_counter;
- /* Device ring wrap counter. */
- bool used_wrap_counter;
-
/* Avail used flags. */
u16 avail_used_flags;
@@ -933,7 +935,7 @@ static struct virtqueue *vring_create_virtqueue_split(
for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
&dma_addr,
- GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
+ GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
if (queue)
break;
if (!may_reduce_num)
@@ -973,6 +975,15 @@ static struct virtqueue *vring_create_virtqueue_split(
/*
* Packed ring specific functions - *_packed().
*/
+static inline bool packed_used_wrap_counter(u16 last_used_idx)
+{
+ return !!(last_used_idx & (1 << VRING_PACKED_EVENT_F_WRAP_CTR));
+}
+
+static inline u16 packed_last_used(u16 last_used_idx)
+{
+ return last_used_idx & ~(-(1 << VRING_PACKED_EVENT_F_WRAP_CTR));
+}
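Folding the wrap counter into last_used_idx lets readers snapshot the index and the counter together with a single READ_ONCE(). VRING_PACKED_EVENT_F_WRAP_CTR is bit 15 of the 16-bit value, so bits 14:0 carry the index and bit 15 the counter. A standalone demo of the pack/unpack arithmetic (constants mirror the kernel's; ring size assumed):

#include <stdint.h>
#include <stdio.h>

#define WRAP_CTR_BIT	15	/* VRING_PACKED_EVENT_F_WRAP_CTR */

static int used_wrap_counter(uint16_t v)
{
	return !!(v & (1u << WRAP_CTR_BIT));
}

static uint16_t last_used(uint16_t v)
{
	return v & ((1u << WRAP_CTR_BIT) - 1);
}

int main(void)
{
	/* Initial state as set by vring_create_virtqueue_packed():
	 * index 0, wrap counter 1. */
	uint16_t v = 0 | (1u << WRAP_CTR_BIT);
	unsigned num = 256;	/* assumed ring size */

	printf("idx=%u wrap=%d\n", last_used(v), used_wrap_counter(v));

	/* Consume 300 descriptors: the index wraps, the counter flips. */
	unsigned idx = last_used(v) + 300;
	int wrap = used_wrap_counter(v);

	if (idx >= num) {
		idx -= num;
		wrap ^= 1;
	}
	v = (uint16_t)(idx | ((unsigned)wrap << WRAP_CTR_BIT));
	printf("idx=%u wrap=%d\n", last_used(v), used_wrap_counter(v));
	return 0;
}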
static void vring_unmap_extra_packed(const struct vring_virtqueue *vq,
struct vring_desc_extra *extra)
@@ -1406,8 +1417,14 @@ static inline bool is_used_desc_packed(const struct vring_virtqueue *vq,
static inline bool more_used_packed(const struct vring_virtqueue *vq)
{
- return is_used_desc_packed(vq, vq->last_used_idx,
- vq->packed.used_wrap_counter);
+ u16 last_used;
+ u16 last_used_idx;
+ bool used_wrap_counter;
+
+ last_used_idx = READ_ONCE(vq->last_used_idx);
+ last_used = packed_last_used(last_used_idx);
+ used_wrap_counter = packed_used_wrap_counter(last_used_idx);
+ return is_used_desc_packed(vq, last_used, used_wrap_counter);
}
static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
@@ -1415,7 +1432,8 @@ static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
void **ctx)
{
struct vring_virtqueue *vq = to_vvq(_vq);
- u16 last_used, id;
+ u16 last_used, id, last_used_idx;
+ bool used_wrap_counter;
void *ret;
START_USE(vq);
@@ -1434,7 +1452,9 @@ static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
/* Only get used elements after they have been exposed by host. */
virtio_rmb(vq->weak_barriers);
- last_used = vq->last_used_idx;
+ last_used_idx = READ_ONCE(vq->last_used_idx);
+ used_wrap_counter = packed_used_wrap_counter(last_used_idx);
+ last_used = packed_last_used(last_used_idx);
id = le16_to_cpu(vq->packed.vring.desc[last_used].id);
*len = le32_to_cpu(vq->packed.vring.desc[last_used].len);
@@ -1451,12 +1471,15 @@ static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
ret = vq->packed.desc_state[id].data;
detach_buf_packed(vq, id, ctx);
- vq->last_used_idx += vq->packed.desc_state[id].num;
- if (unlikely(vq->last_used_idx >= vq->packed.vring.num)) {
- vq->last_used_idx -= vq->packed.vring.num;
- vq->packed.used_wrap_counter ^= 1;
+ last_used += vq->packed.desc_state[id].num;
+ if (unlikely(last_used >= vq->packed.vring.num)) {
+ last_used -= vq->packed.vring.num;
+ used_wrap_counter ^= 1;
}
+ last_used = (last_used | (used_wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR));
+ WRITE_ONCE(vq->last_used_idx, last_used);
+
/*
* If we expect an interrupt for the next entry, tell host
* by writing event index and flush out the write before
@@ -1465,9 +1488,7 @@ static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC)
virtio_store_mb(vq->weak_barriers,
&vq->packed.vring.driver->off_wrap,
- cpu_to_le16(vq->last_used_idx |
- (vq->packed.used_wrap_counter <<
- VRING_PACKED_EVENT_F_WRAP_CTR)));
+ cpu_to_le16(vq->last_used_idx));
LAST_ADD_TIME_INVALID(vq);
@@ -1499,9 +1520,7 @@ static unsigned int virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq)
if (vq->event) {
vq->packed.vring.driver->off_wrap =
- cpu_to_le16(vq->last_used_idx |
- (vq->packed.used_wrap_counter <<
- VRING_PACKED_EVENT_F_WRAP_CTR));
+ cpu_to_le16(vq->last_used_idx);
/*
* We need to update event offset and event wrap
* counter first before updating event flags.
@@ -1518,8 +1537,7 @@ static unsigned int virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq)
}
END_USE(vq);
- return vq->last_used_idx | ((u16)vq->packed.used_wrap_counter <<
- VRING_PACKED_EVENT_F_WRAP_CTR);
+ return vq->last_used_idx;
}
static bool virtqueue_poll_packed(struct virtqueue *_vq, u16 off_wrap)
@@ -1537,7 +1555,7 @@ static bool virtqueue_poll_packed(struct virtqueue *_vq, u16 off_wrap)
static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
- u16 used_idx, wrap_counter;
+ u16 used_idx, wrap_counter, last_used_idx;
u16 bufs;
START_USE(vq);
@@ -1550,9 +1568,10 @@ static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
if (vq->event) {
/* TODO: tune this threshold */
bufs = (vq->packed.vring.num - vq->vq.num_free) * 3 / 4;
- wrap_counter = vq->packed.used_wrap_counter;
+ last_used_idx = READ_ONCE(vq->last_used_idx);
+ wrap_counter = packed_used_wrap_counter(last_used_idx);
- used_idx = vq->last_used_idx + bufs;
+ used_idx = packed_last_used(last_used_idx) + bufs;
if (used_idx >= vq->packed.vring.num) {
used_idx -= vq->packed.vring.num;
wrap_counter ^= 1;
@@ -1582,9 +1601,10 @@ static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
*/
virtio_mb(vq->weak_barriers);
- if (is_used_desc_packed(vq,
- vq->last_used_idx,
- vq->packed.used_wrap_counter)) {
+ last_used_idx = READ_ONCE(vq->last_used_idx);
+ wrap_counter = packed_used_wrap_counter(last_used_idx);
+ used_idx = packed_last_used(last_used_idx);
+ if (is_used_desc_packed(vq, used_idx, wrap_counter)) {
END_USE(vq);
return false;
}
@@ -1688,8 +1708,12 @@ static struct virtqueue *vring_create_virtqueue_packed(
vq->we_own_ring = true;
vq->notify = notify;
vq->weak_barriers = weak_barriers;
+#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
vq->broken = true;
- vq->last_used_idx = 0;
+#else
+ vq->broken = false;
+#endif
+ vq->last_used_idx = 0 | (1 << VRING_PACKED_EVENT_F_WRAP_CTR);
vq->event_triggered = false;
vq->num_added = 0;
vq->packed_ring = true;
@@ -1720,7 +1744,6 @@ static struct virtqueue *vring_create_virtqueue_packed(
vq->packed.next_avail_idx = 0;
vq->packed.avail_wrap_counter = 1;
- vq->packed.used_wrap_counter = 1;
vq->packed.event_flags_shadow = 0;
vq->packed.avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;
@@ -2135,9 +2158,13 @@ irqreturn_t vring_interrupt(int irq, void *_vq)
}
if (unlikely(vq->broken)) {
+#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
dev_warn_once(&vq->vq.vdev->dev,
"virtio vring IRQ raised before DRIVER_OK");
return IRQ_NONE;
+#else
+ return IRQ_HANDLED;
+#endif
}
/* Just a hint for performance: so it's ok that this can be racy! */
@@ -2180,7 +2207,11 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
vq->we_own_ring = false;
vq->notify = notify;
vq->weak_barriers = weak_barriers;
+#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
vq->broken = true;
+#else
+ vq->broken = false;
+#endif
vq->last_used_idx = 0;
vq->event_triggered = false;
vq->num_added = 0;
diff --git a/drivers/watchdog/gxp-wdt.c b/drivers/watchdog/gxp-wdt.c
index b0b2d7a6fdde..2fd85be88278 100644
--- a/drivers/watchdog/gxp-wdt.c
+++ b/drivers/watchdog/gxp-wdt.c
@@ -172,3 +172,4 @@ module_platform_driver(gxp_wdt_driver);
MODULE_AUTHOR("Nick Hawkins <nick.hawkins@hpe.com>");
MODULE_AUTHOR("Jean-Marie Verdun <verdun@hpe.com>");
MODULE_DESCRIPTION("Driver for GXP watchdog timer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 120d32f164ac..bfd5f4f706bc 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -335,4 +335,24 @@ config XEN_UNPOPULATED_ALLOC
having to balloon out RAM regions in order to obtain physical memory
space to create such mappings.
+config XEN_GRANT_DMA_IOMMU
+ bool
+ select IOMMU_API
+
+config XEN_GRANT_DMA_OPS
+ bool
+ select DMA_OPS
+
+config XEN_VIRTIO
+ bool "Xen virtio support"
+ depends on VIRTIO
+ select XEN_GRANT_DMA_OPS
+ select XEN_GRANT_DMA_IOMMU if OF
+ help
+ Enable virtio support for running as a Xen guest. Depending on the
+ guest type this will require special support on the backend side
+ (qemu or kernel, depending on the virtio device types used).
+
+ If in doubt, say n.
+
endmenu
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 5aae66e638a7..c0503f1c7d5b 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -39,3 +39,5 @@ xen-gntalloc-y := gntalloc.o
xen-privcmd-y := privcmd.o privcmd-buf.o
obj-$(CONFIG_XEN_FRONT_PGDIR_SHBUF) += xen-front-pgdir-shbuf.o
obj-$(CONFIG_XEN_UNPOPULATED_ALLOC) += unpopulated-alloc.o
+obj-$(CONFIG_XEN_GRANT_DMA_OPS) += grant-dma-ops.o
+obj-$(CONFIG_XEN_GRANT_DMA_IOMMU) += grant-dma-iommu.o
diff --git a/drivers/xen/features.c b/drivers/xen/features.c
index 7b591443833c..87f1828d40d5 100644
--- a/drivers/xen/features.c
+++ b/drivers/xen/features.c
@@ -42,7 +42,7 @@ void xen_setup_features(void)
if (HYPERVISOR_xen_version(XENVER_get_features, &fi) < 0)
break;
for (j = 0; j < 32; j++)
- xen_features[i * 32 + j] = !!(fi.submap & 1<<j);
+ xen_features[i * 32 + j] = !!(fi.submap & 1U << j);
}
if (xen_pv_domain()) {
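The 1U is not cosmetic: for j == 31, the signed 1 << j shifts into the sign bit of int, which is undefined behavior (and -fsanitize=undefined reports it), while 1U << j is well defined. A small demonstration, keeping the undefined variant confined to a comment:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t submap = 0x80000000u;	/* only feature bit 31 set */
	unsigned j = 31;

	/* (1 << 31) overflows a signed int: undefined behavior.
	 * 1U << 31 is a well-defined unsigned shift. */
	printf("bit %u set: %d\n", j, !!(submap & 1U << j));	/* 1 */
	return 0;
}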
diff --git a/drivers/xen/gntdev-common.h b/drivers/xen/gntdev-common.h
index 20d7d059dadb..40ef379c28ab 100644
--- a/drivers/xen/gntdev-common.h
+++ b/drivers/xen/gntdev-common.h
@@ -16,6 +16,7 @@
#include <linux/mmu_notifier.h>
#include <linux/types.h>
#include <xen/interface/event_channel.h>
+#include <xen/grant_table.h>
struct gntdev_dmabuf_priv;
@@ -56,6 +57,7 @@ struct gntdev_grant_map {
struct gnttab_unmap_grant_ref *unmap_ops;
struct gnttab_map_grant_ref *kmap_ops;
struct gnttab_unmap_grant_ref *kunmap_ops;
+ bool *being_removed;
struct page **pages;
unsigned long pages_vm_start;
@@ -73,6 +75,11 @@ struct gntdev_grant_map {
/* Needed to avoid allocation in gnttab_dma_free_pages(). */
xen_pfn_t *frames;
#endif
+
+ /* Number of live grants */
+ atomic_t live_grants;
+ /* Needed to avoid allocation in __unmap_grant_pages */
+ struct gntab_unmap_queue_data unmap_data;
};
struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 59ffea800079..84b143eef395 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -35,6 +35,7 @@
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/refcount.h>
+#include <linux/workqueue.h>
#include <xen/xen.h>
#include <xen/grant_table.h>
@@ -60,10 +61,11 @@ module_param(limit, uint, 0644);
MODULE_PARM_DESC(limit,
"Maximum number of grants that may be mapped by one mapping request");
+/* True in PV mode, false otherwise */
static int use_ptemod;
-static int unmap_grant_pages(struct gntdev_grant_map *map,
- int offset, int pages);
+static void unmap_grant_pages(struct gntdev_grant_map *map,
+ int offset, int pages);
static struct miscdevice gntdev_miscdev;
@@ -120,6 +122,7 @@ static void gntdev_free_map(struct gntdev_grant_map *map)
kvfree(map->unmap_ops);
kvfree(map->kmap_ops);
kvfree(map->kunmap_ops);
+ kvfree(map->being_removed);
kfree(map);
}
@@ -140,10 +143,13 @@ struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,
add->unmap_ops = kvmalloc_array(count, sizeof(add->unmap_ops[0]),
GFP_KERNEL);
add->pages = kvcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
+ add->being_removed =
+ kvcalloc(count, sizeof(add->being_removed[0]), GFP_KERNEL);
if (NULL == add->grants ||
NULL == add->map_ops ||
NULL == add->unmap_ops ||
- NULL == add->pages)
+ NULL == add->pages ||
+ NULL == add->being_removed)
goto err;
if (use_ptemod) {
add->kmap_ops = kvmalloc_array(count, sizeof(add->kmap_ops[0]),
@@ -250,9 +256,36 @@ void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map)
if (!refcount_dec_and_test(&map->users))
return;
- if (map->pages && !use_ptemod)
+ if (map->pages && !use_ptemod) {
+ /*
+ * Increment the reference count. This ensures that the
+ * subsequent call to unmap_grant_pages() will not wind up
+ * re-entering itself. It *can* wind up calling
+ * gntdev_put_map() recursively, but such calls will be with a
+ * reference count greater than 1, so they will return before
+ * this code is reached. The recursion depth is thus limited to
+ * 1. Do NOT use refcount_inc() here, as it will detect that
+ * the reference count is zero and WARN().
+ */
+ refcount_set(&map->users, 1);
+
+ /*
+ * Unmap the grants. This may or may not be asynchronous, so it
+ * is possible that the reference count is 1 on return, but it
+ * could also be greater than 1.
+ */
unmap_grant_pages(map, 0, map->count);
+ /* Check if the memory now needs to be freed */
+ if (!refcount_dec_and_test(&map->users))
+ return;
+
+ /*
+ * All pages have been returned to the hypervisor, so free the
+ * map.
+ */
+ }
+
if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
notify_remote_via_evtchn(map->notify.event);
evtchn_put(map->notify.event);
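A toy model of the reference-count dance described in the gntdev_put_map() comment above: when the count reaches zero it is resurrected to 1 with a plain set (refcount_inc() would WARN on a zero count), so a teardown callback can safely take and drop its own reference and recursion stays bounded at depth 1. Illustrative only; the real code uses refcount_t and the async unmap machinery:

#include <stdio.h>

static int users;

static void put_map(void);

static void teardown_callback(void)
{
	users++;	/* async unmap path takes a reference... */
	put_map();	/* ...and drops it; users > 1 here, so no deep recursion */
}

static void put_map(void)
{
	if (--users > 0)
		return;

	users = 1;	/* plain set: an inc-from-zero would WARN */
	teardown_callback();
	if (--users > 0)
		return;	/* callback still holds a reference */

	printf("freed\n");	/* all references gone: free the map */
}

int main(void)
{
	users = 1;
	put_map();
	return 0;
}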
@@ -283,6 +316,7 @@ static int find_grant_ptes(pte_t *pte, unsigned long addr, void *data)
int gntdev_map_grant_pages(struct gntdev_grant_map *map)
{
+ size_t alloced = 0;
int i, err = 0;
if (!use_ptemod) {
@@ -331,97 +365,118 @@ int gntdev_map_grant_pages(struct gntdev_grant_map *map)
map->count);
for (i = 0; i < map->count; i++) {
- if (map->map_ops[i].status == GNTST_okay)
+ if (map->map_ops[i].status == GNTST_okay) {
map->unmap_ops[i].handle = map->map_ops[i].handle;
- else if (!err)
+ if (!use_ptemod)
+ alloced++;
+ } else if (!err)
err = -EINVAL;
if (map->flags & GNTMAP_device_map)
map->unmap_ops[i].dev_bus_addr = map->map_ops[i].dev_bus_addr;
if (use_ptemod) {
- if (map->kmap_ops[i].status == GNTST_okay)
+ if (map->kmap_ops[i].status == GNTST_okay) {
+ if (map->map_ops[i].status == GNTST_okay)
+ alloced++;
map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
- else if (!err)
+ } else if (!err)
err = -EINVAL;
}
}
+ atomic_add(alloced, &map->live_grants);
return err;
}
-static int __unmap_grant_pages(struct gntdev_grant_map *map, int offset,
- int pages)
+static void __unmap_grant_pages_done(int result,
+ struct gntab_unmap_queue_data *data)
{
- int i, err = 0;
- struct gntab_unmap_queue_data unmap_data;
-
- if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
- int pgno = (map->notify.addr >> PAGE_SHIFT);
- if (pgno >= offset && pgno < offset + pages) {
- /* No need for kmap, pages are in lowmem */
- uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno]));
- tmp[map->notify.addr & (PAGE_SIZE-1)] = 0;
- map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
- }
- }
-
- unmap_data.unmap_ops = map->unmap_ops + offset;
- unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL;
- unmap_data.pages = map->pages + offset;
- unmap_data.count = pages;
-
- err = gnttab_unmap_refs_sync(&unmap_data);
- if (err)
- return err;
+ unsigned int i;
+ struct gntdev_grant_map *map = data->data;
+ unsigned int offset = data->unmap_ops - map->unmap_ops;
- for (i = 0; i < pages; i++) {
- if (map->unmap_ops[offset+i].status)
- err = -EINVAL;
+ for (i = 0; i < data->count; i++) {
+ WARN_ON(map->unmap_ops[offset + i].status != GNTST_okay &&
+ map->unmap_ops[offset + i].handle != INVALID_GRANT_HANDLE);
pr_debug("unmap handle=%d st=%d\n",
map->unmap_ops[offset+i].handle,
map->unmap_ops[offset+i].status);
map->unmap_ops[offset+i].handle = INVALID_GRANT_HANDLE;
if (use_ptemod) {
- if (map->kunmap_ops[offset+i].status)
- err = -EINVAL;
+ WARN_ON(map->kunmap_ops[offset + i].status != GNTST_okay &&
+ map->kunmap_ops[offset + i].handle != INVALID_GRANT_HANDLE);
pr_debug("kunmap handle=%u st=%d\n",
map->kunmap_ops[offset+i].handle,
map->kunmap_ops[offset+i].status);
map->kunmap_ops[offset+i].handle = INVALID_GRANT_HANDLE;
}
}
- return err;
+ /*
+ * Decrease the live-grant counter. This must happen after the loop to
+ * prevent premature reuse of the grants by gnttab_mmap().
+ */
+ atomic_sub(data->count, &map->live_grants);
+
+ /* Release reference taken by __unmap_grant_pages */
+ gntdev_put_map(NULL, map);
+}
+
+static void __unmap_grant_pages(struct gntdev_grant_map *map, int offset,
+ int pages)
+{
+ if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
+ int pgno = (map->notify.addr >> PAGE_SHIFT);
+
+ if (pgno >= offset && pgno < offset + pages) {
+ /* No need for kmap, pages are in lowmem */
+ uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno]));
+
+ tmp[map->notify.addr & (PAGE_SIZE-1)] = 0;
+ map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
+ }
+ }
+
+ map->unmap_data.unmap_ops = map->unmap_ops + offset;
+ map->unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL;
+ map->unmap_data.pages = map->pages + offset;
+ map->unmap_data.count = pages;
+ map->unmap_data.done = __unmap_grant_pages_done;
+ map->unmap_data.data = map;
+ refcount_inc(&map->users); /* to keep map alive during async call below */
+
+ gnttab_unmap_refs_async(&map->unmap_data);
}
-static int unmap_grant_pages(struct gntdev_grant_map *map, int offset,
- int pages)
+static void unmap_grant_pages(struct gntdev_grant_map *map, int offset,
+ int pages)
{
- int range, err = 0;
+ int range;
+
+ if (atomic_read(&map->live_grants) == 0)
+ return; /* Nothing to do */
pr_debug("unmap %d+%d [%d+%d]\n", map->index, map->count, offset, pages);
/* It is possible the requested range will have a "hole" where we
* already unmapped some of the grants. Only unmap valid ranges.
*/
- while (pages && !err) {
- while (pages &&
- map->unmap_ops[offset].handle == INVALID_GRANT_HANDLE) {
+ while (pages) {
+ while (pages && map->being_removed[offset]) {
offset++;
pages--;
}
range = 0;
while (range < pages) {
- if (map->unmap_ops[offset + range].handle ==
- INVALID_GRANT_HANDLE)
+ if (map->being_removed[offset + range])
break;
+ map->being_removed[offset + range] = true;
range++;
}
- err = __unmap_grant_pages(map, offset, range);
+ if (range)
+ __unmap_grant_pages(map, offset, range);
offset += range;
pages -= range;
}
-
- return err;
}
/* ------------------------------------------------------------------ */
@@ -473,7 +528,6 @@ static bool gntdev_invalidate(struct mmu_interval_notifier *mn,
struct gntdev_grant_map *map =
container_of(mn, struct gntdev_grant_map, notifier);
unsigned long mstart, mend;
- int err;
if (!mmu_notifier_range_blockable(range))
return false;
@@ -494,10 +548,9 @@ static bool gntdev_invalidate(struct mmu_interval_notifier *mn,
map->index, map->count,
map->vma->vm_start, map->vma->vm_end,
range->start, range->end, mstart, mend);
- err = unmap_grant_pages(map,
+ unmap_grant_pages(map,
(mstart - map->vma->vm_start) >> PAGE_SHIFT,
(mend - mstart) >> PAGE_SHIFT);
- WARN_ON(err);
return true;
}
@@ -985,6 +1038,10 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
goto unlock_out;
if (use_ptemod && map->vma)
goto unlock_out;
+ if (atomic_read(&map->live_grants)) {
+ err = -EAGAIN;
+ goto unlock_out;
+ }
refcount_inc(&map->users);
vma->vm_ops = &gntdev_vmops;
diff --git a/drivers/xen/grant-dma-iommu.c b/drivers/xen/grant-dma-iommu.c
new file mode 100644
index 000000000000..16b8bc0c0b33
--- /dev/null
+++ b/drivers/xen/grant-dma-iommu.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Stub IOMMU driver which does nothing.
+ * Its main purpose is to let the Xen grant DMA-mapping layer reuse the
+ * generic IOMMU device tree bindings.
+ *
+ * Copyright (C) 2022 EPAM Systems Inc.
+ */
+
+#include <linux/iommu.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+struct grant_dma_iommu_device {
+ struct device *dev;
+ struct iommu_device iommu;
+};
+
+/* Nothing is really needed here */
+static const struct iommu_ops grant_dma_iommu_ops;
+
+static const struct of_device_id grant_dma_iommu_of_match[] = {
+ { .compatible = "xen,grant-dma" },
+ { },
+};
+
+static int grant_dma_iommu_probe(struct platform_device *pdev)
+{
+ struct grant_dma_iommu_device *mmu;
+ int ret;
+
+ mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL);
+ if (!mmu)
+ return -ENOMEM;
+
+ mmu->dev = &pdev->dev;
+
+ ret = iommu_device_register(&mmu->iommu, &grant_dma_iommu_ops, &pdev->dev);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, mmu);
+
+ return 0;
+}
+
+static int grant_dma_iommu_remove(struct platform_device *pdev)
+{
+ struct grant_dma_iommu_device *mmu = platform_get_drvdata(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+ iommu_device_unregister(&mmu->iommu);
+
+ return 0;
+}
+
+static struct platform_driver grant_dma_iommu_driver = {
+ .driver = {
+ .name = "grant-dma-iommu",
+ .of_match_table = grant_dma_iommu_of_match,
+ },
+ .probe = grant_dma_iommu_probe,
+ .remove = grant_dma_iommu_remove,
+};
+
+static int __init grant_dma_iommu_init(void)
+{
+ struct device_node *iommu_np;
+
+ iommu_np = of_find_matching_node(NULL, grant_dma_iommu_of_match);
+ if (!iommu_np)
+ return 0;
+
+ of_node_put(iommu_np);
+
+ return platform_driver_register(&grant_dma_iommu_driver);
+}
+subsys_initcall(grant_dma_iommu_init);
diff --git a/drivers/xen/grant-dma-ops.c b/drivers/xen/grant-dma-ops.c
new file mode 100644
index 000000000000..fc0142484001
--- /dev/null
+++ b/drivers/xen/grant-dma-ops.c
@@ -0,0 +1,346 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Xen grant DMA-mapping layer - contains special DMA-mapping routines
+ * for providing grant references as DMA addresses to be used by frontends
+ * (e.g. virtio) in Xen guests
+ *
+ * Copyright (c) 2021, Juergen Gross <jgross@suse.com>
+ */
+
+#include <linux/module.h>
+#include <linux/dma-map-ops.h>
+#include <linux/of.h>
+#include <linux/pfn.h>
+#include <linux/xarray.h>
+#include <xen/xen.h>
+#include <xen/xen-ops.h>
+#include <xen/grant_table.h>
+
+struct xen_grant_dma_data {
+ /* The ID of the backend domain */
+ domid_t backend_domid;
+ /* Is the device behaving sanely? */
+ bool broken;
+};
+
+static DEFINE_XARRAY(xen_grant_dma_devices);
+
+#define XEN_GRANT_DMA_ADDR_OFF (1ULL << 63)
+
+static inline dma_addr_t grant_to_dma(grant_ref_t grant)
+{
+ return XEN_GRANT_DMA_ADDR_OFF | ((dma_addr_t)grant << PAGE_SHIFT);
+}
+
+static inline grant_ref_t dma_to_grant(dma_addr_t dma)
+{
+ return (grant_ref_t)((dma & ~XEN_GRANT_DMA_ADDR_OFF) >> PAGE_SHIFT);
+}
+
+static struct xen_grant_dma_data *find_xen_grant_dma_data(struct device *dev)
+{
+ struct xen_grant_dma_data *data;
+
+ xa_lock(&xen_grant_dma_devices);
+ data = xa_load(&xen_grant_dma_devices, (unsigned long)dev);
+ xa_unlock(&xen_grant_dma_devices);
+
+ return data;
+}
+
+/*
+ * DMA ops for Xen frontends (e.g. virtio).
+ *
+ * Used to act as a kind of software IOMMU for Xen guests by using grants as
+ * DMA addresses.
+ * Such a DMA address is formed by using the grant reference as a frame
+ * number and setting the highest address bit (this bit is for the backend
+ * to be able to distinguish it from e.g. an MMIO address).
+ */
+static void *xen_grant_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp,
+ unsigned long attrs)
+{
+ struct xen_grant_dma_data *data;
+ unsigned int i, n_pages = PFN_UP(size);
+ unsigned long pfn;
+ grant_ref_t grant;
+ void *ret;
+
+ data = find_xen_grant_dma_data(dev);
+ if (!data)
+ return NULL;
+
+ if (unlikely(data->broken))
+ return NULL;
+
+ ret = alloc_pages_exact(n_pages * PAGE_SIZE, gfp);
+ if (!ret)
+ return NULL;
+
+ pfn = virt_to_pfn(ret);
+
+ if (gnttab_alloc_grant_reference_seq(n_pages, &grant)) {
+ free_pages_exact(ret, n_pages * PAGE_SIZE);
+ return NULL;
+ }
+
+ for (i = 0; i < n_pages; i++) {
+ gnttab_grant_foreign_access_ref(grant + i, data->backend_domid,
+ pfn_to_gfn(pfn + i), 0);
+ }
+
+ *dma_handle = grant_to_dma(grant);
+
+ return ret;
+}
+
+static void xen_grant_dma_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, unsigned long attrs)
+{
+ struct xen_grant_dma_data *data;
+ unsigned int i, n_pages = PFN_UP(size);
+ grant_ref_t grant;
+
+ data = find_xen_grant_dma_data(dev);
+ if (!data)
+ return;
+
+ if (unlikely(data->broken))
+ return;
+
+ grant = dma_to_grant(dma_handle);
+
+ for (i = 0; i < n_pages; i++) {
+ if (unlikely(!gnttab_end_foreign_access_ref(grant + i))) {
+ dev_alert(dev, "Grant still in use by backend domain, disabled for further use\n");
+ data->broken = true;
+ return;
+ }
+ }
+
+ gnttab_free_grant_reference_seq(grant, n_pages);
+
+ free_pages_exact(vaddr, n_pages * PAGE_SIZE);
+}
+
+static struct page *xen_grant_dma_alloc_pages(struct device *dev, size_t size,
+ dma_addr_t *dma_handle,
+ enum dma_data_direction dir,
+ gfp_t gfp)
+{
+ void *vaddr;
+
+ vaddr = xen_grant_dma_alloc(dev, size, dma_handle, gfp, 0);
+ if (!vaddr)
+ return NULL;
+
+ return virt_to_page(vaddr);
+}
+
+static void xen_grant_dma_free_pages(struct device *dev, size_t size,
+ struct page *vaddr, dma_addr_t dma_handle,
+ enum dma_data_direction dir)
+{
+ xen_grant_dma_free(dev, size, page_to_virt(vaddr), dma_handle, 0);
+}
+
+static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct xen_grant_dma_data *data;
+ unsigned int i, n_pages = PFN_UP(size);
+ grant_ref_t grant;
+ dma_addr_t dma_handle;
+
+ if (WARN_ON(dir == DMA_NONE))
+ return DMA_MAPPING_ERROR;
+
+ data = find_xen_grant_dma_data(dev);
+ if (!data)
+ return DMA_MAPPING_ERROR;
+
+ if (unlikely(data->broken))
+ return DMA_MAPPING_ERROR;
+
+ if (gnttab_alloc_grant_reference_seq(n_pages, &grant))
+ return DMA_MAPPING_ERROR;
+
+ for (i = 0; i < n_pages; i++) {
+ gnttab_grant_foreign_access_ref(grant + i, data->backend_domid,
+ xen_page_to_gfn(page) + i, dir == DMA_TO_DEVICE);
+ }
+
+ dma_handle = grant_to_dma(grant) + offset;
+
+ return dma_handle;
+}
+
+static void xen_grant_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct xen_grant_dma_data *data;
+ unsigned int i, n_pages = PFN_UP(size);
+ grant_ref_t grant;
+
+ if (WARN_ON(dir == DMA_NONE))
+ return;
+
+ data = find_xen_grant_dma_data(dev);
+ if (!data)
+ return;
+
+ if (unlikely(data->broken))
+ return;
+
+ grant = dma_to_grant(dma_handle);
+
+ for (i = 0; i < n_pages; i++) {
+ if (unlikely(!gnttab_end_foreign_access_ref(grant + i))) {
+ dev_alert(dev, "Grant still in use by backend domain, disabled for further use\n");
+ data->broken = true;
+ return;
+ }
+ }
+
+ gnttab_free_grant_reference_seq(grant, n_pages);
+}
+
+static void xen_grant_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct scatterlist *s;
+ unsigned int i;
+
+ if (WARN_ON(dir == DMA_NONE))
+ return;
+
+ for_each_sg(sg, s, nents, i)
+ xen_grant_dma_unmap_page(dev, s->dma_address, sg_dma_len(s), dir,
+ attrs);
+}
+
+static int xen_grant_dma_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct scatterlist *s;
+ unsigned int i;
+
+ if (WARN_ON(dir == DMA_NONE))
+ return -EINVAL;
+
+ for_each_sg(sg, s, nents, i) {
+ s->dma_address = xen_grant_dma_map_page(dev, sg_page(s), s->offset,
+ s->length, dir, attrs);
+ if (s->dma_address == DMA_MAPPING_ERROR)
+ goto out;
+
+ sg_dma_len(s) = s->length;
+ }
+
+ return nents;
+
+out:
+ xen_grant_dma_unmap_sg(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
+ sg_dma_len(sg) = 0;
+
+ return -EIO;
+}
+
+static int xen_grant_dma_supported(struct device *dev, u64 mask)
+{
+ return mask == DMA_BIT_MASK(64);
+}
+
+static const struct dma_map_ops xen_grant_dma_ops = {
+ .alloc = xen_grant_dma_alloc,
+ .free = xen_grant_dma_free,
+ .alloc_pages = xen_grant_dma_alloc_pages,
+ .free_pages = xen_grant_dma_free_pages,
+ .mmap = dma_common_mmap,
+ .get_sgtable = dma_common_get_sgtable,
+ .map_page = xen_grant_dma_map_page,
+ .unmap_page = xen_grant_dma_unmap_page,
+ .map_sg = xen_grant_dma_map_sg,
+ .unmap_sg = xen_grant_dma_unmap_sg,
+ .dma_supported = xen_grant_dma_supported,
+};
+
+bool xen_is_grant_dma_device(struct device *dev)
+{
+ struct device_node *iommu_np;
+ bool has_iommu;
+
+ /* XXX Handle only DT devices for now */
+ if (!dev->of_node)
+ return false;
+
+ iommu_np = of_parse_phandle(dev->of_node, "iommus", 0);
+ has_iommu = iommu_np && of_device_is_compatible(iommu_np, "xen,grant-dma");
+ of_node_put(iommu_np);
+
+ return has_iommu;
+}
+
+void xen_grant_setup_dma_ops(struct device *dev)
+{
+ struct xen_grant_dma_data *data;
+ struct of_phandle_args iommu_spec;
+
+ data = find_xen_grant_dma_data(dev);
+ if (data) {
+ dev_err(dev, "Xen grant DMA data is already created\n");
+ return;
+ }
+
+ /* XXX ACPI device unsupported for now */
+ if (!dev->of_node)
+ goto err;
+
+ if (of_parse_phandle_with_args(dev->of_node, "iommus", "#iommu-cells",
+ 0, &iommu_spec)) {
+ dev_err(dev, "Cannot parse iommus property\n");
+ goto err;
+ }
+
+ if (!of_device_is_compatible(iommu_spec.np, "xen,grant-dma") ||
+ iommu_spec.args_count != 1) {
+ dev_err(dev, "Incompatible IOMMU node\n");
+ of_node_put(iommu_spec.np);
+ goto err;
+ }
+
+ of_node_put(iommu_spec.np);
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ goto err;
+
+ /*
+ * The endpoint ID here means the ID of the domain where the corresponding
+ * backend is running.
+ */
+ data->backend_domid = iommu_spec.args[0];
+
+ if (xa_err(xa_store(&xen_grant_dma_devices, (unsigned long)dev, data,
+ GFP_KERNEL))) {
+ dev_err(dev, "Cannot store Xen grant DMA data\n");
+ goto err;
+ }
+
+ dev->dma_ops = &xen_grant_dma_ops;
+
+ return;
+
+err:
+ dev_err(dev, "Cannot set up Xen grant DMA ops, retain platform DMA ops\n");
+}
+
+MODULE_DESCRIPTION("Xen grant DMA-mapping layer");
+MODULE_AUTHOR("Juergen Gross <jgross@suse.com>");
+MODULE_LICENSE("GPL");
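The DMA address encoding used by this file is plain arithmetic: the grant reference becomes the frame number and bit 63 marks the address as grant-based. A standalone round-trip of the grant_to_dma()/dma_to_grant() math, assuming 4 KiB pages:

#include <stdint.h>
#include <stdio.h>

#define GRANT_ADDR_OFF	(1ULL << 63)	/* XEN_GRANT_DMA_ADDR_OFF */
#define PAGE_SHIFT	12		/* assumed 4 KiB pages */

static uint64_t grant_to_dma(uint32_t grant)
{
	return GRANT_ADDR_OFF | ((uint64_t)grant << PAGE_SHIFT);
}

static uint32_t dma_to_grant(uint64_t dma)
{
	return (uint32_t)((dma & ~GRANT_ADDR_OFF) >> PAGE_SHIFT);
}

int main(void)
{
	uint32_t grant = 0x1234;
	uint64_t dma = grant_to_dma(grant) + 0x80;	/* page offset survives */

	/* dma=0x8000000001234080 grant=0x1234 */
	printf("dma=%#llx grant=%#x\n",
	       (unsigned long long)dma, dma_to_grant(dma));
	return 0;
}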
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 7a18292540bc..738029de3c67 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -33,6 +33,7 @@
#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
+#include <linux/bitmap.h>
#include <linux/memblock.h>
#include <linux/sched.h>
#include <linux/mm.h>
@@ -70,9 +71,32 @@
static grant_ref_t **gnttab_list;
static unsigned int nr_grant_frames;
+
+/*
+ * Handling of free grants:
+ *
+ * Free grants are in a simple list anchored in gnttab_free_head. They are
+ * linked by grant ref; the last element contains GNTTAB_LIST_END. The number
+ * of free entries is stored in gnttab_free_count.
+ * Additionally there is a bitmap of free entries anchored in
+ * gnttab_free_bitmap. This is used to simplify the allocation of
+ * multiple consecutive grants, which is needed e.g. for virtio support.
+ * gnttab_last_free is used to add free entries of new frames at the end of
+ * the free list.
+ * gnttab_free_tail_ptr specifies the variable which references the start
+ * of consecutive free grants ending with gnttab_last_free. This pointer is
+ * updated in a rather defensive way, in order to avoid performance hits in
+ * hot paths.
+ * All those variables are protected by gnttab_list_lock.
+ */
static int gnttab_free_count;
-static grant_ref_t gnttab_free_head;
+static unsigned int gnttab_size;
+static grant_ref_t gnttab_free_head = GNTTAB_LIST_END;
+static grant_ref_t gnttab_last_free = GNTTAB_LIST_END;
+static grant_ref_t *gnttab_free_tail_ptr;
+static unsigned long *gnttab_free_bitmap;
static DEFINE_SPINLOCK(gnttab_list_lock);
+
struct grant_frames xen_auto_xlat_grant_frames;
static unsigned int xen_gnttab_version;
module_param_named(version, xen_gnttab_version, uint, 0);
@@ -168,16 +192,116 @@ static int get_free_entries(unsigned count)
ref = head = gnttab_free_head;
gnttab_free_count -= count;
- while (count-- > 1)
- head = gnttab_entry(head);
+ while (count--) {
+ bitmap_clear(gnttab_free_bitmap, head, 1);
+ if (gnttab_free_tail_ptr == __gnttab_entry(head))
+ gnttab_free_tail_ptr = &gnttab_free_head;
+ if (count)
+ head = gnttab_entry(head);
+ }
gnttab_free_head = gnttab_entry(head);
gnttab_entry(head) = GNTTAB_LIST_END;
+ if (!gnttab_free_count) {
+ gnttab_last_free = GNTTAB_LIST_END;
+ gnttab_free_tail_ptr = NULL;
+ }
+
spin_unlock_irqrestore(&gnttab_list_lock, flags);
return ref;
}
+static int get_seq_entry_count(void)
+{
+ if (gnttab_last_free == GNTTAB_LIST_END || !gnttab_free_tail_ptr ||
+ *gnttab_free_tail_ptr == GNTTAB_LIST_END)
+ return 0;
+
+ return gnttab_last_free - *gnttab_free_tail_ptr + 1;
+}
+
+/* Rebuilds the free grant list and tries to find count consecutive entries. */
+static int get_free_seq(unsigned int count)
+{
+ int ret = -ENOSPC;
+ unsigned int from, to;
+ grant_ref_t *last;
+
+ gnttab_free_tail_ptr = &gnttab_free_head;
+ last = &gnttab_free_head;
+
+ for (from = find_first_bit(gnttab_free_bitmap, gnttab_size);
+ from < gnttab_size;
+ from = find_next_bit(gnttab_free_bitmap, gnttab_size, to + 1)) {
+ to = find_next_zero_bit(gnttab_free_bitmap, gnttab_size,
+ from + 1);
+ if (ret < 0 && to - from >= count) {
+ ret = from;
+ bitmap_clear(gnttab_free_bitmap, ret, count);
+ from += count;
+ gnttab_free_count -= count;
+ if (from == to)
+ continue;
+ }
+
+ /*
+ * Recreate the free list in order to have it properly sorted.
+ * This is needed to make sure that the free tail has the maximum
+ * possible size.
+ */
+ while (from < to) {
+ *last = from;
+ last = __gnttab_entry(from);
+ gnttab_last_free = from;
+ from++;
+ }
+ if (to < gnttab_size)
+ gnttab_free_tail_ptr = __gnttab_entry(to - 1);
+ }
+
+ *last = GNTTAB_LIST_END;
+ if (gnttab_last_free != gnttab_size - 1)
+ gnttab_free_tail_ptr = NULL;
+
+ return ret;
+}
+
+static int get_free_entries_seq(unsigned int count)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&gnttab_list_lock, flags);
+
+ if (gnttab_free_count < count) {
+ ret = gnttab_expand(count - gnttab_free_count);
+ if (ret < 0)
+ goto out;
+ }
+
+ if (get_seq_entry_count() < count) {
+ ret = get_free_seq(count);
+ if (ret >= 0)
+ goto out;
+ ret = gnttab_expand(count - get_seq_entry_count());
+ if (ret < 0)
+ goto out;
+ }
+
+ ret = *gnttab_free_tail_ptr;
+ *gnttab_free_tail_ptr = gnttab_entry(ret + count - 1);
+ gnttab_free_count -= count;
+ if (!gnttab_free_count)
+ gnttab_free_tail_ptr = NULL;
+ bitmap_clear(gnttab_free_bitmap, ret, count);
+
+ out:
+ spin_unlock_irqrestore(&gnttab_list_lock, flags);
+
+ return ret;
+}
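get_free_seq() above walks the free bitmap with the find_first_bit()/find_next_zero_bit() pair, looking for a run of count consecutive free grants. A toy userspace version of the same run scan over a small bitmap (bit set = grant free; names and limits are illustrative):

#include <stdio.h>

/* Find `count` consecutive set bits in `map`; return start index or -1. */
static int find_free_run(unsigned long map, int nbits, int count)
{
	for (int from = 0; from < nbits; from++) {
		if (!(map >> from & 1))
			continue;
		int to = from;
		while (to < nbits && (map >> to & 1))
			to++;		/* [from, to) is a run of free bits */
		if (to - from >= count)
			return from;
		from = to;		/* skip past this too-short run */
	}
	return -1;
}

int main(void)
{
	unsigned long map = 0x1E6;	/* free bits: 1-2 and 5-8 */

	printf("%d\n", find_free_run(map, 10, 3));	/* 5 */
	printf("%d\n", find_free_run(map, 10, 5));	/* -1 */
	return 0;
}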
+
static void do_free_callbacks(void)
{
struct gnttab_free_callback *callback, *next;
@@ -204,21 +328,51 @@ static inline void check_free_callbacks(void)
do_free_callbacks();
}
-static void put_free_entry(grant_ref_t ref)
+static void put_free_entry_locked(grant_ref_t ref)
{
- unsigned long flags;
-
if (unlikely(ref < GNTTAB_NR_RESERVED_ENTRIES))
return;
- spin_lock_irqsave(&gnttab_list_lock, flags);
gnttab_entry(ref) = gnttab_free_head;
gnttab_free_head = ref;
+ if (!gnttab_free_count)
+ gnttab_last_free = ref;
+ if (gnttab_free_tail_ptr == &gnttab_free_head)
+ gnttab_free_tail_ptr = __gnttab_entry(ref);
gnttab_free_count++;
+ bitmap_set(gnttab_free_bitmap, ref, 1);
+}
+
+static void put_free_entry(grant_ref_t ref)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&gnttab_list_lock, flags);
+ put_free_entry_locked(ref);
check_free_callbacks();
spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
+static void gnttab_set_free(unsigned int start, unsigned int n)
+{
+ unsigned int i;
+
+ for (i = start; i < start + n - 1; i++)
+ gnttab_entry(i) = i + 1;
+
+ gnttab_entry(i) = GNTTAB_LIST_END;
+ if (!gnttab_free_count) {
+ gnttab_free_head = start;
+ gnttab_free_tail_ptr = &gnttab_free_head;
+ } else {
+ gnttab_entry(gnttab_last_free) = start;
+ }
+ gnttab_free_count += n;
+ gnttab_last_free = i;
+
+ bitmap_set(gnttab_free_bitmap, start, n);
+}
+
/*
* Following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2.
* Introducing a valid entry into the grant table:
@@ -450,23 +604,31 @@ void gnttab_free_grant_references(grant_ref_t head)
{
grant_ref_t ref;
unsigned long flags;
- int count = 1;
- if (head == GNTTAB_LIST_END)
- return;
+
spin_lock_irqsave(&gnttab_list_lock, flags);
- ref = head;
- while (gnttab_entry(ref) != GNTTAB_LIST_END) {
- ref = gnttab_entry(ref);
- count++;
+ while (head != GNTTAB_LIST_END) {
+ ref = gnttab_entry(head);
+ put_free_entry_locked(head);
+ head = ref;
}
- gnttab_entry(ref) = gnttab_free_head;
- gnttab_free_head = head;
- gnttab_free_count += count;
check_free_callbacks();
spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_references);
+void gnttab_free_grant_reference_seq(grant_ref_t head, unsigned int count)
+{
+ unsigned long flags;
+ unsigned int i;
+
+ spin_lock_irqsave(&gnttab_list_lock, flags);
+ for (i = count; i > 0; i--)
+ put_free_entry_locked(head + i - 1);
+ check_free_callbacks();
+ spin_unlock_irqrestore(&gnttab_list_lock, flags);
+}
+EXPORT_SYMBOL_GPL(gnttab_free_grant_reference_seq);
+
int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
{
int h = get_free_entries(count);
@@ -480,6 +642,24 @@ int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
}
EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);
+int gnttab_alloc_grant_reference_seq(unsigned int count, grant_ref_t *first)
+{
+ int h;
+
+ if (count == 1)
+ h = get_free_entries(1);
+ else
+ h = get_free_entries_seq(count);
+
+ if (h < 0)
+ return -ENOSPC;
+
+ *first = h;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gnttab_alloc_grant_reference_seq);
+
int gnttab_empty_grant_references(const grant_ref_t *private_head)
{
return (*private_head == GNTTAB_LIST_END);
@@ -572,16 +752,13 @@ static int grow_gnttab_list(unsigned int more_frames)
goto grow_nomem;
}
+ gnttab_set_free(gnttab_size, extra_entries);
- for (i = grefs_per_frame * nr_grant_frames;
- i < grefs_per_frame * new_nr_grant_frames - 1; i++)
- gnttab_entry(i) = i + 1;
-
- gnttab_entry(i) = gnttab_free_head;
- gnttab_free_head = grefs_per_frame * nr_grant_frames;
- gnttab_free_count += extra_entries;
+ if (!gnttab_free_tail_ptr)
+ gnttab_free_tail_ptr = __gnttab_entry(gnttab_size);
nr_grant_frames = new_nr_grant_frames;
+ gnttab_size += extra_entries;
check_free_callbacks();
@@ -1424,20 +1601,20 @@ static int gnttab_expand(unsigned int req_entries)
int gnttab_init(void)
{
int i;
- unsigned long max_nr_grant_frames;
+ unsigned long max_nr_grant_frames, max_nr_grefs;
unsigned int max_nr_glist_frames, nr_glist_frames;
- unsigned int nr_init_grefs;
int ret;
gnttab_request_version();
max_nr_grant_frames = gnttab_max_grant_frames();
+ max_nr_grefs = max_nr_grant_frames *
+ gnttab_interface->grefs_per_grant_frame;
nr_grant_frames = 1;
/* Determine the maximum number of frames required for the
* grant reference free list on the current hypervisor.
*/
- max_nr_glist_frames = (max_nr_grant_frames *
- gnttab_interface->grefs_per_grant_frame / RPP);
+ max_nr_glist_frames = max_nr_grefs / RPP;
gnttab_list = kmalloc_array(max_nr_glist_frames,
sizeof(grant_ref_t *),
@@ -1454,6 +1631,12 @@ int gnttab_init(void)
}
}
+ gnttab_free_bitmap = bitmap_zalloc(max_nr_grefs, GFP_KERNEL);
+ if (!gnttab_free_bitmap) {
+ ret = -ENOMEM;
+ goto ini_nomem;
+ }
+
ret = arch_gnttab_init(max_nr_grant_frames,
nr_status_frames(max_nr_grant_frames));
if (ret < 0)
@@ -1464,15 +1647,10 @@ int gnttab_init(void)
goto ini_nomem;
}
- nr_init_grefs = nr_grant_frames *
- gnttab_interface->grefs_per_grant_frame;
-
- for (i = GNTTAB_NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
- gnttab_entry(i) = i + 1;
+ gnttab_size = nr_grant_frames * gnttab_interface->grefs_per_grant_frame;
- gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END;
- gnttab_free_count = nr_init_grefs - GNTTAB_NR_RESERVED_ENTRIES;
- gnttab_free_head = GNTTAB_NR_RESERVED_ENTRIES;
+ gnttab_set_free(GNTTAB_NR_RESERVED_ENTRIES,
+ gnttab_size - GNTTAB_NR_RESERVED_ENTRIES);
printk("Grant table initialized\n");
return 0;
@@ -1481,6 +1659,7 @@ int gnttab_init(void)
for (i--; i >= 0; i--)
free_page((unsigned long)gnttab_list[i]);
kfree(gnttab_list);
+ bitmap_free(gnttab_free_bitmap);
return ret;
}
EXPORT_SYMBOL_GPL(gnttab_init);
diff --git a/drivers/xen/xlate_mmu.c b/drivers/xen/xlate_mmu.c
index 34742c6e189e..f17c4c03db30 100644
--- a/drivers/xen/xlate_mmu.c
+++ b/drivers/xen/xlate_mmu.c
@@ -261,7 +261,6 @@ int __init xen_xlate_map_ballooned_pages(xen_pfn_t **gfns, void **virt,
return 0;
}
-EXPORT_SYMBOL_GPL(xen_xlate_map_ballooned_pages);
struct remap_pfn {
struct mm_struct *mm;