Diffstat (limited to 'drivers')
-rw-r--r-- drivers/accel/ivpu/Kconfig | 1
-rw-r--r-- drivers/accel/ivpu/ivpu_hw_mtl.c | 22
-rw-r--r-- drivers/accel/ivpu/ivpu_hw_mtl_reg.h | 1
-rw-r--r-- drivers/accel/ivpu/ivpu_ipc.c | 4
-rw-r--r-- drivers/accel/ivpu/ivpu_job.c | 21
-rw-r--r-- drivers/accel/ivpu/ivpu_mmu.c | 22
-rw-r--r-- drivers/accel/qaic/qaic_control.c | 41
-rw-r--r-- drivers/accel/qaic/qaic_data.c | 97
-rw-r--r-- drivers/accel/qaic/qaic_drv.c | 6
-rw-r--r-- drivers/acpi/acpi_ffh.c | 2
-rw-r--r-- drivers/acpi/acpi_lpss.c | 10
-rw-r--r-- drivers/acpi/acpi_pad.c | 1
-rw-r--r-- drivers/acpi/acpica/achware.h | 2
-rw-r--r-- drivers/acpi/apei/apei-internal.h | 6
-rw-r--r-- drivers/acpi/apei/bert.c | 3
-rw-r--r-- drivers/acpi/apei/ghes.c | 4
-rw-r--r-- drivers/acpi/arm64/Makefile | 2
-rw-r--r-- drivers/acpi/arm64/agdi.c | 2
-rw-r--r-- drivers/acpi/arm64/apmt.c | 12
-rw-r--r-- drivers/acpi/arm64/init.c | 13
-rw-r--r-- drivers/acpi/arm64/init.h | 6
-rw-r--r-- drivers/acpi/arm64/iort.c | 1
-rw-r--r-- drivers/acpi/bus.c | 60
-rw-r--r-- drivers/acpi/button.c | 164
-rw-r--r-- drivers/acpi/ec.c | 31
-rw-r--r-- drivers/acpi/nfit/nfit.h | 2
-rw-r--r-- drivers/acpi/processor_idle.c | 4
-rw-r--r-- drivers/acpi/resource.c | 58
-rw-r--r-- drivers/acpi/scan.c | 81
-rw-r--r-- drivers/acpi/sleep.c | 18
-rw-r--r-- drivers/acpi/thermal.c | 287
-rw-r--r-- drivers/acpi/tiny-power-button.c | 49
-rw-r--r-- drivers/acpi/video_detect.c | 45
-rw-r--r-- drivers/acpi/x86/s2idle.c | 66
-rw-r--r-- drivers/acpi/x86/utils.c | 26
-rw-r--r-- drivers/android/binder.c | 28
-rw-r--r-- drivers/android/binder_alloc.c | 64
-rw-r--r-- drivers/android/binder_alloc.h | 4
-rw-r--r-- drivers/android/binder_alloc_selftest.c | 2
-rw-r--r-- drivers/ata/libata-core.c | 3
-rw-r--r-- drivers/ata/libata-eh.c | 2
-rw-r--r-- drivers/ata/libata-scsi.c | 56
-rw-r--r-- drivers/auxdisplay/ht16k33.c | 2
-rw-r--r-- drivers/auxdisplay/lcd2s.c | 2
-rw-r--r-- drivers/base/cacheinfo.c | 26
-rw-r--r-- drivers/base/class.c | 2
-rw-r--r-- drivers/base/dd.c | 6
-rw-r--r-- drivers/base/firmware_loader/main.c | 2
-rw-r--r-- drivers/base/node.c | 7
-rw-r--r-- drivers/base/power/domain.c | 15
-rw-r--r-- drivers/base/power/wakeup.c | 5
-rw-r--r-- drivers/base/regmap/Kconfig | 13
-rw-r--r-- drivers/base/regmap/regcache-maple.c | 5
-rw-r--r-- drivers/base/regmap/regcache.c | 3
-rw-r--r-- drivers/base/regmap/regmap-sdw.c | 4
-rw-r--r-- drivers/base/regmap/regmap-spi-avmm.c | 2
-rw-r--r-- drivers/base/regmap/regmap.c | 6
-rw-r--r-- drivers/block/amiflop.c | 20
-rw-r--r-- drivers/block/aoe/aoeblk.c | 8
-rw-r--r-- drivers/block/aoe/aoechr.c | 30
-rw-r--r-- drivers/block/ataflop.c | 43
-rw-r--r-- drivers/block/brd.c | 91
-rw-r--r-- drivers/block/drbd/drbd_bitmap.c | 4
-rw-r--r-- drivers/block/drbd/drbd_main.c | 14
-rw-r--r-- drivers/block/drbd/drbd_nl.c | 24
-rw-r--r-- drivers/block/drbd/drbd_receiver.c | 1
-rw-r--r-- drivers/block/floppy.c | 74
-rw-r--r-- drivers/block/loop.c | 26
-rw-r--r-- drivers/block/mtip32xx/mtip32xx.c | 4
-rw-r--r-- drivers/block/nbd.c | 15
-rw-r--r-- drivers/block/null_blk/main.c | 1
-rw-r--r-- drivers/block/pktcdvd.c | 560
-rw-r--r-- drivers/block/rbd.c | 68
-rw-r--r-- drivers/block/rnbd/Makefile | 6
-rw-r--r-- drivers/block/rnbd/rnbd-clt-sysfs.c | 24
-rw-r--r-- drivers/block/rnbd/rnbd-clt.c | 8
-rw-r--r-- drivers/block/rnbd/rnbd-common.c | 23
-rw-r--r-- drivers/block/rnbd/rnbd-proto.h | 31
-rw-r--r-- drivers/block/rnbd/rnbd-srv-sysfs.c | 28
-rw-r--r-- drivers/block/rnbd/rnbd-srv.c | 67
-rw-r--r-- drivers/block/rnbd/rnbd-srv.h | 4
-rw-r--r-- drivers/block/sunvdc.c | 2
-rw-r--r-- drivers/block/swim.c | 26
-rw-r--r-- drivers/block/swim3.c | 33
-rw-r--r-- drivers/block/ublk_drv.c | 507
-rw-r--r-- drivers/block/virtio_blk.c | 82
-rw-r--r-- drivers/block/xen-blkback/xenbus.c | 4
-rw-r--r-- drivers/block/xen-blkfront.c | 5
-rw-r--r-- drivers/block/z2ram.c | 8
-rw-r--r-- drivers/block/zram/zram_drv.c | 23
-rw-r--r-- drivers/bluetooth/btnxpuart.c | 6
-rw-r--r-- drivers/bluetooth/hci_qca.c | 6
-rw-r--r-- drivers/cdrom/cdrom.c | 42
-rw-r--r-- drivers/cdrom/gdrom.c | 12
-rw-r--r-- drivers/char/agp/parisc-agp.c | 15
-rw-r--r-- drivers/char/random.c | 4
-rw-r--r-- drivers/char/tpm/tpm-chip.c | 4
-rw-r--r-- drivers/char/tpm/tpm-interface.c | 10
-rw-r--r-- drivers/char/tpm/tpm_tis.c | 23
-rw-r--r-- drivers/char/tpm/tpm_tis_core.c | 43
-rw-r--r-- drivers/char/tpm/tpm_tis_core.h | 8
-rw-r--r-- drivers/clk/clk-composite.c | 5
-rw-r--r-- drivers/clk/clk-loongson2.c | 2
-rw-r--r-- drivers/clk/imx/clk-imx1.c | 1
-rw-r--r-- drivers/clk/imx/clk-imx27.c | 1
-rw-r--r-- drivers/clk/imx/clk-imx31.c | 1
-rw-r--r-- drivers/clk/imx/clk-imx35.c | 1
-rw-r--r-- drivers/clk/mediatek/clk-mt8365.c | 18
-rw-r--r-- drivers/clk/pxa/clk-pxa3xx.c | 2
-rw-r--r-- drivers/clocksource/Kconfig | 9
-rw-r--r-- drivers/clocksource/Makefile | 1
-rw-r--r-- drivers/clocksource/arm_arch_timer.c | 54
-rw-r--r-- drivers/clocksource/hyperv_timer.c | 96
-rw-r--r-- drivers/clocksource/ingenic-timer.c | 10
-rw-r--r-- drivers/clocksource/timer-cadence-ttc.c | 19
-rw-r--r-- drivers/clocksource/timer-imx-gpt.c | 25
-rw-r--r-- drivers/clocksource/timer-loongson1-pwm.c | 236
-rw-r--r-- drivers/cpufreq/Kconfig | 2
-rw-r--r-- drivers/cpufreq/Kconfig.x86 | 17
-rw-r--r-- drivers/cpufreq/acpi-cpufreq.c | 2
-rw-r--r-- drivers/cpufreq/amd-pstate.c | 177
-rw-r--r-- drivers/cpufreq/cpufreq.c | 3
-rw-r--r-- drivers/cpufreq/intel_pstate.c | 2
-rw-r--r-- drivers/cpufreq/pcc-cpufreq.c | 2
-rw-r--r-- drivers/cpuidle/cpuidle.c | 8
-rw-r--r-- drivers/cpuidle/poll_state.c | 4
-rw-r--r-- drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c | 2
-rw-r--r-- drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c | 2
-rw-r--r-- drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c | 2
-rw-r--r-- drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h | 2
-rw-r--r-- drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c | 2
-rw-r--r-- drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c | 2
-rw-r--r-- drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c | 2
-rw-r--r-- drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c | 2
-rw-r--r-- drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c | 2
-rw-r--r-- drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c | 2
-rw-r--r-- drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c | 2
-rw-r--r-- drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c | 2
-rw-r--r-- drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c | 2
-rw-r--r-- drivers/cxl/core/mbox.c | 12
-rw-r--r-- drivers/cxl/core/pci.c | 112
-rw-r--r-- drivers/cxl/core/port.c | 7
-rw-r--r-- drivers/cxl/cxl.h | 1
-rw-r--r-- drivers/cxl/cxlmem.h | 2
-rw-r--r-- drivers/cxl/cxlpci.h | 2
-rw-r--r-- drivers/cxl/mem.c | 3
-rw-r--r-- drivers/cxl/pci.c | 6
-rw-r--r-- drivers/cxl/port.c | 20
-rw-r--r-- drivers/devfreq/exynos-bus.c | 1
-rw-r--r-- drivers/devfreq/mtk-cci-devfreq.c | 3
-rw-r--r-- drivers/dma-buf/udmabuf.c | 47
-rw-r--r-- drivers/dma/at_hdmac.c | 17
-rw-r--r-- drivers/dma/at_xdmac.c | 7
-rw-r--r-- drivers/dma/idxd/cdev.c | 1
-rw-r--r-- drivers/dma/pl330.c | 8
-rw-r--r-- drivers/dma/ti/k3-udma.c | 4
-rw-r--r-- drivers/edac/Kconfig | 11
-rw-r--r-- drivers/edac/Makefile | 1
-rw-r--r-- drivers/edac/amd64_edac.c | 398
-rw-r--r-- drivers/edac/amd64_edac.h | 2
-rw-r--r-- drivers/edac/mce_amd.c | 3
-rw-r--r-- drivers/edac/npcm_edac.c | 543
-rw-r--r-- drivers/edac/qcom_edac.c | 118
-rw-r--r-- drivers/edac/thunderx_edac.c | 2
-rw-r--r-- drivers/firmware/arm_ffa/bus.c | 19
-rw-r--r-- drivers/firmware/arm_ffa/driver.c | 10
-rw-r--r-- drivers/firmware/arm_scmi/raw_mode.c | 2
-rw-r--r-- drivers/firmware/cirrus/cs_dsp.c | 5
-rw-r--r-- drivers/firmware/efi/Kconfig | 14
-rw-r--r-- drivers/firmware/efi/Makefile | 1
-rw-r--r-- drivers/firmware/efi/efi.c | 47
-rw-r--r-- drivers/firmware/efi/libstub/Makefile | 2
-rw-r--r-- drivers/firmware/efi/libstub/Makefile.zboot | 3
-rw-r--r-- drivers/firmware/efi/libstub/bitmap.c | 41
-rw-r--r-- drivers/firmware/efi/libstub/efistub.h | 9
-rw-r--r-- drivers/firmware/efi/libstub/find.c | 43
-rw-r--r-- drivers/firmware/efi/libstub/unaccepted_memory.c | 222
-rw-r--r-- drivers/firmware/efi/libstub/x86-stub.c | 75
-rw-r--r-- drivers/firmware/efi/unaccepted_memory.c | 147
-rw-r--r-- drivers/gpio/Kconfig | 2
-rw-r--r-- drivers/gpio/gpio-f7188x.c | 28
-rw-r--r-- drivers/gpio/gpio-mockup.c | 2
-rw-r--r-- drivers/gpio/gpio-sifive.c | 8
-rw-r--r-- drivers/gpio/gpio-sim.c | 18
-rw-r--r-- drivers/gpio/gpiolib.c | 13
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c | 27
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 18
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c | 60
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.h | 15
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 27
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 19
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 145
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c | 21
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c | 28
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c | 28
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_v10_0.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/soc15.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 25
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c | 42
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vi.c | 11
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 43
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c | 16
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc.c | 36
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 20
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 9
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c | 25
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/link_detection.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/link_validation.c | 3
-rw-r--r-- drivers/gpu/drm/amd/pm/amdgpu_pm.c | 12
-rw-r--r-- drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c | 29
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 18
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 20
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 92
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c | 10
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c | 5
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 4
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 33
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c | 5
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c | 5
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c | 2
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c | 5
-rw-r--r-- drivers/gpu/drm/ast/ast_dp.c | 55
-rw-r--r-- drivers/gpu/drm/ast/ast_drv.h | 5
-rw-r--r-- drivers/gpu/drm/ast/ast_main.c | 11
-rw-r--r-- drivers/gpu/drm/ast/ast_mode.c | 15
-rw-r--r-- drivers/gpu/drm/ast/ast_post.c | 3
-rw-r--r-- drivers/gpu/drm/bridge/ti-sn65dsi86.c | 4
-rw-r--r-- drivers/gpu/drm/display/drm_dp_mst_topology.c | 2
-rw-r--r-- drivers/gpu/drm/drm_fb_helper.c | 12
-rw-r--r-- drivers/gpu/drm/drm_managed.c | 22
-rw-r--r-- drivers/gpu/drm/drm_panel_orientation_quirks.c | 2
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_g2d.c | 2
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_g2d.h | 4
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_vidi.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_cdclk.c | 30
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display.c | 12
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_aux.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hdcp.c | 10
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c | 14
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_execlists.c | 12
-rw-r--r-- drivers/gpu/drm/i915/i915_perf.c | 17
-rw-r--r-- drivers/gpu/drm/lima/lima_sched.c | 2
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_mode.c | 5
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 2
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 2
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h | 16
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h | 4
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h | 4
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h | 4
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h | 4
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h | 1
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h | 1
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h | 12
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h | 8
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h | 24
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h | 16
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h | 16
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c | 17
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c | 23
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c | 5
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c | 1
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h | 3
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_audio.c | 12
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_audio.h | 2
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_catalog.c | 15
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_catalog.h | 3
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_display.c | 78
-rw-r--r-- drivers/gpu/drm/msm/msm_atomic.c | 2
-rw-r--r-- drivers/gpu/drm/msm/msm_drv.c | 4
-rw-r--r-- drivers/gpu/drm/msm/msm_gem.c | 22
-rw-r--r-- drivers/gpu/drm/msm/msm_gem_submit.c | 25
-rw-r--r-- drivers/gpu/drm/msm/msm_iommu.c | 9
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_acpi.c | 3
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_connector.c | 7
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_drm.c | 14
-rw-r--r-- drivers/gpu/drm/pl111/pl111_display.c | 2
-rw-r--r-- drivers/gpu/drm/pl111/pl111_drm.h | 4
-rw-r--r-- drivers/gpu/drm/pl111/pl111_drv.c | 8
-rw-r--r-- drivers/gpu/drm/pl111/pl111_versatile.c | 10
-rw-r--r-- drivers/gpu/drm/radeon/radeon_fbdev.c | 1
-rw-r--r-- drivers/gpu/drm/radeon/radeon_gem.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/radeon_irq_kms.c | 10
-rw-r--r-- drivers/gpu/drm/scheduler/sched_main.c | 3
-rw-r--r-- drivers/hid/hid-google-hammer.c | 2
-rw-r--r-- drivers/hid/hid-ids.h | 1
-rw-r--r-- drivers/hid/hid-logitech-hidpp.c | 12
-rw-r--r-- drivers/hid/wacom_sys.c | 21
-rw-r--r-- drivers/hid/wacom_wac.c | 2
-rw-r--r-- drivers/hv/channel_mgmt.c | 18
-rw-r--r-- drivers/hv/hv_common.c | 48
-rw-r--r-- drivers/hv/vmbus_drv.c | 5
-rw-r--r-- drivers/hwtracing/coresight/coresight-etm-perf.c | 1
-rw-r--r-- drivers/hwtracing/coresight/coresight-tmc-etr.c | 2
-rw-r--r-- drivers/hwtracing/coresight/coresight-trbe.c | 33
-rw-r--r-- drivers/hwtracing/coresight/coresight-trbe.h | 38
-rw-r--r-- drivers/i2c/busses/i2c-designware-core.h | 1
-rw-r--r-- drivers/i2c/busses/i2c-designware-slave.c | 4
-rw-r--r-- drivers/i2c/busses/i2c-img-scb.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-imx-lpi2c.c | 4
-rw-r--r-- drivers/i2c/busses/i2c-mchp-pci1xxxx.c | 6
-rw-r--r-- drivers/i2c/busses/i2c-mv64xxx.c | 11
-rw-r--r-- drivers/i2c/busses/i2c-qup.c | 21
-rw-r--r-- drivers/i2c/busses/i2c-sprd.c | 8
-rw-r--r-- drivers/idle/intel_idle.c | 231
-rw-r--r-- drivers/iio/accel/kionix-kx022a.c | 2
-rw-r--r-- drivers/iio/accel/st_accel_core.c | 4
-rw-r--r-- drivers/iio/adc/ad4130.c | 12
-rw-r--r-- drivers/iio/adc/ad7192.c | 8
-rw-r--r-- drivers/iio/adc/ad_sigma_delta.c | 4
-rw-r--r-- drivers/iio/adc/imx93_adc.c | 7
-rw-r--r-- drivers/iio/adc/mt6370-adc.c | 53
-rw-r--r-- drivers/iio/adc/mxs-lradc-adc.c | 10
-rw-r--r-- drivers/iio/adc/palmas_gpadc.c | 10
-rw-r--r-- drivers/iio/adc/stm32-adc.c | 61
-rw-r--r-- drivers/iio/addac/ad74413r.c | 2
-rw-r--r-- drivers/iio/dac/Makefile | 2
-rw-r--r-- drivers/iio/dac/mcp4725.c | 16
-rw-r--r-- drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c | 10
-rw-r--r-- drivers/iio/industrialio-gts-helper.c | 42
-rw-r--r-- drivers/iio/light/rohm-bu27034.c | 26
-rw-r--r-- drivers/iio/light/vcnl4035.c | 3
-rw-r--r-- drivers/iio/magnetometer/tmag5273.c | 5
-rw-r--r-- drivers/infiniband/core/cma.c | 4
-rw-r--r-- drivers/infiniband/core/uverbs_cmd.c | 7
-rw-r--r-- drivers/infiniband/core/uverbs_main.c | 12
-rw-r--r-- drivers/infiniband/hw/bnxt_re/bnxt_re.h | 2
-rw-r--r-- drivers/infiniband/hw/bnxt_re/ib_verbs.c | 11
-rw-r--r-- drivers/infiniband/hw/bnxt_re/main.c | 6
-rw-r--r-- drivers/infiniband/hw/bnxt_re/qplib_fp.c | 11
-rw-r--r-- drivers/infiniband/hw/bnxt_re/qplib_res.c | 12
-rw-r--r-- drivers/infiniband/hw/bnxt_re/qplib_sp.c | 7
-rw-r--r-- drivers/infiniband/hw/efa/efa_verbs.c | 2
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 25
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 2
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_mr.c | 43
-rw-r--r-- drivers/infiniband/hw/irdma/verbs.c | 12
-rw-r--r-- drivers/infiniband/hw/mlx5/counters.c | 89
-rw-r--r-- drivers/infiniband/hw/mlx5/fs.c | 276
-rw-r--r-- drivers/infiniband/hw/mlx5/fs.h | 16
-rw-r--r-- drivers/infiniband/hw/mlx5/main.c | 3
-rw-r--r-- drivers/infiniband/hw/mlx5/mlx5_ib.h | 14
-rw-r--r-- drivers/infiniband/hw/mlx5/qp.c | 3
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_comp.c | 26
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_cq.c | 4
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_net.c | 13
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_qp.c | 44
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_recv.c | 9
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_req.c | 30
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_resp.c | 17
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_verbs.c | 27
-rw-r--r-- drivers/infiniband/ulp/isert/ib_isert.c | 16
-rw-r--r-- drivers/infiniband/ulp/rtrs/rtrs-clt.c | 55
-rw-r--r-- drivers/infiniband/ulp/rtrs/rtrs.c | 4
-rw-r--r-- drivers/input/input.c | 2
-rw-r--r-- drivers/input/joystick/xpad.c | 1
-rw-r--r-- drivers/input/misc/soc_button_array.c | 30
-rw-r--r-- drivers/input/mouse/elantech.c | 9
-rw-r--r-- drivers/input/touchscreen/cyttsp5.c | 2
-rw-r--r-- drivers/input/touchscreen/sun4i-ts.c | 2
-rw-r--r-- drivers/iommu/Kconfig | 17
-rw-r--r-- drivers/iommu/amd/amd_iommu.h | 4
-rw-r--r-- drivers/iommu/amd/init.c | 24
-rw-r--r-- drivers/iommu/amd/iommu.c | 39
-rw-r--r-- drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c | 10
-rw-r--r-- drivers/iommu/mtk_iommu.c | 3
-rw-r--r-- drivers/iommu/rockchip-iommu.c | 14
-rw-r--r-- drivers/irqchip/irq-clps711x.c | 7
-rw-r--r-- drivers/irqchip/irq-ftintc010.c | 4
-rw-r--r-- drivers/irqchip/irq-gic-common.c | 10
-rw-r--r-- drivers/irqchip/irq-gic-common.h | 1
-rw-r--r-- drivers/irqchip/irq-gic-v3-its.c | 2
-rw-r--r-- drivers/irqchip/irq-gic-v3.c | 65
-rw-r--r-- drivers/irqchip/irq-jcore-aic.c | 7
-rw-r--r-- drivers/irqchip/irq-loongson-eiointc.c | 135
-rw-r--r-- drivers/irqchip/irq-loongson-liointc.c | 13
-rw-r--r-- drivers/irqchip/irq-loongson-pch-pic.c | 10
-rw-r--r-- drivers/irqchip/irq-mbigen.c | 31
-rw-r--r-- drivers/irqchip/irq-meson-gpio.c | 2
-rw-r--r-- drivers/irqchip/irq-mips-gic.c | 32
-rw-r--r-- drivers/irqchip/irq-mmp.c | 127
-rw-r--r-- drivers/irqchip/irq-mxs.c | 1
-rw-r--r-- drivers/irqchip/irq-stm32-exti.c | 13
-rw-r--r-- drivers/leds/rgb/leds-qcom-lpg.c | 8
-rw-r--r-- drivers/mailbox/mailbox-test.c | 10
-rw-r--r-- drivers/md/bcache/bcache.h | 12
-rw-r--r-- drivers/md/bcache/btree.c | 25
-rw-r--r-- drivers/md/bcache/btree.h | 1
-rw-r--r-- drivers/md/bcache/request.c | 4
-rw-r--r-- drivers/md/bcache/stats.h | 1
-rw-r--r-- drivers/md/bcache/super.c | 29
-rw-r--r-- drivers/md/bcache/sysfs.c | 31
-rw-r--r-- drivers/md/bcache/sysfs.h | 2
-rw-r--r-- drivers/md/bcache/writeback.c | 10
-rw-r--r-- drivers/md/dm-cache-metadata.c | 2
-rw-r--r-- drivers/md/dm-cache-target.c | 12
-rw-r--r-- drivers/md/dm-clone-target.c | 10
-rw-r--r-- drivers/md/dm-core.h | 7
-rw-r--r-- drivers/md/dm-crypt.c | 3
-rw-r--r-- drivers/md/dm-era-target.c | 6
-rw-r--r-- drivers/md/dm-init.c | 4
-rw-r--r-- drivers/md/dm-ioctl.c | 15
-rw-r--r-- drivers/md/dm-raid.c | 4
-rw-r--r-- drivers/md/dm-snap.c | 18
-rw-r--r-- drivers/md/dm-table.c | 37
-rw-r--r-- drivers/md/dm-thin-metadata.c | 22
-rw-r--r-- drivers/md/dm-thin.c | 12
-rw-r--r-- drivers/md/dm-verity-fec.c | 2
-rw-r--r-- drivers/md/dm-verity-target.c | 6
-rw-r--r-- drivers/md/dm-zoned-metadata.c | 6
-rw-r--r-- drivers/md/dm.c | 49
-rw-r--r-- drivers/md/dm.h | 2
-rw-r--r-- drivers/md/md-autodetect.c | 3
-rw-r--r-- drivers/md/md-bitmap.c | 93
-rw-r--r-- drivers/md/md-bitmap.h | 8
-rw-r--r-- drivers/md/md-cluster.c | 17
-rw-r--r-- drivers/md/md-multipath.c | 4
-rw-r--r-- drivers/md/md.c | 280
-rw-r--r-- drivers/md/md.h | 37
-rw-r--r-- drivers/md/raid1-10.c | 74
-rw-r--r-- drivers/md/raid1.c | 43
-rw-r--r-- drivers/md/raid1.h | 2
-rw-r--r-- drivers/md/raid10.c | 199
-rw-r--r-- drivers/md/raid10.h | 2
-rw-r--r-- drivers/md/raid5-cache.c | 24
-rw-r--r-- drivers/md/raid5-ppl.c | 4
-rw-r--r-- drivers/md/raid5.c | 70
-rw-r--r-- drivers/md/raid5.h | 2
-rw-r--r-- drivers/media/cec/core/cec-adap.c | 8
-rw-r--r-- drivers/media/cec/core/cec-core.c | 2
-rw-r--r-- drivers/media/cec/core/cec-priv.h | 1
-rw-r--r-- drivers/media/dvb-core/dvb_ca_en50221.c | 49
-rw-r--r-- drivers/media/dvb-core/dvb_demux.c | 4
-rw-r--r-- drivers/media/dvb-core/dvb_frontend.c | 16
-rw-r--r-- drivers/media/dvb-core/dvb_net.c | 38
-rw-r--r-- drivers/media/dvb-core/dvbdev.c | 84
-rw-r--r-- drivers/media/dvb-frontends/mn88443x.c | 2
-rw-r--r-- drivers/media/pci/netup_unidvb/netup_unidvb_core.c | 19
-rw-r--r-- drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateful.c | 3
-rw-r--r-- drivers/media/platform/qcom/camss/camss-video.c | 1
-rw-r--r-- drivers/media/platform/verisilicon/hantro_v4l2.c | 6
-rw-r--r-- drivers/media/usb/dvb-usb-v2/ce6230.c | 8
-rw-r--r-- drivers/media/usb/dvb-usb-v2/ec168.c | 12
-rw-r--r-- drivers/media/usb/dvb-usb-v2/rtl28xxu.c | 20
-rw-r--r-- drivers/media/usb/dvb-usb/az6027.c | 12
-rw-r--r-- drivers/media/usb/dvb-usb/digitv.c | 4
-rw-r--r-- drivers/media/usb/dvb-usb/dw2102.c | 2
-rw-r--r-- drivers/media/usb/pvrusb2/Kconfig | 1
-rw-r--r-- drivers/media/usb/ttusb-dec/ttusb_dec.c | 3
-rw-r--r-- drivers/media/usb/uvc/uvc_driver.c | 16
-rw-r--r-- drivers/media/v4l2-core/v4l2-mc.c | 3
-rw-r--r-- drivers/misc/eeprom/Kconfig | 1
-rw-r--r-- drivers/misc/fastrpc.c | 31
-rw-r--r-- drivers/mmc/core/block.c | 17
-rw-r--r-- drivers/mmc/core/pwrseq_sd8787.c | 34
-rw-r--r-- drivers/mmc/host/bcm2835.c | 4
-rw-r--r-- drivers/mmc/host/litex_mmc.c | 1
-rw-r--r-- drivers/mmc/host/meson-gx-mmc.c | 14
-rw-r--r-- drivers/mmc/host/mmci.c | 3
-rw-r--r-- drivers/mmc/host/mtk-sd.c | 2
-rw-r--r-- drivers/mmc/host/mvsdio.c | 2
-rw-r--r-- drivers/mmc/host/omap.c | 2
-rw-r--r-- drivers/mmc/host/omap_hsmmc.c | 6
-rw-r--r-- drivers/mmc/host/owl-mmc.c | 2
-rw-r--r-- drivers/mmc/host/sdhci-acpi.c | 2
-rw-r--r-- drivers/mmc/host/sdhci-cadence.c | 8
-rw-r--r-- drivers/mmc/host/sdhci-esdhc-imx.c | 18
-rw-r--r-- drivers/mmc/host/sdhci-msm.c | 3
-rw-r--r-- drivers/mmc/host/sdhci-spear.c | 4
-rw-r--r-- drivers/mmc/host/sh_mmcif.c | 2
-rw-r--r-- drivers/mmc/host/sunxi-mmc.c | 4
-rw-r--r-- drivers/mmc/host/usdhi6rol0.c | 6
-rw-r--r-- drivers/mmc/host/vub300.c | 3
-rw-r--r-- drivers/mtd/devices/block2mtd.c | 64
-rw-r--r-- drivers/mtd/mtd_blkdevs.c | 8
-rw-r--r-- drivers/mtd/mtdblock.c | 2
-rw-r--r-- drivers/mtd/mtdchar.c | 8
-rw-r--r-- drivers/mtd/nand/raw/ingenic/ingenic_ecc.h | 8
-rw-r--r-- drivers/mtd/nand/raw/marvell_nand.c | 10
-rw-r--r-- drivers/mtd/spi-nor/core.c | 5
-rw-r--r-- drivers/mtd/spi-nor/spansion.c | 4
-rw-r--r-- drivers/mtd/ubi/block.c | 9
-rw-r--r-- drivers/net/bonding/bond_main.c | 8
-rw-r--r-- drivers/net/can/Kconfig | 2
-rw-r--r-- drivers/net/can/bxcan.c | 34
-rw-r--r-- drivers/net/can/dev/skb.c | 3
-rw-r--r-- drivers/net/can/kvaser_pciefd.c | 51
-rw-r--r-- drivers/net/dsa/lan9303-core.c | 4
-rw-r--r-- drivers/net/dsa/mt7530.c | 48
-rw-r--r-- drivers/net/dsa/mt7530.h | 6
-rw-r--r-- drivers/net/dsa/mv88e6xxx/chip.c | 2
-rw-r--r-- drivers/net/dsa/mv88e6xxx/port.h | 2
-rw-r--r-- drivers/net/dsa/ocelot/felix_vsc9959.c | 2
-rw-r--r-- drivers/net/dsa/qca/Kconfig | 1
-rw-r--r-- drivers/net/dsa/rzn1_a5psw.c | 83
-rw-r--r-- drivers/net/dsa/rzn1_a5psw.h | 3
-rw-r--r-- drivers/net/ethernet/3com/3c515.c | 4
-rw-r--r-- drivers/net/ethernet/3com/3c589_cs.c | 11
-rw-r--r-- drivers/net/ethernet/8390/ne.c | 1
-rw-r--r-- drivers/net/ethernet/8390/smc-ultra.c | 1
-rw-r--r-- drivers/net/ethernet/8390/wd.c | 1
-rw-r--r-- drivers/net/ethernet/amd/lance.c | 1
-rw-r--r-- drivers/net/ethernet/amd/pds_core/dev.c | 10
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 12
-rw-r--r-- drivers/net/ethernet/broadcom/bcmsysport.c | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 9
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 42
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c | 1
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 30
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.h | 3
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmmii.c | 5
-rw-r--r-- drivers/net/ethernet/cirrus/cs89x0.c | 2
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_main.c | 4
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c | 7
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc.c | 16
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_qos.c | 4
-rw-r--r-- drivers/net/ethernet/freescale/fec_main.c | 33
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c | 25
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h | 8
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h | 1
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 15
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 4
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h | 5
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 5
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf.h | 2
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_main.c | 15
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_register.h | 2
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_virtchnl.c | 5
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_common.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_common.h | 2
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_dcb_lib.c | 5
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_gnss.c | 72
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_gnss.h | 10
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_lib.c | 5
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_main.c | 20
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_sriov.c | 8
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_txrx.c | 10
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_txrx.h | 9
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_vf_lib.c | 19
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_vf_lib.h | 1
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_virtchnl.c | 1
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_mac.c | 4
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_ethtool.c | 3
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_main.c | 8
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_main.c | 12
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 2
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_main.c | 7
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 7
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c | 29
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c | 4
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/params.c | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/params.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c | 46
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c | 124
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 22
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c | 17
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_common.c | 11
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 76
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 153
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 19
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 16
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 16
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 50
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c | 70
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/main.c | 11
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/mr.c | 21
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c | 82
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c | 13
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c | 27
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/thermal.c | 15
-rw-r--r-- drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c | 13
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_main.c | 10
-rw-r--r-- drivers/net/ethernet/microsoft/mana/mana_en.c | 10
-rw-r--r-- drivers/net/ethernet/microsoft/mana/mana_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nic/main.h | 2
-rw-r--r-- drivers/net/ethernet/nvidia/forcedeth.c | 1
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_l2.c | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede.h | 4
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 24
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_main.c | 34
-rw-r--r-- drivers/net/ethernet/qualcomm/qca_spi.c | 3
-rw-r--r-- drivers/net/ethernet/realtek/r8169_main.c | 44
-rw-r--r-- drivers/net/ethernet/renesas/rswitch.c | 38
-rw-r--r-- drivers/net/ethernet/sfc/ef10.c | 25
-rw-r--r-- drivers/net/ethernet/sfc/ef100_netdev.c | 4
-rw-r--r-- drivers/net/ethernet/sfc/ef100_nic.c | 7
-rw-r--r-- drivers/net/ethernet/sfc/ef100_tx.c | 4
-rw-r--r-- drivers/net/ethernet/sfc/ef100_tx.h | 2
-rw-r--r-- drivers/net/ethernet/sfc/efx_channels.c | 2
-rw-r--r-- drivers/net/ethernet/sfc/efx_devlink.c | 95
-rw-r--r-- drivers/net/ethernet/sfc/siena/efx_channels.c | 2
-rw-r--r-- drivers/net/ethernet/sfc/tc.c | 27
-rw-r--r-- drivers/net/ethernet/sfc/tx_common.c | 4
-rw-r--r-- drivers/net/ethernet/sfc/tx_common.h | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c | 3
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 12
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c | 6
-rw-r--r-- drivers/net/ethernet/sun/cassini.c | 2
-rw-r--r-- drivers/net/ethernet/ti/am65-cpsw-nuss.c | 2
-rw-r--r-- drivers/net/ieee802154/adf7242.c | 2
-rw-r--r-- drivers/net/ieee802154/mac802154_hwsim.c | 6
-rw-r--r-- drivers/net/ipa/ipa_endpoint.c | 2
-rw-r--r-- drivers/net/ipvlan/ipvlan_l3s.c | 4
-rw-r--r-- drivers/net/macsec.c | 12
-rw-r--r-- drivers/net/mdio/mdio-i2c.c | 15
-rw-r--r-- drivers/net/pcs/pcs-xpcs.c | 2
-rw-r--r-- drivers/net/phy/dp83867.c | 24
-rw-r--r-- drivers/net/phy/mdio_bus.c | 2
-rw-r--r-- drivers/net/phy/mscc/mscc.h | 2
-rw-r--r-- drivers/net/phy/mscc/mscc_main.c | 82
-rw-r--r-- drivers/net/phy/mxl-gpy.c | 16
-rw-r--r-- drivers/net/phy/phy_device.c | 15
-rw-r--r-- drivers/net/phy/phylink.c | 50
-rw-r--r-- drivers/net/team/team.c | 7
-rw-r--r-- drivers/net/tun.c | 15
-rw-r--r-- drivers/net/usb/cdc_ncm.c | 24
-rw-r--r-- drivers/net/usb/qmi_wwan.c | 4
-rw-r--r-- drivers/net/virtio_net.c | 77
-rw-r--r-- drivers/net/wan/lapbether.c | 3
-rw-r--r-- drivers/net/wireless/broadcom/b43/b43.h | 2
-rw-r--r-- drivers/net/wireless/broadcom/b43legacy/b43legacy.h | 2
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c | 5
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c | 11
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c | 11
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/fw/acpi.c | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/fw/dbg.c | 19
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/d3.c | 8
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c | 5
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 5
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/link.c | 12
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 55
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c | 9
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c | 14
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 1
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/nvm.c | 10
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/rfi.c | 16
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/rs.c | 11
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 9
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 13
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 2
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/mac.c | 3
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h | 2
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c | 3
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7996/mac.c | 21
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 1
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtw88/mac80211.c | 16
-rw-r--r-- drivers/net/wireless/realtek/rtw88/main.c | 19
-rw-r--r-- drivers/net/wireless/realtek/rtw88/main.h | 3
-rw-r--r-- drivers/net/wireless/realtek/rtw88/ps.c | 43
-rw-r--r-- drivers/net/wireless/realtek/rtw88/ps.h | 2
-rw-r--r-- drivers/net/wireless/realtek/rtw88/sdio.c | 8
-rw-r--r-- drivers/net/wireless/realtek/rtw88/usb.h | 2
-rw-r--r-- drivers/net/wireless/realtek/rtw89/core.c | 3
-rw-r--r-- drivers/net/wireless/realtek/rtw89/mac.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtw89/mac.h | 2
-rw-r--r-- drivers/net/wireless/realtek/rtw89/mac80211.c | 15
-rw-r--r-- drivers/net/wireless/realtek/rtw89/ps.c | 26
-rw-r--r-- drivers/net/wireless/realtek/rtw89/ps.h | 1
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852b.c | 28
-rw-r--r-- drivers/net/wireless/virtual/mac80211_hwsim.c | 3
-rw-r--r-- drivers/net/wwan/iosm/iosm_ipc_imem.c | 27
-rw-r--r-- drivers/net/wwan/iosm/iosm_ipc_imem_ops.c | 12
-rw-r--r-- drivers/net/wwan/iosm/iosm_ipc_imem_ops.h | 6
-rw-r--r-- drivers/net/wwan/iosm/iosm_ipc_mux_codec.c | 15
-rw-r--r-- drivers/net/wwan/iosm/iosm_ipc_mux_codec.h | 2
-rw-r--r-- drivers/net/wwan/t7xx/t7xx_pci.c | 18
-rw-r--r-- drivers/net/wwan/t7xx/t7xx_pci.h | 1
-rw-r--r-- drivers/nfc/fdp/fdp.c | 3
-rw-r--r-- drivers/nfc/nfcsim.c | 4
-rw-r--r-- drivers/nubus/nubus.c | 13
-rw-r--r-- drivers/nubus/proc.c | 33
-rw-r--r-- drivers/nvme/host/Makefile | 2
-rw-r--r-- drivers/nvme/host/auth.c | 6
-rw-r--r-- drivers/nvme/host/constants.c | 2
-rw-r--r-- drivers/nvme/host/core.c | 724
-rw-r--r-- drivers/nvme/host/fabrics.c | 241
-rw-r--r-- drivers/nvme/host/fabrics.h | 21
-rw-r--r-- drivers/nvme/host/hwmon.c | 4
-rw-r--r-- drivers/nvme/host/ioctl.c | 72
-rw-r--r-- drivers/nvme/host/multipath.c | 7
-rw-r--r-- drivers/nvme/host/nvme.h | 23
-rw-r--r-- drivers/nvme/host/pci.c | 13
-rw-r--r-- drivers/nvme/host/rdma.c | 81
-rw-r--r-- drivers/nvme/host/sysfs.c | 668
-rw-r--r-- drivers/nvme/host/tcp.c | 92
-rw-r--r-- drivers/nvme/target/fabrics-cmd-auth.c | 13
-rw-r--r-- drivers/nvme/target/fcloop.c | 5
-rw-r--r-- drivers/nvme/target/io-cmd-bdev.c | 4
-rw-r--r-- drivers/nvme/target/nvmet.h | 2
-rw-r--r-- drivers/nvme/target/passthru.c | 2
-rw-r--r-- drivers/of/overlay.c | 1
-rw-r--r-- drivers/pci/controller/pci-hyperv.c | 139
-rw-r--r-- drivers/pci/quirks.c | 9
-rw-r--r-- drivers/perf/Kconfig | 8
-rw-r--r-- drivers/perf/Makefile | 1
-rw-r--r-- drivers/perf/apple_m1_cpu_pmu.c | 30
-rw-r--r-- drivers/perf/arm-cci.c | 4
-rw-r--r-- drivers/perf/arm-cmn.c | 172
-rw-r--r-- drivers/perf/arm_cspmu/Kconfig | 3
-rw-r--r-- drivers/perf/arm_cspmu/arm_cspmu.c | 89
-rw-r--r-- drivers/perf/arm_cspmu/arm_cspmu.h | 5
-rw-r--r-- drivers/perf/arm_dmc620_pmu.c | 22
-rw-r--r-- drivers/perf/arm_pmu.c | 2
-rw-r--r-- drivers/perf/arm_pmuv3.c | 21
-rw-r--r-- drivers/perf/fsl_imx9_ddr_perf.c | 711
-rw-r--r-- drivers/perf/hisilicon/Makefile | 2
-rw-r--r-- drivers/perf/hisilicon/hisi_pcie_pmu.c | 2
-rw-r--r-- drivers/perf/hisilicon/hisi_uncore_pa_pmu.c | 127
-rw-r--r-- drivers/perf/hisilicon/hisi_uncore_pmu.c | 4
-rw-r--r-- drivers/perf/hisilicon/hisi_uncore_pmu.h | 14
-rw-r--r-- drivers/perf/hisilicon/hisi_uncore_uc_pmu.c | 578
-rw-r--r-- drivers/perf/qcom_l2_pmu.c | 2
-rw-r--r-- drivers/phy/amlogic/phy-meson-g12a-mipi-dphy-analog.c | 2
-rw-r--r-- drivers/phy/mediatek/phy-mtk-hdmi-mt8195.c | 10
-rw-r--r-- drivers/phy/qualcomm/phy-qcom-qmp-combo.c | 5
-rw-r--r-- drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c | 5
-rw-r--r-- drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c | 2
-rw-r--r-- drivers/pinctrl/meson/pinctrl-meson-axg.c | 1
-rw-r--r-- drivers/pinctrl/pinctrl-amd.c | 6
-rw-r--r-- drivers/platform/chrome/cros_ec_i2c.c | 2
-rw-r--r-- drivers/platform/chrome/cros_ec_lpc.c | 15
-rw-r--r-- drivers/platform/chrome/cros_ec_spi.c | 8
-rw-r--r-- drivers/platform/chrome/cros_hps_i2c.c | 2
-rw-r--r-- drivers/platform/chrome/cros_typec_switch.c | 11
-rw-r--r-- drivers/platform/mellanox/mlxbf-pmc.c | 5
-rw-r--r-- drivers/platform/surface/aggregator/controller.c | 2
-rw-r--r-- drivers/platform/surface/surface_aggregator_tabletsw.c | 10
-rw-r--r-- drivers/platform/x86/amd/pmc.c | 4
-rw-r--r-- drivers/platform/x86/amd/pmf/core.c | 42
-rw-r--r-- drivers/platform/x86/asus-nb-wmi.c | 2
-rw-r--r-- drivers/platform/x86/intel/ifs/load.c | 2
-rw-r--r-- drivers/platform/x86/intel/int3472/clk_and_regulator.c | 13
-rw-r--r-- drivers/platform/x86/intel/speed_select_if/isst_if_common.c | 12
-rw-r--r-- drivers/power/supply/ab8500_btemp.c | 6
-rw-r--r-- drivers/power/supply/ab8500_fg.c | 6
-rw-r--r-- drivers/power/supply/axp288_fuel_gauge.c | 2
-rw-r--r-- drivers/power/supply/bq24190_charger.c | 1
-rw-r--r-- drivers/power/supply/bq25890_charger.c | 5
-rw-r--r-- drivers/power/supply/bq27xxx_battery.c | 181
-rw-r--r-- drivers/power/supply/bq27xxx_battery_i2c.c | 3
-rw-r--r-- drivers/power/supply/mt6360_charger.c | 4
-rw-r--r-- drivers/power/supply/power_supply_core.c | 15
-rw-r--r-- drivers/power/supply/power_supply_leds.c | 5
-rw-r--r-- drivers/power/supply/power_supply_sysfs.c | 3
-rw-r--r-- drivers/power/supply/rt9467-charger.c | 2
-rw-r--r-- drivers/power/supply/sbs-charger.c | 2
-rw-r--r-- drivers/power/supply/sc27xx_fuel_gauge.c | 9
-rw-r--r-- drivers/powercap/Kconfig | 18
-rw-r--r-- drivers/powercap/Makefile | 1
-rw-r--r-- drivers/powercap/intel_rapl_common.c | 883
-rw-r--r-- drivers/powercap/intel_rapl_msr.c | 31
-rw-r--r-- drivers/powercap/intel_rapl_tpmi.c | 325
-rw-r--r-- drivers/pwm/pwm-atmel.c | 2
-rw-r--r-- drivers/pwm/pwm-pxa.c | 2
-rw-r--r-- drivers/ras/debugfs.c | 2
-rw-r--r-- drivers/regulator/core.c | 4
-rw-r--r-- drivers/regulator/mt6359-regulator.c | 7
-rw-r--r-- drivers/regulator/pca9450-regulator.c | 4
-rw-r--r-- drivers/regulator/qcom-rpmh-regulator.c | 30
-rw-r--r-- drivers/s390/block/dasd.c | 10
-rw-r--r-- drivers/s390/block/dasd_eckd.c | 33
-rw-r--r-- drivers/s390/block/dasd_genhd.c | 5
-rw-r--r-- drivers/s390/block/dasd_int.h | 3
-rw-r--r-- drivers/s390/block/dasd_ioctl.c | 6
-rw-r--r-- drivers/s390/block/dcssblk.c | 11
-rw-r--r-- drivers/s390/cio/device.c | 7
-rw-r--r-- drivers/s390/cio/qdio.h | 2
-rw-r--r-- drivers/s390/crypto/pkey_api.c | 3
-rw-r--r-- drivers/s390/net/ism_drv.c | 8
-rw-r--r-- drivers/scsi/aacraid/aacraid.h | 1
-rw-r--r-- drivers/scsi/aacraid/commsup.c | 6
-rw-r--r-- drivers/scsi/aacraid/linit.c | 14
-rw-r--r-- drivers/scsi/aacraid/src.c | 25
-rw-r--r-- drivers/scsi/ch.c | 3
-rw-r--r-- drivers/scsi/lpfc/lpfc_bsg.c | 8
-rw-r--r-- drivers/scsi/qla2xxx/qla_def.h | 1
-rw-r--r-- drivers/scsi/qla2xxx/qla_init.c | 3
-rw-r--r-- drivers/scsi/qla2xxx/qla_inline.h | 3
-rw-r--r-- drivers/scsi/qla2xxx/qla_isr.c | 3
-rw-r--r-- drivers/scsi/scsi_bsg.c | 4
-rw-r--r-- drivers/scsi/scsi_ioctl.c | 38
-rw-r--r-- drivers/scsi/scsi_lib.c | 5
-rw-r--r-- drivers/scsi/sd.c | 39
-rw-r--r-- drivers/scsi/sg.c | 16
-rw-r--r-- drivers/scsi/sr.c | 22
-rw-r--r-- drivers/scsi/st.c | 2
-rw-r--r-- drivers/scsi/stex.c | 4
-rw-r--r-- drivers/scsi/storvsc_drv.c | 10
-rw-r--r-- drivers/soc/fsl/qe/Kconfig | 4
-rw-r--r-- drivers/soc/qcom/Makefile | 3
-rw-r--r-- drivers/soc/qcom/icc-bwmon.c | 4
-rw-r--r-- drivers/soc/qcom/ramp_controller.c | 2
-rw-r--r-- drivers/soc/qcom/rmtfs_mem.c | 1
-rw-r--r-- drivers/soc/qcom/rpmh-rsc.c | 2
-rw-r--r-- drivers/soc/qcom/rpmhpd.c | 16
-rw-r--r-- drivers/soundwire/dmi-quirks.c | 7
-rw-r--r-- drivers/soundwire/qcom.c | 17
-rw-r--r-- drivers/soundwire/stream.c | 4
-rw-r--r-- drivers/spi/spi-cadence-quadspi.c | 7
-rw-r--r-- drivers/spi/spi-cadence.c | 105
-rw-r--r-- drivers/spi/spi-dw-mmio.c | 8
-rw-r--r-- drivers/spi/spi-fsl-dspi.c | 15
-rw-r--r-- drivers/spi/spi-fsl-lpspi.c | 7
-rw-r--r-- drivers/spi/spi-geni-qcom.c | 4
-rw-r--r-- drivers/spi/spi-mt65xx.c | 3
-rw-r--r-- drivers/spi/spi-qup.c | 37
-rw-r--r-- drivers/staging/media/atomisp/i2c/atomisp-ov2680.c | 4
-rw-r--r-- drivers/staging/media/imx/imx8mq-mipi-csi2.c | 2
-rw-r--r-- drivers/staging/octeon/TODO | 1
-rw-r--r-- drivers/target/iscsi/iscsi_target.c | 2
-rw-r--r-- drivers/target/iscsi/iscsi_target_login.c | 63
-rw-r--r-- drivers/target/iscsi/iscsi_target_nego.c | 74
-rw-r--r-- drivers/target/iscsi/iscsi_target_util.c | 51
-rw-r--r-- drivers/target/iscsi/iscsi_target_util.h | 4
-rw-r--r-- drivers/target/target_core_iblock.c | 11
-rw-r--r-- drivers/target/target_core_pscsi.c | 9
-rw-r--r-- drivers/target/target_core_transport.c | 2
-rw-r--r-- drivers/tee/amdtee/amdtee_if.h | 10
-rw-r--r-- drivers/tee/amdtee/call.c | 30
-rw-r--r-- drivers/tee/optee/smc_abi.c | 4
-rw-r--r-- drivers/thermal/Kconfig | 8
-rw-r--r-- drivers/thermal/amlogic_thermal.c | 3
-rw-r--r-- drivers/thermal/armada_thermal.c | 32
-rw-r--r-- drivers/thermal/imx8mm_thermal.c | 3
-rw-r--r-- drivers/thermal/imx_sc_thermal.c | 3
-rw-r--r-- drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c | 218
-rw-r--r-- drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.h | 57
-rw-r--r-- drivers/thermal/intel/int340x_thermal/int3400_thermal.c | 4
-rw-r--r-- drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c | 11
-rw-r--r-- drivers/thermal/intel/intel_soc_dts_iosf.c | 2
-rw-r--r-- drivers/thermal/k3_bandgap.c | 3
-rw-r--r-- drivers/thermal/mediatek/auxadc_thermal.c | 14
-rw-r--r-- drivers/thermal/mediatek/lvts_thermal.c | 4
-rw-r--r-- drivers/thermal/qcom/qcom-spmi-adc-tm5.c | 4
-rw-r--r-- drivers/thermal/qcom/qcom-spmi-temp-alarm.c | 38
-rw-r--r-- drivers/thermal/qcom/tsens-v0_1.c | 126
-rw-r--r-- drivers/thermal/qcom/tsens-v1.c | 22
-rw-r--r-- drivers/thermal/qcom/tsens.c | 26
-rw-r--r-- drivers/thermal/qcom/tsens.h | 6
-rw-r--r-- drivers/thermal/qoriq_thermal.c | 52
-rw-r--r-- drivers/thermal/rcar_gen3_thermal.c | 141
-rw-r--r-- drivers/thermal/st/st_thermal.c | 4
-rw-r--r-- drivers/thermal/st/st_thermal.h | 2
-rw-r--r-- drivers/thermal/st/st_thermal_memmap.c | 6
-rw-r--r-- drivers/thermal/sun8i_thermal.c | 59
-rw-r--r-- drivers/thermal/tegra/tegra30-tsensor.c | 3
-rw-r--r-- drivers/thermal/thermal-generic-adc.c | 4
-rw-r--r-- drivers/thermal/thermal_core.h | 2
-rw-r--r-- drivers/thermal/thermal_hwmon.c | 5
-rw-r--r-- drivers/thermal/ti-soc-thermal/ti-thermal-common.c | 3
-rw-r--r-- drivers/thunderbolt/dma_test.c | 8
-rw-r--r-- drivers/thunderbolt/nhi.c | 34
-rw-r--r-- drivers/thunderbolt/nhi_regs.h | 2
-rw-r--r-- drivers/thunderbolt/tb.c | 17
-rw-r--r-- drivers/thunderbolt/tunnel.c | 2
-rw-r--r-- drivers/tty/serial/8250/8250_bcm7271.c | 7
-rw-r--r-- drivers/tty/serial/8250/8250_exar.c | 17
-rw-r--r-- drivers/tty/serial/8250/8250_pci.c | 5
-rw-r--r-- drivers/tty/serial/8250/8250_port.c | 1
-rw-r--r-- drivers/tty/serial/8250/8250_tegra.c | 4
-rw-r--r-- drivers/tty/serial/Kconfig | 6
-rw-r--r-- drivers/tty/serial/arc_uart.c | 7
-rw-r--r-- drivers/tty/serial/cpm_uart/cpm_uart.h | 2
-rw-r--r-- drivers/tty/serial/fsl_lpuart.c | 46
-rw-r--r-- drivers/tty/serial/lantiq.c | 1
-rw-r--r-- drivers/tty/serial/qcom_geni_serial.c | 9
-rw-r--r-- drivers/tty/tty_io.c | 4
-rw-r--r-- drivers/tty/vt/vc_screen.c | 11
-rw-r--r-- drivers/ufs/core/ufs-mcq.c | 5
-rw-r--r-- drivers/ufs/core/ufshcd.c | 10
-rw-r--r-- drivers/usb/cdns3/cdns3-gadget.c | 13
-rw-r--r-- drivers/usb/class/usbtmc.c | 2
-rw-r--r-- drivers/usb/core/buffer.c | 41
-rw-r--r-- drivers/usb/core/devio.c | 20
-rw-r--r-- drivers/usb/dwc3/core.c | 7
-rw-r--r-- drivers/usb/dwc3/core.h | 2
-rw-r--r-- drivers/usb/dwc3/debugfs.c | 109
-rw-r--r-- drivers/usb/dwc3/dwc3-qcom.c | 11
-rw-r--r-- drivers/usb/dwc3/gadget.c | 80
-rw-r--r-- drivers/usb/gadget/function/f_fs.c | 2
-rw-r--r-- drivers/usb/gadget/function/u_ether.c | 3
-rw-r--r-- drivers/usb/gadget/udc/amd5536udc_pci.c | 3
-rw-r--r-- drivers/usb/gadget/udc/core.c | 71
-rw-r--r-- drivers/usb/gadget/udc/renesas_usb3.c | 4
-rw-r--r-- drivers/usb/host/uhci-pci.c | 10
-rw-r--r-- drivers/usb/host/xhci-pci.c | 12
-rw-r--r-- drivers/usb/host/xhci-ring.c | 29
-rw-r--r-- drivers/usb/host/xhci.h | 2
-rw-r--r-- drivers/usb/serial/option.c | 16
-rw-r--r-- drivers/usb/storage/scsiglue.c | 28
-rw-r--r-- drivers/usb/typec/altmodes/displayport.c | 4
-rw-r--r-- drivers/usb/typec/pd.c | 2
-rw-r--r-- drivers/usb/typec/tipd/core.c | 5
-rw-r--r-- drivers/usb/typec/ucsi/ucsi.c | 11
-rw-r--r-- drivers/vdpa/mlx5/net/mlx5_vnet.c | 2
-rw-r--r-- drivers/vdpa/vdpa_user/vduse_dev.c | 3
-rw-r--r-- drivers/vfio/vfio_iommu_type1.c | 5
-rw-r--r-- drivers/vhost/net.c | 11
-rw-r--r-- drivers/vhost/vdpa.c | 34
-rw-r--r-- drivers/vhost/vhost.c | 93
-rw-r--r-- drivers/vhost/vhost.h | 10
-rw-r--r-- drivers/video/fbdev/Kconfig | 2
-rw-r--r-- drivers/video/fbdev/arcfb.c | 5
-rw-r--r-- drivers/video/fbdev/aty/atyfb_base.c | 5
-rw-r--r-- drivers/video/fbdev/au1100fb.c | 11
-rw-r--r-- drivers/video/fbdev/au1200fb.c | 6
-rw-r--r-- drivers/video/fbdev/broadsheetfb.c | 5
-rw-r--r-- drivers/video/fbdev/bw2.c | 6
-rw-r--r-- drivers/video/fbdev/core/bitblit.c | 3
-rw-r--r-- drivers/video/fbdev/core/fbmem.c | 2
-rw-r--r-- drivers/video/fbdev/i810/i810_dvt.c | 3
-rw-r--r-- drivers/video/fbdev/imsttfb.c | 12
-rw-r--r-- drivers/video/fbdev/matrox/matroxfb_maven.c | 2
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c | 3
-rw-r--r-- drivers/video/fbdev/ssd1307fb.c | 2
-rw-r--r-- drivers/video/fbdev/stifb.c | 6
-rw-r--r-- drivers/video/fbdev/udlfb.c | 27
-rw-r--r-- drivers/virt/coco/sev-guest/Kconfig | 1
-rw-r--r-- drivers/xen/pvcalls-back.c | 9
961 files changed, 15668 insertions, 7416 deletions
diff --git a/drivers/accel/ivpu/Kconfig b/drivers/accel/ivpu/Kconfig
index 9bdf168bf1d0..1a4c4ed9d113 100644
--- a/drivers/accel/ivpu/Kconfig
+++ b/drivers/accel/ivpu/Kconfig
@@ -7,6 +7,7 @@ config DRM_ACCEL_IVPU
depends on PCI && PCI_MSI
select FW_LOADER
select SHMEM
+ select GENERIC_ALLOCATOR
help
Choose this option if you have a system that has a 14th generation Intel CPU
or newer. VPU stands for Versatile Processing Unit and it's a CPU-integrated
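The new select pulls in the kernel's general-purpose chunk allocator (lib/genalloc.c); selecting GENERIC_ALLOCATOR here suggests the driver calls the gen_pool_*() API somewhere in its memory management, and without the select a config with nothing else enabling that option would fail to link. A minimal, illustrative sketch of the API this option provides (not ivpu code; the pool geometry below is made up):

#include <linux/genalloc.h>
#include <linux/sizes.h>

/* Sketch: manage a 1 MiB address region with 4 KiB granularity. */
static unsigned long example_carve(unsigned long base)
{
	struct gen_pool *pool;
	unsigned long addr;

	pool = gen_pool_create(12, -1);	/* order 12 = 4 KiB chunks, any NUMA node */
	if (!pool)
		return 0;

	if (gen_pool_add(pool, base, SZ_1M, -1)) {
		gen_pool_destroy(pool);
		return 0;
	}

	addr = gen_pool_alloc(pool, SZ_4K);	/* returns 0 on failure */
	/* ... use addr, then gen_pool_free(pool, addr, SZ_4K) ... */
	return addr;
}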
diff --git a/drivers/accel/ivpu/ivpu_hw_mtl.c b/drivers/accel/ivpu/ivpu_hw_mtl.c
index 382ec127be8e..fef35422c6f0 100644
--- a/drivers/accel/ivpu/ivpu_hw_mtl.c
+++ b/drivers/accel/ivpu/ivpu_hw_mtl.c
@@ -197,6 +197,11 @@ static void ivpu_pll_init_frequency_ratios(struct ivpu_device *vdev)
hw->pll.pn_ratio = clamp_t(u8, fuse_pn_ratio, hw->pll.min_ratio, hw->pll.max_ratio);
}
+static int ivpu_hw_mtl_wait_for_vpuip_bar(struct ivpu_device *vdev)
+{
+ return REGV_POLL_FLD(MTL_VPU_HOST_SS_CPR_RST_CLR, AON, 0, 100);
+}
+
static int ivpu_pll_drive(struct ivpu_device *vdev, bool enable)
{
struct ivpu_hw_info *hw = vdev->hw;
@@ -239,6 +244,12 @@ static int ivpu_pll_drive(struct ivpu_device *vdev, bool enable)
ivpu_err(vdev, "Timed out waiting for PLL ready status\n");
return ret;
}
+
+ ret = ivpu_hw_mtl_wait_for_vpuip_bar(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Timed out waiting for VPUIP bar\n");
+ return ret;
+ }
}
return 0;
@@ -256,7 +267,7 @@ static int ivpu_pll_disable(struct ivpu_device *vdev)
static void ivpu_boot_host_ss_rst_clr_assert(struct ivpu_device *vdev)
{
- u32 val = REGV_RD32(MTL_VPU_HOST_SS_CPR_RST_CLR);
+ u32 val = 0;
val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_CLR, TOP_NOC, val);
val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_CLR, DSS_MAS, val);
@@ -754,9 +765,8 @@ static int ivpu_hw_mtl_power_down(struct ivpu_device *vdev)
{
int ret = 0;
- if (ivpu_hw_mtl_reset(vdev)) {
+ if (!ivpu_hw_mtl_is_idle(vdev) && ivpu_hw_mtl_reset(vdev)) {
ivpu_err(vdev, "Failed to reset the VPU\n");
- ret = -EIO;
}
if (ivpu_pll_disable(vdev)) {
@@ -764,8 +774,10 @@ static int ivpu_hw_mtl_power_down(struct ivpu_device *vdev)
ret = -EIO;
}
- if (ivpu_hw_mtl_d0i3_enable(vdev))
- ivpu_warn(vdev, "Failed to enable D0I3\n");
+ if (ivpu_hw_mtl_d0i3_enable(vdev)) {
+ ivpu_err(vdev, "Failed to enter D0I3\n");
+ ret = -EIO;
+ }
return ret;
}
diff --git a/drivers/accel/ivpu/ivpu_hw_mtl_reg.h b/drivers/accel/ivpu/ivpu_hw_mtl_reg.h
index d83ccfd9a871..593b8ff07417 100644
--- a/drivers/accel/ivpu/ivpu_hw_mtl_reg.h
+++ b/drivers/accel/ivpu/ivpu_hw_mtl_reg.h
@@ -91,6 +91,7 @@
#define MTL_VPU_HOST_SS_CPR_RST_SET_MSS_MAS_MASK BIT_MASK(11)
#define MTL_VPU_HOST_SS_CPR_RST_CLR 0x00000098u
+#define MTL_VPU_HOST_SS_CPR_RST_CLR_AON_MASK BIT_MASK(0)
#define MTL_VPU_HOST_SS_CPR_RST_CLR_TOP_NOC_MASK BIT_MASK(1)
#define MTL_VPU_HOST_SS_CPR_RST_CLR_DSS_MAS_MASK BIT_MASK(10)
#define MTL_VPU_HOST_SS_CPR_RST_CLR_MSS_MAS_MASK BIT_MASK(11)
diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c
index 3adcfa80fc0e..fa0af59e39ab 100644
--- a/drivers/accel/ivpu/ivpu_ipc.c
+++ b/drivers/accel/ivpu/ivpu_ipc.c
@@ -183,9 +183,7 @@ ivpu_ipc_send(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, struct v
struct ivpu_ipc_info *ipc = vdev->ipc;
int ret;
- ret = mutex_lock_interruptible(&ipc->lock);
- if (ret)
- return ret;
+ mutex_lock(&ipc->lock);
if (!ipc->on) {
ret = -EAGAIN;
diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c
index 3c6f1e16cf2f..d45be0615b47 100644
--- a/drivers/accel/ivpu/ivpu_job.c
+++ b/drivers/accel/ivpu/ivpu_job.c
@@ -431,6 +431,7 @@ ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32
struct ivpu_file_priv *file_priv = file->driver_priv;
struct ivpu_device *vdev = file_priv->vdev;
struct ww_acquire_ctx acquire_ctx;
+ enum dma_resv_usage usage;
struct ivpu_bo *bo;
int ret;
u32 i;
@@ -461,22 +462,28 @@ ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32
job->cmd_buf_vpu_addr = bo->vpu_addr + commands_offset;
- ret = drm_gem_lock_reservations((struct drm_gem_object **)job->bos, 1, &acquire_ctx);
+ ret = drm_gem_lock_reservations((struct drm_gem_object **)job->bos, buf_count,
+ &acquire_ctx);
if (ret) {
ivpu_warn(vdev, "Failed to lock reservations: %d\n", ret);
return ret;
}
- ret = dma_resv_reserve_fences(bo->base.resv, 1);
- if (ret) {
- ivpu_warn(vdev, "Failed to reserve fences: %d\n", ret);
- goto unlock_reservations;
+ for (i = 0; i < buf_count; i++) {
+ ret = dma_resv_reserve_fences(job->bos[i]->base.resv, 1);
+ if (ret) {
+ ivpu_warn(vdev, "Failed to reserve fences: %d\n", ret);
+ goto unlock_reservations;
+ }
}
- dma_resv_add_fence(bo->base.resv, job->done_fence, DMA_RESV_USAGE_WRITE);
+ for (i = 0; i < buf_count; i++) {
+ usage = (i == CMD_BUF_IDX) ? DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_BOOKKEEP;
+ dma_resv_add_fence(job->bos[i]->base.resv, job->done_fence, usage);
+ }
unlock_reservations:
- drm_gem_unlock_reservations((struct drm_gem_object **)job->bos, 1, &acquire_ctx);
+ drm_gem_unlock_reservations((struct drm_gem_object **)job->bos, buf_count, &acquire_ctx);
wmb(); /* Flush write combining buffers */
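The reservation hunk above widens the old single-BO handling to every buffer in the job: lock all reservations together, pre-reserve one fence slot per BO, then attach the job's done-fence with DMA_RESV_USAGE_WRITE on the command buffer and DMA_RESV_USAGE_BOOKKEEP on the rest. A condensed sketch of the same pattern, detached from the driver (objs, count, fence and index 0 are stand-ins for job->bos, buf_count, job->done_fence and CMD_BUF_IDX):

#include <drm/drm_gem.h>
#include <linux/dma-resv.h>

/* Sketch: attach one done-fence to every BO of a submission. */
static int attach_done_fence(struct drm_gem_object **objs, int count,
			     struct dma_fence *fence)
{
	struct ww_acquire_ctx ctx;
	int i, ret;

	ret = drm_gem_lock_reservations(objs, count, &ctx);
	if (ret)
		return ret;

	for (i = 0; i < count; i++) {
		ret = dma_resv_reserve_fences(objs[i]->resv, 1);
		if (ret)
			goto unlock;
	}

	for (i = 0; i < count; i++)
		dma_resv_add_fence(objs[i]->resv, fence,
				   i == 0 ? DMA_RESV_USAGE_WRITE
					  : DMA_RESV_USAGE_BOOKKEEP);
unlock:
	drm_gem_unlock_reservations(objs, count, &ctx);
	return ret;
}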
diff --git a/drivers/accel/ivpu/ivpu_mmu.c b/drivers/accel/ivpu/ivpu_mmu.c
index 694e978aba66..b8b259b3aa63 100644
--- a/drivers/accel/ivpu/ivpu_mmu.c
+++ b/drivers/accel/ivpu/ivpu_mmu.c
@@ -587,16 +587,11 @@ static int ivpu_mmu_strtab_init(struct ivpu_device *vdev)
int ivpu_mmu_invalidate_tlb(struct ivpu_device *vdev, u16 ssid)
{
struct ivpu_mmu_info *mmu = vdev->mmu;
- int ret;
-
- ret = mutex_lock_interruptible(&mmu->lock);
- if (ret)
- return ret;
+ int ret = 0;
- if (!mmu->on) {
- ret = 0;
+ mutex_lock(&mmu->lock);
+ if (!mmu->on)
goto unlock;
- }
ret = ivpu_mmu_cmdq_write_tlbi_nh_asid(vdev, ssid);
if (ret)
@@ -614,7 +609,7 @@ static int ivpu_mmu_cd_add(struct ivpu_device *vdev, u32 ssid, u64 cd_dma)
struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab;
u64 *entry;
u64 cd[4];
- int ret;
+ int ret = 0;
if (ssid > IVPU_MMU_CDTAB_ENT_COUNT)
return -EINVAL;
@@ -655,14 +650,9 @@ static int ivpu_mmu_cd_add(struct ivpu_device *vdev, u32 ssid, u64 cd_dma)
ivpu_dbg(vdev, MMU, "CDTAB %s entry (SSID=%u, dma=%pad): 0x%llx, 0x%llx, 0x%llx, 0x%llx\n",
cd_dma ? "write" : "clear", ssid, &cd_dma, cd[0], cd[1], cd[2], cd[3]);
- ret = mutex_lock_interruptible(&mmu->lock);
- if (ret)
- return ret;
-
- if (!mmu->on) {
- ret = 0;
+ mutex_lock(&mmu->lock);
+ if (!mmu->on)
goto unlock;
- }
ret = ivpu_mmu_cmdq_write_cfgi_all(vdev);
if (ret)
diff --git a/drivers/accel/qaic/qaic_control.c b/drivers/accel/qaic/qaic_control.c
index 9f216eb6f76e..5c57f7b4494e 100644
--- a/drivers/accel/qaic/qaic_control.c
+++ b/drivers/accel/qaic/qaic_control.c
@@ -997,14 +997,34 @@ static void *msg_xfer(struct qaic_device *qdev, struct wrapper_list *wrappers, u
struct xfer_queue_elem elem;
struct wire_msg *out_buf;
struct wrapper_msg *w;
+ long ret = -EAGAIN;
+ int xfer_count = 0;
int retry_count;
- long ret;
if (qdev->in_reset) {
mutex_unlock(&qdev->cntl_mutex);
return ERR_PTR(-ENODEV);
}
+ /* Attempt to avoid a partial commit of a message */
+ list_for_each_entry(w, &wrappers->list, list)
+ xfer_count++;
+
+ for (retry_count = 0; retry_count < QAIC_MHI_RETRY_MAX; retry_count++) {
+ if (xfer_count <= mhi_get_free_desc_count(qdev->cntl_ch, DMA_TO_DEVICE)) {
+ ret = 0;
+ break;
+ }
+ msleep_interruptible(QAIC_MHI_RETRY_WAIT_MS);
+ if (signal_pending(current))
+ break;
+ }
+
+ if (ret) {
+ mutex_unlock(&qdev->cntl_mutex);
+ return ERR_PTR(ret);
+ }
+
elem.seq_num = seq_num;
elem.buf = NULL;
init_completion(&elem.xfer_done);
@@ -1038,16 +1058,9 @@ static void *msg_xfer(struct qaic_device *qdev, struct wrapper_list *wrappers, u
list_for_each_entry(w, &wrappers->list, list) {
kref_get(&w->ref_count);
retry_count = 0;
-retry:
ret = mhi_queue_buf(qdev->cntl_ch, DMA_TO_DEVICE, &w->msg, w->len,
list_is_last(&w->list, &wrappers->list) ? MHI_EOT : MHI_CHAIN);
if (ret) {
- if (ret == -EAGAIN && retry_count++ < QAIC_MHI_RETRY_MAX) {
- msleep_interruptible(QAIC_MHI_RETRY_WAIT_MS);
- if (!signal_pending(current))
- goto retry;
- }
-
qdev->cntl_lost_buf = true;
kref_put(&w->ref_count, free_wrapper);
mutex_unlock(&qdev->cntl_mutex);
@@ -1249,7 +1262,7 @@ dma_cont_failed:
int qaic_manage_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- struct qaic_manage_msg *user_msg;
+ struct qaic_manage_msg *user_msg = data;
struct qaic_device *qdev;
struct manage_msg *msg;
struct qaic_user *usr;
@@ -1258,6 +1271,9 @@ int qaic_manage_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
int usr_rcu_id;
int ret;
+ if (user_msg->len > QAIC_MANAGE_MAX_MSG_LENGTH)
+ return -EINVAL;
+
usr = file_priv->driver_priv;
usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
@@ -1275,13 +1291,6 @@ int qaic_manage_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
return -ENODEV;
}
- user_msg = data;
-
- if (user_msg->len > QAIC_MANAGE_MAX_MSG_LENGTH) {
- ret = -EINVAL;
- goto out;
- }
-
msg = kzalloc(QAIC_MANAGE_MAX_MSG_LENGTH + sizeof(*msg), GFP_KERNEL);
if (!msg) {
ret = -ENOMEM;
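The msg_xfer() change above replaces per-buffer -EAGAIN retries with one up-front wait for enough free MHI descriptors to hold the whole wrapper list, so a multi-part control message is queued all-or-nothing. Stripped to its core, this is the pattern below; a sketch, not the driver's exact code, with placeholder retry bounds where the driver uses its QAIC_MHI_RETRY_* constants:

#include <linux/delay.h>
#include <linux/dma-direction.h>
#include <linux/mhi.h>
#include <linux/sched/signal.h>

#define RETRY_MAX	20	/* placeholder bound */
#define RETRY_WAIT_MS	100	/* placeholder interval */

/* Sketch: wait (bounded, interruptible) until the channel can take
 * all nr_bufs descriptors before queueing any of them. */
static int wait_for_room(struct mhi_device *ch, int nr_bufs)
{
	int retry;

	for (retry = 0; retry < RETRY_MAX; retry++) {
		if (nr_bufs <= mhi_get_free_desc_count(ch, DMA_TO_DEVICE))
			return 0;
		msleep_interruptible(RETRY_WAIT_MS);
		if (signal_pending(current))
			break;
	}
	return -EAGAIN;
}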
diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c
index c0a574cd1b35..e9a1cb779b30 100644
--- a/drivers/accel/qaic/qaic_data.c
+++ b/drivers/accel/qaic/qaic_data.c
@@ -23,6 +23,7 @@
#include <linux/wait.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
+#include <drm/drm_prime.h>
#include <drm/drm_print.h>
#include <uapi/drm/qaic_accel.h>
@@ -591,7 +592,7 @@ static int qaic_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
struct qaic_bo *bo = to_qaic_bo(obj);
unsigned long offset = 0;
struct scatterlist *sg;
- int ret;
+ int ret = 0;
if (obj->import_attach)
return -EINVAL;
@@ -616,8 +617,7 @@ static void qaic_free_object(struct drm_gem_object *obj)
if (obj->import_attach) {
/* DMABUF/PRIME Path */
- dma_buf_detach(obj->import_attach->dmabuf, obj->import_attach);
- dma_buf_put(obj->import_attach->dmabuf);
+ drm_prime_gem_destroy(obj, NULL);
} else {
/* Private buffer allocation path */
qaic_free_sgt(bo->sgt);
@@ -663,6 +663,10 @@ int qaic_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
if (args->pad)
return -EINVAL;
+ size = PAGE_ALIGN(args->size);
+ if (size == 0)
+ return -EINVAL;
+
usr = file_priv->driver_priv;
usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
if (!usr->qddev) {
@@ -677,12 +681,6 @@ int qaic_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
goto unlock_dev_srcu;
}
- size = PAGE_ALIGN(args->size);
- if (size == 0) {
- ret = -EINVAL;
- goto unlock_dev_srcu;
- }
-
bo = qaic_alloc_init_bo();
if (IS_ERR(bo)) {
ret = PTR_ERR(bo);
@@ -926,8 +924,8 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct qaic_attach_slice_entry *slice_ent;
struct qaic_attach_slice *args = data;
+ int rcu_id, usr_rcu_id, qdev_rcu_id;
struct dma_bridge_chan *dbc;
- int usr_rcu_id, qdev_rcu_id;
struct drm_gem_object *obj;
struct qaic_device *qdev;
unsigned long arg_size;
@@ -936,6 +934,22 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
struct qaic_bo *bo;
int ret;
+ if (args->hdr.count == 0)
+ return -EINVAL;
+
+ arg_size = args->hdr.count * sizeof(*slice_ent);
+ if (arg_size / args->hdr.count != sizeof(*slice_ent))
+ return -EINVAL;
+
+ if (args->hdr.size == 0)
+ return -EINVAL;
+
+ if (!(args->hdr.dir == DMA_TO_DEVICE || args->hdr.dir == DMA_FROM_DEVICE))
+ return -EINVAL;
+
+ if (args->data == 0)
+ return -EINVAL;
+
usr = file_priv->driver_priv;
usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
if (!usr->qddev) {
@@ -950,43 +964,11 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
goto unlock_dev_srcu;
}
- if (args->hdr.count == 0) {
- ret = -EINVAL;
- goto unlock_dev_srcu;
- }
-
- arg_size = args->hdr.count * sizeof(*slice_ent);
- if (arg_size / args->hdr.count != sizeof(*slice_ent)) {
- ret = -EINVAL;
- goto unlock_dev_srcu;
- }
-
if (args->hdr.dbc_id >= qdev->num_dbc) {
ret = -EINVAL;
goto unlock_dev_srcu;
}
- if (args->hdr.size == 0) {
- ret = -EINVAL;
- goto unlock_dev_srcu;
- }
-
- if (!(args->hdr.dir == DMA_TO_DEVICE || args->hdr.dir == DMA_FROM_DEVICE)) {
- ret = -EINVAL;
- goto unlock_dev_srcu;
- }
-
- dbc = &qdev->dbc[args->hdr.dbc_id];
- if (dbc->usr != usr) {
- ret = -EINVAL;
- goto unlock_dev_srcu;
- }
-
- if (args->data == 0) {
- ret = -EINVAL;
- goto unlock_dev_srcu;
- }
-
user_data = u64_to_user_ptr(args->data);
slice_ent = kzalloc(arg_size, GFP_KERNEL);
@@ -1013,9 +995,21 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
bo = to_qaic_bo(obj);
+ if (bo->sliced) {
+ ret = -EINVAL;
+ goto put_bo;
+ }
+
+ dbc = &qdev->dbc[args->hdr.dbc_id];
+ rcu_id = srcu_read_lock(&dbc->ch_lock);
+ if (dbc->usr != usr) {
+ ret = -EINVAL;
+ goto unlock_ch_srcu;
+ }
+
ret = qaic_prepare_bo(qdev, bo, &args->hdr);
if (ret)
- goto put_bo;
+ goto unlock_ch_srcu;
ret = qaic_attach_slicing_bo(qdev, bo, &args->hdr, slice_ent);
if (ret)
@@ -1025,6 +1019,7 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, args->hdr.dir);
bo->dbc = dbc;
+ srcu_read_unlock(&dbc->ch_lock, rcu_id);
drm_gem_object_put(obj);
srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
@@ -1033,6 +1028,8 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
unprepare_bo:
qaic_unprepare_bo(qdev, bo);
+unlock_ch_srcu:
+ srcu_read_unlock(&dbc->ch_lock, rcu_id);
put_bo:
drm_gem_object_put(obj);
free_slice_ent:
@@ -1316,7 +1313,6 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr
received_ts = ktime_get_ns();
size = is_partial ? sizeof(*pexec) : sizeof(*exec);
-
n = (unsigned long)size * args->hdr.count;
if (args->hdr.count == 0 || n / args->hdr.count != size)
return -EINVAL;
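The division test above is the portable way to detect that size * count wrapped: if n / count != size, the product overflowed. Where <linux/overflow.h> is available, check_mul_overflow() states the same guard more directly; an equivalent sketch (not what the patch itself uses):

#include <linux/overflow.h>

	unsigned long n;

	if (args->hdr.count == 0 ||
	    check_mul_overflow((unsigned long)size,
			       (unsigned long)args->hdr.count, &n))
		return -EINVAL;	/* zero count or multiplication overflow */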
@@ -1665,6 +1661,9 @@ int qaic_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file
int rcu_id;
int ret;
+ if (args->pad != 0)
+ return -EINVAL;
+
usr = file_priv->driver_priv;
usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
if (!usr->qddev) {
@@ -1679,11 +1678,6 @@ int qaic_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file
goto unlock_dev_srcu;
}
- if (args->pad != 0) {
- ret = -EINVAL;
- goto unlock_dev_srcu;
- }
-
if (args->dbc_id >= qdev->num_dbc) {
ret = -EINVAL;
goto unlock_dev_srcu;
@@ -1855,6 +1849,11 @@ void wakeup_dbc(struct qaic_device *qdev, u32 dbc_id)
dbc->usr = NULL;
empty_xfer_list(qdev, dbc);
synchronize_srcu(&dbc->ch_lock);
+ /*
+	 * Threads holding the channel lock may have added more elements to the
+	 * xfer_list. Flush those elements out as well.
+ */
+ empty_xfer_list(qdev, dbc);
}
void release_dbc(struct qaic_device *qdev, u32 dbc_id)
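The double empty_xfer_list() in wakeup_dbc() is a common SRCU teardown shape: publish the closed state, flush, wait for every read-side critical section that might still see the old state, then flush again to catch work those readers queued in the window. Condensed, assuming a hypothetical chan type:

#include <linux/srcu.h>

struct chan {
	bool active;			/* readers test this under srcu_read_lock() */
	struct srcu_struct srcu;
	struct list_head xfers;
};

static void drain_xfers(struct chan *ch);

static void chan_shutdown(struct chan *ch)
{
	ch->active = false;		/* new readers will bail out */
	drain_xfers(ch);		/* flush work queued before shutdown */
	synchronize_srcu(&ch->srcu);	/* wait out in-flight readers */
	drain_xfers(ch);		/* they may have queued more; flush again */
}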
diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c
index ff80eb571729..b5ba550a0c04 100644
--- a/drivers/accel/qaic/qaic_drv.c
+++ b/drivers/accel/qaic/qaic_drv.c
@@ -97,6 +97,7 @@ static int qaic_open(struct drm_device *dev, struct drm_file *file)
cleanup_usr:
cleanup_srcu_struct(&usr->qddev_lock);
+ ida_free(&qaic_usrs, usr->handle);
free_usr:
kfree(usr);
dev_unlock:
@@ -224,6 +225,9 @@ static void qaic_destroy_drm_device(struct qaic_device *qdev, s32 partition_id)
struct qaic_user *usr;
qddev = qdev->qddev;
+ qdev->qddev = NULL;
+ if (!qddev)
+ return;
/*
* Existing users get unresolvable errors till they close FDs.
@@ -262,8 +266,8 @@ static void qaic_destroy_drm_device(struct qaic_device *qdev, s32 partition_id)
static int qaic_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
{
+ u16 major = -1, minor = -1;
struct qaic_device *qdev;
- u16 major, minor;
int ret;
/*
diff --git a/drivers/acpi/acpi_ffh.c b/drivers/acpi/acpi_ffh.c
index 19aff808bbb8..8d5126963dc7 100644
--- a/drivers/acpi/acpi_ffh.c
+++ b/drivers/acpi/acpi_ffh.c
@@ -9,8 +9,6 @@
#include <linux/idr.h>
#include <linux/io.h>
-#include <linux/arm-smccc.h>
-
static struct acpi_ffh_info ffh_ctx;
int __weak acpi_ffh_address_space_arch_setup(void *handler_ctxt,
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 77186f084d3a..539e700de4d2 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -201,11 +201,19 @@ static void byt_i2c_setup(struct lpss_private_data *pdata)
writel(0, pdata->mmio_base + LPSS_I2C_ENABLE);
}
-/* BSW PWM used for backlight control by the i915 driver */
+/*
+ * BSW PWM1 is used for backlight control by the i915 driver.
+ * BSW PWM2 is used for backlight control of fixed (etched into the glass)
+ * touch controls on some models. These touch controls have specialized
+ * drivers which know they need the "pwm_soc_lpss_2" con-id.
+ */
static struct pwm_lookup bsw_pwm_lookup[] = {
PWM_LOOKUP_WITH_MODULE("80862288:00", 0, "0000:00:02.0",
"pwm_soc_backlight", 0, PWM_POLARITY_NORMAL,
"pwm-lpss-platform"),
+ PWM_LOOKUP_WITH_MODULE("80862289:00", 0, NULL,
+ "pwm_soc_lpss_2", 0, PWM_POLARITY_NORMAL,
+ "pwm-lpss-platform"),
};
static void bsw_pwm_setup(struct lpss_private_data *pdata)
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index 02f1a1b1143c..7a453c5ff303 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -66,6 +66,7 @@ static void power_saving_mwait_init(void)
case X86_VENDOR_AMD:
case X86_VENDOR_INTEL:
case X86_VENDOR_ZHAOXIN:
+ case X86_VENDOR_CENTAUR:
/*
* AMD Fam10h TSC will tick in all
* C/P/S0/S1 states when this bit is set.
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index ebf8fd373cf7..79bbfe00d241 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -101,8 +101,6 @@ acpi_status
acpi_hw_get_gpe_status(struct acpi_gpe_event_info *gpe_event_info,
acpi_event_status *event_status);
-acpi_status acpi_hw_disable_all_gpes(void);
-
acpi_status acpi_hw_enable_all_runtime_gpes(void);
acpi_status acpi_hw_enable_all_wakeup_gpes(void);
diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
index 1d6ef9654725..67c2c3b959e1 100644
--- a/drivers/acpi/apei/apei-internal.h
+++ b/drivers/acpi/apei/apei-internal.h
@@ -7,7 +7,6 @@
#ifndef APEI_INTERNAL_H
#define APEI_INTERNAL_H
-#include <linux/cper.h>
#include <linux/acpi.h>
struct apei_exec_context;
@@ -130,10 +129,5 @@ static inline u32 cper_estatus_len(struct acpi_hest_generic_status *estatus)
return sizeof(*estatus) + estatus->data_length;
}
-void cper_estatus_print(const char *pfx,
- const struct acpi_hest_generic_status *estatus);
-int cper_estatus_check_header(const struct acpi_hest_generic_status *estatus);
-int cper_estatus_check(const struct acpi_hest_generic_status *estatus);
-
int apei_osc_setup(void);
#endif
diff --git a/drivers/acpi/apei/bert.c b/drivers/acpi/apei/bert.c
index c23eb75866d0..5427e49e646b 100644
--- a/drivers/acpi/apei/bert.c
+++ b/drivers/acpi/apei/bert.c
@@ -23,6 +23,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
+#include <linux/cper.h>
#include <linux/io.h>
#include "apei-internal.h"
@@ -33,7 +34,7 @@
#define ACPI_BERT_PRINT_MAX_RECORDS 5
#define ACPI_BERT_PRINT_MAX_LEN 1024
-static int bert_disable;
+static int bert_disable __initdata;
/*
* Print "all" the error records in the BERT table, but avoid huge spam to
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 34ad071a64e9..ef59d6ea16da 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -152,7 +152,6 @@ struct ghes_vendor_record_entry {
};
static struct gen_pool *ghes_estatus_pool;
-static unsigned long ghes_estatus_pool_size_request;
static struct ghes_estatus_cache __rcu *ghes_estatus_caches[GHES_ESTATUS_CACHES_SIZE];
static atomic_t ghes_estatus_cache_alloced;
@@ -191,7 +190,6 @@ int ghes_estatus_pool_init(unsigned int num_ghes)
len = GHES_ESTATUS_CACHE_AVG_SIZE * GHES_ESTATUS_CACHE_ALLOCED_MAX;
len += (num_ghes * GHES_ESOURCE_PREALLOC_MAX_SIZE);
- ghes_estatus_pool_size_request = PAGE_ALIGN(len);
addr = (unsigned long)vmalloc(PAGE_ALIGN(len));
if (!addr)
goto err_pool_alloc;
@@ -1544,6 +1542,8 @@ struct list_head *ghes_get_devices(void)
pr_warn_once("Force-loading ghes_edac on an unsupported platform. You're on your own!\n");
}
+ } else if (list_empty(&ghes_devs)) {
+ return NULL;
}
return &ghes_devs;
diff --git a/drivers/acpi/arm64/Makefile b/drivers/acpi/arm64/Makefile
index e21a9e84e394..f81fe24894b2 100644
--- a/drivers/acpi/arm64/Makefile
+++ b/drivers/acpi/arm64/Makefile
@@ -3,4 +3,4 @@ obj-$(CONFIG_ACPI_AGDI) += agdi.o
obj-$(CONFIG_ACPI_IORT) += iort.o
obj-$(CONFIG_ACPI_GTDT) += gtdt.o
obj-$(CONFIG_ACPI_APMT) += apmt.o
-obj-y += dma.o
+obj-y += dma.o init.o
diff --git a/drivers/acpi/arm64/agdi.c b/drivers/acpi/arm64/agdi.c
index f605302395c3..8b3c7d42b41b 100644
--- a/drivers/acpi/arm64/agdi.c
+++ b/drivers/acpi/arm64/agdi.c
@@ -9,11 +9,11 @@
#define pr_fmt(fmt) "ACPI: AGDI: " fmt
#include <linux/acpi.h>
-#include <linux/acpi_agdi.h>
#include <linux/arm_sdei.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
+#include "init.h"
struct agdi_data {
int sdei_event;
diff --git a/drivers/acpi/arm64/apmt.c b/drivers/acpi/arm64/apmt.c
index 8cab69fa5d59..bb010f6164e5 100644
--- a/drivers/acpi/arm64/apmt.c
+++ b/drivers/acpi/arm64/apmt.c
@@ -10,10 +10,10 @@
#define pr_fmt(fmt) "ACPI: APMT: " fmt
#include <linux/acpi.h>
-#include <linux/acpi_apmt.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
+#include "init.h"
#define DEV_NAME "arm-cs-arch-pmu"
@@ -35,11 +35,13 @@ static int __init apmt_init_resources(struct resource *res,
num_res++;
- res[num_res].start = node->base_address1;
- res[num_res].end = node->base_address1 + SZ_4K - 1;
- res[num_res].flags = IORESOURCE_MEM;
+ if (node->flags & ACPI_APMT_FLAGS_DUAL_PAGE) {
+ res[num_res].start = node->base_address1;
+ res[num_res].end = node->base_address1 + SZ_4K - 1;
+ res[num_res].flags = IORESOURCE_MEM;
- num_res++;
+ num_res++;
+ }
if (node->ovflw_irq != 0) {
trigger = (node->ovflw_irq_flags & ACPI_APMT_OVFLW_IRQ_FLAGS_MODE);
diff --git a/drivers/acpi/arm64/init.c b/drivers/acpi/arm64/init.c
new file mode 100644
index 000000000000..d3ce53dda122
--- /dev/null
+++ b/drivers/acpi/arm64/init.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/acpi.h>
+#include "init.h"
+
+void __init acpi_arm_init(void)
+{
+ if (IS_ENABLED(CONFIG_ACPI_AGDI))
+ acpi_agdi_init();
+ if (IS_ENABLED(CONFIG_ACPI_APMT))
+ acpi_apmt_init();
+ if (IS_ENABLED(CONFIG_ACPI_IORT))
+ acpi_iort_init();
+}
diff --git a/drivers/acpi/arm64/init.h b/drivers/acpi/arm64/init.h
new file mode 100644
index 000000000000..a1715a2a34e9
--- /dev/null
+++ b/drivers/acpi/arm64/init.h
@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#include <linux/init.h>
+
+void __init acpi_agdi_init(void);
+void __init acpi_apmt_init(void);
+void __init acpi_iort_init(void);
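Note that init.c guards each call with IS_ENABLED() rather than #ifdef: the expression folds to 0 or 1 at compile time, the optimizer drops the dead call (so no undefined reference for =n configs), and the prototype in init.h is still type-checked in every configuration. A minimal illustration with a hypothetical CONFIG_ACPI_FOO:

#include <linux/init.h>
#include <linux/kconfig.h>

void __init acpi_foo_init(void);	/* defined only when CONFIG_ACPI_FOO=y */

void __init acpi_subsys_init(void)
{
	/* Folds to if (0) or if (1); the dead call is compiled out. */
	if (IS_ENABLED(CONFIG_ACPI_FOO))
		acpi_foo_init();
}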
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 38fb84974f35..3631230a61c8 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -19,6 +19,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/dma-map-ops.h>
+#include "init.h"
#define IORT_TYPE_MASK(type) (1 << (type))
#define IORT_MSI_TYPE (1 << ACPI_IORT_NODE_ITS_GROUP)
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index d161ff707de4..e3e0bd0c5a50 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -26,9 +26,6 @@
#include <asm/mpspec.h>
#include <linux/dmi.h>
#endif
-#include <linux/acpi_agdi.h>
-#include <linux/acpi_apmt.h>
-#include <linux/acpi_iort.h>
#include <linux/acpi_viot.h>
#include <linux/pci.h>
#include <acpi/apei.h>
@@ -530,65 +527,30 @@ static void acpi_notify_device(acpi_handle handle, u32 event, void *data)
acpi_drv->ops.notify(device, event);
}
-static void acpi_notify_device_fixed(void *data)
-{
- struct acpi_device *device = data;
-
- /* Fixed hardware devices have no handles */
- acpi_notify_device(NULL, ACPI_FIXED_HARDWARE_EVENT, device);
-}
-
-static u32 acpi_device_fixed_event(void *data)
-{
- acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_notify_device_fixed, data);
- return ACPI_INTERRUPT_HANDLED;
-}
-
static int acpi_device_install_notify_handler(struct acpi_device *device,
struct acpi_driver *acpi_drv)
{
- acpi_status status;
-
- if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON) {
- status =
- acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
- acpi_device_fixed_event,
- device);
- } else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON) {
- status =
- acpi_install_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
- acpi_device_fixed_event,
- device);
- } else {
- u32 type = acpi_drv->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS ?
+ u32 type = acpi_drv->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS ?
ACPI_ALL_NOTIFY : ACPI_DEVICE_NOTIFY;
+ acpi_status status;
- status = acpi_install_notify_handler(device->handle, type,
- acpi_notify_device,
- device);
- }
-
+ status = acpi_install_notify_handler(device->handle, type,
+ acpi_notify_device, device);
if (ACPI_FAILURE(status))
return -EINVAL;
+
return 0;
}
static void acpi_device_remove_notify_handler(struct acpi_device *device,
struct acpi_driver *acpi_drv)
{
- if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON) {
- acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
- acpi_device_fixed_event);
- } else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON) {
- acpi_remove_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
- acpi_device_fixed_event);
- } else {
- u32 type = acpi_drv->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS ?
+ u32 type = acpi_drv->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS ?
ACPI_ALL_NOTIFY : ACPI_DEVICE_NOTIFY;
- acpi_remove_notify_handler(device->handle, type,
- acpi_notify_device);
- }
+ acpi_remove_notify_handler(device->handle, type,
+ acpi_notify_device);
+
acpi_os_wait_events_complete();
}
@@ -1408,7 +1370,7 @@ static int __init acpi_init(void)
acpi_init_ffh();
pci_mmcfg_late_init();
- acpi_iort_init();
+ acpi_arm_init();
acpi_viot_early_init();
acpi_hest_init();
acpi_ghes_init();
@@ -1420,8 +1382,6 @@ static int __init acpi_init(void)
acpi_debugger_init();
acpi_setup_sb_notify_handler();
acpi_viot_init();
- acpi_agdi_init();
- acpi_apmt_init();
return 0;
}
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 475e1eddfa3b..1e76a64cce0a 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -78,6 +78,15 @@ static const struct dmi_system_id dmi_lid_quirks[] = {
.driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_DISABLED,
},
{
+ /* Nextbook Ares 8A tablet, _LID device always reports lid closed */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "CherryTrail"),
+ DMI_MATCH(DMI_BIOS_VERSION, "M882"),
+ },
+ .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_DISABLED,
+ },
+ {
/*
* Lenovo Yoga 9 14ITL5, initial notification of the LID device
* never happens.
@@ -126,7 +135,6 @@ static const struct dmi_system_id dmi_lid_quirks[] = {
static int acpi_button_add(struct acpi_device *device);
static void acpi_button_remove(struct acpi_device *device);
-static void acpi_button_notify(struct acpi_device *device, u32 event);
#ifdef CONFIG_PM_SLEEP
static int acpi_button_suspend(struct device *dev);
@@ -144,7 +152,6 @@ static struct acpi_driver acpi_button_driver = {
.ops = {
.add = acpi_button_add,
.remove = acpi_button_remove,
- .notify = acpi_button_notify,
},
.drv.pm = &acpi_button_pm,
};
@@ -400,45 +407,65 @@ static void acpi_lid_initialize_state(struct acpi_device *device)
button->lid_state_initialized = true;
}
-static void acpi_button_notify(struct acpi_device *device, u32 event)
+static void acpi_lid_notify(acpi_handle handle, u32 event, void *data)
{
- struct acpi_button *button = acpi_driver_data(device);
+ struct acpi_device *device = data;
+ struct acpi_button *button;
+
+ if (event != ACPI_BUTTON_NOTIFY_STATUS) {
+ acpi_handle_debug(device->handle, "Unsupported event [0x%x]\n",
+ event);
+ return;
+ }
+
+ button = acpi_driver_data(device);
+ if (!button->lid_state_initialized)
+ return;
+
+ acpi_lid_update_state(device, true);
+}
+
+static void acpi_button_notify(acpi_handle handle, u32 event, void *data)
+{
+ struct acpi_device *device = data;
+ struct acpi_button *button;
struct input_dev *input;
+ int keycode;
- switch (event) {
- case ACPI_FIXED_HARDWARE_EVENT:
- event = ACPI_BUTTON_NOTIFY_STATUS;
- fallthrough;
- case ACPI_BUTTON_NOTIFY_STATUS:
- input = button->input;
- if (button->type == ACPI_BUTTON_TYPE_LID) {
- if (button->lid_state_initialized)
- acpi_lid_update_state(device, true);
- } else {
- int keycode;
-
- acpi_pm_wakeup_event(&device->dev);
- if (button->suspended)
- break;
-
- keycode = test_bit(KEY_SLEEP, input->keybit) ?
- KEY_SLEEP : KEY_POWER;
- input_report_key(input, keycode, 1);
- input_sync(input);
- input_report_key(input, keycode, 0);
- input_sync(input);
-
- acpi_bus_generate_netlink_event(
- device->pnp.device_class,
- dev_name(&device->dev),
- event, ++button->pushed);
- }
- break;
- default:
+ if (event != ACPI_BUTTON_NOTIFY_STATUS) {
acpi_handle_debug(device->handle, "Unsupported event [0x%x]\n",
event);
- break;
+ return;
}
+
+ acpi_pm_wakeup_event(&device->dev);
+
+ button = acpi_driver_data(device);
+ if (button->suspended)
+ return;
+
+ input = button->input;
+ keycode = test_bit(KEY_SLEEP, input->keybit) ? KEY_SLEEP : KEY_POWER;
+
+ input_report_key(input, keycode, 1);
+ input_sync(input);
+ input_report_key(input, keycode, 0);
+ input_sync(input);
+
+ acpi_bus_generate_netlink_event(device->pnp.device_class,
+ dev_name(&device->dev),
+ event, ++button->pushed);
+}
+
+static void acpi_button_notify_run(void *data)
+{
+ acpi_button_notify(NULL, ACPI_BUTTON_NOTIFY_STATUS, data);
+}
+
+static u32 acpi_button_event(void *data)
+{
+ acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_button_notify_run, data);
+ return ACPI_INTERRUPT_HANDLED;
}
#ifdef CONFIG_PM_SLEEP
@@ -480,11 +507,13 @@ static int acpi_lid_input_open(struct input_dev *input)
static int acpi_button_add(struct acpi_device *device)
{
+ acpi_notify_handler handler;
struct acpi_button *button;
struct input_dev *input;
const char *hid = acpi_device_hid(device);
+ acpi_status status;
char *name, *class;
- int error;
+ int error = 0;
if (!strcmp(hid, ACPI_BUTTON_HID_LID) &&
lid_init_state == ACPI_BUTTON_LID_INIT_DISABLED)
@@ -508,17 +537,20 @@ static int acpi_button_add(struct acpi_device *device)
if (!strcmp(hid, ACPI_BUTTON_HID_POWER) ||
!strcmp(hid, ACPI_BUTTON_HID_POWERF)) {
button->type = ACPI_BUTTON_TYPE_POWER;
+ handler = acpi_button_notify;
strcpy(name, ACPI_BUTTON_DEVICE_NAME_POWER);
sprintf(class, "%s/%s",
ACPI_BUTTON_CLASS, ACPI_BUTTON_SUBCLASS_POWER);
} else if (!strcmp(hid, ACPI_BUTTON_HID_SLEEP) ||
!strcmp(hid, ACPI_BUTTON_HID_SLEEPF)) {
button->type = ACPI_BUTTON_TYPE_SLEEP;
+ handler = acpi_button_notify;
strcpy(name, ACPI_BUTTON_DEVICE_NAME_SLEEP);
sprintf(class, "%s/%s",
ACPI_BUTTON_CLASS, ACPI_BUTTON_SUBCLASS_SLEEP);
} else if (!strcmp(hid, ACPI_BUTTON_HID_LID)) {
button->type = ACPI_BUTTON_TYPE_LID;
+ handler = acpi_lid_notify;
strcpy(name, ACPI_BUTTON_DEVICE_NAME_LID);
sprintf(class, "%s/%s",
ACPI_BUTTON_CLASS, ACPI_BUTTON_SUBCLASS_LID);
@@ -526,12 +558,15 @@ static int acpi_button_add(struct acpi_device *device)
} else {
pr_info("Unsupported hid [%s]\n", hid);
error = -ENODEV;
- goto err_free_input;
}
- error = acpi_button_add_fs(device);
- if (error)
- goto err_free_input;
+ if (!error)
+ error = acpi_button_add_fs(device);
+
+ if (error) {
+ input_free_device(input);
+ goto err_free_button;
+ }
snprintf(button->phys, sizeof(button->phys), "%s/button/input0", hid);
@@ -559,6 +594,29 @@ static int acpi_button_add(struct acpi_device *device)
error = input_register_device(input);
if (error)
goto err_remove_fs;
+
+ switch (device->device_type) {
+ case ACPI_BUS_TYPE_POWER_BUTTON:
+ status = acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
+ acpi_button_event,
+ device);
+ break;
+ case ACPI_BUS_TYPE_SLEEP_BUTTON:
+ status = acpi_install_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
+ acpi_button_event,
+ device);
+ break;
+ default:
+ status = acpi_install_notify_handler(device->handle,
+ ACPI_DEVICE_NOTIFY, handler,
+ device);
+ break;
+ }
+ if (ACPI_FAILURE(status)) {
+ error = -ENODEV;
+ goto err_input_unregister;
+ }
+
if (button->type == ACPI_BUTTON_TYPE_LID) {
/*
* This assumes there's only one lid device, or if there are
@@ -571,11 +629,11 @@ static int acpi_button_add(struct acpi_device *device)
pr_info("%s [%s]\n", name, acpi_device_bid(device));
return 0;
- err_remove_fs:
+err_input_unregister:
+ input_unregister_device(input);
+err_remove_fs:
acpi_button_remove_fs(device);
- err_free_input:
- input_free_device(input);
- err_free_button:
+err_free_button:
kfree(button);
return error;
}
@@ -584,6 +642,24 @@ static void acpi_button_remove(struct acpi_device *device)
{
struct acpi_button *button = acpi_driver_data(device);
+ switch (device->device_type) {
+ case ACPI_BUS_TYPE_POWER_BUTTON:
+ acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
+ acpi_button_event);
+ break;
+ case ACPI_BUS_TYPE_SLEEP_BUTTON:
+ acpi_remove_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
+ acpi_button_event);
+ break;
+ default:
+ acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
+ button->type == ACPI_BUTTON_TYPE_LID ?
+ acpi_lid_notify :
+ acpi_button_notify);
+ break;
+ }
+ acpi_os_wait_events_complete();
+
acpi_button_remove_fs(device);
input_unregister_device(button->input);
kfree(button);
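The fixed-event side of the new registration is a two-stage pattern: acpi_button_event() runs in interrupt context, so it only queues the real handler through acpi_os_execute() and acknowledges the event. Reduced to its skeleton with hypothetical names:

#include <acpi/acpi.h>

static void my_do_press(void *data);	/* the real, process-context work */

static void my_event_run(void *data)
{
	my_do_press(data);		/* runs from ACPICA's notify queue */
}

static u32 my_fixed_event(void *data)
{
	/* Interrupt context: queue the work and acknowledge immediately. */
	acpi_os_execute(OSL_NOTIFY_HANDLER, my_event_run, data);
	return ACPI_INTERRUPT_HANDLED;
}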
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 928899ab9502..8569f55e55b6 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -662,21 +662,6 @@ static void advance_transaction(struct acpi_ec *ec, bool interrupt)
ec_dbg_stm("%s (%d)", interrupt ? "IRQ" : "TASK", smp_processor_id());
- /*
- * Clear GPE_STS upfront to allow subsequent hardware GPE_STS 0->1
- * changes to always trigger a GPE interrupt.
- *
- * GPE STS is a W1C register, which means:
- *
- * 1. Software can clear it without worrying about clearing the other
- * GPEs' STS bits when the hardware sets them in parallel.
- *
- * 2. As long as software can ensure only clearing it when it is set,
- * hardware won't set it in parallel.
- */
- if (ec->gpe >= 0 && acpi_ec_gpe_status_set(ec))
- acpi_clear_gpe(NULL, ec->gpe);
-
status = acpi_ec_read_status(ec);
/*
@@ -1287,6 +1272,22 @@ static void acpi_ec_handle_interrupt(struct acpi_ec *ec)
unsigned long flags;
spin_lock_irqsave(&ec->lock, flags);
+
+ /*
+ * Clear GPE_STS upfront to allow subsequent hardware GPE_STS 0->1
+ * changes to always trigger a GPE interrupt.
+ *
+ * GPE STS is a W1C register, which means:
+ *
+ * 1. Software can clear it without worrying about clearing the other
+ * GPEs' STS bits when the hardware sets them in parallel.
+ *
+ * 2. As long as software can ensure only clearing it when it is set,
+ * hardware won't set it in parallel.
+ */
+ if (ec->gpe >= 0 && acpi_ec_gpe_status_set(ec))
+ acpi_clear_gpe(NULL, ec->gpe);
+
advance_transaction(ec, true);
spin_unlock_irqrestore(&ec->lock, flags);
}
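The comment this patch moves into acpi_ec_handle_interrupt() relies on write-1-to-clear (W1C) semantics: writing a 1 clears exactly that status bit, and the 0 bits are ignored, so there is no read-modify-write window in which a sibling bit set by hardware could be lost. A generic sketch for a hypothetical memory-mapped status register:

#include <linux/bits.h>
#include <linux/io.h>

static void clear_w1c_bit(void __iomem *sts, unsigned int bit)
{
	/*
	 * W1C: only the bit written as 1 is cleared; zeroes are ignored,
	 * so concurrent hardware updates to other bits are preserved.
	 */
	writel(BIT(bit), sts);
}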
diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h
index 6023ad61831a..573bc0de2990 100644
--- a/drivers/acpi/nfit/nfit.h
+++ b/drivers/acpi/nfit/nfit.h
@@ -347,4 +347,6 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev);
bool intel_fwa_supported(struct nvdimm_bus *nvdimm_bus);
extern struct device_attribute dev_attr_firmware_activate_noidle;
+void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem);
+
#endif /* __NFIT_H__ */
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 9718d07cc2a2..dc615ef6550a 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -597,10 +597,6 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
io_idle(cx->address);
} else
return -ENODEV;
-
-#if defined(CONFIG_X86) && defined(CONFIG_HOTPLUG_CPU)
- cond_wakeup_cpu0();
-#endif
}
/* Never reached */
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index e8492b3a393a..1dd8d5aebf67 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -470,47 +470,12 @@ static const struct dmi_system_id asus_laptop[] = {
{ }
};
-static const struct dmi_system_id lenovo_laptop[] = {
+static const struct dmi_system_id lg_laptop[] = {
{
- .ident = "LENOVO IdeaPad Flex 5 14ALC7",
+ .ident = "LG Electronics 17U70P",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "82R9"),
- },
- },
- {
- .ident = "LENOVO IdeaPad Flex 5 16ALC7",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "82RA"),
- },
- },
- { }
-};
-
-static const struct dmi_system_id tongfang_gm_rg[] = {
- {
- .ident = "TongFang GMxRGxx/XMG CORE 15 (M22)/TUXEDO Stellaris 15 Gen4 AMD",
- .matches = {
- DMI_MATCH(DMI_BOARD_NAME, "GMxRGxx"),
- },
- },
- { }
-};
-
-static const struct dmi_system_id maingear_laptop[] = {
- {
- .ident = "MAINGEAR Vector Pro 2 15",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Micro Electronics Inc"),
- DMI_MATCH(DMI_PRODUCT_NAME, "MG-VCP2-15A3070T"),
- }
- },
- {
- .ident = "MAINGEAR Vector Pro 2 17",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Micro Electronics Inc"),
- DMI_MATCH(DMI_PRODUCT_NAME, "MG-VCP2-17A3070T"),
+ DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
+ DMI_MATCH(DMI_BOARD_NAME, "17U70P"),
},
},
{ }
@@ -528,10 +493,7 @@ struct irq_override_cmp {
static const struct irq_override_cmp override_table[] = {
{ medion_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false },
{ asus_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false },
- { lenovo_laptop, 6, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
- { lenovo_laptop, 10, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
- { tongfang_gm_rg, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true },
- { maingear_laptop, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true },
+ { lg_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false },
};
static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
@@ -550,16 +512,6 @@ static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
return entry->override;
}
-#ifdef CONFIG_X86
- /*
- * IRQ override isn't needed on modern AMD Zen systems and
- * this override breaks active low IRQs on AMD Ryzen 6000 and
- * newer systems. Skip it.
- */
- if (boot_cpu_has(X86_FEATURE_ZEN))
- return false;
-#endif
-
return true;
}
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 0c6f06abe3f4..1c3e1e2bb0b5 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -2029,8 +2029,6 @@ static u32 acpi_scan_check_dep(acpi_handle handle, bool check_dep)
return count;
}
-static bool acpi_bus_scan_second_pass;
-
static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep,
struct acpi_device **adev_p)
{
@@ -2050,10 +2048,8 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep,
return AE_OK;
/* Bail out if there are dependencies. */
- if (acpi_scan_check_dep(handle, check_dep) > 0) {
- acpi_bus_scan_second_pass = true;
+ if (acpi_scan_check_dep(handle, check_dep) > 0)
return AE_CTRL_DEPTH;
- }
fallthrough;
case ACPI_TYPE_ANY: /* for ACPI_ROOT_OBJECT */
@@ -2301,6 +2297,12 @@ static bool acpi_scan_clear_dep_queue(struct acpi_device *adev)
return true;
}
+static void acpi_scan_delete_dep_data(struct acpi_dep_data *dep)
+{
+ list_del(&dep->node);
+ kfree(dep);
+}
+
static int acpi_scan_clear_dep(struct acpi_dep_data *dep, void *data)
{
struct acpi_device *adev = acpi_get_acpi_dev(dep->consumer);
@@ -2311,8 +2313,10 @@ static int acpi_scan_clear_dep(struct acpi_dep_data *dep, void *data)
acpi_dev_put(adev);
}
- list_del(&dep->node);
- kfree(dep);
+ if (dep->free_when_met)
+ acpi_scan_delete_dep_data(dep);
+ else
+ dep->met = true;
return 0;
}
@@ -2406,6 +2410,55 @@ struct acpi_device *acpi_dev_get_next_consumer_dev(struct acpi_device *supplier,
}
EXPORT_SYMBOL_GPL(acpi_dev_get_next_consumer_dev);
+static void acpi_scan_postponed_branch(acpi_handle handle)
+{
+ struct acpi_device *adev = NULL;
+
+ if (ACPI_FAILURE(acpi_bus_check_add(handle, false, &adev)))
+ return;
+
+ acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
+ acpi_bus_check_add_2, NULL, NULL, (void **)&adev);
+ acpi_bus_attach(adev, NULL);
+}
+
+static void acpi_scan_postponed(void)
+{
+ struct acpi_dep_data *dep, *tmp;
+
+ mutex_lock(&acpi_dep_list_lock);
+
+ list_for_each_entry_safe(dep, tmp, &acpi_dep_list, node) {
+ acpi_handle handle = dep->consumer;
+
+ /*
+ * In case there are multiple acpi_dep_list entries with the
+ * same consumer, skip the current entry if the consumer device
+ * object corresponding to it is present already.
+ */
+ if (!acpi_fetch_acpi_dev(handle)) {
+ /*
+ * Even though the lock is released here, tmp is
+ * guaranteed to be valid, because none of the list
+ * entries following dep is marked as "free when met"
+ * and so they cannot be deleted.
+ */
+ mutex_unlock(&acpi_dep_list_lock);
+
+ acpi_scan_postponed_branch(handle);
+
+ mutex_lock(&acpi_dep_list_lock);
+ }
+
+ if (dep->met)
+ acpi_scan_delete_dep_data(dep);
+ else
+ dep->free_when_met = true;
+ }
+
+ mutex_unlock(&acpi_dep_list_lock);
+}
+
/**
* acpi_bus_scan - Add ACPI device node objects in a given namespace scope.
* @handle: Root of the namespace scope to scan.
@@ -2424,8 +2477,6 @@ int acpi_bus_scan(acpi_handle handle)
{
struct acpi_device *device = NULL;
- acpi_bus_scan_second_pass = false;
-
/* Pass 1: Avoid enumerating devices with missing dependencies. */
if (ACPI_SUCCESS(acpi_bus_check_add(handle, true, &device)))
@@ -2438,19 +2489,9 @@ int acpi_bus_scan(acpi_handle handle)
acpi_bus_attach(device, (void *)true);
- if (!acpi_bus_scan_second_pass)
- return 0;
-
/* Pass 2: Enumerate all of the remaining devices. */
- device = NULL;
-
- if (ACPI_SUCCESS(acpi_bus_check_add(handle, false, &device)))
- acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
- acpi_bus_check_add_2, NULL, NULL,
- (void **)&device);
-
- acpi_bus_attach(device, NULL);
+ acpi_scan_postponed();
return 0;
}
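acpi_scan_postponed() drops acpi_dep_list_lock around the expensive branch rescan and keeps the list_for_each_entry_safe() cursor valid by marking entries instead of freeing them while unlocked. The general shape of that discipline, with hypothetical item/process() names:

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct item {
	struct list_head node;
	bool done;			/* set once the entry may be freed */
};

static void process(struct item *it);

static void walk_pending(struct list_head *head, struct mutex *lock)
{
	struct item *it, *tmp;

	mutex_lock(lock);
	list_for_each_entry_safe(it, tmp, head, node) {
		/*
		 * Dropping the lock is safe only because entries that
		 * follow 'it' are never freed while unmarked, so 'tmp'
		 * cannot be invalidated.
		 */
		mutex_unlock(lock);
		process(it);
		mutex_lock(lock);

		if (it->done) {
			list_del(&it->node);
			kfree(it);
		}
	}
	mutex_unlock(lock);
}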
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 72470b9f16c4..808484d11209 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -636,11 +636,19 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
}
/*
- * Disable and clear GPE status before interrupt is enabled. Some GPEs
- * (like wakeup GPE) haven't handler, this can avoid such GPE misfire.
- * acpi_leave_sleep_state will reenable specific GPEs later
+ * Disable all GPE and clear their status bits before interrupts are
+ * enabled. Some GPEs (like wakeup GPEs) have no handlers and this can
+ * prevent them from producing spurious interrupts.
+ *
+ * acpi_leave_sleep_state() will reenable specific GPEs later.
+ *
+ * Because this code runs on one CPU with disabled interrupts (all of
+ * the other CPUs are offline at this time), it need not acquire any
+ * sleeping locks which may trigger an implicit preemption point even
+ * if there is no contention, so avoid doing that by using a low-level
+ * library routine here.
*/
- acpi_disable_all_gpes();
+ acpi_hw_disable_all_gpes();
/* Allow EC transactions to happen. */
acpi_ec_unblock_transactions();
@@ -840,7 +848,7 @@ void __weak acpi_s2idle_setup(void)
s2idle_set_ops(&acpi_s2idle_ops);
}
-static void acpi_sleep_suspend_setup(void)
+static void __init acpi_sleep_suspend_setup(void)
{
bool suspend_ops_needed = false;
int i;
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 4720a3649a61..f9f6ebb08fdb 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -40,12 +40,35 @@
#define ACPI_THERMAL_NOTIFY_HOT 0xF1
#define ACPI_THERMAL_MODE_ACTIVE 0x00
-#define ACPI_THERMAL_MAX_ACTIVE 10
-#define ACPI_THERMAL_MAX_LIMIT_STR_LEN 65
+#define ACPI_THERMAL_MAX_ACTIVE 10
+#define ACPI_THERMAL_MAX_LIMIT_STR_LEN 65
-MODULE_AUTHOR("Paul Diefenbaugh");
-MODULE_DESCRIPTION("ACPI Thermal Zone Driver");
-MODULE_LICENSE("GPL");
+#define ACPI_TRIPS_CRITICAL BIT(0)
+#define ACPI_TRIPS_HOT BIT(1)
+#define ACPI_TRIPS_PASSIVE BIT(2)
+#define ACPI_TRIPS_ACTIVE BIT(3)
+#define ACPI_TRIPS_DEVICES BIT(4)
+
+#define ACPI_TRIPS_THRESHOLDS (ACPI_TRIPS_PASSIVE | ACPI_TRIPS_ACTIVE)
+
+#define ACPI_TRIPS_INIT (ACPI_TRIPS_CRITICAL | ACPI_TRIPS_HOT | \
+ ACPI_TRIPS_PASSIVE | ACPI_TRIPS_ACTIVE | \
+ ACPI_TRIPS_DEVICES)
+
+/*
+ * This exception is reported in two cases:
+ * 1. An invalid trip point becomes valid, or a valid trip point becomes
+ *    invalid, when re-evaluating the AML code.
+ * 2. TODO: Devices listed in _PSL, _ALx, _TZD may change.
+ * We need to re-bind the cooling devices of a thermal zone when this occurs.
+ */
+#define ACPI_THERMAL_TRIPS_EXCEPTION(flags, tz, str) \
+do { \
+ if (flags != ACPI_TRIPS_INIT) \
+ acpi_handle_info(tz->device->handle, \
+ "ACPI thermal trip point %s changed\n" \
+ "Please report to linux-acpi@vger.kernel.org\n", str); \
+} while (0)
static int act;
module_param(act, int, 0644);
@@ -73,75 +96,30 @@ MODULE_PARM_DESC(psv, "Disable or override all passive trip points.");
static struct workqueue_struct *acpi_thermal_pm_queue;
-static int acpi_thermal_add(struct acpi_device *device);
-static void acpi_thermal_remove(struct acpi_device *device);
-static void acpi_thermal_notify(struct acpi_device *device, u32 event);
-
-static const struct acpi_device_id thermal_device_ids[] = {
- {ACPI_THERMAL_HID, 0},
- {"", 0},
-};
-MODULE_DEVICE_TABLE(acpi, thermal_device_ids);
-
-#ifdef CONFIG_PM_SLEEP
-static int acpi_thermal_suspend(struct device *dev);
-static int acpi_thermal_resume(struct device *dev);
-#else
-#define acpi_thermal_suspend NULL
-#define acpi_thermal_resume NULL
-#endif
-static SIMPLE_DEV_PM_OPS(acpi_thermal_pm, acpi_thermal_suspend, acpi_thermal_resume);
-
-static struct acpi_driver acpi_thermal_driver = {
- .name = "thermal",
- .class = ACPI_THERMAL_CLASS,
- .ids = thermal_device_ids,
- .ops = {
- .add = acpi_thermal_add,
- .remove = acpi_thermal_remove,
- .notify = acpi_thermal_notify,
- },
- .drv.pm = &acpi_thermal_pm,
-};
-
-struct acpi_thermal_state {
- u8 critical:1;
- u8 hot:1;
- u8 passive:1;
- u8 active:1;
- u8 reserved:4;
- int active_index;
-};
-
-struct acpi_thermal_state_flags {
- u8 valid:1;
- u8 enabled:1;
- u8 reserved:6;
-};
-
struct acpi_thermal_critical {
- struct acpi_thermal_state_flags flags;
unsigned long temperature;
+ bool valid;
};
struct acpi_thermal_hot {
- struct acpi_thermal_state_flags flags;
unsigned long temperature;
+ bool valid;
};
struct acpi_thermal_passive {
- struct acpi_thermal_state_flags flags;
+ struct acpi_handle_list devices;
unsigned long temperature;
unsigned long tc1;
unsigned long tc2;
unsigned long tsp;
- struct acpi_handle_list devices;
+ bool valid;
};
struct acpi_thermal_active {
- struct acpi_thermal_state_flags flags;
- unsigned long temperature;
struct acpi_handle_list devices;
+ unsigned long temperature;
+ bool valid;
+ bool enabled;
};
struct acpi_thermal_trips {
@@ -151,12 +129,6 @@ struct acpi_thermal_trips {
struct acpi_thermal_active active[ACPI_THERMAL_MAX_ACTIVE];
};
-struct acpi_thermal_flags {
- u8 cooling_mode:1; /* _SCP */
- u8 devices:1; /* _TZD */
- u8 reserved:6;
-};
-
struct acpi_thermal {
struct acpi_device *device;
acpi_bus_id name;
@@ -164,8 +136,6 @@ struct acpi_thermal {
unsigned long last_temperature;
unsigned long polling_frequency;
volatile u8 zombie;
- struct acpi_thermal_flags flags;
- struct acpi_thermal_state state;
struct acpi_thermal_trips trips;
struct acpi_handle_list devices;
struct thermal_zone_device *thermal_zone;
@@ -220,52 +190,12 @@ static int acpi_thermal_get_polling_frequency(struct acpi_thermal *tz)
return 0;
}
-static int acpi_thermal_set_cooling_mode(struct acpi_thermal *tz, int mode)
-{
- if (!tz)
- return -EINVAL;
-
- if (ACPI_FAILURE(acpi_execute_simple_method(tz->device->handle,
- "_SCP", mode)))
- return -ENODEV;
-
- return 0;
-}
-
-#define ACPI_TRIPS_CRITICAL 0x01
-#define ACPI_TRIPS_HOT 0x02
-#define ACPI_TRIPS_PASSIVE 0x04
-#define ACPI_TRIPS_ACTIVE 0x08
-#define ACPI_TRIPS_DEVICES 0x10
-
-#define ACPI_TRIPS_REFRESH_THRESHOLDS (ACPI_TRIPS_PASSIVE | ACPI_TRIPS_ACTIVE)
-#define ACPI_TRIPS_REFRESH_DEVICES ACPI_TRIPS_DEVICES
-
-#define ACPI_TRIPS_INIT (ACPI_TRIPS_CRITICAL | ACPI_TRIPS_HOT | \
- ACPI_TRIPS_PASSIVE | ACPI_TRIPS_ACTIVE | \
- ACPI_TRIPS_DEVICES)
-
-/*
- * This exception is thrown out in two cases:
- * 1.An invalid trip point becomes invalid or a valid trip point becomes invalid
- * when re-evaluating the AML code.
- * 2.TODO: Devices listed in _PSL, _ALx, _TZD may change.
- * We need to re-bind the cooling devices of a thermal zone when this occurs.
- */
-#define ACPI_THERMAL_TRIPS_EXCEPTION(flags, tz, str) \
-do { \
- if (flags != ACPI_TRIPS_INIT) \
- acpi_handle_info(tz->device->handle, \
- "ACPI thermal trip point %s changed\n" \
- "Please report to linux-acpi@vger.kernel.org\n", str); \
-} while (0)
-
static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
{
acpi_status status;
unsigned long long tmp;
struct acpi_handle_list devices;
- int valid = 0;
+ bool valid = false;
int i;
/* Critical Shutdown */
@@ -279,21 +209,21 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
* ... so lets discard those as invalid.
*/
if (ACPI_FAILURE(status)) {
- tz->trips.critical.flags.valid = 0;
+ tz->trips.critical.valid = false;
acpi_handle_debug(tz->device->handle,
"No critical threshold\n");
} else if (tmp <= 2732) {
pr_info(FW_BUG "Invalid critical threshold (%llu)\n", tmp);
- tz->trips.critical.flags.valid = 0;
+ tz->trips.critical.valid = false;
} else {
- tz->trips.critical.flags.valid = 1;
+ tz->trips.critical.valid = true;
acpi_handle_debug(tz->device->handle,
"Found critical threshold [%lu]\n",
tz->trips.critical.temperature);
}
- if (tz->trips.critical.flags.valid) {
+ if (tz->trips.critical.valid) {
if (crt == -1) {
- tz->trips.critical.flags.valid = 0;
+ tz->trips.critical.valid = false;
} else if (crt > 0) {
unsigned long crt_k = celsius_to_deci_kelvin(crt);
@@ -312,12 +242,12 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
if (flag & ACPI_TRIPS_HOT) {
status = acpi_evaluate_integer(tz->device->handle, "_HOT", NULL, &tmp);
if (ACPI_FAILURE(status)) {
- tz->trips.hot.flags.valid = 0;
+ tz->trips.hot.valid = false;
acpi_handle_debug(tz->device->handle,
"No hot threshold\n");
} else {
tz->trips.hot.temperature = tmp;
- tz->trips.hot.flags.valid = 1;
+ tz->trips.hot.valid = true;
acpi_handle_debug(tz->device->handle,
"Found hot threshold [%lu]\n",
tz->trips.hot.temperature);
@@ -325,9 +255,9 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
}
/* Passive (optional) */
- if (((flag & ACPI_TRIPS_PASSIVE) && tz->trips.passive.flags.valid) ||
+ if (((flag & ACPI_TRIPS_PASSIVE) && tz->trips.passive.valid) ||
flag == ACPI_TRIPS_INIT) {
- valid = tz->trips.passive.flags.valid;
+ valid = tz->trips.passive.valid;
if (psv == -1) {
status = AE_SUPPORT;
} else if (psv > 0) {
@@ -339,44 +269,44 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
}
if (ACPI_FAILURE(status)) {
- tz->trips.passive.flags.valid = 0;
+ tz->trips.passive.valid = false;
} else {
tz->trips.passive.temperature = tmp;
- tz->trips.passive.flags.valid = 1;
+ tz->trips.passive.valid = true;
if (flag == ACPI_TRIPS_INIT) {
status = acpi_evaluate_integer(tz->device->handle,
"_TC1", NULL, &tmp);
if (ACPI_FAILURE(status))
- tz->trips.passive.flags.valid = 0;
+ tz->trips.passive.valid = false;
else
tz->trips.passive.tc1 = tmp;
status = acpi_evaluate_integer(tz->device->handle,
"_TC2", NULL, &tmp);
if (ACPI_FAILURE(status))
- tz->trips.passive.flags.valid = 0;
+ tz->trips.passive.valid = false;
else
tz->trips.passive.tc2 = tmp;
status = acpi_evaluate_integer(tz->device->handle,
"_TSP", NULL, &tmp);
if (ACPI_FAILURE(status))
- tz->trips.passive.flags.valid = 0;
+ tz->trips.passive.valid = false;
else
tz->trips.passive.tsp = tmp;
}
}
}
- if ((flag & ACPI_TRIPS_DEVICES) && tz->trips.passive.flags.valid) {
+ if ((flag & ACPI_TRIPS_DEVICES) && tz->trips.passive.valid) {
memset(&devices, 0, sizeof(struct acpi_handle_list));
status = acpi_evaluate_reference(tz->device->handle, "_PSL",
NULL, &devices);
if (ACPI_FAILURE(status)) {
acpi_handle_info(tz->device->handle,
"Invalid passive threshold\n");
- tz->trips.passive.flags.valid = 0;
+ tz->trips.passive.valid = false;
} else {
- tz->trips.passive.flags.valid = 1;
+ tz->trips.passive.valid = true;
}
if (memcmp(&tz->trips.passive.devices, &devices,
@@ -387,24 +317,24 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
}
}
if ((flag & ACPI_TRIPS_PASSIVE) || (flag & ACPI_TRIPS_DEVICES)) {
- if (valid != tz->trips.passive.flags.valid)
+ if (valid != tz->trips.passive.valid)
ACPI_THERMAL_TRIPS_EXCEPTION(flag, tz, "state");
}
/* Active (optional) */
for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) {
char name[5] = { '_', 'A', 'C', ('0' + i), '\0' };
- valid = tz->trips.active[i].flags.valid;
+ valid = tz->trips.active[i].valid;
if (act == -1)
break; /* disable all active trip points */
if (flag == ACPI_TRIPS_INIT || ((flag & ACPI_TRIPS_ACTIVE) &&
- tz->trips.active[i].flags.valid)) {
+ tz->trips.active[i].valid)) {
status = acpi_evaluate_integer(tz->device->handle,
name, NULL, &tmp);
if (ACPI_FAILURE(status)) {
- tz->trips.active[i].flags.valid = 0;
+ tz->trips.active[i].valid = false;
if (i == 0)
break;
@@ -426,21 +356,21 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
break;
} else {
tz->trips.active[i].temperature = tmp;
- tz->trips.active[i].flags.valid = 1;
+ tz->trips.active[i].valid = true;
}
}
name[2] = 'L';
- if ((flag & ACPI_TRIPS_DEVICES) && tz->trips.active[i].flags.valid) {
+ if ((flag & ACPI_TRIPS_DEVICES) && tz->trips.active[i].valid) {
memset(&devices, 0, sizeof(struct acpi_handle_list));
status = acpi_evaluate_reference(tz->device->handle,
name, NULL, &devices);
if (ACPI_FAILURE(status)) {
acpi_handle_info(tz->device->handle,
"Invalid active%d threshold\n", i);
- tz->trips.active[i].flags.valid = 0;
+ tz->trips.active[i].valid = false;
} else {
- tz->trips.active[i].flags.valid = 1;
+ tz->trips.active[i].valid = true;
}
if (memcmp(&tz->trips.active[i].devices, &devices,
@@ -451,10 +381,10 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
}
}
if ((flag & ACPI_TRIPS_ACTIVE) || (flag & ACPI_TRIPS_DEVICES))
- if (valid != tz->trips.active[i].flags.valid)
+ if (valid != tz->trips.active[i].valid)
ACPI_THERMAL_TRIPS_EXCEPTION(flag, tz, "state");
- if (!tz->trips.active[i].flags.valid)
+ if (!tz->trips.active[i].valid)
break;
}
@@ -474,17 +404,18 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
static int acpi_thermal_get_trip_points(struct acpi_thermal *tz)
{
- int i, valid, ret = acpi_thermal_trips_update(tz, ACPI_TRIPS_INIT);
+ int i, ret = acpi_thermal_trips_update(tz, ACPI_TRIPS_INIT);
+ bool valid;
if (ret)
return ret;
- valid = tz->trips.critical.flags.valid |
- tz->trips.hot.flags.valid |
- tz->trips.passive.flags.valid;
+ valid = tz->trips.critical.valid |
+ tz->trips.hot.valid |
+ tz->trips.passive.valid;
for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++)
- valid |= tz->trips.active[i].flags.valid;
+ valid = valid || tz->trips.active[i].valid;
if (!valid) {
pr_warn(FW_BUG "No valid trip found\n");
@@ -521,7 +452,7 @@ static int thermal_get_trip_type(struct thermal_zone_device *thermal,
if (!tz || trip < 0)
return -EINVAL;
- if (tz->trips.critical.flags.valid) {
+ if (tz->trips.critical.valid) {
if (!trip) {
*type = THERMAL_TRIP_CRITICAL;
return 0;
@@ -529,7 +460,7 @@ static int thermal_get_trip_type(struct thermal_zone_device *thermal,
trip--;
}
- if (tz->trips.hot.flags.valid) {
+ if (tz->trips.hot.valid) {
if (!trip) {
*type = THERMAL_TRIP_HOT;
return 0;
@@ -537,7 +468,7 @@ static int thermal_get_trip_type(struct thermal_zone_device *thermal,
trip--;
}
- if (tz->trips.passive.flags.valid) {
+ if (tz->trips.passive.valid) {
if (!trip) {
*type = THERMAL_TRIP_PASSIVE;
return 0;
@@ -545,7 +476,7 @@ static int thermal_get_trip_type(struct thermal_zone_device *thermal,
trip--;
}
- for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE && tz->trips.active[i].flags.valid; i++) {
+ for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE && tz->trips.active[i].valid; i++) {
if (!trip) {
*type = THERMAL_TRIP_ACTIVE;
return 0;
@@ -565,7 +496,7 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
if (!tz || trip < 0)
return -EINVAL;
- if (tz->trips.critical.flags.valid) {
+ if (tz->trips.critical.valid) {
if (!trip) {
*temp = deci_kelvin_to_millicelsius_with_offset(
tz->trips.critical.temperature,
@@ -575,7 +506,7 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
trip--;
}
- if (tz->trips.hot.flags.valid) {
+ if (tz->trips.hot.valid) {
if (!trip) {
*temp = deci_kelvin_to_millicelsius_with_offset(
tz->trips.hot.temperature,
@@ -585,7 +516,7 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
trip--;
}
- if (tz->trips.passive.flags.valid) {
+ if (tz->trips.passive.valid) {
if (!trip) {
*temp = deci_kelvin_to_millicelsius_with_offset(
tz->trips.passive.temperature,
@@ -596,7 +527,7 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
}
for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE &&
- tz->trips.active[i].flags.valid; i++) {
+ tz->trips.active[i].valid; i++) {
if (!trip) {
*temp = deci_kelvin_to_millicelsius_with_offset(
tz->trips.active[i].temperature,
@@ -614,7 +545,7 @@ static int thermal_get_crit_temp(struct thermal_zone_device *thermal,
{
struct acpi_thermal *tz = thermal_zone_device_priv(thermal);
- if (tz->trips.critical.flags.valid) {
+ if (tz->trips.critical.valid) {
*temperature = deci_kelvin_to_millicelsius_with_offset(
tz->trips.critical.temperature,
tz->kelvin_offset);
@@ -700,13 +631,13 @@ static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal,
int trip = -1;
int result = 0;
- if (tz->trips.critical.flags.valid)
+ if (tz->trips.critical.valid)
trip++;
- if (tz->trips.hot.flags.valid)
+ if (tz->trips.hot.valid)
trip++;
- if (tz->trips.passive.flags.valid) {
+ if (tz->trips.passive.valid) {
trip++;
for (i = 0; i < tz->trips.passive.devices.count; i++) {
handle = tz->trips.passive.devices.handles[i];
@@ -731,7 +662,7 @@ static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal,
}
for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) {
- if (!tz->trips.active[i].flags.valid)
+ if (!tz->trips.active[i].valid)
break;
trip++;
@@ -819,19 +750,19 @@ static int acpi_thermal_register_thermal_zone(struct acpi_thermal *tz)
acpi_status status;
int i;
- if (tz->trips.critical.flags.valid)
+ if (tz->trips.critical.valid)
trips++;
- if (tz->trips.hot.flags.valid)
+ if (tz->trips.hot.valid)
trips++;
- if (tz->trips.passive.flags.valid)
+ if (tz->trips.passive.valid)
trips++;
- for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE && tz->trips.active[i].flags.valid;
+ for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE && tz->trips.active[i].valid;
i++, trips++);
- if (tz->trips.passive.flags.valid)
+ if (tz->trips.passive.valid)
tz->thermal_zone = thermal_zone_device_register("acpitz", trips, 0, tz,
&acpi_thermal_zone_ops, NULL,
tz->trips.passive.tsp * 100,
@@ -906,13 +837,13 @@ static void acpi_thermal_notify(struct acpi_device *device, u32 event)
acpi_queue_thermal_check(tz);
break;
case ACPI_THERMAL_NOTIFY_THRESHOLDS:
- acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_THRESHOLDS);
+ acpi_thermal_trips_update(tz, ACPI_TRIPS_THRESHOLDS);
acpi_queue_thermal_check(tz);
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev), event, 0);
break;
case ACPI_THERMAL_NOTIFY_DEVICES:
- acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_DEVICES);
+ acpi_thermal_trips_update(tz, ACPI_TRIPS_DEVICES);
acpi_queue_thermal_check(tz);
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev), event, 0);
@@ -976,9 +907,8 @@ static int acpi_thermal_get_info(struct acpi_thermal *tz)
return result;
/* Set the cooling mode [_SCP] to active cooling (default) */
- result = acpi_thermal_set_cooling_mode(tz, ACPI_THERMAL_MODE_ACTIVE);
- if (!result)
- tz->flags.cooling_mode = 1;
+ acpi_execute_simple_method(tz->device->handle, "_SCP",
+ ACPI_THERMAL_MODE_ACTIVE);
/* Get default polling frequency [_TZP] (optional) */
if (tzp)
@@ -1001,7 +931,7 @@ static int acpi_thermal_get_info(struct acpi_thermal *tz)
*/
static void acpi_thermal_guess_offset(struct acpi_thermal *tz)
{
- if (tz->trips.critical.flags.valid &&
+ if (tz->trips.critical.valid &&
(tz->trips.critical.temperature % 5) == 1)
tz->kelvin_offset = 273100;
else
@@ -1110,27 +1040,48 @@ static int acpi_thermal_resume(struct device *dev)
return -EINVAL;
for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) {
- if (!tz->trips.active[i].flags.valid)
+ if (!tz->trips.active[i].valid)
break;
- tz->trips.active[i].flags.enabled = 1;
+ tz->trips.active[i].enabled = true;
for (j = 0; j < tz->trips.active[i].devices.count; j++) {
result = acpi_bus_update_power(
tz->trips.active[i].devices.handles[j],
&power_state);
if (result || (power_state != ACPI_STATE_D0)) {
- tz->trips.active[i].flags.enabled = 0;
+ tz->trips.active[i].enabled = false;
break;
}
}
- tz->state.active |= tz->trips.active[i].flags.enabled;
}
acpi_queue_thermal_check(tz);
return AE_OK;
}
+#else
+#define acpi_thermal_suspend NULL
+#define acpi_thermal_resume NULL
#endif
+static SIMPLE_DEV_PM_OPS(acpi_thermal_pm, acpi_thermal_suspend, acpi_thermal_resume);
+
+static const struct acpi_device_id thermal_device_ids[] = {
+ {ACPI_THERMAL_HID, 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, thermal_device_ids);
+
+static struct acpi_driver acpi_thermal_driver = {
+ .name = "thermal",
+ .class = ACPI_THERMAL_CLASS,
+ .ids = thermal_device_ids,
+ .ops = {
+ .add = acpi_thermal_add,
+ .remove = acpi_thermal_remove,
+ .notify = acpi_thermal_notify,
+ },
+ .drv.pm = &acpi_thermal_pm,
+};
static int thermal_act(const struct dmi_system_id *d) {
if (act == 0) {
@@ -1236,3 +1187,7 @@ static void __exit acpi_thermal_exit(void)
module_init(acpi_thermal_init);
module_exit(acpi_thermal_exit);
+
+MODULE_AUTHOR("Paul Diefenbaugh");
+MODULE_DESCRIPTION("ACPI Thermal Zone Driver");
+MODULE_LICENSE("GPL");
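A side note on the trip-flag constants at the top of this diff: the patch converts hand-written hex values to BIT() from <linux/bits.h>, which is simply (1UL << n) and keeps derived masks readable when OR-ed together. For instance:

#include <linux/bits.h>

#define FLAG_CRITICAL	BIT(0)				/* 0x01 */
#define FLAG_HOT	BIT(1)				/* 0x02 */
#define FLAG_BOTH	(FLAG_CRITICAL | FLAG_HOT)	/* 0x03 */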
diff --git a/drivers/acpi/tiny-power-button.c b/drivers/acpi/tiny-power-button.c
index 598f548b21f3..6353be6fec69 100644
--- a/drivers/acpi/tiny-power-button.c
+++ b/drivers/acpi/tiny-power-button.c
@@ -19,18 +19,52 @@ static const struct acpi_device_id tiny_power_button_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, tiny_power_button_device_ids);
-static int acpi_noop_add(struct acpi_device *device)
+static void acpi_tiny_power_button_notify(acpi_handle handle, u32 event, void *data)
{
- return 0;
+ kill_cad_pid(power_signal, 1);
}
-static void acpi_noop_remove(struct acpi_device *device)
+static void acpi_tiny_power_button_notify_run(void *not_used)
{
+ acpi_tiny_power_button_notify(NULL, ACPI_FIXED_HARDWARE_EVENT, NULL);
}
-static void acpi_tiny_power_button_notify(struct acpi_device *device, u32 event)
+static u32 acpi_tiny_power_button_event(void *not_used)
{
- kill_cad_pid(power_signal, 1);
+ acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_tiny_power_button_notify_run, NULL);
+ return ACPI_INTERRUPT_HANDLED;
+}
+
+static int acpi_tiny_power_button_add(struct acpi_device *device)
+{
+ acpi_status status;
+
+ if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON) {
+ status = acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
+ acpi_tiny_power_button_event,
+ NULL);
+ } else {
+ status = acpi_install_notify_handler(device->handle,
+ ACPI_DEVICE_NOTIFY,
+ acpi_tiny_power_button_notify,
+ NULL);
+ }
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ return 0;
+}
+
+static void acpi_tiny_power_button_remove(struct acpi_device *device)
+{
+ if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON) {
+ acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
+ acpi_tiny_power_button_event);
+ } else {
+ acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
+ acpi_tiny_power_button_notify);
+ }
+ acpi_os_wait_events_complete();
}
static struct acpi_driver acpi_tiny_power_button_driver = {
@@ -38,9 +72,8 @@ static struct acpi_driver acpi_tiny_power_button_driver = {
.class = "tiny-power-button",
.ids = tiny_power_button_device_ids,
.ops = {
- .add = acpi_noop_add,
- .remove = acpi_noop_remove,
- .notify = acpi_tiny_power_button_notify,
+ .add = acpi_tiny_power_button_add,
+ .remove = acpi_tiny_power_button_remove,
},
};
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index bcc25d457581..18cc08c858cf 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -471,6 +471,22 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
},
},
{
+ .callback = video_detect_force_native,
+ /* Lenovo ThinkPad X131e (3371 AMD version) */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "3371"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+ /* Apple iMac11,3 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "iMac11,3"),
+ },
+ },
+ {
/* https://bugzilla.redhat.com/show_bug.cgi?id=1217249 */
.callback = video_detect_force_native,
/* Apple MacBook Pro 12,1 */
@@ -514,6 +530,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
},
{
.callback = video_detect_force_native,
+ /* Dell Studio 1569 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1569"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
/* Acer Aspire 3830TG */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
@@ -828,6 +852,27 @@ enum acpi_backlight_type __acpi_video_get_backlight_type(bool native, bool *auto
if (native_available)
return acpi_backlight_native;
+ /*
+ * The vendor specific BIOS interfaces are only necessary for
+ * laptops from before ~2008.
+ *
+ * For laptops from ~2008 till ~2023 this point is never reached
+ * because on those (video_caps & ACPI_VIDEO_BACKLIGHT) above is true.
+ *
+ * Laptops from after ~2023 no longer support ACPI_VIDEO_BACKLIGHT,
+ * if this point is reached on those, this likely means that
+ * the GPU kms driver which sets native_available has not loaded yet.
+ *
+ * Returning acpi_backlight_vendor in this case is known to sometimes
+ * cause a non working vendor specific /sys/class/backlight device to
+ * get registered.
+ *
+ * Return acpi_backlight_none on laptops with ACPI tables written
+ * for Windows 8 (laptops from after ~2012) to avoid this problem.
+ */
+ if (acpi_osi_is_win8())
+ return acpi_backlight_none;
+
/* No ACPI video/native (old hw), use vendor specific fw methods. */
return acpi_backlight_vendor;
}
diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
index e499c60c4579..ce62e61a9605 100644
--- a/drivers/acpi/x86/s2idle.c
+++ b/drivers/acpi/x86/s2idle.c
@@ -59,6 +59,7 @@ static int lps0_dsm_func_mask;
static guid_t lps0_dsm_guid_microsoft;
static int lps0_dsm_func_mask_microsoft;
+static int lps0_dsm_state;
/* Device constraint entry structure */
struct lpi_device_info {
@@ -320,6 +321,44 @@ static void lpi_check_constraints(void)
}
}
+static bool acpi_s2idle_vendor_amd(void)
+{
+ return boot_cpu_data.x86_vendor == X86_VENDOR_AMD;
+}
+
+static const char *acpi_sleep_dsm_state_to_str(unsigned int state)
+{
+ if (lps0_dsm_func_mask_microsoft || !acpi_s2idle_vendor_amd()) {
+ switch (state) {
+ case ACPI_LPS0_SCREEN_OFF:
+ return "screen off";
+ case ACPI_LPS0_SCREEN_ON:
+ return "screen on";
+ case ACPI_LPS0_ENTRY:
+ return "lps0 entry";
+ case ACPI_LPS0_EXIT:
+ return "lps0 exit";
+ case ACPI_LPS0_MS_ENTRY:
+ return "lps0 ms entry";
+ case ACPI_LPS0_MS_EXIT:
+ return "lps0 ms exit";
+ }
+ } else {
+ switch (state) {
+ case ACPI_LPS0_SCREEN_ON_AMD:
+ return "screen on";
+ case ACPI_LPS0_SCREEN_OFF_AMD:
+ return "screen off";
+ case ACPI_LPS0_ENTRY_AMD:
+ return "lps0 entry";
+ case ACPI_LPS0_EXIT_AMD:
+ return "lps0 exit";
+ }
+ }
+
+ return "unknown";
+}
+
static void acpi_sleep_run_lps0_dsm(unsigned int func, unsigned int func_mask, guid_t dsm_guid)
{
union acpi_object *out_obj;
@@ -331,14 +370,15 @@ static void acpi_sleep_run_lps0_dsm(unsigned int func, unsigned int func_mask, g
rev_id, func, NULL);
ACPI_FREE(out_obj);
- acpi_handle_debug(lps0_device_handle, "_DSM function %u evaluation %s\n",
- func, out_obj ? "successful" : "failed");
+ lps0_dsm_state = func;
+ if (pm_debug_messages_on) {
+ acpi_handle_info(lps0_device_handle,
+ "%s transitioned to state %s\n",
+ out_obj ? "Successfully" : "Failed to",
+ acpi_sleep_dsm_state_to_str(lps0_dsm_state));
+ }
}
-static bool acpi_s2idle_vendor_amd(void)
-{
- return boot_cpu_data.x86_vendor == X86_VENDOR_AMD;
-}
static int validate_dsm(acpi_handle handle, const char *uuid, int rev, guid_t *dsm_guid)
{
@@ -485,11 +525,11 @@ int acpi_s2idle_prepare_late(void)
ACPI_LPS0_ENTRY,
lps0_dsm_func_mask, lps0_dsm_guid);
if (lps0_dsm_func_mask_microsoft > 0) {
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY,
- lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
/* modern standby entry */
acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY,
lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY,
+ lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
}
list_for_each_entry(handler, &lps0_s2idle_devops_head, list_node) {
@@ -524,11 +564,6 @@ void acpi_s2idle_restore_early(void)
if (handler->restore)
handler->restore();
- /* Modern standby exit */
- if (lps0_dsm_func_mask_microsoft > 0)
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT,
- lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
-
/* LPS0 exit */
if (lps0_dsm_func_mask > 0)
acpi_sleep_run_lps0_dsm(acpi_s2idle_vendor_amd() ?
@@ -539,6 +574,11 @@ void acpi_s2idle_restore_early(void)
acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT,
lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
+ /* Modern standby exit */
+ if (lps0_dsm_func_mask_microsoft > 0)
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT,
+ lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
+
/* Screen on */
if (lps0_dsm_func_mask_microsoft > 0)
acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON,
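Taken together, the two hunks above make the Microsoft-GUID calls symmetric between suspend and resume. Schematically, assuming lps0_dsm_func_mask_microsoft is set (comments only, mirroring the code above):

/*
 * prepare_late:  ACPI_LPS0_SCREEN_OFF
 *                ACPI_LPS0_MS_ENTRY
 *                ACPI_LPS0_ENTRY
 *
 * restore_early: ACPI_LPS0_EXIT
 *                ACPI_LPS0_MS_EXIT
 *                ACPI_LPS0_SCREEN_ON
 */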
diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
index 9c2d6f35f88a..c2b925f8cd4e 100644
--- a/drivers/acpi/x86/utils.c
+++ b/drivers/acpi/x86/utils.c
@@ -259,10 +259,11 @@ bool force_storage_d3(void)
* drivers/platform/x86/x86-android-tablets.c kernel module.
*/
#define ACPI_QUIRK_SKIP_I2C_CLIENTS BIT(0)
-#define ACPI_QUIRK_UART1_TTY_UART2_SKIP BIT(1)
-#define ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY BIT(2)
-#define ACPI_QUIRK_USE_ACPI_AC_AND_BATTERY BIT(3)
-#define ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS BIT(4)
+#define ACPI_QUIRK_UART1_SKIP BIT(1)
+#define ACPI_QUIRK_UART1_TTY_UART2_SKIP BIT(2)
+#define ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY BIT(3)
+#define ACPI_QUIRK_USE_ACPI_AC_AND_BATTERY BIT(4)
+#define ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS BIT(5)
static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
/*
@@ -319,6 +320,7 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"),
},
.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+ ACPI_QUIRK_UART1_SKIP |
ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
},
@@ -365,7 +367,7 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
},
{
- /* Nextbook Ares 8 */
+ /* Nextbook Ares 8 (BYT version) */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
DMI_MATCH(DMI_PRODUCT_NAME, "M890BAP"),
@@ -375,6 +377,16 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
},
{
+ /* Nextbook Ares 8A (CHT version) */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "CherryTrail"),
+ DMI_MATCH(DMI_BIOS_VERSION, "M882"),
+ },
+ .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+ ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
+ },
+ {
/* Whitelabel (sold as various brands) TM800A550L */
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
@@ -392,6 +404,7 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
#if IS_ENABLED(CONFIG_X86_ANDROID_TABLETS)
static const struct acpi_device_id i2c_acpi_known_good_ids[] = {
{ "10EC5640", 0 }, /* RealTek ALC5640 audio codec */
+ { "10EC5651", 0 }, /* RealTek ALC5651 audio codec */
{ "INT33F4", 0 }, /* X-Powers AXP288 PMIC */
{ "INT33FD", 0 }, /* Intel Crystal Cove PMIC */
{ "INT34D3", 0 }, /* Intel Whiskey Cove PMIC */
@@ -438,6 +451,9 @@ int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *s
if (dmi_id)
quirks = (unsigned long)dmi_id->driver_data;
+ if ((quirks & ACPI_QUIRK_UART1_SKIP) && uid == 1)
+ *skip = true;
+
if (quirks & ACPI_QUIRK_UART1_TTY_UART2_SKIP) {
if (uid == 1)
return -ENODEV; /* Create tty cdev instead of serdev */
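The renumbered quirk bits keep the lookup pattern unchanged: match DMI, read driver_data, test bits. A condensed sketch of that pattern under an assumed helper name (dmi_first_match() and the ACPI_QUIRK_* macros are the real ones from above):

#include <linux/dmi.h>

/* "example_uart1_should_skip" is a hypothetical helper name. */
static bool example_uart1_should_skip(u64 uid)
{
	const struct dmi_system_id *dmi_id;
	unsigned long quirks = 0;

	dmi_id = dmi_first_match(acpi_quirk_skip_dmi_ids);
	if (dmi_id)
		quirks = (unsigned long)dmi_id->driver_data;

	/* ACPI_QUIRK_UART1_SKIP suppresses serdev enumeration for UART1 */
	return (quirks & ACPI_QUIRK_UART1_SKIP) && uid == 1;
}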
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index fb56bfc45096..8fb7672021ee 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1934,24 +1934,23 @@ static void binder_deferred_fd_close(int fd)
static void binder_transaction_buffer_release(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_buffer *buffer,
- binder_size_t failed_at,
+ binder_size_t off_end_offset,
bool is_failure)
{
int debug_id = buffer->debug_id;
- binder_size_t off_start_offset, buffer_offset, off_end_offset;
+ binder_size_t off_start_offset, buffer_offset;
binder_debug(BINDER_DEBUG_TRANSACTION,
"%d buffer release %d, size %zd-%zd, failed at %llx\n",
proc->pid, buffer->debug_id,
buffer->data_size, buffer->offsets_size,
- (unsigned long long)failed_at);
+ (unsigned long long)off_end_offset);
if (buffer->target_node)
binder_dec_node(buffer->target_node, 1, 0);
off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
- off_end_offset = is_failure && failed_at ? failed_at :
- off_start_offset + buffer->offsets_size;
+
for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
buffer_offset += sizeof(binder_size_t)) {
struct binder_object_header *hdr;
@@ -2111,6 +2110,21 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
}
}
+/* Clean up all the objects in the buffer */
+static inline void binder_release_entire_buffer(struct binder_proc *proc,
+ struct binder_thread *thread,
+ struct binder_buffer *buffer,
+ bool is_failure)
+{
+ binder_size_t off_end_offset;
+
+ off_end_offset = ALIGN(buffer->data_size, sizeof(void *));
+ off_end_offset += buffer->offsets_size;
+
+ binder_transaction_buffer_release(proc, thread, buffer,
+ off_end_offset, is_failure);
+}
+
static int binder_translate_binder(struct flat_binder_object *fp,
struct binder_transaction *t,
struct binder_thread *thread)
@@ -2806,7 +2820,7 @@ static int binder_proc_transaction(struct binder_transaction *t,
t_outdated->buffer = NULL;
buffer->transaction = NULL;
trace_binder_transaction_update_buffer_release(buffer);
- binder_transaction_buffer_release(proc, NULL, buffer, 0, 0);
+ binder_release_entire_buffer(proc, NULL, buffer, false);
binder_alloc_free_buf(&proc->alloc, buffer);
kfree(t_outdated);
binder_stats_deleted(BINDER_STAT_TRANSACTION);
@@ -3775,7 +3789,7 @@ binder_free_buf(struct binder_proc *proc,
binder_node_inner_unlock(buf_node);
}
trace_binder_transaction_buffer_release(buffer);
- binder_transaction_buffer_release(proc, thread, buffer, 0, is_failure);
+ binder_release_entire_buffer(proc, thread, buffer, is_failure);
binder_alloc_free_buf(&proc->alloc, buffer);
}
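binder_release_entire_buffer() recomputes the same end offset the allocator used when laying the buffer out: the data section rounded up to pointer alignment, followed by the offsets array. A worked example with assumed sizes on a 64-bit kernel:

/*
 * Assumed: data_size = 100, offsets_size = 16, sizeof(void *) = 8.
 *
 *   off_end_offset = ALIGN(100, 8) + 16 = 104 + 16 = 120
 *
 * so binder_transaction_buffer_release() walks offsets [104, 120).
 */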
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 55a3c3c2409f..662a2a2e2e84 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -212,8 +212,8 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
mm = alloc->mm;
if (mm) {
- mmap_read_lock(mm);
- vma = vma_lookup(mm, alloc->vma_addr);
+ mmap_write_lock(mm);
+ vma = alloc->vma;
}
if (!vma && need_mm) {
@@ -270,7 +270,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
trace_binder_alloc_page_end(alloc, index);
}
if (mm) {
- mmap_read_unlock(mm);
+ mmap_write_unlock(mm);
mmput(mm);
}
return 0;
@@ -303,21 +303,24 @@ err_page_ptr_cleared:
}
err_no_vma:
if (mm) {
- mmap_read_unlock(mm);
+ mmap_write_unlock(mm);
mmput(mm);
}
return vma ? -ENOMEM : -ESRCH;
}
+static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
+ struct vm_area_struct *vma)
+{
+ /* pairs with smp_load_acquire in binder_alloc_get_vma() */
+ smp_store_release(&alloc->vma, vma);
+}
+
static inline struct vm_area_struct *binder_alloc_get_vma(
struct binder_alloc *alloc)
{
- struct vm_area_struct *vma = NULL;
-
- if (alloc->vma_addr)
- vma = vma_lookup(alloc->mm, alloc->vma_addr);
-
- return vma;
+ /* pairs with smp_store_release in binder_alloc_set_vma() */
+ return smp_load_acquire(&alloc->vma);
}
static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
@@ -380,15 +383,13 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
size_t size, data_offsets_size;
int ret;
- mmap_read_lock(alloc->mm);
+ /* Check binder_alloc is fully initialized */
if (!binder_alloc_get_vma(alloc)) {
- mmap_read_unlock(alloc->mm);
binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
"%d: binder_alloc_buf, no vma\n",
alloc->pid);
return ERR_PTR(-ESRCH);
}
- mmap_read_unlock(alloc->mm);
data_offsets_size = ALIGN(data_size, sizeof(void *)) +
ALIGN(offsets_size, sizeof(void *));
@@ -778,7 +779,9 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
buffer->free = 1;
binder_insert_free_buffer(alloc, buffer);
alloc->free_async_space = alloc->buffer_size / 2;
- alloc->vma_addr = vma->vm_start;
+
+ /* Signal binder_alloc is fully initialized */
+ binder_alloc_set_vma(alloc, vma);
return 0;
@@ -808,8 +811,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
buffers = 0;
mutex_lock(&alloc->mutex);
- BUG_ON(alloc->vma_addr &&
- vma_lookup(alloc->mm, alloc->vma_addr));
+ BUG_ON(alloc->vma);
while ((n = rb_first(&alloc->allocated_buffers))) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
@@ -916,25 +918,17 @@ void binder_alloc_print_pages(struct seq_file *m,
* Make sure the binder_alloc is fully initialized; otherwise we might
* read inconsistent state.
*/
-
- mmap_read_lock(alloc->mm);
- if (binder_alloc_get_vma(alloc) == NULL) {
- mmap_read_unlock(alloc->mm);
- goto uninitialized;
- }
-
- mmap_read_unlock(alloc->mm);
- for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
- page = &alloc->pages[i];
- if (!page->page_ptr)
- free++;
- else if (list_empty(&page->lru))
- active++;
- else
- lru++;
+ if (binder_alloc_get_vma(alloc) != NULL) {
+ for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+ page = &alloc->pages[i];
+ if (!page->page_ptr)
+ free++;
+ else if (list_empty(&page->lru))
+ active++;
+ else
+ lru++;
+ }
}
-
-uninitialized:
mutex_unlock(&alloc->mutex);
seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high);
@@ -969,7 +963,7 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
*/
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
- alloc->vma_addr = 0;
+ binder_alloc_set_vma(alloc, NULL);
}
/**
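The store-release in binder_alloc_set_vma() paired with the load-acquire in binder_alloc_get_vma() guarantees that a reader who observes a non-NULL vma also observes every initialization write made before the publish in binder_alloc_mmap_handler(). A generic sketch of the same publish/consume pairing, with hypothetical names:

#include <asm/barrier.h>

struct example_state {
	int ready_value;	/* initialized before publish */
};

static struct example_state *example_published;

static void example_publish(struct example_state *st)
{
	st->ready_value = 42;				/* init first ... */
	smp_store_release(&example_published, st);	/* ... then publish */
}

static int example_consume(void)
{
	struct example_state *st = smp_load_acquire(&example_published);

	/* A non-NULL st guarantees st->ready_value == 42 is visible. */
	return st ? st->ready_value : -1;
}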
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index 0f811ac4bcff..138d1d5af9ce 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -75,7 +75,7 @@ struct binder_lru_page {
/**
* struct binder_alloc - per-binder proc state for binder allocator
* @mutex: protects binder_alloc fields
- * @vma_addr: vm_area_struct->vm_start passed to mmap_handler
+ * @vma: vm_area_struct passed to mmap_handler
* (invariant after mmap)
* @mm: copy of task->mm (invariant after open)
* @buffer: base of per-proc address space mapped via mmap
@@ -99,7 +99,7 @@ struct binder_lru_page {
*/
struct binder_alloc {
struct mutex mutex;
- unsigned long vma_addr;
+ struct vm_area_struct *vma;
struct mm_struct *mm;
void __user *buffer;
struct list_head buffers;
diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c
index 43a881073a42..c2b323bc3b3a 100644
--- a/drivers/android/binder_alloc_selftest.c
+++ b/drivers/android/binder_alloc_selftest.c
@@ -287,7 +287,7 @@ void binder_selftest_alloc(struct binder_alloc *alloc)
if (!binder_selftest_run)
return;
mutex_lock(&binder_selftest_lock);
- if (!binder_selftest_run || !alloc->vma_addr)
+ if (!binder_selftest_run || !alloc->vma)
goto done;
pr_info("STARTED\n");
binder_selftest_alloc_offset(alloc, end_offset, 0);
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 8bf612bdd61a..b4f246f0cac7 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -5348,7 +5348,7 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
mutex_init(&ap->scsi_scan_mutex);
INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
- INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
+ INIT_DELAYED_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
INIT_LIST_HEAD(&ap->eh_done_q);
init_waitqueue_head(&ap->eh_wait_q);
init_completion(&ap->park_req_pending);
@@ -5954,6 +5954,7 @@ static void ata_port_detach(struct ata_port *ap)
WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));
cancel_delayed_work_sync(&ap->hotplug_task);
+ cancel_delayed_work_sync(&ap->scsi_rescan_task);
skip_eh:
/* clean up zpodd on port removal */
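Converting scsi_rescan_task from a plain work to a delayed work lets the handler re-arm itself, which in turn is why ata_port_detach() must now cancel it synchronously. A minimal sketch of that lifecycle with hypothetical names:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void example_rescan_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(example_rescan, example_rescan_fn);

static void example_rescan_fn(struct work_struct *work)
{
	/* If not ready yet (e.g. a device still resuming), retry shortly. */
	schedule_delayed_work(&example_rescan, msecs_to_jiffies(5));
}

static void example_detach(void)
{
	/* Also waits out a self-rescheduling handler. */
	cancel_delayed_work_sync(&example_rescan);
}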
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index a6c901811802..6f8d14191593 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2984,7 +2984,7 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
ehc->i.flags |= ATA_EHI_SETMODE;
/* schedule the scsi_rescan_device() here */
- schedule_work(&(ap->scsi_rescan_task));
+ schedule_delayed_work(&ap->scsi_rescan_task, 0);
} else if (dev->class == ATA_DEV_UNKNOWN &&
ehc->tries[dev->devno] &&
ata_class_enabled(ehc->classes[dev->devno])) {
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 7bb12deab70c..551077cea4e4 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -2694,18 +2694,36 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
return 0;
}
-static struct ata_device *ata_find_dev(struct ata_port *ap, int devno)
+static struct ata_device *ata_find_dev(struct ata_port *ap, unsigned int devno)
{
- if (!sata_pmp_attached(ap)) {
- if (likely(devno >= 0 &&
- devno < ata_link_max_devices(&ap->link)))
+ /*
+ * For the non-PMP case, ata_link_max_devices() returns 1 (SATA case)
+ * or 2 (IDE master + slave case). However, the former case includes
+ * libsas-hosted devices, which are numbered per SCSI host, so devno
+ * can be larger than 0 even though each struct ata_device has its
+ * own struct ata_port and struct ata_link.
+ * To accommodate these, ignore devno and always use device number 0.
+ */
+ if (likely(!sata_pmp_attached(ap))) {
+ int link_max_devices = ata_link_max_devices(&ap->link);
+
+ if (link_max_devices == 1)
+ return &ap->link.device[0];
+
+ if (devno < link_max_devices)
return &ap->link.device[devno];
- } else {
- if (likely(devno >= 0 &&
- devno < ap->nr_pmp_links))
- return &ap->pmp_link[devno].device[0];
+
+ return NULL;
}
+ /*
+ * For PMP-attached devices, the device number corresponds to C
+ * (channel) of SCSI [H:C:I:L], indicating the port multiplier
+ * (PMP) link for the device.
+ */
+ if (devno < ap->nr_pmp_links)
+ return &ap->pmp_link[devno].device[0];
+
return NULL;
}
@@ -4579,10 +4597,11 @@ int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
void ata_scsi_dev_rescan(struct work_struct *work)
{
struct ata_port *ap =
- container_of(work, struct ata_port, scsi_rescan_task);
+ container_of(work, struct ata_port, scsi_rescan_task.work);
struct ata_link *link;
struct ata_device *dev;
unsigned long flags;
+ bool delay_rescan = false;
mutex_lock(&ap->scsi_scan_mutex);
spin_lock_irqsave(ap->lock, flags);
@@ -4596,6 +4615,21 @@ void ata_scsi_dev_rescan(struct work_struct *work)
if (scsi_device_get(sdev))
continue;
+ /*
+ * If the rescan work was scheduled because of a resume
+ * event, the port is already fully resumed, but the
+ * SCSI device may not yet be fully resumed. In such a
+ * case, executing scsi_rescan_device() may deadlock
+ * with the PM code on device_lock(). Prevent this by
+ * giving up and retrying the rescan after a short
+ * delay.
+ */
+ delay_rescan = sdev->sdev_gendev.power.is_suspended;
+ if (delay_rescan) {
+ scsi_device_put(sdev);
+ break;
+ }
+
spin_unlock_irqrestore(ap->lock, flags);
scsi_rescan_device(&(sdev->sdev_gendev));
scsi_device_put(sdev);
@@ -4605,4 +4639,8 @@ void ata_scsi_dev_rescan(struct work_struct *work)
spin_unlock_irqrestore(ap->lock, flags);
mutex_unlock(&ap->scsi_scan_mutex);
+
+ if (delay_rescan)
+ schedule_delayed_work(&ap->scsi_rescan_task,
+ msecs_to_jiffies(5));
}
diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c
index 02425991c159..d44814b9562a 100644
--- a/drivers/auxdisplay/ht16k33.c
+++ b/drivers/auxdisplay/ht16k33.c
@@ -820,7 +820,7 @@ static const struct of_device_id ht16k33_of_match[] = {
MODULE_DEVICE_TABLE(of, ht16k33_of_match);
static struct i2c_driver ht16k33_driver = {
- .probe_new = ht16k33_probe,
+ .probe = ht16k33_probe,
.remove = ht16k33_remove,
.driver = {
.name = DRIVER_NAME,
diff --git a/drivers/auxdisplay/lcd2s.c b/drivers/auxdisplay/lcd2s.c
index 135831a16514..6422be0dfe20 100644
--- a/drivers/auxdisplay/lcd2s.c
+++ b/drivers/auxdisplay/lcd2s.c
@@ -365,7 +365,7 @@ static struct i2c_driver lcd2s_i2c_driver = {
.name = "lcd2s",
.of_match_table = lcd2s_of_table,
},
- .probe_new = lcd2s_i2c_probe,
+ .probe = lcd2s_i2c_probe,
.remove = lcd2s_i2c_remove,
.id_table = lcd2s_i2c_id,
};
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index bba3482ddeb8..cbae8be1fe52 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -388,6 +388,16 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
continue;/* skip if itself or no cacheinfo */
for (sib_index = 0; sib_index < cache_leaves(i); sib_index++) {
sib_leaf = per_cpu_cacheinfo_idx(i, sib_index);
+
+ /*
+ * Comparing cache IDs only makes sense if the leaves
+ * belong to the same cache level and are of the same
+ * type. Skip the check if level and type do not match.
+ */
+ if (sib_leaf->level != this_leaf->level ||
+ sib_leaf->type != this_leaf->type)
+ continue;
+
if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
@@ -400,11 +410,14 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
coherency_max_size = this_leaf->coherency_line_size;
}
+ /* shared_cpu_map is now populated for the cpu */
+ this_cpu_ci->cpu_map_populated = true;
return 0;
}
static void cache_shared_cpu_map_remove(unsigned int cpu)
{
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
struct cacheinfo *this_leaf, *sib_leaf;
unsigned int sibling, index, sib_index;
@@ -419,6 +432,16 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
for (sib_index = 0; sib_index < cache_leaves(sibling); sib_index++) {
sib_leaf = per_cpu_cacheinfo_idx(sibling, sib_index);
+
+ /*
+ * Comparing cache IDs only makes sense if the leaves
+ * belong to the same cache level and are of the same
+ * type. Skip the check if level and type do not match.
+ */
+ if (sib_leaf->level != this_leaf->level ||
+ sib_leaf->type != this_leaf->type)
+ continue;
+
if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
@@ -427,6 +450,9 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
}
}
}
+
+ /* cpu is no longer populated in the shared map */
+ this_cpu_ci->cpu_map_populated = false;
}
static void free_cache_attributes(unsigned int cpu)
diff --git a/drivers/base/class.c b/drivers/base/class.c
index ac1808d1a2e8..05d9df90f621 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -320,6 +320,7 @@ void class_dev_iter_init(struct class_dev_iter *iter, const struct class *class,
start_knode = &start->p->knode_class;
klist_iter_init_node(&sp->klist_devices, &iter->ki, start_knode);
iter->type = type;
+ iter->sp = sp;
}
EXPORT_SYMBOL_GPL(class_dev_iter_init);
@@ -361,6 +362,7 @@ EXPORT_SYMBOL_GPL(class_dev_iter_next);
void class_dev_iter_exit(struct class_dev_iter *iter)
{
klist_iter_exit(&iter->ki);
+ subsys_put(iter->sp);
}
EXPORT_SYMBOL_GPL(class_dev_iter_exit);
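Recording iter->sp at init time lets the exit path drop the subsystem reference the init path takes (the acquisition itself is outside this hunk), keeping the class's internal data alive across the walk. Callers are unchanged; a usage sketch ("example_class" is a placeholder):

#include <linux/device.h>
#include <linux/device/class.h>

static void example_walk(const struct class *example_class)
{
	struct class_dev_iter iter;
	struct device *dev;

	class_dev_iter_init(&iter, example_class, NULL, NULL);
	while ((dev = class_dev_iter_next(&iter)))
		dev_info(dev, "visited\n");
	class_dev_iter_exit(&iter);	/* now also drops the subsys ref */
}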
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 9c09ca5c4ab6..878aa7646b37 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -751,14 +751,12 @@ static int really_probe_debug(struct device *dev, struct device_driver *drv)
*
* Should somehow figure out how to use a semaphore, not an atomic variable...
*/
-int driver_probe_done(void)
+bool __init driver_probe_done(void)
{
int local_probe_count = atomic_read(&probe_count);
pr_debug("%s: probe_count = %d\n", __func__, local_probe_count);
- if (local_probe_count)
- return -EBUSY;
- return 0;
+ return !local_probe_count;
}
/**
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index 9d79d5ad9102..b58c42f1b1ce 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -812,7 +812,7 @@ static void fw_log_firmware_info(const struct firmware *fw, const char *name, st
char *outbuf;
alg = crypto_alloc_shash("sha256", 0, 0);
- if (!alg)
+ if (IS_ERR(alg))
return;
sha256buf = kmalloc(SHA256_DIGEST_SIZE, GFP_KERNEL);
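crypto_alloc_shash() reports failure through ERR_PTR(), never NULL, so the old !alg test could not fire. The corrected idiom in isolated form (the wrapper name is hypothetical):

#include <linux/err.h>
#include <crypto/hash.h>

static int example_alloc_sha256(struct crypto_shash **out)
{
	struct crypto_shash *alg = crypto_alloc_shash("sha256", 0, 0);

	if (IS_ERR(alg))	/* ERR_PTR-encoded errno, never NULL */
		return PTR_ERR(alg);

	*out = alg;
	return 0;
}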
diff --git a/drivers/base/node.c b/drivers/base/node.c
index b46db17124f3..655975946ef6 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -449,6 +449,9 @@ static ssize_t node_read_meminfo(struct device *dev,
"Node %d FileHugePages: %8lu kB\n"
"Node %d FilePmdMapped: %8lu kB\n"
#endif
+#ifdef CONFIG_UNACCEPTED_MEMORY
+ "Node %d Unaccepted: %8lu kB\n"
+#endif
,
nid, K(node_page_state(pgdat, NR_FILE_DIRTY)),
nid, K(node_page_state(pgdat, NR_WRITEBACK)),
@@ -478,6 +481,10 @@ static ssize_t node_read_meminfo(struct device *dev,
nid, K(node_page_state(pgdat, NR_FILE_THPS)),
nid, K(node_page_state(pgdat, NR_FILE_PMDMAPPED))
#endif
+#ifdef CONFIG_UNACCEPTED_MEMORY
+ ,
+ nid, K(sum_zone_node_page_state(nid, NR_UNACCEPTED))
+#endif
);
len += hugetlb_report_node_meminfo(buf, len, nid);
return len;
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 32084e38b73d..5cb2023581d4 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -1632,9 +1632,6 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
dev_dbg(dev, "%s()\n", __func__);
- if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
- return -EINVAL;
-
gpd_data = genpd_alloc_dev_data(dev, gd);
if (IS_ERR(gpd_data))
return PTR_ERR(gpd_data);
@@ -1676,6 +1673,9 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
{
int ret;
+ if (!genpd || !dev)
+ return -EINVAL;
+
mutex_lock(&gpd_list_lock);
ret = genpd_add_device(genpd, dev, dev);
mutex_unlock(&gpd_list_lock);
@@ -2523,6 +2523,9 @@ int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
struct generic_pm_domain *genpd;
int ret;
+ if (!dev)
+ return -EINVAL;
+
mutex_lock(&gpd_list_lock);
genpd = genpd_get_from_provider(genpdspec);
@@ -2939,10 +2942,10 @@ static int genpd_parse_state(struct genpd_power_state *genpd_state,
err = of_property_read_u32(state_node, "min-residency-us", &residency);
if (!err)
- genpd_state->residency_ns = 1000 * residency;
+ genpd_state->residency_ns = 1000LL * residency;
- genpd_state->power_on_latency_ns = 1000 * exit_latency;
- genpd_state->power_off_latency_ns = 1000 * entry_latency;
+ genpd_state->power_on_latency_ns = 1000LL * exit_latency;
+ genpd_state->power_off_latency_ns = 1000LL * entry_latency;
genpd_state->fwnode = &state_node->fwnode;
return 0;
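The DT properties are u32 microsecond values, and 1000 * x is evaluated in 32-bit arithmetic before being widened for the s64 store, so any latency above ~4.29 seconds wrapped. A worked example with an assumed latency:

/*
 * Assumed: exit_latency = 5000000 (us), i.e. 5 s.
 *
 *   1000   * 5000000 = 5000000000 -> wraps to 705032704 in 32-bit math
 *   1000LL * 5000000 = 5000000000, computed in 64 bits as intended
 */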
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 7cc0c0cf8eaa..a917219feea6 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -19,11 +19,6 @@
#include "power.h"
-#ifndef CONFIG_SUSPEND
-suspend_state_t pm_suspend_target_state;
-#define pm_suspend_target_state (PM_SUSPEND_ON)
-#endif
-
#define list_for_each_entry_rcu_locked(pos, head, member) \
list_for_each_entry_rcu(pos, head, member, \
srcu_read_lock_held(&wakeup_srcu))
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index 33a8366e22a5..0db2021f7477 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -4,16 +4,23 @@
# subsystems should select the appropriate symbols.
config REGMAP
+ bool "Register Map support" if KUNIT_ALL_TESTS
default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ || REGMAP_SOUNDWIRE || REGMAP_SOUNDWIRE_MBQ || REGMAP_SCCB || REGMAP_I3C || REGMAP_SPI_AVMM || REGMAP_MDIO || REGMAP_FSI)
select IRQ_DOMAIN if REGMAP_IRQ
select MDIO_BUS if REGMAP_MDIO
- bool
+ help
+ Enable support for the Register Map (regmap) access API.
+
+ Usually, this option is automatically selected when needed.
+ However, you may want to enable it manually for running the regmap
+ KUnit tests.
+
+ If unsure, say N.
config REGMAP_KUNIT
tristate "KUnit tests for regmap"
- depends on KUNIT
+ depends on KUNIT && REGMAP
default KUNIT_ALL_TESTS
- select REGMAP
select REGMAP_RAM
config REGMAP_AC97
diff --git a/drivers/base/regmap/regcache-maple.c b/drivers/base/regmap/regcache-maple.c
index 9b1b559107ef..c2e3a0f6c218 100644
--- a/drivers/base/regmap/regcache-maple.c
+++ b/drivers/base/regmap/regcache-maple.c
@@ -203,15 +203,18 @@ static int regcache_maple_sync(struct regmap *map, unsigned int min,
mas_for_each(&mas, entry, max) {
for (r = max(mas.index, lmin); r <= min(mas.last, lmax); r++) {
+ mas_pause(&mas);
+ rcu_read_unlock();
ret = regcache_sync_val(map, r, entry[r - mas.index]);
if (ret != 0)
goto out;
+ rcu_read_lock();
}
}
-out:
rcu_read_unlock();
+out:
map->cache_bypass = false;
return ret;
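regcache_sync_val() can sleep (it may hit a slow bus), which is illegal inside an RCU read-side critical section; mas_pause() parks the maple-tree iterator so the walk can resume safely after the lock is retaken. The general shape of the pattern, with a hypothetical stand-in for the sleeping call:

#include <linux/maple_tree.h>
#include <linux/rcupdate.h>

/* Stand-in for a call that may sleep, e.g. a bus write. */
static int example_sleeping_sync(void *entry)
{
	return 0;
}

static int example_walk_and_sync(struct maple_tree *mt, unsigned long max)
{
	MA_STATE(mas, mt, 0, 0);
	void *entry;
	int ret = 0;

	rcu_read_lock();
	mas_for_each(&mas, entry, max) {
		mas_pause(&mas);	/* park iterator before sleeping */
		rcu_read_unlock();

		ret = example_sleeping_sync(entry);
		if (ret)
			return ret;	/* RCU lock already dropped */

		rcu_read_lock();	/* resume the paused walk */
	}
	rcu_read_unlock();

	return ret;
}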
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 029564695dbb..97c681fcf9f6 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -284,6 +284,9 @@ static bool regcache_reg_needs_sync(struct regmap *map, unsigned int reg,
{
int ret;
+ if (!regmap_writeable(map, reg))
+ return false;
+
/* If we don't know the chip just got reset, then sync everything. */
if (!map->no_sync_defaults)
return true;
diff --git a/drivers/base/regmap/regmap-sdw.c b/drivers/base/regmap/regmap-sdw.c
index 09899ae99fc1..159c0b740b00 100644
--- a/drivers/base/regmap/regmap-sdw.c
+++ b/drivers/base/regmap/regmap-sdw.c
@@ -59,6 +59,10 @@ static int regmap_sdw_config_check(const struct regmap_config *config)
if (config->pad_bits != 0)
return -ENOTSUPP;
+ /* Only bulk writes are supported, not multi-register writes */
+ if (config->can_multi_write)
+ return -ENOTSUPP;
+
return 0;
}
diff --git a/drivers/base/regmap/regmap-spi-avmm.c b/drivers/base/regmap/regmap-spi-avmm.c
index 4c2b94b3e30b..6af692844c19 100644
--- a/drivers/base/regmap/regmap-spi-avmm.c
+++ b/drivers/base/regmap/regmap-spi-avmm.c
@@ -660,7 +660,7 @@ static const struct regmap_bus regmap_spi_avmm_bus = {
.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
.val_format_endian_default = REGMAP_ENDIAN_NATIVE,
.max_raw_read = SPI_AVMM_VAL_SIZE * MAX_READ_CNT,
- .max_raw_write = SPI_AVMM_VAL_SIZE * MAX_WRITE_CNT,
+ .max_raw_write = SPI_AVMM_REG_SIZE + SPI_AVMM_VAL_SIZE * MAX_WRITE_CNT,
.free_context = spi_avmm_bridge_ctx_free,
};
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index db7851f0e3b8..fa2d3fba6ac9 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -2082,6 +2082,8 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
size_t val_count = val_len / val_bytes;
size_t chunk_count, chunk_bytes;
size_t chunk_regs = val_count;
+ size_t max_data = map->max_raw_write - map->format.reg_bytes -
+ map->format.pad_bytes;
int ret, i;
if (!val_count)
@@ -2089,8 +2091,8 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
if (map->use_single_write)
chunk_regs = 1;
- else if (map->max_raw_write && val_len > map->max_raw_write)
- chunk_regs = map->max_raw_write / val_bytes;
+ else if (map->max_raw_write && val_len > max_data)
+ chunk_regs = max_data / val_bytes;
chunk_count = val_count / chunk_regs;
chunk_bytes = chunk_regs * val_bytes;
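Each raw write transfers the register address and padding in addition to the values, so the chunk size must be computed against the payload budget rather than the full max_raw_write. A worked example with an assumed bus geometry:

/*
 * Assumed: max_raw_write = 16, reg_bytes = 2, pad_bytes = 0,
 *          val_bytes = 2, val_len = 32 (16 registers).
 *
 *   before: chunk_regs = 16 / 2 = 8 -> 2 + 8 * 2 = 18 bytes per write,
 *           over the 16-byte bus limit
 *   after:  max_data = 16 - 2 - 0 = 14
 *           chunk_regs = 14 / 2 = 7 -> 2 + 7 * 2 = 16 bytes per write
 */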
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 4c8b2ba579ee..e460c9799d9f 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1532,7 +1532,7 @@ static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return 0;
}
-static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
+static int fd_locked_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long param)
{
struct amiga_floppy_struct *p = bdev->bd_disk->private_data;
@@ -1607,7 +1607,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
return 0;
}
-static int fd_ioctl(struct block_device *bdev, fmode_t mode,
+static int fd_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long param)
{
int ret;
@@ -1654,10 +1654,10 @@ static void fd_probe(int dev)
* /dev/PS0 etc), and disallows simultaneous access to the same
* drive with different device numbers.
*/
-static int floppy_open(struct block_device *bdev, fmode_t mode)
+static int floppy_open(struct gendisk *disk, blk_mode_t mode)
{
- int drive = MINOR(bdev->bd_dev) & 3;
- int system = (MINOR(bdev->bd_dev) & 4) >> 2;
+ int drive = disk->first_minor & 3;
+ int system = (disk->first_minor & 4) >> 2;
int old_dev;
unsigned long flags;
@@ -1673,10 +1673,9 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
mutex_unlock(&amiflop_mutex);
return -ENXIO;
}
-
- if (mode & (FMODE_READ|FMODE_WRITE)) {
- bdev_check_media_change(bdev);
- if (mode & FMODE_WRITE) {
+ if (mode & (BLK_OPEN_READ | BLK_OPEN_WRITE)) {
+ disk_check_media_change(disk);
+ if (mode & BLK_OPEN_WRITE) {
int wrprot;
get_fdc(drive);
@@ -1691,7 +1690,6 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
}
}
}
-
local_irq_save(flags);
fd_ref[drive]++;
fd_device[drive] = system;
@@ -1709,7 +1707,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
return 0;
}
-static void floppy_release(struct gendisk *disk, fmode_t mode)
+static void floppy_release(struct gendisk *disk)
{
struct amiga_floppy_struct *p = disk->private_data;
int drive = p - unit;
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 128722cf6c3c..cf6883756155 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -204,9 +204,9 @@ aoedisk_rm_debugfs(struct aoedev *d)
}
static int
-aoeblk_open(struct block_device *bdev, fmode_t mode)
+aoeblk_open(struct gendisk *disk, blk_mode_t mode)
{
- struct aoedev *d = bdev->bd_disk->private_data;
+ struct aoedev *d = disk->private_data;
ulong flags;
if (!virt_addr_valid(d)) {
@@ -232,7 +232,7 @@ aoeblk_open(struct block_device *bdev, fmode_t mode)
}
static void
-aoeblk_release(struct gendisk *disk, fmode_t mode)
+aoeblk_release(struct gendisk *disk)
{
struct aoedev *d = disk->private_data;
ulong flags;
@@ -285,7 +285,7 @@ aoeblk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
}
static int
-aoeblk_ioctl(struct block_device *bdev, fmode_t mode, uint cmd, ulong arg)
+aoeblk_ioctl(struct block_device *bdev, blk_mode_t mode, uint cmd, ulong arg)
{
struct aoedev *d;
diff --git a/drivers/block/aoe/aoechr.c b/drivers/block/aoe/aoechr.c
index 4c666f72203f..a42c4bcc85ba 100644
--- a/drivers/block/aoe/aoechr.c
+++ b/drivers/block/aoe/aoechr.c
@@ -49,7 +49,7 @@ static int emsgs_head_idx, emsgs_tail_idx;
static struct completion emsgs_comp;
static spinlock_t emsgs_lock;
static int nblocked_emsgs_readers;
-static struct class *aoe_class;
+
static struct aoe_chardev chardevs[] = {
{ MINOR_ERR, "err" },
{ MINOR_DISCOVER, "discover" },
@@ -58,6 +58,16 @@ static struct aoe_chardev chardevs[] = {
{ MINOR_FLUSH, "flush" },
};
+static char *aoe_devnode(const struct device *dev, umode_t *mode)
+{
+ return kasprintf(GFP_KERNEL, "etherd/%s", dev_name(dev));
+}
+
+static const struct class aoe_class = {
+ .name = "aoe",
+ .devnode = aoe_devnode,
+};
+
static int
discover(void)
{
@@ -273,11 +283,6 @@ static const struct file_operations aoe_fops = {
.llseek = noop_llseek,
};
-static char *aoe_devnode(const struct device *dev, umode_t *mode)
-{
- return kasprintf(GFP_KERNEL, "etherd/%s", dev_name(dev));
-}
-
int __init
aoechr_init(void)
{
@@ -290,15 +295,14 @@ aoechr_init(void)
}
init_completion(&emsgs_comp);
spin_lock_init(&emsgs_lock);
- aoe_class = class_create("aoe");
- if (IS_ERR(aoe_class)) {
+ n = class_register(&aoe_class);
+ if (n) {
unregister_chrdev(AOE_MAJOR, "aoechr");
- return PTR_ERR(aoe_class);
+ return n;
}
- aoe_class->devnode = aoe_devnode;
for (i = 0; i < ARRAY_SIZE(chardevs); ++i)
- device_create(aoe_class, NULL,
+ device_create(&aoe_class, NULL,
MKDEV(AOE_MAJOR, chardevs[i].minor), NULL,
chardevs[i].name);
@@ -311,8 +315,8 @@ aoechr_exit(void)
int i;
for (i = 0; i < ARRAY_SIZE(chardevs); ++i)
- device_destroy(aoe_class, MKDEV(AOE_MAJOR, chardevs[i].minor));
- class_destroy(aoe_class);
+ device_destroy(&aoe_class, MKDEV(AOE_MAJOR, chardevs[i].minor));
+ class_unregister(&aoe_class);
unregister_chrdev(AOE_MAJOR, "aoechr");
}
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 9deb4df6bdb8..cd738cab725f 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -442,13 +442,13 @@ static void fd_times_out(struct timer_list *unused);
static void finish_fdc( void );
static void finish_fdc_done( int dummy );
static void setup_req_params( int drive );
-static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
- cmd, unsigned long param);
+static int fd_locked_ioctl(struct block_device *bdev, blk_mode_t mode,
+ unsigned int cmd, unsigned long param);
static void fd_probe( int drive );
static int fd_test_drive_present( int drive );
static void config_types( void );
-static int floppy_open(struct block_device *bdev, fmode_t mode);
-static void floppy_release(struct gendisk *disk, fmode_t mode);
+static int floppy_open(struct gendisk *disk, blk_mode_t mode);
+static void floppy_release(struct gendisk *disk);
/************************* End of Prototypes **************************/
@@ -1581,7 +1581,7 @@ out:
return BLK_STS_OK;
}
-static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
+static int fd_locked_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long param)
{
struct gendisk *disk = bdev->bd_disk;
@@ -1760,15 +1760,15 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
/* invalidate the buffer track to force a reread */
BufferDrive = -1;
set_bit(drive, &fake_change);
- if (bdev_check_media_change(bdev))
- floppy_revalidate(bdev->bd_disk);
+ if (disk_check_media_change(disk))
+ floppy_revalidate(disk);
return 0;
default:
return -EINVAL;
}
}
-static int fd_ioctl(struct block_device *bdev, fmode_t mode,
+static int fd_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long arg)
{
int ret;
@@ -1915,32 +1915,31 @@ static void __init config_types( void )
* drive with different device numbers.
*/
-static int floppy_open(struct block_device *bdev, fmode_t mode)
+static int floppy_open(struct gendisk *disk, blk_mode_t mode)
{
- struct atari_floppy_struct *p = bdev->bd_disk->private_data;
- int type = MINOR(bdev->bd_dev) >> 2;
+ struct atari_floppy_struct *p = disk->private_data;
+ int type = disk->first_minor >> 2;
DPRINT(("fd_open: type=%d\n",type));
if (p->ref && p->type != type)
return -EBUSY;
- if (p->ref == -1 || (p->ref && mode & FMODE_EXCL))
+ if (p->ref == -1 || (p->ref && mode & BLK_OPEN_EXCL))
return -EBUSY;
-
- if (mode & FMODE_EXCL)
+ if (mode & BLK_OPEN_EXCL)
p->ref = -1;
else
p->ref++;
p->type = type;
- if (mode & FMODE_NDELAY)
+ if (mode & BLK_OPEN_NDELAY)
return 0;
- if (mode & (FMODE_READ|FMODE_WRITE)) {
- if (bdev_check_media_change(bdev))
- floppy_revalidate(bdev->bd_disk);
- if (mode & FMODE_WRITE) {
+ if (mode & (BLK_OPEN_READ | BLK_OPEN_WRITE)) {
+ if (disk_check_media_change(disk))
+ floppy_revalidate(disk);
+ if (mode & BLK_OPEN_WRITE) {
if (p->wpstat) {
if (p->ref < 0)
p->ref = 0;
@@ -1953,18 +1952,18 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
return 0;
}
-static int floppy_unlocked_open(struct block_device *bdev, fmode_t mode)
+static int floppy_unlocked_open(struct gendisk *disk, blk_mode_t mode)
{
int ret;
mutex_lock(&ataflop_mutex);
- ret = floppy_open(bdev, mode);
+ ret = floppy_open(disk, mode);
mutex_unlock(&ataflop_mutex);
return ret;
}
-static void floppy_release(struct gendisk *disk, fmode_t mode)
+static void floppy_release(struct gendisk *disk)
{
struct atari_floppy_struct *p = disk->private_data;
mutex_lock(&ataflop_mutex);
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index bcad9b926b0c..970bd6ff38c4 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -19,7 +19,7 @@
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
-#include <linux/radix-tree.h>
+#include <linux/xarray.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
@@ -28,7 +28,7 @@
#include <linux/uaccess.h>
/*
- * Each block ramdisk device has a radix_tree brd_pages of pages that stores
+ * Each block ramdisk device has an xarray brd_pages of pages that stores
* the pages containing the block device's contents. A brd page's ->index is
* its offset in PAGE_SIZE units. This is similar to, but in no way connected
* with, the kernel's pagecache or buffer cache (which sit above our block
@@ -40,11 +40,9 @@ struct brd_device {
struct list_head brd_list;
/*
- * Backing store of pages and lock to protect it. This is the contents
- * of the block device.
+ * Backing store of pages. This is the contents of the block device.
*/
- spinlock_t brd_lock;
- struct radix_tree_root brd_pages;
+ struct xarray brd_pages;
u64 brd_nr_pages;
};
@@ -56,21 +54,8 @@ static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
pgoff_t idx;
struct page *page;
- /*
- * The page lifetime is protected by the fact that we have opened the
- * device node -- brd pages will never be deleted under us, so we
- * don't need any further locking or refcounting.
- *
- * This is strictly true for the radix-tree nodes as well (ie. we
- * don't actually need the rcu_read_lock()), however that is not a
- * documented feature of the radix-tree API so it is better to be
- * safe here (we don't have total exclusion from radix tree updates
- * here, only deletes).
- */
- rcu_read_lock();
idx = sector >> PAGE_SECTORS_SHIFT; /* sector to page index */
- page = radix_tree_lookup(&brd->brd_pages, idx);
- rcu_read_unlock();
+ page = xa_load(&brd->brd_pages, idx);
BUG_ON(page && page->index != idx);
@@ -83,7 +68,7 @@ static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
static int brd_insert_page(struct brd_device *brd, sector_t sector, gfp_t gfp)
{
pgoff_t idx;
- struct page *page;
+ struct page *page, *cur;
int ret = 0;
page = brd_lookup_page(brd, sector);
@@ -94,71 +79,42 @@ static int brd_insert_page(struct brd_device *brd, sector_t sector, gfp_t gfp)
if (!page)
return -ENOMEM;
- if (radix_tree_maybe_preload(gfp)) {
- __free_page(page);
- return -ENOMEM;
- }
+ xa_lock(&brd->brd_pages);
- spin_lock(&brd->brd_lock);
idx = sector >> PAGE_SECTORS_SHIFT;
page->index = idx;
- if (radix_tree_insert(&brd->brd_pages, idx, page)) {
+
+ cur = __xa_cmpxchg(&brd->brd_pages, idx, NULL, page, gfp);
+
+ if (unlikely(cur)) {
__free_page(page);
- page = radix_tree_lookup(&brd->brd_pages, idx);
- if (!page)
- ret = -ENOMEM;
- else if (page->index != idx)
+ ret = xa_err(cur);
+ if (!ret && (cur->index != idx))
ret = -EIO;
} else {
brd->brd_nr_pages++;
}
- spin_unlock(&brd->brd_lock);
- radix_tree_preload_end();
+ xa_unlock(&brd->brd_pages);
+
return ret;
}
/*
- * Free all backing store pages and radix tree. This must only be called when
+ * Free all backing store pages and the xarray. This must only be called when
* there are no other users of the device.
*/
-#define FREE_BATCH 16
static void brd_free_pages(struct brd_device *brd)
{
- unsigned long pos = 0;
- struct page *pages[FREE_BATCH];
- int nr_pages;
-
- do {
- int i;
-
- nr_pages = radix_tree_gang_lookup(&brd->brd_pages,
- (void **)pages, pos, FREE_BATCH);
-
- for (i = 0; i < nr_pages; i++) {
- void *ret;
-
- BUG_ON(pages[i]->index < pos);
- pos = pages[i]->index;
- ret = radix_tree_delete(&brd->brd_pages, pos);
- BUG_ON(!ret || ret != pages[i]);
- __free_page(pages[i]);
- }
-
- pos++;
+ struct page *page;
+ pgoff_t idx;
- /*
- * It takes 3.4 seconds to remove 80GiB ramdisk.
- * So, we need cond_resched to avoid stalling the CPU.
- */
+ xa_for_each(&brd->brd_pages, idx, page) {
+ __free_page(page);
cond_resched();
+ }
- /*
- * This assumes radix_tree_gang_lookup always returns as
- * many pages as possible. If the radix-tree code changes,
- * so will this have to.
- */
- } while (nr_pages == FREE_BATCH);
+ xa_destroy(&brd->brd_pages);
}
/*
@@ -372,8 +328,7 @@ static int brd_alloc(int i)
brd->brd_number = i;
list_add_tail(&brd->brd_list, &brd_devices);
- spin_lock_init(&brd->brd_lock);
- INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC);
+ xa_init(&brd->brd_pages);
snprintf(buf, DISK_NAME_LEN, "ram%d", i);
if (!IS_ERR_OR_NULL(brd_debugfs_dir))
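The xarray has its own internal lock and a race-free insert-if-absent primitive, which is what lets brd drop brd_lock and the radix-tree preload dance. A condensed sketch of the two idioms the driver now relies on (the names are hypothetical; __xa_cmpxchg() and xa_for_each() are the real API):

#include <linux/xarray.h>
#include <linux/mm.h>

static DEFINE_XARRAY(example_pages);

static int example_insert(unsigned long idx, struct page *page, gfp_t gfp)
{
	struct page *cur;

	xa_lock(&example_pages);
	/* NULL -> page only if the slot is still empty */
	cur = __xa_cmpxchg(&example_pages, idx, NULL, page, gfp);
	xa_unlock(&example_pages);

	if (xa_is_err(cur))
		return xa_err(cur);	/* e.g. -ENOMEM */
	return cur ? -EEXIST : 0;	/* non-NULL: lost the race */
}

static void example_free_all(void)
{
	struct page *page;
	unsigned long idx;

	xa_for_each(&example_pages, idx, page)
		__free_page(page);
	xa_destroy(&example_pages);
}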
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 6ac8c54b44c7..85ca000a0564 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -1043,9 +1043,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
bio = bio_alloc_bioset(device->ldev->md_bdev, 1, op, GFP_NOIO,
&drbd_md_io_bio_set);
bio->bi_iter.bi_sector = on_disk_sector;
- /* bio_add_page of a single page to an empty bio will always succeed,
- * according to api. Do we want to assert that? */
- bio_add_page(bio, page, len, 0);
+ __bio_add_page(bio, page, len, 0);
bio->bi_private = ctx;
bio->bi_end_io = drbd_bm_endio;
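bio_add_page() can fail (it returns 0 when the page does not fit); __bio_add_page() is the no-fail variant for a freshly allocated bio whose single page is known to fit, turning the old "always succeeds" comment into an explicit API contract. A sketch under assumed names:

#include <linux/bio.h>

/* Hypothetical helper: build a one-page read bio. */
static struct bio *example_one_page_bio(struct block_device *bdev,
					struct page *page, unsigned int len,
					struct bio_set *bs)
{
	struct bio *bio = bio_alloc_bioset(bdev, 1, REQ_OP_READ, GFP_NOIO, bs);

	/* One page into a bio with one free vec always fits. */
	__bio_add_page(bio, page, len, 0);
	return bio;
}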
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 83987e7a5ef2..965f672557f2 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -37,7 +37,6 @@
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
-#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/sched/signal.h>
@@ -50,8 +49,8 @@
#include "drbd_debugfs.h"
static DEFINE_MUTEX(drbd_main_mutex);
-static int drbd_open(struct block_device *bdev, fmode_t mode);
-static void drbd_release(struct gendisk *gd, fmode_t mode);
+static int drbd_open(struct gendisk *disk, blk_mode_t mode);
+static void drbd_release(struct gendisk *gd);
static void md_sync_timer_fn(struct timer_list *t);
static int w_bitmap_io(struct drbd_work *w, int unused);
@@ -1883,9 +1882,9 @@ int drbd_send_all(struct drbd_connection *connection, struct socket *sock, void
return 0;
}
-static int drbd_open(struct block_device *bdev, fmode_t mode)
+static int drbd_open(struct gendisk *disk, blk_mode_t mode)
{
- struct drbd_device *device = bdev->bd_disk->private_data;
+ struct drbd_device *device = disk->private_data;
unsigned long flags;
int rv = 0;
@@ -1895,7 +1894,7 @@ static int drbd_open(struct block_device *bdev, fmode_t mode)
* and no race with updating open_cnt */
if (device->state.role != R_PRIMARY) {
- if (mode & FMODE_WRITE)
+ if (mode & BLK_OPEN_WRITE)
rv = -EROFS;
else if (!drbd_allow_oos)
rv = -EMEDIUMTYPE;
@@ -1909,9 +1908,10 @@ static int drbd_open(struct block_device *bdev, fmode_t mode)
return rv;
}
-static void drbd_release(struct gendisk *gd, fmode_t mode)
+static void drbd_release(struct gendisk *gd)
{
struct drbd_device *device = gd->private_data;
+
mutex_lock(&drbd_main_mutex);
device->open_cnt--;
mutex_unlock(&drbd_main_mutex);
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 1a5d3d72d91d..cddae6f4b00f 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1640,8 +1640,8 @@ static struct block_device *open_backing_dev(struct drbd_device *device,
struct block_device *bdev;
int err = 0;
- bdev = blkdev_get_by_path(bdev_path,
- FMODE_READ | FMODE_WRITE | FMODE_EXCL, claim_ptr);
+ bdev = blkdev_get_by_path(bdev_path, BLK_OPEN_READ | BLK_OPEN_WRITE,
+ claim_ptr, NULL);
if (IS_ERR(bdev)) {
drbd_err(device, "open(\"%s\") failed with %ld\n",
bdev_path, PTR_ERR(bdev));
@@ -1653,7 +1653,7 @@ static struct block_device *open_backing_dev(struct drbd_device *device,
err = bd_link_disk_holder(bdev, device->vdisk);
if (err) {
- blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
+ blkdev_put(bdev, claim_ptr);
drbd_err(device, "bd_link_disk_holder(\"%s\", ...) failed with %d\n",
bdev_path, err);
bdev = ERR_PTR(err);
@@ -1695,13 +1695,13 @@ static int open_backing_devices(struct drbd_device *device,
}
static void close_backing_dev(struct drbd_device *device, struct block_device *bdev,
- bool do_bd_unlink)
+ void *claim_ptr, bool do_bd_unlink)
{
if (!bdev)
return;
if (do_bd_unlink)
bd_unlink_disk_holder(bdev, device->vdisk);
- blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
+ blkdev_put(bdev, claim_ptr);
}
void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev)
@@ -1709,8 +1709,11 @@ void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *
if (ldev == NULL)
return;
- close_backing_dev(device, ldev->md_bdev, ldev->md_bdev != ldev->backing_bdev);
- close_backing_dev(device, ldev->backing_bdev, true);
+ close_backing_dev(device, ldev->md_bdev,
+ ldev->md.meta_dev_idx < 0 ?
+ (void *)device : (void *)drbd_m_holder,
+ ldev->md_bdev != ldev->backing_bdev);
+ close_backing_dev(device, ldev->backing_bdev, device, true);
kfree(ldev->disk_conf);
kfree(ldev);
@@ -2126,8 +2129,11 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
fail:
conn_reconfig_done(connection);
if (nbc) {
- close_backing_dev(device, nbc->md_bdev, nbc->md_bdev != nbc->backing_bdev);
- close_backing_dev(device, nbc->backing_bdev, true);
+ close_backing_dev(device, nbc->md_bdev,
+ nbc->disk_conf->meta_dev_idx < 0 ?
+ (void *)device : (void *)drbd_m_holder,
+ nbc->md_bdev != nbc->backing_bdev);
+ close_backing_dev(device, nbc->backing_bdev, device, true);
kfree(nbc);
}
kfree(new_disk_conf);
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 8c2bc47de473..0c9f54197768 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -27,7 +27,6 @@
#include <uapi/linux/sched/types.h>
#include <linux/sched/signal.h>
#include <linux/pkt_sched.h>
-#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index cec2c20f5e59..2db9b186b977 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -402,7 +402,7 @@ static struct floppy_drive_struct drive_state[N_DRIVE];
static struct floppy_write_errors write_errors[N_DRIVE];
static struct timer_list motor_off_timer[N_DRIVE];
static struct blk_mq_tag_set tag_sets[N_DRIVE];
-static struct block_device *opened_bdev[N_DRIVE];
+static struct gendisk *opened_disk[N_DRIVE];
static DEFINE_MUTEX(open_lock);
static struct floppy_raw_cmd *raw_cmd, default_raw_cmd;
@@ -3210,13 +3210,13 @@ static int floppy_raw_cmd_ioctl(int type, int drive, int cmd,
#endif
-static int invalidate_drive(struct block_device *bdev)
+static int invalidate_drive(struct gendisk *disk)
{
/* invalidate the buffer track to force a reread */
- set_bit((long)bdev->bd_disk->private_data, &fake_change);
+ set_bit((long)disk->private_data, &fake_change);
process_fd_request();
- if (bdev_check_media_change(bdev))
- floppy_revalidate(bdev->bd_disk);
+ if (disk_check_media_change(disk))
+ floppy_revalidate(disk);
return 0;
}
@@ -3251,10 +3251,11 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g,
floppy_type[type].size + 1;
process_fd_request();
for (cnt = 0; cnt < N_DRIVE; cnt++) {
- struct block_device *bdev = opened_bdev[cnt];
- if (!bdev || ITYPE(drive_state[cnt].fd_device) != type)
+ struct gendisk *disk = opened_disk[cnt];
+
+ if (!disk || ITYPE(drive_state[cnt].fd_device) != type)
continue;
- __invalidate_device(bdev, true);
+ __invalidate_device(disk->part0, true);
}
mutex_unlock(&open_lock);
} else {
@@ -3287,7 +3288,7 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g,
drive_state[current_drive].maxtrack ||
((user_params[drive].sect ^ oldStretch) &
(FD_SWAPSIDES | FD_SECTBASEMASK)))
- invalidate_drive(bdev);
+ invalidate_drive(bdev->bd_disk);
else
process_fd_request();
}
@@ -3393,8 +3394,8 @@ static bool valid_floppy_drive_params(const short autodetect[FD_AUTODETECT_SIZE]
return true;
}
-static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
- unsigned long param)
+static int fd_locked_ioctl(struct block_device *bdev, blk_mode_t mode,
+ unsigned int cmd, unsigned long param)
{
int drive = (long)bdev->bd_disk->private_data;
int type = ITYPE(drive_state[drive].fd_device);
@@ -3427,7 +3428,8 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
return ret;
/* permission checks */
- if (((cmd & 0x40) && !(mode & (FMODE_WRITE | FMODE_WRITE_IOCTL))) ||
+ if (((cmd & 0x40) &&
+ !(mode & (BLK_OPEN_WRITE | BLK_OPEN_WRITE_IOCTL))) ||
((cmd & 0x80) && !capable(CAP_SYS_ADMIN)))
return -EPERM;
@@ -3464,7 +3466,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
current_type[drive] = NULL;
floppy_sizes[drive] = MAX_DISK_SIZE << 1;
drive_state[drive].keep_data = 0;
- return invalidate_drive(bdev);
+ return invalidate_drive(bdev->bd_disk);
case FDSETPRM:
case FDDEFPRM:
return set_geometry(cmd, &inparam.g, drive, type, bdev);
@@ -3503,7 +3505,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
case FDFLUSH:
if (lock_fdc(drive))
return -EINTR;
- return invalidate_drive(bdev);
+ return invalidate_drive(bdev->bd_disk);
case FDSETEMSGTRESH:
drive_params[drive].max_errors.reporting = (unsigned short)(param & 0x0f);
return 0;
@@ -3565,7 +3567,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
return 0;
}
-static int fd_ioctl(struct block_device *bdev, fmode_t mode,
+static int fd_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long param)
{
int ret;
@@ -3653,8 +3655,8 @@ struct compat_floppy_write_errors {
#define FDGETFDCSTAT32 _IOR(2, 0x15, struct compat_floppy_fdc_state)
#define FDWERRORGET32 _IOR(2, 0x17, struct compat_floppy_write_errors)
-static int compat_set_geometry(struct block_device *bdev, fmode_t mode, unsigned int cmd,
- struct compat_floppy_struct __user *arg)
+static int compat_set_geometry(struct block_device *bdev, blk_mode_t mode,
+ unsigned int cmd, struct compat_floppy_struct __user *arg)
{
struct floppy_struct v;
int drive, type;
@@ -3663,7 +3665,7 @@ static int compat_set_geometry(struct block_device *bdev, fmode_t mode, unsigned
BUILD_BUG_ON(offsetof(struct floppy_struct, name) !=
offsetof(struct compat_floppy_struct, name));
- if (!(mode & (FMODE_WRITE | FMODE_WRITE_IOCTL)))
+ if (!(mode & (BLK_OPEN_WRITE | BLK_OPEN_WRITE_IOCTL)))
return -EPERM;
memset(&v, 0, sizeof(struct floppy_struct));
@@ -3860,8 +3862,8 @@ static int compat_werrorget(int drive,
return 0;
}
-static int fd_compat_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
- unsigned long param)
+static int fd_compat_ioctl(struct block_device *bdev, blk_mode_t mode,
+ unsigned int cmd, unsigned long param)
{
int drive = (long)bdev->bd_disk->private_data;
switch (cmd) {
@@ -3962,7 +3964,7 @@ static void __init config_types(void)
pr_cont("\n");
}
-static void floppy_release(struct gendisk *disk, fmode_t mode)
+static void floppy_release(struct gendisk *disk)
{
int drive = (long)disk->private_data;
@@ -3973,7 +3975,7 @@ static void floppy_release(struct gendisk *disk, fmode_t mode)
drive_state[drive].fd_ref = 0;
}
if (!drive_state[drive].fd_ref)
- opened_bdev[drive] = NULL;
+ opened_disk[drive] = NULL;
mutex_unlock(&open_lock);
mutex_unlock(&floppy_mutex);
}
@@ -3983,9 +3985,9 @@ static void floppy_release(struct gendisk *disk, fmode_t mode)
* /dev/PS0 etc), and disallows simultaneous access to the same
* drive with different device numbers.
*/
-static int floppy_open(struct block_device *bdev, fmode_t mode)
+static int floppy_open(struct gendisk *disk, blk_mode_t mode)
{
- int drive = (long)bdev->bd_disk->private_data;
+ int drive = (long)disk->private_data;
int old_dev, new_dev;
int try;
int res = -EBUSY;
@@ -3994,7 +3996,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
mutex_lock(&floppy_mutex);
mutex_lock(&open_lock);
old_dev = drive_state[drive].fd_device;
- if (opened_bdev[drive] && opened_bdev[drive] != bdev)
+ if (opened_disk[drive] && opened_disk[drive] != disk)
goto out2;
if (!drive_state[drive].fd_ref && (drive_params[drive].flags & FD_BROKEN_DCL)) {
@@ -4004,7 +4006,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
drive_state[drive].fd_ref++;
- opened_bdev[drive] = bdev;
+ opened_disk[drive] = disk;
res = -ENXIO;
@@ -4038,7 +4040,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
}
}
- new_dev = MINOR(bdev->bd_dev);
+ new_dev = disk->first_minor;
drive_state[drive].fd_device = new_dev;
set_capacity(disks[drive][ITYPE(new_dev)], floppy_sizes[new_dev]);
if (old_dev != -1 && old_dev != new_dev) {
@@ -4048,21 +4050,20 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
if (fdc_state[FDC(drive)].rawcmd == 1)
fdc_state[FDC(drive)].rawcmd = 2;
-
- if (!(mode & FMODE_NDELAY)) {
- if (mode & (FMODE_READ|FMODE_WRITE)) {
+ if (!(mode & BLK_OPEN_NDELAY)) {
+ if (mode & (BLK_OPEN_READ | BLK_OPEN_WRITE)) {
drive_state[drive].last_checked = 0;
clear_bit(FD_OPEN_SHOULD_FAIL_BIT,
&drive_state[drive].flags);
- if (bdev_check_media_change(bdev))
- floppy_revalidate(bdev->bd_disk);
+ if (disk_check_media_change(disk))
+ floppy_revalidate(disk);
if (test_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags))
goto out;
if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &drive_state[drive].flags))
goto out;
}
res = -EROFS;
- if ((mode & FMODE_WRITE) &&
+ if ((mode & BLK_OPEN_WRITE) &&
!test_bit(FD_DISK_WRITABLE_BIT, &drive_state[drive].flags))
goto out;
}
@@ -4073,7 +4074,7 @@ out:
drive_state[drive].fd_ref--;
if (!drive_state[drive].fd_ref)
- opened_bdev[drive] = NULL;
+ opened_disk[drive] = NULL;
out2:
mutex_unlock(&open_lock);
mutex_unlock(&floppy_mutex);
@@ -4147,7 +4148,7 @@ static int __floppy_read_block_0(struct block_device *bdev, int drive)
cbdata.drive = drive;
bio_init(&bio, bdev, &bio_vec, 1, REQ_OP_READ);
- bio_add_page(&bio, page, block_size(bdev), 0);
+ __bio_add_page(&bio, page, block_size(bdev), 0);
bio.bi_iter.bi_sector = 0;
bio.bi_flags |= (1 << BIO_QUIET);
@@ -4203,7 +4204,8 @@ static int floppy_revalidate(struct gendisk *disk)
drive_state[drive].generation++;
if (drive_no_geom(drive)) {
/* auto-sensing */
- res = __floppy_read_block_0(opened_bdev[drive], drive);
+ res = __floppy_read_block_0(opened_disk[drive]->part0,
+ drive);
} else {
if (cf)
poll_drive(false, FD_RAW_NEED_DISK);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index bc31bb7072a2..37511d2b2caf 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -990,7 +990,7 @@ loop_set_status_from_info(struct loop_device *lo,
return 0;
}
-static int loop_configure(struct loop_device *lo, fmode_t mode,
+static int loop_configure(struct loop_device *lo, blk_mode_t mode,
struct block_device *bdev,
const struct loop_config *config)
{
@@ -1014,8 +1014,8 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
* If we don't hold exclusive handle for the device, upgrade to it
* here to avoid changing device under exclusive owner.
*/
- if (!(mode & FMODE_EXCL)) {
- error = bd_prepare_to_claim(bdev, loop_configure);
+ if (!(mode & BLK_OPEN_EXCL)) {
+ error = bd_prepare_to_claim(bdev, loop_configure, NULL);
if (error)
goto out_putf;
}
@@ -1050,7 +1050,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
if (error)
goto out_unlock;
- if (!(file->f_mode & FMODE_WRITE) || !(mode & FMODE_WRITE) ||
+ if (!(file->f_mode & FMODE_WRITE) || !(mode & BLK_OPEN_WRITE) ||
!file->f_op->write_iter)
lo->lo_flags |= LO_FLAGS_READ_ONLY;
@@ -1116,7 +1116,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
if (partscan)
loop_reread_partitions(lo);
- if (!(mode & FMODE_EXCL))
+ if (!(mode & BLK_OPEN_EXCL))
bd_abort_claiming(bdev, loop_configure);
return 0;
@@ -1124,7 +1124,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
out_unlock:
loop_global_unlock(lo, is_loop);
out_bdev:
- if (!(mode & FMODE_EXCL))
+ if (!(mode & BLK_OPEN_EXCL))
bd_abort_claiming(bdev, loop_configure);
out_putf:
fput(file);
@@ -1528,7 +1528,7 @@ static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd,
return err;
}
-static int lo_ioctl(struct block_device *bdev, fmode_t mode,
+static int lo_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long arg)
{
struct loop_device *lo = bdev->bd_disk->private_data;
@@ -1563,24 +1563,22 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
return loop_clr_fd(lo);
case LOOP_SET_STATUS:
err = -EPERM;
- if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) {
+ if ((mode & BLK_OPEN_WRITE) || capable(CAP_SYS_ADMIN))
err = loop_set_status_old(lo, argp);
- }
break;
case LOOP_GET_STATUS:
return loop_get_status_old(lo, argp);
case LOOP_SET_STATUS64:
err = -EPERM;
- if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) {
+ if ((mode & BLK_OPEN_WRITE) || capable(CAP_SYS_ADMIN))
err = loop_set_status64(lo, argp);
- }
break;
case LOOP_GET_STATUS64:
return loop_get_status64(lo, argp);
case LOOP_SET_CAPACITY:
case LOOP_SET_DIRECT_IO:
case LOOP_SET_BLOCK_SIZE:
- if (!(mode & FMODE_WRITE) && !capable(CAP_SYS_ADMIN))
+ if (!(mode & BLK_OPEN_WRITE) && !capable(CAP_SYS_ADMIN))
return -EPERM;
fallthrough;
default:
@@ -1691,7 +1689,7 @@ loop_get_status_compat(struct loop_device *lo,
return err;
}
-static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
+static int lo_compat_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long arg)
{
struct loop_device *lo = bdev->bd_disk->private_data;
@@ -1727,7 +1725,7 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
}
#endif
-static void lo_release(struct gendisk *disk, fmode_t mode)
+static void lo_release(struct gendisk *disk)
{
struct loop_device *lo = disk->private_data;
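
With fmode_t gone from the block layer, the loop hunks above test BLK_OPEN_* bits of the new blk_mode_t for open-mode checks, and ->release() loses its mode argument entirely. A minimal sketch of the write-or-privileged ioctl gate used repeatedly above (my_ioctl is a hypothetical stand-in):

	static int my_ioctl(struct block_device *bdev, blk_mode_t mode,
			    unsigned int cmd, unsigned long arg)
	{
		/* refuse state-changing commands on unprivileged read-only opens */
		if (!(mode & BLK_OPEN_WRITE) && !capable(CAP_SYS_ADMIN))
			return -EPERM;
		/* ... dispatch cmd ... */
		return 0;
	}
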
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 815d77ba6381..b200950e8fb5 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3041,7 +3041,7 @@ static int rssd_disk_name_format(char *prefix,
* structure pointer.
*/
static int mtip_block_ioctl(struct block_device *dev,
- fmode_t mode,
+ blk_mode_t mode,
unsigned cmd,
unsigned long arg)
{
@@ -3079,7 +3079,7 @@ static int mtip_block_ioctl(struct block_device *dev,
* structure pointer.
*/
static int mtip_block_compat_ioctl(struct block_device *dev,
- fmode_t mode,
+ blk_mode_t mode,
unsigned cmd,
unsigned long arg)
{
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 65ecde3e2a5b..8576d696c7a2 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -1502,7 +1502,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
return -ENOTTY;
}
-static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
+static int nbd_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long arg)
{
struct nbd_device *nbd = bdev->bd_disk->private_data;
@@ -1553,13 +1553,13 @@ static struct nbd_config *nbd_alloc_config(void)
return config;
}
-static int nbd_open(struct block_device *bdev, fmode_t mode)
+static int nbd_open(struct gendisk *disk, blk_mode_t mode)
{
struct nbd_device *nbd;
int ret = 0;
mutex_lock(&nbd_index_mutex);
- nbd = bdev->bd_disk->private_data;
+ nbd = disk->private_data;
if (!nbd) {
ret = -ENXIO;
goto out;
@@ -1587,17 +1587,17 @@ static int nbd_open(struct block_device *bdev, fmode_t mode)
refcount_inc(&nbd->refs);
mutex_unlock(&nbd->config_lock);
if (max_part)
- set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
+ set_bit(GD_NEED_PART_SCAN, &disk->state);
} else if (nbd_disconnected(nbd->config)) {
if (max_part)
- set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
+ set_bit(GD_NEED_PART_SCAN, &disk->state);
}
out:
mutex_unlock(&nbd_index_mutex);
return ret;
}
-static void nbd_release(struct gendisk *disk, fmode_t mode)
+static void nbd_release(struct gendisk *disk)
{
struct nbd_device *nbd = disk->private_data;
@@ -1776,7 +1776,8 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
if (err == -ENOSPC)
err = -EEXIST;
} else {
- err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL);
+ err = idr_alloc(&nbd_index_idr, nbd, 0,
+ (MINORMASK >> part_shift) + 1, GFP_KERNEL);
if (err >= 0)
index = err;
}
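
The final nbd hunk also bounds dynamic index allocation. idr_alloc()'s 'end' argument is exclusive, so passing (MINORMASK >> part_shift) + 1 guarantees that index << part_shift — the device's first minor — still fits in the minor number space. Worked sketch with illustrative values (error handling elided):

	/* With MINORBITS = 20, MINORMASK = 0xfffff.  For part_shift = 5
	 * (32 minors per device) the highest safe index is
	 * 0xfffff >> 5 = 32767; idr_alloc()'s exclusive bound makes that
	 * (MINORMASK >> part_shift) + 1. */
	int index = idr_alloc(&nbd_index_idr, nbd, 0,
			      (MINORMASK >> part_shift) + 1, GFP_KERNEL);
	/* first minor of this device; <= MINORMASK by construction */
	int first_minor = index << part_shift;
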
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index b3fedafe301e..864013019d6b 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -2244,6 +2244,7 @@ static void null_destroy_dev(struct nullb *nullb)
struct nullb_device *dev = nullb->dev;
null_del_dev(nullb);
+ null_free_device_storage(dev, false);
null_free_dev(dev);
}
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index d5d7884cedd4..a1428538bda5 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -46,47 +46,34 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/pktcdvd.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
+#include <linux/backing-dev.h>
#include <linux/compat.h>
-#include <linux/kthread.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
#include <linux/errno.h>
-#include <linux/spinlock.h>
#include <linux/file.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/miscdevice.h>
#include <linux/freezer.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/nospec.h>
+#include <linux/pktcdvd.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/slab.h>
-#include <linux/backing-dev.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+
+#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_ioctl.h>
-#include <scsi/scsi.h>
-#include <linux/debugfs.h>
-#include <linux/device.h>
-#include <linux/nospec.h>
-#include <linux/uaccess.h>
-#define DRIVER_NAME "pktcdvd"
+#include <asm/unaligned.h>
-#define pkt_err(pd, fmt, ...) \
- pr_err("%s: " fmt, pd->name, ##__VA_ARGS__)
-#define pkt_notice(pd, fmt, ...) \
- pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__)
-#define pkt_info(pd, fmt, ...) \
- pr_info("%s: " fmt, pd->name, ##__VA_ARGS__)
-
-#define pkt_dbg(level, pd, fmt, ...) \
-do { \
- if (level == 2 && PACKET_DEBUG >= 2) \
- pr_notice("%s: %s():" fmt, \
- pd->name, __func__, ##__VA_ARGS__); \
- else if (level == 1 && PACKET_DEBUG >= 1) \
- pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__); \
-} while (0)
+#define DRIVER_NAME "pktcdvd"
#define MAX_SPEED 0xffff
@@ -107,7 +94,6 @@ static struct dentry *pkt_debugfs_root = NULL; /* /sys/kernel/debug/pktcdvd */
/* forward declaration */
static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev);
static int pkt_remove_dev(dev_t pkt_dev);
-static int pkt_seq_show(struct seq_file *m, void *p);
static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
{
@@ -253,15 +239,16 @@ static ssize_t congestion_off_store(struct device *dev,
const char *buf, size_t len)
{
struct pktcdvd_device *pd = dev_get_drvdata(dev);
- int val;
+ int val, ret;
- if (sscanf(buf, "%d", &val) == 1) {
- spin_lock(&pd->lock);
- pd->write_congestion_off = val;
- init_write_congestion_marks(&pd->write_congestion_off,
- &pd->write_congestion_on);
- spin_unlock(&pd->lock);
- }
+ ret = kstrtoint(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ spin_lock(&pd->lock);
+ pd->write_congestion_off = val;
+ init_write_congestion_marks(&pd->write_congestion_off, &pd->write_congestion_on);
+ spin_unlock(&pd->lock);
return len;
}
static DEVICE_ATTR_RW(congestion_off);
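
The congestion_off store conversion above (congestion_on follows the same pattern below) replaces sscanf() with kstrtoint(), which rejects trailing garbage and propagates a real errno instead of silently ignoring unparsable input. The generic pattern, sketched with a hypothetical attribute:

	static ssize_t my_val_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t len)
	{
		int val, ret;

		ret = kstrtoint(buf, 10, &val);	/* -EINVAL/-ERANGE on bad input */
		if (ret)
			return ret;
		/* ... publish val under the appropriate lock ... */
		return len;
	}
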
@@ -283,15 +270,16 @@ static ssize_t congestion_on_store(struct device *dev,
const char *buf, size_t len)
{
struct pktcdvd_device *pd = dev_get_drvdata(dev);
- int val;
+ int val, ret;
- if (sscanf(buf, "%d", &val) == 1) {
- spin_lock(&pd->lock);
- pd->write_congestion_on = val;
- init_write_congestion_marks(&pd->write_congestion_off,
- &pd->write_congestion_on);
- spin_unlock(&pd->lock);
- }
+ ret = kstrtoint(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ spin_lock(&pd->lock);
+ pd->write_congestion_on = val;
+ init_write_congestion_marks(&pd->write_congestion_off, &pd->write_congestion_on);
+ spin_unlock(&pd->lock);
return len;
}
static DEVICE_ATTR_RW(congestion_on);
@@ -319,7 +307,7 @@ static void pkt_sysfs_dev_new(struct pktcdvd_device *pd)
if (class_is_registered(&class_pktcdvd)) {
pd->dev = device_create_with_groups(&class_pktcdvd, NULL,
MKDEV(0, 0), pd, pkt_groups,
- "%s", pd->name);
+ "%s", pd->disk->disk_name);
if (IS_ERR(pd->dev))
pd->dev = NULL;
}
@@ -349,8 +337,8 @@ static ssize_t device_map_show(const struct class *c, const struct class_attribu
struct pktcdvd_device *pd = pkt_devs[idx];
if (!pd)
continue;
- n += sprintf(data+n, "%s %u:%u %u:%u\n",
- pd->name,
+ n += sysfs_emit_at(data, n, "%s %u:%u %u:%u\n",
+ pd->disk->disk_name,
MAJOR(pd->pkt_dev), MINOR(pd->pkt_dev),
MAJOR(pd->bdev->bd_dev),
MINOR(pd->bdev->bd_dev));
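
device_map_show() above likewise moves from sprintf() to sysfs_emit_at(), which clamps each append to the PAGE_SIZE sysfs buffer. Accumulation sketch (N_ITEMS is hypothetical):

	static ssize_t my_list_show(const struct class *c,
				    const struct class_attribute *attr,
				    char *data)
	{
		int n = 0, i;

		for (i = 0; i < N_ITEMS; i++)
			n += sysfs_emit_at(data, n, "item %d\n", i);
		return n;
	}
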
@@ -428,34 +416,92 @@ static void pkt_sysfs_cleanup(void)
*******************************************************************/
-static int pkt_debugfs_seq_show(struct seq_file *m, void *p)
+static void pkt_count_states(struct pktcdvd_device *pd, int *states)
{
- return pkt_seq_show(m, p);
+ struct packet_data *pkt;
+ int i;
+
+ for (i = 0; i < PACKET_NUM_STATES; i++)
+ states[i] = 0;
+
+ spin_lock(&pd->cdrw.active_list_lock);
+ list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
+ states[pkt->state]++;
+ }
+ spin_unlock(&pd->cdrw.active_list_lock);
}
-static int pkt_debugfs_fops_open(struct inode *inode, struct file *file)
+static int pkt_seq_show(struct seq_file *m, void *p)
{
- return single_open(file, pkt_debugfs_seq_show, inode->i_private);
-}
+ struct pktcdvd_device *pd = m->private;
+ char *msg;
+ int states[PACKET_NUM_STATES];
-static const struct file_operations debug_fops = {
- .open = pkt_debugfs_fops_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+ seq_printf(m, "Writer %s mapped to %pg:\n", pd->disk->disk_name, pd->bdev);
+
+ seq_printf(m, "\nSettings:\n");
+ seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2);
+
+ if (pd->settings.write_type == 0)
+ msg = "Packet";
+ else
+ msg = "Unknown";
+ seq_printf(m, "\twrite type:\t\t%s\n", msg);
+
+ seq_printf(m, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable");
+ seq_printf(m, "\tlink loss:\t\t%d\n", pd->settings.link_loss);
+
+ seq_printf(m, "\ttrack mode:\t\t%d\n", pd->settings.track_mode);
+
+ if (pd->settings.block_mode == PACKET_BLOCK_MODE1)
+ msg = "Mode 1";
+ else if (pd->settings.block_mode == PACKET_BLOCK_MODE2)
+ msg = "Mode 2";
+ else
+ msg = "Unknown";
+ seq_printf(m, "\tblock mode:\t\t%s\n", msg);
+
+ seq_printf(m, "\nStatistics:\n");
+ seq_printf(m, "\tpackets started:\t%lu\n", pd->stats.pkt_started);
+ seq_printf(m, "\tpackets ended:\t\t%lu\n", pd->stats.pkt_ended);
+ seq_printf(m, "\twritten:\t\t%lukB\n", pd->stats.secs_w >> 1);
+ seq_printf(m, "\tread gather:\t\t%lukB\n", pd->stats.secs_rg >> 1);
+ seq_printf(m, "\tread:\t\t\t%lukB\n", pd->stats.secs_r >> 1);
+
+ seq_printf(m, "\nMisc:\n");
+ seq_printf(m, "\treference count:\t%d\n", pd->refcnt);
+ seq_printf(m, "\tflags:\t\t\t0x%lx\n", pd->flags);
+ seq_printf(m, "\tread speed:\t\t%ukB/s\n", pd->read_speed);
+ seq_printf(m, "\twrite speed:\t\t%ukB/s\n", pd->write_speed);
+ seq_printf(m, "\tstart offset:\t\t%lu\n", pd->offset);
+ seq_printf(m, "\tmode page offset:\t%u\n", pd->mode_offset);
+
+ seq_printf(m, "\nQueue state:\n");
+ seq_printf(m, "\tbios queued:\t\t%d\n", pd->bio_queue_size);
+ seq_printf(m, "\tbios pending:\t\t%d\n", atomic_read(&pd->cdrw.pending_bios));
+ seq_printf(m, "\tcurrent sector:\t\t0x%llx\n", pd->current_sector);
+
+ pkt_count_states(pd, states);
+ seq_printf(m, "\tstate:\t\t\ti:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
+ states[0], states[1], states[2], states[3], states[4], states[5]);
+
+ seq_printf(m, "\twrite congestion marks:\toff=%d on=%d\n",
+ pd->write_congestion_off,
+ pd->write_congestion_on);
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(pkt_seq);
static void pkt_debugfs_dev_new(struct pktcdvd_device *pd)
{
if (!pkt_debugfs_root)
return;
- pd->dfs_d_root = debugfs_create_dir(pd->name, pkt_debugfs_root);
+ pd->dfs_d_root = debugfs_create_dir(pd->disk->disk_name, pkt_debugfs_root);
if (!pd->dfs_d_root)
return;
- pd->dfs_f_info = debugfs_create_file("info", 0444,
- pd->dfs_d_root, pd, &debug_fops);
+ pd->dfs_f_info = debugfs_create_file("info", 0444, pd->dfs_d_root,
+ pd, &pkt_seq_fops);
}
static void pkt_debugfs_dev_remove(struct pktcdvd_device *pd)
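
The debugfs rework above drops the hand-rolled open()/file_operations pair: pkt_seq_show() is unified with the /proc version, and DEFINE_SHOW_ATTRIBUTE() generates the boilerplate. Sketch of what the macro provides, with hypothetical names:

	/* DEFINE_SHOW_ATTRIBUTE(name) expects name##_show(m, p) and emits
	 * name##_open() (single_open on inode->i_private) plus name##_fops. */
	static int my_info_show(struct seq_file *m, void *p)
	{
		seq_puts(m, "hello\n");	/* m->private is the debugfs data */
		return 0;
	}
	DEFINE_SHOW_ATTRIBUTE(my_info);

	/* usage: debugfs_create_file("info", 0444, parent, data, &my_info_fops); */
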
@@ -484,9 +530,11 @@ static void pkt_debugfs_cleanup(void)
static void pkt_bio_finished(struct pktcdvd_device *pd)
{
+ struct device *ddev = disk_to_dev(pd->disk);
+
BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0);
if (atomic_dec_and_test(&pd->cdrw.pending_bios)) {
- pkt_dbg(2, pd, "queue empty\n");
+ dev_dbg(ddev, "queue empty\n");
atomic_set(&pd->iosched.attention, 1);
wake_up(&pd->wqueue);
}
@@ -717,15 +765,16 @@ static const char *sense_key_string(__u8 index)
static void pkt_dump_sense(struct pktcdvd_device *pd,
struct packet_command *cgc)
{
+ struct device *ddev = disk_to_dev(pd->disk);
struct scsi_sense_hdr *sshdr = cgc->sshdr;
if (sshdr)
- pkt_err(pd, "%*ph - sense %02x.%02x.%02x (%s)\n",
+ dev_err(ddev, "%*ph - sense %02x.%02x.%02x (%s)\n",
CDROM_PACKET_SIZE, cgc->cmd,
sshdr->sense_key, sshdr->asc, sshdr->ascq,
sense_key_string(sshdr->sense_key));
else
- pkt_err(pd, "%*ph - no sense\n", CDROM_PACKET_SIZE, cgc->cmd);
+ dev_err(ddev, "%*ph - no sense\n", CDROM_PACKET_SIZE, cgc->cmd);
}
/*
@@ -762,10 +811,8 @@ static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd,
init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
cgc.sshdr = &sshdr;
cgc.cmd[0] = GPCMD_SET_SPEED;
- cgc.cmd[2] = (read_speed >> 8) & 0xff;
- cgc.cmd[3] = read_speed & 0xff;
- cgc.cmd[4] = (write_speed >> 8) & 0xff;
- cgc.cmd[5] = write_speed & 0xff;
+ put_unaligned_be16(read_speed, &cgc.cmd[2]);
+ put_unaligned_be16(write_speed, &cgc.cmd[4]);
ret = pkt_generic_packet(pd, &cgc);
if (ret)
@@ -809,6 +856,7 @@ static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio)
*/
static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
{
+ struct device *ddev = disk_to_dev(pd->disk);
if (atomic_read(&pd->iosched.attention) == 0)
return;
@@ -836,7 +884,7 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
need_write_seek = 0;
if (need_write_seek && reads_queued) {
if (atomic_read(&pd->cdrw.pending_bios) > 0) {
- pkt_dbg(2, pd, "write, waiting\n");
+ dev_dbg(ddev, "write, waiting\n");
break;
}
pkt_flush_cache(pd);
@@ -845,7 +893,7 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
} else {
if (!reads_queued && writes_queued) {
if (atomic_read(&pd->cdrw.pending_bios) > 0) {
- pkt_dbg(2, pd, "read, waiting\n");
+ dev_dbg(ddev, "read, waiting\n");
break;
}
pd->iosched.writing = 1;
@@ -892,25 +940,27 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
*/
static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
{
- if ((pd->settings.size << 9) / CD_FRAMESIZE
- <= queue_max_segments(q)) {
+ struct device *ddev = disk_to_dev(pd->disk);
+
+ if ((pd->settings.size << 9) / CD_FRAMESIZE <= queue_max_segments(q)) {
/*
* The cdrom device can handle one segment/frame
*/
clear_bit(PACKET_MERGE_SEGS, &pd->flags);
return 0;
- } else if ((pd->settings.size << 9) / PAGE_SIZE
- <= queue_max_segments(q)) {
+ }
+
+ if ((pd->settings.size << 9) / PAGE_SIZE <= queue_max_segments(q)) {
/*
* We can handle this case at the expense of some extra memory
* copies during write operations
*/
set_bit(PACKET_MERGE_SEGS, &pd->flags);
return 0;
- } else {
- pkt_err(pd, "cdrom max_phys_segments too small\n");
- return -EIO;
}
+
+ dev_err(ddev, "cdrom max_phys_segments too small\n");
+ return -EIO;
}
static void pkt_end_io_read(struct bio *bio)
@@ -919,9 +969,8 @@ static void pkt_end_io_read(struct bio *bio)
struct pktcdvd_device *pd = pkt->pd;
BUG_ON(!pd);
- pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n",
- bio, (unsigned long long)pkt->sector,
- (unsigned long long)bio->bi_iter.bi_sector, bio->bi_status);
+ dev_dbg(disk_to_dev(pd->disk), "bio=%p sec0=%llx sec=%llx err=%d\n",
+ bio, pkt->sector, bio->bi_iter.bi_sector, bio->bi_status);
if (bio->bi_status)
atomic_inc(&pkt->io_errors);
@@ -939,7 +988,7 @@ static void pkt_end_io_packet_write(struct bio *bio)
struct pktcdvd_device *pd = pkt->pd;
BUG_ON(!pd);
- pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, bio->bi_status);
+ dev_dbg(disk_to_dev(pd->disk), "id=%d, err=%d\n", pkt->id, bio->bi_status);
pd->stats.pkt_ended++;
@@ -955,6 +1004,7 @@ static void pkt_end_io_packet_write(struct bio *bio)
*/
static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
{
+ struct device *ddev = disk_to_dev(pd->disk);
int frames_read = 0;
struct bio *bio;
int f;
@@ -983,8 +1033,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
spin_unlock(&pkt->lock);
if (pkt->cache_valid) {
- pkt_dbg(2, pd, "zone %llx cached\n",
- (unsigned long long)pkt->sector);
+ dev_dbg(ddev, "zone %llx cached\n", pkt->sector);
goto out_account;
}
@@ -1005,8 +1054,8 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
p = (f * CD_FRAMESIZE) / PAGE_SIZE;
offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
- pkt_dbg(2, pd, "Adding frame %d, page:%p offs:%d\n",
- f, pkt->pages[p], offset);
+ dev_dbg(ddev, "Adding frame %d, page:%p offs:%d\n", f,
+ pkt->pages[p], offset);
if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset))
BUG();
@@ -1016,8 +1065,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
}
out_account:
- pkt_dbg(2, pd, "need %d frames for zone %llx\n",
- frames_read, (unsigned long long)pkt->sector);
+ dev_dbg(ddev, "need %d frames for zone %llx\n", frames_read, pkt->sector);
pd->stats.pkt_started++;
pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9);
}
@@ -1051,17 +1099,17 @@ static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *p
}
}
-static inline void pkt_set_state(struct packet_data *pkt, enum packet_data_state state)
+static inline void pkt_set_state(struct device *ddev, struct packet_data *pkt,
+ enum packet_data_state state)
{
-#if PACKET_DEBUG > 1
static const char *state_name[] = {
"IDLE", "WAITING", "READ_WAIT", "WRITE_WAIT", "RECOVERY", "FINISHED"
};
enum packet_data_state old_state = pkt->state;
- pkt_dbg(2, pd, "pkt %2d : s=%6llx %s -> %s\n",
- pkt->id, (unsigned long long)pkt->sector,
- state_name[old_state], state_name[state]);
-#endif
+
+ dev_dbg(ddev, "pkt %2d : s=%6llx %s -> %s\n",
+ pkt->id, pkt->sector, state_name[old_state], state_name[state]);
+
pkt->state = state;
}
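
pkt_set_state() above can drop its compile-time PACKET_DEBUG guard because dev_dbg() costs nothing by default: without CONFIG_DYNAMIC_DEBUG or DEBUG it compiles out, and with dynamic debug it stays off until enabled per-callsite at runtime. Minimal sketch of the now-unconditional pattern (names hypothetical):

	static void my_set_state(struct device *ddev, int *state, int new_state)
	{
		dev_dbg(ddev, "state %d -> %d\n", *state, new_state);
		*state = new_state;
	}
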
@@ -1071,6 +1119,7 @@ static inline void pkt_set_state(struct packet_data *pkt, enum packet_data_state
*/
static int pkt_handle_queue(struct pktcdvd_device *pd)
{
+ struct device *ddev = disk_to_dev(pd->disk);
struct packet_data *pkt, *p;
struct bio *bio = NULL;
sector_t zone = 0; /* Suppress gcc warning */
@@ -1080,7 +1129,7 @@ static int pkt_handle_queue(struct pktcdvd_device *pd)
atomic_set(&pd->scan_queue, 0);
if (list_empty(&pd->cdrw.pkt_free_list)) {
- pkt_dbg(2, pd, "no pkt\n");
+ dev_dbg(ddev, "no pkt\n");
return 0;
}
@@ -1117,7 +1166,7 @@ try_next_bio:
}
spin_unlock(&pd->lock);
if (!bio) {
- pkt_dbg(2, pd, "no bio\n");
+ dev_dbg(ddev, "no bio\n");
return 0;
}
@@ -1133,12 +1182,13 @@ try_next_bio:
* to this packet.
*/
spin_lock(&pd->lock);
- pkt_dbg(2, pd, "looking for zone %llx\n", (unsigned long long)zone);
+ dev_dbg(ddev, "looking for zone %llx\n", zone);
while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
+ sector_t tmp = get_zone(node->bio->bi_iter.bi_sector, pd);
+
bio = node->bio;
- pkt_dbg(2, pd, "found zone=%llx\n", (unsigned long long)
- get_zone(bio->bi_iter.bi_sector, pd));
- if (get_zone(bio->bi_iter.bi_sector, pd) != zone)
+ dev_dbg(ddev, "found zone=%llx\n", tmp);
+ if (tmp != zone)
break;
pkt_rbtree_erase(pd, node);
spin_lock(&pkt->lock);
@@ -1157,7 +1207,7 @@ try_next_bio:
spin_unlock(&pd->lock);
pkt->sleep_time = max(PACKET_WAIT_TIME, 1);
- pkt_set_state(pkt, PACKET_WAITING_STATE);
+ pkt_set_state(ddev, pkt, PACKET_WAITING_STATE);
atomic_set(&pkt->run_sm, 1);
spin_lock(&pd->cdrw.active_list_lock);
@@ -1209,6 +1259,7 @@ static void bio_list_copy_data(struct bio *dst, struct bio *src)
*/
static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
{
+ struct device *ddev = disk_to_dev(pd->disk);
int f;
bio_init(pkt->w_bio, pd->bdev, pkt->w_bio->bi_inline_vecs, pkt->frames,
@@ -1225,7 +1276,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
if (!bio_add_page(pkt->w_bio, page, CD_FRAMESIZE, offset))
BUG();
}
- pkt_dbg(2, pd, "vcnt=%d\n", pkt->w_bio->bi_vcnt);
+ dev_dbg(ddev, "vcnt=%d\n", pkt->w_bio->bi_vcnt);
/*
* Fill-in bvec with data from orig_bios.
@@ -1233,11 +1284,10 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
spin_lock(&pkt->lock);
bio_list_copy_data(pkt->w_bio, pkt->orig_bios.head);
- pkt_set_state(pkt, PACKET_WRITE_WAIT_STATE);
+ pkt_set_state(ddev, pkt, PACKET_WRITE_WAIT_STATE);
spin_unlock(&pkt->lock);
- pkt_dbg(2, pd, "Writing %d frames for zone %llx\n",
- pkt->write_size, (unsigned long long)pkt->sector);
+ dev_dbg(ddev, "Writing %d frames for zone %llx\n", pkt->write_size, pkt->sector);
if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames))
pkt->cache_valid = 1;
@@ -1265,7 +1315,9 @@ static void pkt_finish_packet(struct packet_data *pkt, blk_status_t status)
static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt)
{
- pkt_dbg(2, pd, "pkt %d\n", pkt->id);
+ struct device *ddev = disk_to_dev(pd->disk);
+
+ dev_dbg(ddev, "pkt %d\n", pkt->id);
for (;;) {
switch (pkt->state) {
@@ -1275,7 +1327,7 @@ static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data
pkt->sleep_time = 0;
pkt_gather_data(pd, pkt);
- pkt_set_state(pkt, PACKET_READ_WAIT_STATE);
+ pkt_set_state(ddev, pkt, PACKET_READ_WAIT_STATE);
break;
case PACKET_READ_WAIT_STATE:
@@ -1283,7 +1335,7 @@ static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data
return;
if (atomic_read(&pkt->io_errors) > 0) {
- pkt_set_state(pkt, PACKET_RECOVERY_STATE);
+ pkt_set_state(ddev, pkt, PACKET_RECOVERY_STATE);
} else {
pkt_start_write(pd, pkt);
}
@@ -1294,15 +1346,15 @@ static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data
return;
if (!pkt->w_bio->bi_status) {
- pkt_set_state(pkt, PACKET_FINISHED_STATE);
+ pkt_set_state(ddev, pkt, PACKET_FINISHED_STATE);
} else {
- pkt_set_state(pkt, PACKET_RECOVERY_STATE);
+ pkt_set_state(ddev, pkt, PACKET_RECOVERY_STATE);
}
break;
case PACKET_RECOVERY_STATE:
- pkt_dbg(2, pd, "No recovery possible\n");
- pkt_set_state(pkt, PACKET_FINISHED_STATE);
+ dev_dbg(ddev, "No recovery possible\n");
+ pkt_set_state(ddev, pkt, PACKET_FINISHED_STATE);
break;
case PACKET_FINISHED_STATE:
@@ -1318,6 +1370,7 @@ static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data
static void pkt_handle_packets(struct pktcdvd_device *pd)
{
+ struct device *ddev = disk_to_dev(pd->disk);
struct packet_data *pkt, *next;
/*
@@ -1338,28 +1391,13 @@ static void pkt_handle_packets(struct pktcdvd_device *pd)
if (pkt->state == PACKET_FINISHED_STATE) {
list_del(&pkt->list);
pkt_put_packet_data(pd, pkt);
- pkt_set_state(pkt, PACKET_IDLE_STATE);
+ pkt_set_state(ddev, pkt, PACKET_IDLE_STATE);
atomic_set(&pd->scan_queue, 1);
}
}
spin_unlock(&pd->cdrw.active_list_lock);
}
-static void pkt_count_states(struct pktcdvd_device *pd, int *states)
-{
- struct packet_data *pkt;
- int i;
-
- for (i = 0; i < PACKET_NUM_STATES; i++)
- states[i] = 0;
-
- spin_lock(&pd->cdrw.active_list_lock);
- list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
- states[pkt->state]++;
- }
- spin_unlock(&pd->cdrw.active_list_lock);
-}
-
/*
* kcdrwd is woken up when writes have been queued for one of our
* registered devices
@@ -1367,7 +1405,9 @@ static void pkt_count_states(struct pktcdvd_device *pd, int *states)
static int kcdrwd(void *foobar)
{
struct pktcdvd_device *pd = foobar;
+ struct device *ddev = disk_to_dev(pd->disk);
struct packet_data *pkt;
+ int states[PACKET_NUM_STATES];
long min_sleep_time, residue;
set_user_nice(current, MIN_NICE);
@@ -1398,13 +1438,9 @@ static int kcdrwd(void *foobar)
goto work_to_do;
/* Otherwise, go to sleep */
- if (PACKET_DEBUG > 1) {
- int states[PACKET_NUM_STATES];
- pkt_count_states(pd, states);
- pkt_dbg(2, pd, "i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
- states[0], states[1], states[2],
- states[3], states[4], states[5]);
- }
+ pkt_count_states(pd, states);
+ dev_dbg(ddev, "i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
+ states[0], states[1], states[2], states[3], states[4], states[5]);
min_sleep_time = MAX_SCHEDULE_TIMEOUT;
list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
@@ -1412,9 +1448,9 @@ static int kcdrwd(void *foobar)
min_sleep_time = pkt->sleep_time;
}
- pkt_dbg(2, pd, "sleeping\n");
+ dev_dbg(ddev, "sleeping\n");
residue = schedule_timeout(min_sleep_time);
- pkt_dbg(2, pd, "wake up\n");
+ dev_dbg(ddev, "wake up\n");
/* make swsusp happy with our thread */
try_to_freeze();
@@ -1462,7 +1498,7 @@ work_to_do:
static void pkt_print_settings(struct pktcdvd_device *pd)
{
- pkt_info(pd, "%s packets, %u blocks, Mode-%c disc\n",
+ dev_info(disk_to_dev(pd->disk), "%s packets, %u blocks, Mode-%c disc\n",
pd->settings.fp ? "Fixed" : "Variable",
pd->settings.size >> 2,
pd->settings.block_mode == 8 ? '1' : '2');
@@ -1474,8 +1510,7 @@ static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc,
cgc->cmd[0] = GPCMD_MODE_SENSE_10;
cgc->cmd[2] = page_code | (page_control << 6);
- cgc->cmd[7] = cgc->buflen >> 8;
- cgc->cmd[8] = cgc->buflen & 0xff;
+ put_unaligned_be16(cgc->buflen, &cgc->cmd[7]);
cgc->data_direction = CGC_DATA_READ;
return pkt_generic_packet(pd, cgc);
}
@@ -1486,8 +1521,7 @@ static int pkt_mode_select(struct pktcdvd_device *pd, struct packet_command *cgc
memset(cgc->buffer, 0, 2);
cgc->cmd[0] = GPCMD_MODE_SELECT_10;
cgc->cmd[1] = 0x10; /* PF */
- cgc->cmd[7] = cgc->buflen >> 8;
- cgc->cmd[8] = cgc->buflen & 0xff;
+ put_unaligned_be16(cgc->buflen, &cgc->cmd[7]);
cgc->data_direction = CGC_DATA_WRITE;
return pkt_generic_packet(pd, cgc);
}
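
The CDB and mode-page fixups above and below replace open-coded shift/mask pairs with the unaligned big-endian helpers from <asm/unaligned.h> (added to the includes earlier). Equivalence sketch:

	#include <asm/unaligned.h>

	void encode_len(u8 *cmd, u16 buflen)
	{
		/* old form: cmd[7] = buflen >> 8; cmd[8] = buflen & 0xff; */
		put_unaligned_be16(buflen, &cmd[7]);
	}

	u16 decode_len(const u8 *buf)
	{
		/* old form: (buf[0] << 8) | (buf[1] & 0xff) */
		return get_unaligned_be16(&buf[0]);
	}
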
@@ -1528,8 +1562,7 @@ static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type,
init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ);
cgc.cmd[0] = GPCMD_READ_TRACK_RZONE_INFO;
cgc.cmd[1] = type & 3;
- cgc.cmd[4] = (track & 0xff00) >> 8;
- cgc.cmd[5] = track & 0xff;
+ put_unaligned_be16(track, &cgc.cmd[4]);
cgc.cmd[8] = 8;
cgc.quiet = 1;
@@ -1590,6 +1623,7 @@ static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd,
*/
static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
{
+ struct device *ddev = disk_to_dev(pd->disk);
struct packet_command cgc;
struct scsi_sense_hdr sshdr;
write_param_page *wp;
@@ -1609,8 +1643,8 @@ static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
return ret;
}
- size = 2 + ((buffer[0] << 8) | (buffer[1] & 0xff));
- pd->mode_offset = (buffer[6] << 8) | (buffer[7] & 0xff);
+ size = 2 + get_unaligned_be16(&buffer[0]);
+ pd->mode_offset = get_unaligned_be16(&buffer[6]);
if (size > sizeof(buffer))
size = sizeof(buffer);
@@ -1656,7 +1690,7 @@ static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
/*
* paranoia
*/
- pkt_err(pd, "write mode wrong %d\n", wp->data_block_type);
+ dev_err(ddev, "write mode wrong %d\n", wp->data_block_type);
return 1;
}
wp->packet_size = cpu_to_be32(pd->settings.size >> 2);
@@ -1677,6 +1711,8 @@ static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
*/
static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti)
{
+ struct device *ddev = disk_to_dev(pd->disk);
+
switch (pd->mmc3_profile) {
case 0x1a: /* DVD+RW */
case 0x12: /* DVD-RAM */
@@ -1701,7 +1737,7 @@ static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti)
if (ti->rt == 1 && ti->blank == 0)
return 1;
- pkt_err(pd, "bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
+ dev_err(ddev, "bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
return 0;
}
@@ -1710,6 +1746,8 @@ static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti)
*/
static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
{
+ struct device *ddev = disk_to_dev(pd->disk);
+
switch (pd->mmc3_profile) {
case 0x0a: /* CD-RW */
case 0xffff: /* MMC3 not supported */
@@ -1719,8 +1757,7 @@ static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
case 0x12: /* DVD-RAM */
return 1;
default:
- pkt_dbg(2, pd, "Wrong disc profile (%x)\n",
- pd->mmc3_profile);
+ dev_dbg(ddev, "Wrong disc profile (%x)\n", pd->mmc3_profile);
return 0;
}
@@ -1729,22 +1766,22 @@ static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
* but i'm not sure, should we leave this to user apps? probably.
*/
if (di->disc_type == 0xff) {
- pkt_notice(pd, "unknown disc - no track?\n");
+ dev_notice(ddev, "unknown disc - no track?\n");
return 0;
}
if (di->disc_type != 0x20 && di->disc_type != 0) {
- pkt_err(pd, "wrong disc type (%x)\n", di->disc_type);
+ dev_err(ddev, "wrong disc type (%x)\n", di->disc_type);
return 0;
}
if (di->erasable == 0) {
- pkt_notice(pd, "disc not erasable\n");
+ dev_err(ddev, "disc not erasable\n");
return 0;
}
if (di->border_status == PACKET_SESSION_RESERVED) {
- pkt_err(pd, "can't write to last track (reserved)\n");
+ dev_err(ddev, "can't write to last track (reserved)\n");
return 0;
}
@@ -1753,6 +1790,7 @@ static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
{
+ struct device *ddev = disk_to_dev(pd->disk);
struct packet_command cgc;
unsigned char buf[12];
disc_information di;
@@ -1763,14 +1801,14 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
cgc.cmd[8] = 8;
ret = pkt_generic_packet(pd, &cgc);
- pd->mmc3_profile = ret ? 0xffff : buf[6] << 8 | buf[7];
+ pd->mmc3_profile = ret ? 0xffff : get_unaligned_be16(&buf[6]);
memset(&di, 0, sizeof(disc_information));
memset(&ti, 0, sizeof(track_information));
ret = pkt_get_disc_info(pd, &di);
if (ret) {
- pkt_err(pd, "failed get_disc\n");
+ dev_err(ddev, "failed get_disc\n");
return ret;
}
@@ -1782,12 +1820,12 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
ret = pkt_get_track_info(pd, track, 1, &ti);
if (ret) {
- pkt_err(pd, "failed get_track\n");
+ dev_err(ddev, "failed get_track\n");
return ret;
}
if (!pkt_writable_track(pd, &ti)) {
- pkt_err(pd, "can't write to this track\n");
+ dev_err(ddev, "can't write to this track\n");
return -EROFS;
}
@@ -1797,11 +1835,11 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
*/
pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
if (pd->settings.size == 0) {
- pkt_notice(pd, "detected zero packet size!\n");
+ dev_notice(ddev, "detected zero packet size!\n");
return -ENXIO;
}
if (pd->settings.size > PACKET_MAX_SECTORS) {
- pkt_err(pd, "packet size is too big\n");
+ dev_err(ddev, "packet size is too big\n");
return -EROFS;
}
pd->settings.fp = ti.fp;
@@ -1843,7 +1881,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
pd->settings.block_mode = PACKET_BLOCK_MODE2;
break;
default:
- pkt_err(pd, "unknown data mode\n");
+ dev_err(ddev, "unknown data mode\n");
return -EROFS;
}
return 0;
@@ -1854,6 +1892,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
*/
static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd)
{
+ struct device *ddev = disk_to_dev(pd->disk);
struct packet_command cgc;
struct scsi_sense_hdr sshdr;
unsigned char buf[64];
@@ -1880,13 +1919,13 @@ static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd)
*/
buf[pd->mode_offset + 10] |= (set << 2);
- cgc.buflen = cgc.cmd[8] = 2 + ((buf[0] << 8) | (buf[1] & 0xff));
+ cgc.buflen = cgc.cmd[8] = 2 + get_unaligned_be16(&buf[0]);
ret = pkt_mode_select(pd, &cgc);
if (ret) {
- pkt_err(pd, "write caching control failed\n");
+ dev_err(ddev, "write caching control failed\n");
pkt_dump_sense(pd, &cgc);
} else if (!ret && set)
- pkt_notice(pd, "enabled write caching\n");
+ dev_notice(ddev, "enabled write caching\n");
return ret;
}
@@ -1935,12 +1974,12 @@ static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd,
* Speed Performance Descriptor Block", use the information
* in the first block. (contains the highest speed)
*/
- int num_spdb = (cap_buf[30] << 8) + cap_buf[31];
+ int num_spdb = get_unaligned_be16(&cap_buf[30]);
if (num_spdb > 0)
offset = 34;
}
- *write_speed = (cap_buf[offset] << 8) | cap_buf[offset + 1];
+ *write_speed = get_unaligned_be16(&cap_buf[offset]);
return 0;
}
@@ -1967,6 +2006,7 @@ static char us_clv_to_speed[16] = {
static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
unsigned *speed)
{
+ struct device *ddev = disk_to_dev(pd->disk);
struct packet_command cgc;
struct scsi_sense_hdr sshdr;
unsigned char buf[64];
@@ -1984,7 +2024,7 @@ static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
pkt_dump_sense(pd, &cgc);
return ret;
}
- size = ((unsigned int) buf[0]<<8) + buf[1] + 2;
+ size = 2 + get_unaligned_be16(&buf[0]);
if (size > sizeof(buf))
size = sizeof(buf);
@@ -2001,11 +2041,11 @@ static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
}
if (!(buf[6] & 0x40)) {
- pkt_notice(pd, "disc type is not CD-RW\n");
+ dev_notice(ddev, "disc type is not CD-RW\n");
return 1;
}
if (!(buf[6] & 0x4)) {
- pkt_notice(pd, "A1 values on media are not valid, maybe not CDRW?\n");
+ dev_notice(ddev, "A1 values on media are not valid, maybe not CDRW?\n");
return 1;
}
@@ -2025,25 +2065,26 @@ static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
*speed = us_clv_to_speed[sp];
break;
default:
- pkt_notice(pd, "unknown disc sub-type %d\n", st);
+ dev_notice(ddev, "unknown disc sub-type %d\n", st);
return 1;
}
if (*speed) {
- pkt_info(pd, "maximum media speed: %d\n", *speed);
+ dev_info(ddev, "maximum media speed: %d\n", *speed);
return 0;
} else {
- pkt_notice(pd, "unknown speed %d for sub-type %d\n", sp, st);
+ dev_notice(ddev, "unknown speed %d for sub-type %d\n", sp, st);
return 1;
}
}
static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd)
{
+ struct device *ddev = disk_to_dev(pd->disk);
struct packet_command cgc;
struct scsi_sense_hdr sshdr;
int ret;
- pkt_dbg(2, pd, "Performing OPC\n");
+ dev_dbg(ddev, "Performing OPC\n");
init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
cgc.sshdr = &sshdr;
@@ -2058,18 +2099,19 @@ static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd)
static int pkt_open_write(struct pktcdvd_device *pd)
{
+ struct device *ddev = disk_to_dev(pd->disk);
int ret;
unsigned int write_speed, media_write_speed, read_speed;
ret = pkt_probe_settings(pd);
if (ret) {
- pkt_dbg(2, pd, "failed probe\n");
+ dev_dbg(ddev, "failed probe\n");
return ret;
}
ret = pkt_set_write_settings(pd);
if (ret) {
- pkt_dbg(1, pd, "failed saving write settings\n");
+ dev_notice(ddev, "failed saving write settings\n");
return -EIO;
}
@@ -2082,30 +2124,29 @@ static int pkt_open_write(struct pktcdvd_device *pd)
case 0x13: /* DVD-RW */
case 0x1a: /* DVD+RW */
case 0x12: /* DVD-RAM */
- pkt_dbg(1, pd, "write speed %ukB/s\n", write_speed);
+ dev_notice(ddev, "write speed %ukB/s\n", write_speed);
break;
default:
ret = pkt_media_speed(pd, &media_write_speed);
if (ret)
media_write_speed = 16;
write_speed = min(write_speed, media_write_speed * 177);
- pkt_dbg(1, pd, "write speed %ux\n", write_speed / 176);
+ dev_notice(ddev, "write speed %ux\n", write_speed / 176);
break;
}
read_speed = write_speed;
ret = pkt_set_speed(pd, write_speed, read_speed);
if (ret) {
- pkt_dbg(1, pd, "couldn't set write speed\n");
+ dev_notice(ddev, "couldn't set write speed\n");
return -EIO;
}
pd->write_speed = write_speed;
pd->read_speed = read_speed;
ret = pkt_perform_opc(pd);
- if (ret) {
- pkt_dbg(1, pd, "Optimum Power Calibration failed\n");
- }
+ if (ret)
+ dev_notice(ddev, "Optimum Power Calibration failed\n");
return 0;
}
@@ -2113,8 +2154,9 @@ static int pkt_open_write(struct pktcdvd_device *pd)
/*
* called at open time.
*/
-static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
+static int pkt_open_dev(struct pktcdvd_device *pd, bool write)
{
+ struct device *ddev = disk_to_dev(pd->disk);
int ret;
long lba;
struct request_queue *q;
@@ -2125,7 +2167,7 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
* to read/write from/to it. It is already opened in O_NONBLOCK mode
* so open should not fail.
*/
- bdev = blkdev_get_by_dev(pd->bdev->bd_dev, FMODE_READ | FMODE_EXCL, pd);
+ bdev = blkdev_get_by_dev(pd->bdev->bd_dev, BLK_OPEN_READ, pd, NULL);
if (IS_ERR(bdev)) {
ret = PTR_ERR(bdev);
goto out;
@@ -2133,7 +2175,7 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
ret = pkt_get_last_written(pd, &lba);
if (ret) {
- pkt_err(pd, "pkt_get_last_written failed\n");
+ dev_err(ddev, "pkt_get_last_written failed\n");
goto out_putdev;
}
@@ -2162,17 +2204,17 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
if (write) {
if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
- pkt_err(pd, "not enough memory for buffers\n");
+ dev_err(ddev, "not enough memory for buffers\n");
ret = -ENOMEM;
goto out_putdev;
}
- pkt_info(pd, "%lukB available on disc\n", lba << 1);
+ dev_info(ddev, "%lukB available on disc\n", lba << 1);
}
return 0;
out_putdev:
- blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
+ blkdev_put(bdev, pd);
out:
return ret;
}
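
pkt_open_dev() above follows the reworked open API from this series: blkdev_get_by_dev() now takes BLK_OPEN_* flags, a holder, and optional blk_holder_ops; a non-NULL holder is what requests an exclusive open (FMODE_EXCL is gone), and blkdev_put() must receive the same holder. Sketch, assuming my_holder is the driver's cookie:

	struct block_device *bdev;

	bdev = blkdev_get_by_dev(dev, BLK_OPEN_READ, my_holder, NULL);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);
	/* ... use bdev ... */
	blkdev_put(bdev, my_holder);	/* holder must match the open */
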
@@ -2183,13 +2225,15 @@ out:
*/
static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
{
+ struct device *ddev = disk_to_dev(pd->disk);
+
if (flush && pkt_flush_cache(pd))
- pkt_dbg(1, pd, "not flushing cache\n");
+ dev_notice(ddev, "not flushing cache\n");
pkt_lock_door(pd, 0);
pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
- blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL);
+ blkdev_put(pd->bdev, pd);
pkt_shrink_pktlist(pd);
}
@@ -2203,14 +2247,14 @@ static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)
return pkt_devs[dev_minor];
}
-static int pkt_open(struct block_device *bdev, fmode_t mode)
+static int pkt_open(struct gendisk *disk, blk_mode_t mode)
{
struct pktcdvd_device *pd = NULL;
int ret;
mutex_lock(&pktcdvd_mutex);
mutex_lock(&ctl_mutex);
- pd = pkt_find_dev_from_minor(MINOR(bdev->bd_dev));
+ pd = pkt_find_dev_from_minor(disk->first_minor);
if (!pd) {
ret = -ENODEV;
goto out;
@@ -2219,22 +2263,21 @@ static int pkt_open(struct block_device *bdev, fmode_t mode)
pd->refcnt++;
if (pd->refcnt > 1) {
- if ((mode & FMODE_WRITE) &&
+ if ((mode & BLK_OPEN_WRITE) &&
!test_bit(PACKET_WRITABLE, &pd->flags)) {
ret = -EBUSY;
goto out_dec;
}
} else {
- ret = pkt_open_dev(pd, mode & FMODE_WRITE);
+ ret = pkt_open_dev(pd, mode & BLK_OPEN_WRITE);
if (ret)
goto out_dec;
/*
* needed here as well, since ext2 (among others) may change
* the blocksize at mount time
*/
- set_blocksize(bdev, CD_FRAMESIZE);
+ set_blocksize(disk->part0, CD_FRAMESIZE);
}
-
mutex_unlock(&ctl_mutex);
mutex_unlock(&pktcdvd_mutex);
return 0;
@@ -2247,7 +2290,7 @@ out:
return ret;
}
-static void pkt_close(struct gendisk *disk, fmode_t mode)
+static void pkt_release(struct gendisk *disk)
{
struct pktcdvd_device *pd = disk->private_data;
@@ -2385,15 +2428,15 @@ static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
static void pkt_submit_bio(struct bio *bio)
{
struct pktcdvd_device *pd = bio->bi_bdev->bd_disk->queue->queuedata;
+ struct device *ddev = disk_to_dev(pd->disk);
struct bio *split;
bio = bio_split_to_limits(bio);
if (!bio)
return;
- pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
- (unsigned long long)bio->bi_iter.bi_sector,
- (unsigned long long)bio_end_sector(bio));
+ dev_dbg(ddev, "start = %6llx stop = %6llx\n",
+ bio->bi_iter.bi_sector, bio_end_sector(bio));
/*
* Clone READ bios so we can have our own bi_end_io callback.
@@ -2404,13 +2447,12 @@ static void pkt_submit_bio(struct bio *bio)
}
if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
- pkt_notice(pd, "WRITE for ro device (%llu)\n",
- (unsigned long long)bio->bi_iter.bi_sector);
+ dev_notice(ddev, "WRITE for ro device (%llu)\n", bio->bi_iter.bi_sector);
goto end_io;
}
if (!bio->bi_iter.bi_size || (bio->bi_iter.bi_size % CD_FRAMESIZE)) {
- pkt_err(pd, "wrong bio size\n");
+ dev_err(ddev, "wrong bio size\n");
goto end_io;
}
@@ -2446,74 +2488,15 @@ static void pkt_init_queue(struct pktcdvd_device *pd)
q->queuedata = pd;
}
-static int pkt_seq_show(struct seq_file *m, void *p)
-{
- struct pktcdvd_device *pd = m->private;
- char *msg;
- int states[PACKET_NUM_STATES];
-
- seq_printf(m, "Writer %s mapped to %pg:\n", pd->name, pd->bdev);
-
- seq_printf(m, "\nSettings:\n");
- seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2);
-
- if (pd->settings.write_type == 0)
- msg = "Packet";
- else
- msg = "Unknown";
- seq_printf(m, "\twrite type:\t\t%s\n", msg);
-
- seq_printf(m, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable");
- seq_printf(m, "\tlink loss:\t\t%d\n", pd->settings.link_loss);
-
- seq_printf(m, "\ttrack mode:\t\t%d\n", pd->settings.track_mode);
-
- if (pd->settings.block_mode == PACKET_BLOCK_MODE1)
- msg = "Mode 1";
- else if (pd->settings.block_mode == PACKET_BLOCK_MODE2)
- msg = "Mode 2";
- else
- msg = "Unknown";
- seq_printf(m, "\tblock mode:\t\t%s\n", msg);
-
- seq_printf(m, "\nStatistics:\n");
- seq_printf(m, "\tpackets started:\t%lu\n", pd->stats.pkt_started);
- seq_printf(m, "\tpackets ended:\t\t%lu\n", pd->stats.pkt_ended);
- seq_printf(m, "\twritten:\t\t%lukB\n", pd->stats.secs_w >> 1);
- seq_printf(m, "\tread gather:\t\t%lukB\n", pd->stats.secs_rg >> 1);
- seq_printf(m, "\tread:\t\t\t%lukB\n", pd->stats.secs_r >> 1);
-
- seq_printf(m, "\nMisc:\n");
- seq_printf(m, "\treference count:\t%d\n", pd->refcnt);
- seq_printf(m, "\tflags:\t\t\t0x%lx\n", pd->flags);
- seq_printf(m, "\tread speed:\t\t%ukB/s\n", pd->read_speed);
- seq_printf(m, "\twrite speed:\t\t%ukB/s\n", pd->write_speed);
- seq_printf(m, "\tstart offset:\t\t%lu\n", pd->offset);
- seq_printf(m, "\tmode page offset:\t%u\n", pd->mode_offset);
-
- seq_printf(m, "\nQueue state:\n");
- seq_printf(m, "\tbios queued:\t\t%d\n", pd->bio_queue_size);
- seq_printf(m, "\tbios pending:\t\t%d\n", atomic_read(&pd->cdrw.pending_bios));
- seq_printf(m, "\tcurrent sector:\t\t0x%llx\n", (unsigned long long)pd->current_sector);
-
- pkt_count_states(pd, states);
- seq_printf(m, "\tstate:\t\t\ti:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
- states[0], states[1], states[2], states[3], states[4], states[5]);
-
- seq_printf(m, "\twrite congestion marks:\toff=%d on=%d\n",
- pd->write_congestion_off,
- pd->write_congestion_on);
- return 0;
-}
-
static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
{
+ struct device *ddev = disk_to_dev(pd->disk);
int i;
struct block_device *bdev;
struct scsi_device *sdev;
if (pd->pkt_dev == dev) {
- pkt_err(pd, "recursive setup not allowed\n");
+ dev_err(ddev, "recursive setup not allowed\n");
return -EBUSY;
}
for (i = 0; i < MAX_WRITERS; i++) {
@@ -2521,21 +2504,22 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
if (!pd2)
continue;
if (pd2->bdev->bd_dev == dev) {
- pkt_err(pd, "%pg already setup\n", pd2->bdev);
+ dev_err(ddev, "%pg already setup\n", pd2->bdev);
return -EBUSY;
}
if (pd2->pkt_dev == dev) {
- pkt_err(pd, "can't chain pktcdvd devices\n");
+ dev_err(ddev, "can't chain pktcdvd devices\n");
return -EBUSY;
}
}
- bdev = blkdev_get_by_dev(dev, FMODE_READ | FMODE_NDELAY, NULL);
+ bdev = blkdev_get_by_dev(dev, BLK_OPEN_READ | BLK_OPEN_NDELAY, NULL,
+ NULL);
if (IS_ERR(bdev))
return PTR_ERR(bdev);
sdev = scsi_device_from_queue(bdev->bd_disk->queue);
if (!sdev) {
- blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
+ blkdev_put(bdev, NULL);
return -EINVAL;
}
put_device(&sdev->sdev_gendev);
@@ -2549,30 +2533,31 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
pkt_init_queue(pd);
atomic_set(&pd->cdrw.pending_bios, 0);
- pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->name);
+ pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->disk->disk_name);
if (IS_ERR(pd->cdrw.thread)) {
- pkt_err(pd, "can't start kernel thread\n");
+ dev_err(ddev, "can't start kernel thread\n");
goto out_mem;
}
- proc_create_single_data(pd->name, 0, pkt_proc, pkt_seq_show, pd);
- pkt_dbg(1, pd, "writer mapped to %pg\n", bdev);
+ proc_create_single_data(pd->disk->disk_name, 0, pkt_proc, pkt_seq_show, pd);
+ dev_notice(ddev, "writer mapped to %pg\n", bdev);
return 0;
out_mem:
- blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
+ blkdev_put(bdev, NULL);
/* This is safe: open() is still holding a reference. */
module_put(THIS_MODULE);
return -ENOMEM;
}
-static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
+static int pkt_ioctl(struct block_device *bdev, blk_mode_t mode,
+ unsigned int cmd, unsigned long arg)
{
struct pktcdvd_device *pd = bdev->bd_disk->private_data;
+ struct device *ddev = disk_to_dev(pd->disk);
int ret;
- pkt_dbg(2, pd, "cmd %x, dev %d:%d\n",
- cmd, MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
+ dev_dbg(ddev, "cmd %x, dev %d:%d\n", cmd, MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
mutex_lock(&pktcdvd_mutex);
switch (cmd) {
@@ -2598,7 +2583,7 @@ static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
ret = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
break;
default:
- pkt_dbg(2, pd, "Unknown ioctl (%x)\n", cmd);
+ dev_dbg(ddev, "Unknown ioctl (%x)\n", cmd);
ret = -ENOTTY;
}
mutex_unlock(&pktcdvd_mutex);
@@ -2631,7 +2616,7 @@ static const struct block_device_operations pktcdvd_ops = {
.owner = THIS_MODULE,
.submit_bio = pkt_submit_bio,
.open = pkt_open,
- .release = pkt_close,
+ .release = pkt_release,
.ioctl = pkt_ioctl,
.compat_ioctl = blkdev_compat_ptr_ioctl,
.check_events = pkt_check_events,
@@ -2676,7 +2661,6 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
spin_lock_init(&pd->iosched.lock);
bio_list_init(&pd->iosched.read_queue);
bio_list_init(&pd->iosched.write_queue);
- sprintf(pd->name, DRIVER_NAME"%d", idx);
init_waitqueue_head(&pd->wqueue);
pd->bio_queue = RB_ROOT;
@@ -2693,7 +2677,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
disk->minors = 1;
disk->fops = &pktcdvd_ops;
disk->flags = GENHD_FL_REMOVABLE | GENHD_FL_NO_PART;
- strcpy(disk->disk_name, pd->name);
+ snprintf(disk->disk_name, sizeof(disk->disk_name), DRIVER_NAME"%d", idx);
disk->private_data = pd;
pd->pkt_dev = MKDEV(pktdev_major, idx);
@@ -2735,6 +2719,7 @@ out_mutex:
static int pkt_remove_dev(dev_t pkt_dev)
{
struct pktcdvd_device *pd;
+ struct device *ddev;
int idx;
int ret = 0;
@@ -2755,6 +2740,9 @@ static int pkt_remove_dev(dev_t pkt_dev)
ret = -EBUSY;
goto out;
}
+
+ ddev = disk_to_dev(pd->disk);
+
if (!IS_ERR(pd->cdrw.thread))
kthread_stop(pd->cdrw.thread);
@@ -2763,10 +2751,10 @@ static int pkt_remove_dev(dev_t pkt_dev)
pkt_debugfs_dev_remove(pd);
pkt_sysfs_dev_remove(pd);
- blkdev_put(pd->bdev, FMODE_READ | FMODE_NDELAY);
+ blkdev_put(pd->bdev, NULL);
- remove_proc_entry(pd->name, pkt_proc);
- pkt_dbg(1, pd, "writer unmapped\n");
+ remove_proc_entry(pd->disk->disk_name, pkt_proc);
+ dev_notice(ddev, "writer unmapped\n");
del_gendisk(pd->disk);
put_disk(pd->disk);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 84ad3b17956f..bd0e075a5d89 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -660,9 +660,9 @@ static bool pending_result_dec(struct pending_result *pending, int *result)
return true;
}
-static int rbd_open(struct block_device *bdev, fmode_t mode)
+static int rbd_open(struct gendisk *disk, blk_mode_t mode)
{
- struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
+ struct rbd_device *rbd_dev = disk->private_data;
bool removing = false;
spin_lock_irq(&rbd_dev->lock);
@@ -679,7 +679,7 @@ static int rbd_open(struct block_device *bdev, fmode_t mode)
return 0;
}
-static void rbd_release(struct gendisk *disk, fmode_t mode)
+static void rbd_release(struct gendisk *disk)
{
struct rbd_device *rbd_dev = disk->private_data;
unsigned long open_count_before;
@@ -1334,14 +1334,30 @@ static bool rbd_obj_is_tail(struct rbd_obj_request *obj_req)
/*
* Must be called after rbd_obj_calc_img_extents().
*/
-static bool rbd_obj_copyup_enabled(struct rbd_obj_request *obj_req)
+static void rbd_obj_set_copyup_enabled(struct rbd_obj_request *obj_req)
{
- if (!obj_req->num_img_extents ||
- (rbd_obj_is_entire(obj_req) &&
- !obj_req->img_request->snapc->num_snaps))
- return false;
+ rbd_assert(obj_req->img_request->snapc);
- return true;
+ if (obj_req->img_request->op_type == OBJ_OP_DISCARD) {
+ dout("%s %p objno %llu discard\n", __func__, obj_req,
+ obj_req->ex.oe_objno);
+ return;
+ }
+
+ if (!obj_req->num_img_extents) {
+ dout("%s %p objno %llu not overlapping\n", __func__, obj_req,
+ obj_req->ex.oe_objno);
+ return;
+ }
+
+ if (rbd_obj_is_entire(obj_req) &&
+ !obj_req->img_request->snapc->num_snaps) {
+ dout("%s %p objno %llu entire\n", __func__, obj_req,
+ obj_req->ex.oe_objno);
+ return;
+ }
+
+ obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED;
}
static u64 rbd_obj_img_extents_bytes(struct rbd_obj_request *obj_req)
@@ -1442,6 +1458,7 @@ __rbd_obj_add_osd_request(struct rbd_obj_request *obj_req,
static struct ceph_osd_request *
rbd_obj_add_osd_request(struct rbd_obj_request *obj_req, int num_ops)
{
+ rbd_assert(obj_req->img_request->snapc);
return __rbd_obj_add_osd_request(obj_req, obj_req->img_request->snapc,
num_ops);
}
@@ -1578,15 +1595,18 @@ static void rbd_img_request_init(struct rbd_img_request *img_request,
mutex_init(&img_request->state_mutex);
}
+/*
+ * Only snap_id is captured here, for reads. For writes, snapshot
+ * context is captured in rbd_img_object_requests() after exclusive
+ * lock is ensured to be held.
+ */
static void rbd_img_capture_header(struct rbd_img_request *img_req)
{
struct rbd_device *rbd_dev = img_req->rbd_dev;
lockdep_assert_held(&rbd_dev->header_rwsem);
- if (rbd_img_is_write(img_req))
- img_req->snapc = ceph_get_snap_context(rbd_dev->header.snapc);
- else
+ if (!rbd_img_is_write(img_req))
img_req->snap_id = rbd_dev->spec->snap_id;
if (rbd_dev_parent_get(rbd_dev))
@@ -2233,9 +2253,6 @@ static int rbd_obj_init_write(struct rbd_obj_request *obj_req)
if (ret)
return ret;
- if (rbd_obj_copyup_enabled(obj_req))
- obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED;
-
obj_req->write_state = RBD_OBJ_WRITE_START;
return 0;
}
@@ -2341,8 +2358,6 @@ static int rbd_obj_init_zeroout(struct rbd_obj_request *obj_req)
if (ret)
return ret;
- if (rbd_obj_copyup_enabled(obj_req))
- obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED;
if (!obj_req->num_img_extents) {
obj_req->flags |= RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT;
if (rbd_obj_is_entire(obj_req))
@@ -3286,6 +3301,7 @@ again:
case RBD_OBJ_WRITE_START:
rbd_assert(!*result);
+ rbd_obj_set_copyup_enabled(obj_req);
if (rbd_obj_write_is_noop(obj_req))
return true;
@@ -3472,9 +3488,19 @@ static int rbd_img_exclusive_lock(struct rbd_img_request *img_req)
static void rbd_img_object_requests(struct rbd_img_request *img_req)
{
+ struct rbd_device *rbd_dev = img_req->rbd_dev;
struct rbd_obj_request *obj_req;
rbd_assert(!img_req->pending.result && !img_req->pending.num_pending);
+ rbd_assert(!need_exclusive_lock(img_req) ||
+ __rbd_is_lock_owner(rbd_dev));
+
+ if (rbd_img_is_write(img_req)) {
+ rbd_assert(!img_req->snapc);
+ down_read(&rbd_dev->header_rwsem);
+ img_req->snapc = ceph_get_snap_context(rbd_dev->header.snapc);
+ up_read(&rbd_dev->header_rwsem);
+ }
for_each_obj_request(img_req, obj_req) {
int result = 0;
@@ -3492,7 +3518,6 @@ static void rbd_img_object_requests(struct rbd_img_request *img_req)
static bool rbd_img_advance(struct rbd_img_request *img_req, int *result)
{
- struct rbd_device *rbd_dev = img_req->rbd_dev;
int ret;
again:
@@ -3513,9 +3538,6 @@ again:
if (*result)
return true;
- rbd_assert(!need_exclusive_lock(img_req) ||
- __rbd_is_lock_owner(rbd_dev));
-
rbd_img_object_requests(img_req);
if (!img_req->pending.num_pending) {
*result = img_req->pending.result;
@@ -3977,6 +3999,10 @@ static int rbd_post_acquire_action(struct rbd_device *rbd_dev)
{
int ret;
+ ret = rbd_dev_refresh(rbd_dev);
+ if (ret)
+ return ret;
+
if (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) {
ret = rbd_object_map_open(rbd_dev);
if (ret)
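
The rbd hunks move snapshot-context capture from header-capture time into rbd_img_object_requests(), once the exclusive lock is known to be held, and the new rbd_dev_refresh() call in the post-acquire hook re-reads the header first; together this keeps writers from using a snapc that went stale while waiting for the lock. The capture step, mirroring the hunk above (rbd_dev is img_req->rbd_dev):

	if (rbd_img_is_write(img_req)) {
		rbd_assert(!img_req->snapc);
		down_read(&rbd_dev->header_rwsem);
		img_req->snapc = ceph_get_snap_context(rbd_dev->header.snapc);
		up_read(&rbd_dev->header_rwsem);
	}
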
diff --git a/drivers/block/rnbd/Makefile b/drivers/block/rnbd/Makefile
index 40b31630822c..208e5f865497 100644
--- a/drivers/block/rnbd/Makefile
+++ b/drivers/block/rnbd/Makefile
@@ -3,13 +3,11 @@
ccflags-y := -I$(srctree)/drivers/infiniband/ulp/rtrs
rnbd-client-y := rnbd-clt.o \
- rnbd-clt-sysfs.o \
- rnbd-common.o
+ rnbd-clt-sysfs.o
CFLAGS_rnbd-srv-trace.o = -I$(src)
-rnbd-server-y := rnbd-common.o \
- rnbd-srv.o \
+rnbd-server-y := rnbd-srv.o \
rnbd-srv-sysfs.o \
rnbd-srv-trace.o
diff --git a/drivers/block/rnbd/rnbd-clt-sysfs.c b/drivers/block/rnbd/rnbd-clt-sysfs.c
index 8c6087949794..c36d8b1ceeed 100644
--- a/drivers/block/rnbd/rnbd-clt-sysfs.c
+++ b/drivers/block/rnbd/rnbd-clt-sysfs.c
@@ -24,7 +24,9 @@
#include "rnbd-clt.h"
static struct device *rnbd_dev;
-static struct class *rnbd_dev_class;
+static const struct class rnbd_dev_class = {
+ .name = "rnbd-client",
+};
static struct kobject *rnbd_devs_kobj;
enum {
@@ -278,7 +280,7 @@ static ssize_t access_mode_show(struct kobject *kobj,
dev = container_of(kobj, struct rnbd_clt_dev, kobj);
- return sysfs_emit(page, "%s\n", rnbd_access_mode_str(dev->access_mode));
+ return sysfs_emit(page, "%s\n", rnbd_access_modes[dev->access_mode].str);
}
static struct kobj_attribute rnbd_clt_access_mode =
@@ -596,7 +598,7 @@ static ssize_t rnbd_clt_map_device_store(struct kobject *kobj,
pr_info("Mapping device %s on session %s, (access_mode: %s, nr_poll_queues: %d)\n",
pathname, sessname,
- rnbd_access_mode_str(access_mode),
+ rnbd_access_modes[access_mode].str,
nr_poll_queues);
dev = rnbd_clt_map_device(sessname, paths, path_cnt, port_nr, pathname,
@@ -646,11 +648,11 @@ int rnbd_clt_create_sysfs_files(void)
{
int err;
- rnbd_dev_class = class_create("rnbd-client");
- if (IS_ERR(rnbd_dev_class))
- return PTR_ERR(rnbd_dev_class);
+ err = class_register(&rnbd_dev_class);
+ if (err)
+ return err;
- rnbd_dev = device_create_with_groups(rnbd_dev_class, NULL,
+ rnbd_dev = device_create_with_groups(&rnbd_dev_class, NULL,
MKDEV(0, 0), NULL,
default_attr_groups, "ctl");
if (IS_ERR(rnbd_dev)) {
@@ -666,9 +668,9 @@ int rnbd_clt_create_sysfs_files(void)
return 0;
dev_destroy:
- device_destroy(rnbd_dev_class, MKDEV(0, 0));
+ device_destroy(&rnbd_dev_class, MKDEV(0, 0));
cls_destroy:
- class_destroy(rnbd_dev_class);
+ class_unregister(&rnbd_dev_class);
return err;
}
@@ -678,6 +680,6 @@ void rnbd_clt_destroy_sysfs_files(void)
sysfs_remove_group(&rnbd_dev->kobj, &default_attr_group);
kobject_del(rnbd_devs_kobj);
kobject_put(rnbd_devs_kobj);
- device_destroy(rnbd_dev_class, MKDEV(0, 0));
- class_destroy(rnbd_dev_class);
+ device_destroy(&rnbd_dev_class, MKDEV(0, 0));
+ class_unregister(&rnbd_dev_class);
}
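
Both rnbd sysfs files switch from the dynamically allocated class_create() to a statically defined struct class registered with class_register(), matching the driver core's move toward const class objects. Sketch ("my-driver" is a hypothetical class name):

	static const struct class my_class = {
		.name = "my-driver",
	};

	static int __init my_init(void)
	{
		int err = class_register(&my_class);

		if (err)
			return err;
		/* device_create(&my_class, NULL, MKDEV(0, 0), NULL, "ctl"); */
		return 0;
	}

	static void __exit my_exit(void)
	{
		class_unregister(&my_class);
	}
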
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
index 5eb8c7855970..b0550b68645d 100644
--- a/drivers/block/rnbd/rnbd-clt.c
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -921,11 +921,11 @@ rnbd_clt_session *find_or_create_sess(const char *sessname, bool *first)
return sess;
}
-static int rnbd_client_open(struct block_device *block_device, fmode_t mode)
+static int rnbd_client_open(struct gendisk *disk, blk_mode_t mode)
{
- struct rnbd_clt_dev *dev = block_device->bd_disk->private_data;
+ struct rnbd_clt_dev *dev = disk->private_data;
- if (get_disk_ro(dev->gd) && (mode & FMODE_WRITE))
+ if (get_disk_ro(dev->gd) && (mode & BLK_OPEN_WRITE))
return -EPERM;
if (dev->dev_state == DEV_STATE_UNMAPPED ||
@@ -935,7 +935,7 @@ static int rnbd_client_open(struct block_device *block_device, fmode_t mode)
return 0;
}
-static void rnbd_client_release(struct gendisk *gen, fmode_t mode)
+static void rnbd_client_release(struct gendisk *gen)
{
struct rnbd_clt_dev *dev = gen->private_data;
diff --git a/drivers/block/rnbd/rnbd-common.c b/drivers/block/rnbd/rnbd-common.c
deleted file mode 100644
index 596c3f732403..000000000000
--- a/drivers/block/rnbd/rnbd-common.c
+++ /dev/null
@@ -1,23 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * RDMA Network Block Driver
- *
- * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
- * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
- * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
- */
-#include "rnbd-proto.h"
-
-const char *rnbd_access_mode_str(enum rnbd_access_mode mode)
-{
- switch (mode) {
- case RNBD_ACCESS_RO:
- return "ro";
- case RNBD_ACCESS_RW:
- return "rw";
- case RNBD_ACCESS_MIGRATION:
- return "migration";
- default:
- return "unknown";
- }
-}
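
The deleted helper above is superseded by the rnbd_access_modes[] table added to rnbd-proto.h in the next hunks: a __maybe_unused array indexed by enum rnbd_access_mode, so client and server resolve the name without sharing a .c file. Equivalent lookup, assuming the mode value was range-checked first:

	const char *str = rnbd_access_modes[dev->access_mode].str;
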
diff --git a/drivers/block/rnbd/rnbd-proto.h b/drivers/block/rnbd/rnbd-proto.h
index da1d0542d7e2..e32f8f2c868a 100644
--- a/drivers/block/rnbd/rnbd-proto.h
+++ b/drivers/block/rnbd/rnbd-proto.h
@@ -61,6 +61,15 @@ enum rnbd_access_mode {
RNBD_ACCESS_MIGRATION,
};
+static const __maybe_unused struct {
+ enum rnbd_access_mode mode;
+ const char *str;
+} rnbd_access_modes[] = {
+ [RNBD_ACCESS_RO] = {RNBD_ACCESS_RO, "ro"},
+ [RNBD_ACCESS_RW] = {RNBD_ACCESS_RW, "rw"},
+ [RNBD_ACCESS_MIGRATION] = {RNBD_ACCESS_MIGRATION, "migration"},
+};
+
/**
* struct rnbd_msg_sess_info - initial session info from client to server
* @hdr: message header
@@ -185,7 +194,6 @@ struct rnbd_msg_io {
enum rnbd_io_flags {
/* Operations */
-
RNBD_OP_READ = 0,
RNBD_OP_WRITE = 1,
RNBD_OP_FLUSH = 2,
@@ -193,15 +201,9 @@ enum rnbd_io_flags {
RNBD_OP_SECURE_ERASE = 4,
RNBD_OP_WRITE_SAME = 5,
- RNBD_OP_LAST,
-
/* Flags */
-
RNBD_F_SYNC = 1<<(RNBD_OP_BITS + 0),
RNBD_F_FUA = 1<<(RNBD_OP_BITS + 1),
-
- RNBD_F_ALL = (RNBD_F_SYNC | RNBD_F_FUA)
-
};
static inline u32 rnbd_op(u32 flags)
@@ -214,21 +216,6 @@ static inline u32 rnbd_flags(u32 flags)
return flags & ~RNBD_OP_MASK;
}
-static inline bool rnbd_flags_supported(u32 flags)
-{
- u32 op;
-
- op = rnbd_op(flags);
- flags = rnbd_flags(flags);
-
- if (op >= RNBD_OP_LAST)
- return false;
- if (flags & ~RNBD_F_ALL)
- return false;
-
- return true;
-}
-
static inline blk_opf_t rnbd_to_bio_flags(u32 rnbd_opf)
{
blk_opf_t bio_opf;
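
One consequence of replacing the deleted rnbd_access_mode_str() with the rnbd_access_modes[] table above is that there is no "unknown" fallback anymore; callers must pass a validated mode. A sketch of a bounds-checked lookup (the helper name and fallback string are editorial, not part of the patch):

static const char *demo_access_mode_str(enum rnbd_access_mode mode)
{
	if ((size_t)mode >= ARRAY_SIZE(rnbd_access_modes))
		return "unknown";	/* the switch-based helper had this */
	return rnbd_access_modes[mode].str;
}
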
diff --git a/drivers/block/rnbd/rnbd-srv-sysfs.c b/drivers/block/rnbd/rnbd-srv-sysfs.c
index d5d9267e1fa5..cba6ba43c2c2 100644
--- a/drivers/block/rnbd/rnbd-srv-sysfs.c
+++ b/drivers/block/rnbd/rnbd-srv-sysfs.c
@@ -9,7 +9,6 @@
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
-#include <uapi/linux/limits.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/stat.h>
@@ -20,7 +19,9 @@
#include "rnbd-srv.h"
static struct device *rnbd_dev;
-static struct class *rnbd_dev_class;
+static const struct class rnbd_dev_class = {
+ .name = "rnbd-server",
+};
static struct kobject *rnbd_devs_kobj;
static void rnbd_srv_dev_release(struct kobject *kobj)
@@ -88,8 +89,7 @@ static ssize_t read_only_show(struct kobject *kobj, struct kobj_attribute *attr,
sess_dev = container_of(kobj, struct rnbd_srv_sess_dev, kobj);
- return sysfs_emit(page, "%d\n",
- !(sess_dev->open_flags & FMODE_WRITE));
+ return sysfs_emit(page, "%d\n", sess_dev->readonly);
}
static struct kobj_attribute rnbd_srv_dev_session_ro_attr =
@@ -104,7 +104,7 @@ static ssize_t access_mode_show(struct kobject *kobj,
sess_dev = container_of(kobj, struct rnbd_srv_sess_dev, kobj);
return sysfs_emit(page, "%s\n",
- rnbd_access_mode_str(sess_dev->access_mode));
+ rnbd_access_modes[sess_dev->access_mode].str);
}
static struct kobj_attribute rnbd_srv_dev_session_access_mode_attr =
@@ -215,12 +215,12 @@ int rnbd_srv_create_sysfs_files(void)
{
int err;
- rnbd_dev_class = class_create("rnbd-server");
- if (IS_ERR(rnbd_dev_class))
- return PTR_ERR(rnbd_dev_class);
+ err = class_register(&rnbd_dev_class);
+ if (err)
+ return err;
- rnbd_dev = device_create(rnbd_dev_class, NULL,
- MKDEV(0, 0), NULL, "ctl");
+ rnbd_dev = device_create(&rnbd_dev_class, NULL,
+ MKDEV(0, 0), NULL, "ctl");
if (IS_ERR(rnbd_dev)) {
err = PTR_ERR(rnbd_dev);
goto cls_destroy;
@@ -234,9 +234,9 @@ int rnbd_srv_create_sysfs_files(void)
return 0;
dev_destroy:
- device_destroy(rnbd_dev_class, MKDEV(0, 0));
+ device_destroy(&rnbd_dev_class, MKDEV(0, 0));
cls_destroy:
- class_destroy(rnbd_dev_class);
+ class_unregister(&rnbd_dev_class);
return err;
}
@@ -245,6 +245,6 @@ void rnbd_srv_destroy_sysfs_files(void)
{
kobject_del(rnbd_devs_kobj);
kobject_put(rnbd_devs_kobj);
- device_destroy(rnbd_dev_class, MKDEV(0, 0));
- class_destroy(rnbd_dev_class);
+ device_destroy(&rnbd_dev_class, MKDEV(0, 0));
+ class_unregister(&rnbd_dev_class);
}
diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
index 2cfed2e58d64..c186df0ec641 100644
--- a/drivers/block/rnbd/rnbd-srv.c
+++ b/drivers/block/rnbd/rnbd-srv.c
@@ -96,7 +96,7 @@ rnbd_get_sess_dev(int dev_id, struct rnbd_srv_session *srv_sess)
ret = kref_get_unless_zero(&sess_dev->kref);
rcu_read_unlock();
- if (!sess_dev || !ret)
+ if (!ret)
return ERR_PTR(-ENXIO);
return sess_dev;
@@ -180,7 +180,7 @@ static void destroy_device(struct kref *kref)
WARN_ONCE(!list_empty(&dev->sess_dev_list),
"Device %s is being destroyed but still in use!\n",
- dev->id);
+ dev->name);
spin_lock(&dev_lock);
list_del(&dev->list);
@@ -219,10 +219,10 @@ void rnbd_destroy_sess_dev(struct rnbd_srv_sess_dev *sess_dev, bool keep_id)
rnbd_put_sess_dev(sess_dev);
wait_for_completion(&dc); /* wait for inflights to drop to zero */
- blkdev_put(sess_dev->bdev, sess_dev->open_flags);
+ blkdev_put(sess_dev->bdev, NULL);
mutex_lock(&sess_dev->dev->lock);
list_del(&sess_dev->dev_list);
- if (sess_dev->open_flags & FMODE_WRITE)
+ if (!sess_dev->readonly)
sess_dev->dev->open_write_cnt--;
mutex_unlock(&sess_dev->dev->lock);
@@ -356,7 +356,7 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess,
const void *msg, size_t len,
void *data, size_t datalen);
-static int process_msg_sess_info(struct rnbd_srv_session *srv_sess,
+static void process_msg_sess_info(struct rnbd_srv_session *srv_sess,
const void *msg, size_t len,
void *data, size_t datalen);
@@ -384,8 +384,7 @@ static int rnbd_srv_rdma_ev(void *priv, struct rtrs_srv_op *id,
ret = process_msg_open(srv_sess, usr, usrlen, data, datalen);
break;
case RNBD_MSG_SESS_INFO:
- ret = process_msg_sess_info(srv_sess, usr, usrlen, data,
- datalen);
+ process_msg_sess_info(srv_sess, usr, usrlen, data, datalen);
break;
default:
pr_warn("Received unexpected message type %d from session %s\n",
@@ -431,7 +430,7 @@ static struct rnbd_srv_dev *rnbd_srv_init_srv_dev(struct block_device *bdev)
if (!dev)
return ERR_PTR(-ENOMEM);
- snprintf(dev->id, sizeof(dev->id), "%pg", bdev);
+ snprintf(dev->name, sizeof(dev->name), "%pg", bdev);
kref_init(&dev->kref);
INIT_LIST_HEAD(&dev->sess_dev_list);
mutex_init(&dev->lock);
@@ -446,7 +445,7 @@ rnbd_srv_find_or_add_srv_dev(struct rnbd_srv_dev *new_dev)
spin_lock(&dev_lock);
list_for_each_entry(dev, &dev_list, list) {
- if (!strncmp(dev->id, new_dev->id, sizeof(dev->id))) {
+ if (!strncmp(dev->name, new_dev->name, sizeof(dev->name))) {
if (!kref_get_unless_zero(&dev->kref))
/*
* We lost the race, device is almost dead.
@@ -467,39 +466,38 @@ static int rnbd_srv_check_update_open_perm(struct rnbd_srv_dev *srv_dev,
struct rnbd_srv_session *srv_sess,
enum rnbd_access_mode access_mode)
{
- int ret = -EPERM;
+ int ret = 0;
mutex_lock(&srv_dev->lock);
switch (access_mode) {
case RNBD_ACCESS_RO:
- ret = 0;
break;
case RNBD_ACCESS_RW:
if (srv_dev->open_write_cnt == 0) {
srv_dev->open_write_cnt++;
- ret = 0;
} else {
pr_err("Mapping device '%s' for session %s with RW permissions failed. Device already opened as 'RW' by %d client(s), access mode %s.\n",
- srv_dev->id, srv_sess->sessname,
+ srv_dev->name, srv_sess->sessname,
srv_dev->open_write_cnt,
- rnbd_access_mode_str(access_mode));
+ rnbd_access_modes[access_mode].str);
+ ret = -EPERM;
}
break;
case RNBD_ACCESS_MIGRATION:
if (srv_dev->open_write_cnt < 2) {
srv_dev->open_write_cnt++;
- ret = 0;
} else {
pr_err("Mapping device '%s' for session %s with migration permissions failed. Device already opened as 'RW' by %d client(s), access mode %s.\n",
- srv_dev->id, srv_sess->sessname,
+ srv_dev->name, srv_sess->sessname,
srv_dev->open_write_cnt,
- rnbd_access_mode_str(access_mode));
+ rnbd_access_modes[access_mode].str);
+ ret = -EPERM;
}
break;
default:
pr_err("Received mapping request for device '%s' on session %s with invalid access mode: %d\n",
- srv_dev->id, srv_sess->sessname, access_mode);
+ srv_dev->name, srv_sess->sessname, access_mode);
ret = -EINVAL;
}
@@ -561,7 +559,7 @@ static void rnbd_srv_fill_msg_open_rsp(struct rnbd_msg_open_rsp *rsp,
static struct rnbd_srv_sess_dev *
rnbd_srv_create_set_sess_dev(struct rnbd_srv_session *srv_sess,
const struct rnbd_msg_open *open_msg,
- struct block_device *bdev, fmode_t open_flags,
+ struct block_device *bdev, bool readonly,
struct rnbd_srv_dev *srv_dev)
{
struct rnbd_srv_sess_dev *sdev = rnbd_sess_dev_alloc(srv_sess);
@@ -576,7 +574,7 @@ rnbd_srv_create_set_sess_dev(struct rnbd_srv_session *srv_sess,
sdev->bdev = bdev;
sdev->sess = srv_sess;
sdev->dev = srv_dev;
- sdev->open_flags = open_flags;
+ sdev->readonly = readonly;
sdev->access_mode = open_msg->access_mode;
return sdev;
@@ -631,7 +629,7 @@ static char *rnbd_srv_get_full_path(struct rnbd_srv_session *srv_sess,
return full_path;
}
-static int process_msg_sess_info(struct rnbd_srv_session *srv_sess,
+static void process_msg_sess_info(struct rnbd_srv_session *srv_sess,
const void *msg, size_t len,
void *data, size_t datalen)
{
@@ -644,8 +642,6 @@ static int process_msg_sess_info(struct rnbd_srv_session *srv_sess,
rsp->hdr.type = cpu_to_le16(RNBD_MSG_SESS_INFO_RSP);
rsp->ver = srv_sess->ver;
-
- return 0;
}
/**
@@ -681,15 +677,14 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess,
struct rnbd_srv_sess_dev *srv_sess_dev;
const struct rnbd_msg_open *open_msg = msg;
struct block_device *bdev;
- fmode_t open_flags;
+ blk_mode_t open_flags = BLK_OPEN_READ;
char *full_path;
struct rnbd_msg_open_rsp *rsp = data;
trace_process_msg_open(srv_sess, open_msg);
- open_flags = FMODE_READ;
if (open_msg->access_mode != RNBD_ACCESS_RO)
- open_flags |= FMODE_WRITE;
+ open_flags |= BLK_OPEN_WRITE;
mutex_lock(&srv_sess->lock);
@@ -719,7 +714,7 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess,
goto reject;
}
- bdev = blkdev_get_by_path(full_path, open_flags, THIS_MODULE);
+ bdev = blkdev_get_by_path(full_path, open_flags, NULL, NULL);
if (IS_ERR(bdev)) {
ret = PTR_ERR(bdev);
pr_err("Opening device '%s' on session %s failed, failed to open the block device, err: %d\n",
@@ -736,9 +731,9 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess,
goto blkdev_put;
}
- srv_sess_dev = rnbd_srv_create_set_sess_dev(srv_sess, open_msg,
- bdev, open_flags,
- srv_dev);
+ srv_sess_dev = rnbd_srv_create_set_sess_dev(srv_sess, open_msg, bdev,
+ open_msg->access_mode == RNBD_ACCESS_RO,
+ srv_dev);
if (IS_ERR(srv_sess_dev)) {
pr_err("Opening device '%s' on session %s failed, creating sess_dev failed, err: %ld\n",
full_path, srv_sess->sessname, PTR_ERR(srv_sess_dev));
@@ -774,7 +769,7 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess,
list_add(&srv_sess_dev->dev_list, &srv_dev->sess_dev_list);
mutex_unlock(&srv_dev->lock);
- rnbd_srv_info(srv_sess_dev, "Opened device '%s'\n", srv_dev->id);
+ rnbd_srv_info(srv_sess_dev, "Opened device '%s'\n", srv_dev->name);
kfree(full_path);
@@ -795,7 +790,7 @@ srv_dev_put:
}
rnbd_put_srv_dev(srv_dev);
blkdev_put:
- blkdev_put(bdev, open_flags);
+ blkdev_put(bdev, NULL);
free_path:
kfree(full_path);
reject:
@@ -808,7 +803,7 @@ static struct rtrs_srv_ctx *rtrs_ctx;
static struct rtrs_srv_ops rtrs_ops;
static int __init rnbd_srv_init_module(void)
{
- int err;
+ int err = 0;
BUILD_BUG_ON(sizeof(struct rnbd_msg_hdr) != 4);
BUILD_BUG_ON(sizeof(struct rnbd_msg_sess_info) != 36);
@@ -822,19 +817,17 @@ static int __init rnbd_srv_init_module(void)
};
rtrs_ctx = rtrs_srv_open(&rtrs_ops, port_nr);
if (IS_ERR(rtrs_ctx)) {
- err = PTR_ERR(rtrs_ctx);
- pr_err("rtrs_srv_open(), err: %d\n", err);
- return err;
+ pr_err("rtrs_srv_open(), err: %d\n", (int)PTR_ERR(rtrs_ctx));
+ return PTR_ERR(rtrs_ctx);
}
err = rnbd_srv_create_sysfs_files();
if (err) {
pr_err("rnbd_srv_create_sysfs_files(), err: %d\n", err);
rtrs_srv_close(rtrs_ctx);
- return err;
}
- return 0;
+ return err;
}
static void __exit rnbd_srv_cleanup_module(void)
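
The blkdev_get_by_path()/blkdev_put() calls above reflect the 6.5-era API: the mode is a blk_mode_t, and a holder cookie (plus optional blk_holder_ops) replaces the old FMODE_EXCL convention. A minimal sketch under that assumption; the NULL holder mirrors the server hunks:

#include <linux/blkdev.h>

static int demo_open_bdev(const char *path)
{
	struct block_device *bdev;

	bdev = blkdev_get_by_path(path, BLK_OPEN_READ | BLK_OPEN_WRITE,
				  NULL, NULL);	/* holder, holder_ops */
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	/* ... submit I/O against bdev ... */

	blkdev_put(bdev, NULL);	/* holder must match blkdev_get_by_path() */
	return 0;
}
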
diff --git a/drivers/block/rnbd/rnbd-srv.h b/drivers/block/rnbd/rnbd-srv.h
index f5962fd31d62..1027656dedb0 100644
--- a/drivers/block/rnbd/rnbd-srv.h
+++ b/drivers/block/rnbd/rnbd-srv.h
@@ -35,7 +35,7 @@ struct rnbd_srv_dev {
struct kobject dev_kobj;
struct kobject *dev_sessions_kobj;
struct kref kref;
- char id[NAME_MAX];
+ char name[NAME_MAX];
/* List of rnbd_srv_sess_dev structs */
struct list_head sess_dev_list;
struct mutex lock;
@@ -52,7 +52,7 @@ struct rnbd_srv_sess_dev {
struct kobject kobj;
u32 device_id;
bool keep_id;
- fmode_t open_flags;
+ bool readonly;
struct kref kref;
struct completion *destroy_comp;
char pathname[NAME_MAX];
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 9fa821fa76b0..7bf4b48e2282 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -139,7 +139,7 @@ static int vdc_getgeo(struct block_device *bdev, struct hd_geometry *geo)
* when vdisk_mtype is VD_MEDIA_TYPE_CD or VD_MEDIA_TYPE_DVD.
* Needed to be able to install inside an ldom from an iso image.
*/
-static int vdc_ioctl(struct block_device *bdev, fmode_t mode,
+static int vdc_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned command, unsigned long argument)
{
struct vdc_port *port = bdev->bd_disk->private_data;
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index 42b4b6828690..f85b6af414b4 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -608,20 +608,18 @@ static void setup_medium(struct floppy_state *fs)
}
}
-static int floppy_open(struct block_device *bdev, fmode_t mode)
+static int floppy_open(struct gendisk *disk, blk_mode_t mode)
{
- struct floppy_state *fs = bdev->bd_disk->private_data;
+ struct floppy_state *fs = disk->private_data;
struct swim __iomem *base = fs->swd->base;
int err;
- if (fs->ref_count == -1 || (fs->ref_count && mode & FMODE_EXCL))
+ if (fs->ref_count == -1 || (fs->ref_count && mode & BLK_OPEN_EXCL))
return -EBUSY;
-
- if (mode & FMODE_EXCL)
+ if (mode & BLK_OPEN_EXCL)
fs->ref_count = -1;
else
fs->ref_count++;
-
swim_write(base, setup, S_IBM_DRIVE | S_FCLK_DIV2);
udelay(10);
swim_drive(base, fs->location);
@@ -636,13 +634,13 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
set_capacity(fs->disk, fs->total_secs);
- if (mode & FMODE_NDELAY)
+ if (mode & BLK_OPEN_NDELAY)
return 0;
- if (mode & (FMODE_READ|FMODE_WRITE)) {
- if (bdev_check_media_change(bdev) && fs->disk_in)
+ if (mode & (BLK_OPEN_READ | BLK_OPEN_WRITE)) {
+ if (disk_check_media_change(disk) && fs->disk_in)
fs->ejected = 0;
- if ((mode & FMODE_WRITE) && fs->write_protected) {
+ if ((mode & BLK_OPEN_WRITE) && fs->write_protected) {
err = -EROFS;
goto out;
}
@@ -659,18 +657,18 @@ out:
return err;
}
-static int floppy_unlocked_open(struct block_device *bdev, fmode_t mode)
+static int floppy_unlocked_open(struct gendisk *disk, blk_mode_t mode)
{
int ret;
mutex_lock(&swim_mutex);
- ret = floppy_open(bdev, mode);
+ ret = floppy_open(disk, mode);
mutex_unlock(&swim_mutex);
return ret;
}
-static void floppy_release(struct gendisk *disk, fmode_t mode)
+static void floppy_release(struct gendisk *disk)
{
struct floppy_state *fs = disk->private_data;
struct swim __iomem *base = fs->swd->base;
@@ -686,7 +684,7 @@ static void floppy_release(struct gendisk *disk, fmode_t mode)
mutex_unlock(&swim_mutex);
}
-static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
+static int floppy_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long param)
{
struct floppy_state *fs = bdev->bd_disk->private_data;
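
swim's open path also moves from bdev_check_media_change() to disk_check_media_change(), matching the gendisk-based ->open. A condensed sketch of the open-time checks; struct demo_floppy and its fields are placeholders:

#include <linux/blkdev.h>

struct demo_floppy {
	bool disk_in;
	bool ejected;
	bool write_protected;
};

static int demo_floppy_open(struct gendisk *disk, blk_mode_t mode)
{
	struct demo_floppy *fs = disk->private_data;	/* hypothetical */

	if (mode & BLK_OPEN_NDELAY)	/* O_NDELAY: skip medium checks */
		return 0;

	if (mode & (BLK_OPEN_READ | BLK_OPEN_WRITE)) {
		if (disk_check_media_change(disk) && fs->disk_in)
			fs->ejected = false;
		if ((mode & BLK_OPEN_WRITE) && fs->write_protected)
			return -EROFS;
	}
	return 0;
}
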
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index da811a7da03f..dc43a63b3469 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -246,10 +246,9 @@ static int grab_drive(struct floppy_state *fs, enum swim_state state,
int interruptible);
static void release_drive(struct floppy_state *fs);
static int fd_eject(struct floppy_state *fs);
-static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
+static int floppy_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long param);
-static int floppy_open(struct block_device *bdev, fmode_t mode);
-static void floppy_release(struct gendisk *disk, fmode_t mode);
+static int floppy_open(struct gendisk *disk, blk_mode_t mode);
static unsigned int floppy_check_events(struct gendisk *disk,
unsigned int clearing);
static int floppy_revalidate(struct gendisk *disk);
@@ -883,7 +882,7 @@ static int fd_eject(struct floppy_state *fs)
static struct floppy_struct floppy_type =
{ 2880,18,2,80,0,0x1B,0x00,0xCF,0x6C,NULL }; /* 7 1.44MB 3.5" */
-static int floppy_locked_ioctl(struct block_device *bdev, fmode_t mode,
+static int floppy_locked_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long param)
{
struct floppy_state *fs = bdev->bd_disk->private_data;
@@ -911,7 +910,7 @@ static int floppy_locked_ioctl(struct block_device *bdev, fmode_t mode,
return -ENOTTY;
}
-static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
+static int floppy_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long param)
{
int ret;
@@ -923,9 +922,9 @@ static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
return ret;
}
-static int floppy_open(struct block_device *bdev, fmode_t mode)
+static int floppy_open(struct gendisk *disk, blk_mode_t mode)
{
- struct floppy_state *fs = bdev->bd_disk->private_data;
+ struct floppy_state *fs = disk->private_data;
struct swim3 __iomem *sw = fs->swim3;
int n, err = 0;
@@ -958,18 +957,18 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
swim3_action(fs, SETMFM);
swim3_select(fs, RELAX);
- } else if (fs->ref_count == -1 || mode & FMODE_EXCL)
+ } else if (fs->ref_count == -1 || mode & BLK_OPEN_EXCL)
return -EBUSY;
- if (err == 0 && (mode & FMODE_NDELAY) == 0
- && (mode & (FMODE_READ|FMODE_WRITE))) {
- if (bdev_check_media_change(bdev))
- floppy_revalidate(bdev->bd_disk);
+ if (err == 0 && !(mode & BLK_OPEN_NDELAY) &&
+ (mode & (BLK_OPEN_READ | BLK_OPEN_WRITE))) {
+ if (disk_check_media_change(disk))
+ floppy_revalidate(disk);
if (fs->ejected)
err = -ENXIO;
}
- if (err == 0 && (mode & FMODE_WRITE)) {
+ if (err == 0 && (mode & BLK_OPEN_WRITE)) {
if (fs->write_prot < 0)
fs->write_prot = swim3_readbit(fs, WRITE_PROT);
if (fs->write_prot)
@@ -985,7 +984,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
return err;
}
- if (mode & FMODE_EXCL)
+ if (mode & BLK_OPEN_EXCL)
fs->ref_count = -1;
else
++fs->ref_count;
@@ -993,18 +992,18 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
return 0;
}
-static int floppy_unlocked_open(struct block_device *bdev, fmode_t mode)
+static int floppy_unlocked_open(struct gendisk *disk, blk_mode_t mode)
{
int ret;
mutex_lock(&swim3_mutex);
- ret = floppy_open(bdev, mode);
+ ret = floppy_open(disk, mode);
mutex_unlock(&swim3_mutex);
return ret;
}
-static void floppy_release(struct gendisk *disk, fmode_t mode)
+static void floppy_release(struct gendisk *disk)
{
struct floppy_state *fs = disk->private_data;
struct swim3 __iomem *sw = fs->swim3;
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index c7ed5d69e9ee..1c823750c95a 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -43,6 +43,7 @@
#include <asm/page.h>
#include <linux/task_work.h>
#include <linux/namei.h>
+#include <linux/kref.h>
#include <uapi/linux/ublk_cmd.h>
#define UBLK_MINORS (1U << MINORBITS)
@@ -54,7 +55,8 @@
| UBLK_F_USER_RECOVERY \
| UBLK_F_USER_RECOVERY_REISSUE \
| UBLK_F_UNPRIVILEGED_DEV \
- | UBLK_F_CMD_IOCTL_ENCODE)
+ | UBLK_F_CMD_IOCTL_ENCODE \
+ | UBLK_F_USER_COPY)
/* All UBLK_PARAM_TYPE_* should be included here */
#define UBLK_PARAM_TYPE_ALL (UBLK_PARAM_TYPE_BASIC | \
@@ -62,7 +64,8 @@
struct ublk_rq_data {
struct llist_node node;
- struct callback_head work;
+
+ struct kref ref;
};
struct ublk_uring_cmd_pdu {
@@ -182,8 +185,13 @@ struct ublk_params_header {
__u32 types;
};
+static inline void __ublk_complete_rq(struct request *req);
+static void ublk_complete_rq(struct kref *ref);
+
static dev_t ublk_chr_devt;
-static struct class *ublk_chr_class;
+static const struct class ublk_chr_class = {
+ .name = "ublk-char",
+};
static DEFINE_IDR(ublk_index_idr);
static DEFINE_SPINLOCK(ublk_idr_lock);
@@ -202,6 +210,23 @@ static unsigned int ublks_added; /* protected by ublk_ctl_mutex */
static struct miscdevice ublk_misc;
+static inline unsigned ublk_pos_to_hwq(loff_t pos)
+{
+ return ((pos - UBLKSRV_IO_BUF_OFFSET) >> UBLK_QID_OFF) &
+ UBLK_QID_BITS_MASK;
+}
+
+static inline unsigned ublk_pos_to_buf_off(loff_t pos)
+{
+ return (pos - UBLKSRV_IO_BUF_OFFSET) & UBLK_IO_BUF_BITS_MASK;
+}
+
+static inline unsigned ublk_pos_to_tag(loff_t pos)
+{
+ return ((pos - UBLKSRV_IO_BUF_OFFSET) >> UBLK_TAG_OFF) &
+ UBLK_TAG_BITS_MASK;
+}
+
static void ublk_dev_param_basic_apply(struct ublk_device *ub)
{
struct request_queue *q = ub->ub_disk->queue;
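
The three helpers just added decode the pseudo file offset used by ublk's new user-copy read()/write() path. For orientation, the inverse mapping, i.e. how a (queue, tag) pair would be encoded into a pos, using the same UBLK_*_OFF constants from <uapi/linux/ublk_cmd.h> (demo helper, not in the patch):

static inline loff_t demo_ublk_io_buf_pos(unsigned int q_id, unsigned int tag)
{
	return UBLKSRV_IO_BUF_OFFSET +
		(((loff_t)q_id << UBLK_QID_OFF) |
		 ((loff_t)tag << UBLK_TAG_OFF));
}
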
@@ -290,12 +315,52 @@ static int ublk_apply_params(struct ublk_device *ub)
return 0;
}
-static inline bool ublk_can_use_task_work(const struct ublk_queue *ubq)
+static inline bool ublk_support_user_copy(const struct ublk_queue *ubq)
{
- if (IS_BUILTIN(CONFIG_BLK_DEV_UBLK) &&
- !(ubq->flags & UBLK_F_URING_CMD_COMP_IN_TASK))
- return true;
- return false;
+ return ubq->flags & UBLK_F_USER_COPY;
+}
+
+static inline bool ublk_need_req_ref(const struct ublk_queue *ubq)
+{
+ /*
+ * read()/write() is involved in the user copy case, so a reference
+ * on the request has to be grabbed
+ */
+ return ublk_support_user_copy(ubq);
+}
+
+static inline void ublk_init_req_ref(const struct ublk_queue *ubq,
+ struct request *req)
+{
+ if (ublk_need_req_ref(ubq)) {
+ struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
+
+ kref_init(&data->ref);
+ }
+}
+
+static inline bool ublk_get_req_ref(const struct ublk_queue *ubq,
+ struct request *req)
+{
+ if (ublk_need_req_ref(ubq)) {
+ struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
+
+ return kref_get_unless_zero(&data->ref);
+ }
+
+ return true;
+}
+
+static inline void ublk_put_req_ref(const struct ublk_queue *ubq,
+ struct request *req)
+{
+ if (ublk_need_req_ref(ubq)) {
+ struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
+
+ kref_put(&data->ref, ublk_complete_rq);
+ } else {
+ __ublk_complete_rq(req);
+ }
}
static inline bool ublk_need_get_data(const struct ublk_queue *ubq)
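
The ref helpers above implement a conditional kref: the atomic reference is only maintained when user copy is enabled, because only then can read()/write() race with normal completion; otherwise the put degenerates to a direct completion. The idiom in isolation (demo_* names are illustrative):

#include <linux/kref.h>
#include <linux/slab.h>

struct demo_obj {
	struct kref ref;
	bool shared;		/* can a second path still reach us? */
};

static void demo_free(struct kref *ref)
{
	kfree(container_of(ref, struct demo_obj, ref));
}

static void demo_put(struct demo_obj *obj)
{
	if (obj->shared)
		kref_put(&obj->ref, demo_free);
	else
		demo_free(&obj->ref);	/* single owner: release directly */
}
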
@@ -384,9 +449,9 @@ static void ublk_store_owner_uid_gid(unsigned int *owner_uid,
*owner_gid = from_kgid(&init_user_ns, gid);
}
-static int ublk_open(struct block_device *bdev, fmode_t mode)
+static int ublk_open(struct gendisk *disk, blk_mode_t mode)
{
- struct ublk_device *ub = bdev->bd_disk->private_data;
+ struct ublk_device *ub = disk->private_data;
if (capable(CAP_SYS_ADMIN))
return 0;
@@ -421,49 +486,39 @@ static const struct block_device_operations ub_fops = {
#define UBLK_MAX_PIN_PAGES 32
-struct ublk_map_data {
- const struct request *rq;
- unsigned long ubuf;
- unsigned int len;
-};
-
struct ublk_io_iter {
struct page *pages[UBLK_MAX_PIN_PAGES];
- unsigned pg_off; /* offset in the 1st page in pages */
- int nr_pages; /* how many page pointers in pages */
struct bio *bio;
struct bvec_iter iter;
};
-static inline unsigned ublk_copy_io_pages(struct ublk_io_iter *data,
- unsigned max_bytes, bool to_vm)
+/* copy 'total' bytes between the request pages and the pinned pages */
+static void ublk_copy_io_pages(struct ublk_io_iter *data,
+ size_t total, size_t pg_off, int dir)
{
- const unsigned total = min_t(unsigned, max_bytes,
- PAGE_SIZE - data->pg_off +
- ((data->nr_pages - 1) << PAGE_SHIFT));
unsigned done = 0;
unsigned pg_idx = 0;
while (done < total) {
struct bio_vec bv = bio_iter_iovec(data->bio, data->iter);
- const unsigned int bytes = min3(bv.bv_len, total - done,
- (unsigned)(PAGE_SIZE - data->pg_off));
+ unsigned int bytes = min3(bv.bv_len, (unsigned)total - done,
+ (unsigned)(PAGE_SIZE - pg_off));
void *bv_buf = bvec_kmap_local(&bv);
void *pg_buf = kmap_local_page(data->pages[pg_idx]);
- if (to_vm)
- memcpy(pg_buf + data->pg_off, bv_buf, bytes);
+ if (dir == ITER_DEST)
+ memcpy(pg_buf + pg_off, bv_buf, bytes);
else
- memcpy(bv_buf, pg_buf + data->pg_off, bytes);
+ memcpy(bv_buf, pg_buf + pg_off, bytes);
kunmap_local(pg_buf);
kunmap_local(bv_buf);
/* advance page array */
- data->pg_off += bytes;
- if (data->pg_off == PAGE_SIZE) {
+ pg_off += bytes;
+ if (pg_off == PAGE_SIZE) {
pg_idx += 1;
- data->pg_off = 0;
+ pg_off = 0;
}
done += bytes;
@@ -477,41 +532,58 @@ static inline unsigned ublk_copy_io_pages(struct ublk_io_iter *data,
data->iter = data->bio->bi_iter;
}
}
+}
- return done;
+static bool ublk_advance_io_iter(const struct request *req,
+ struct ublk_io_iter *iter, unsigned int offset)
+{
+ struct bio *bio = req->bio;
+
+ for_each_bio(bio) {
+ if (bio->bi_iter.bi_size > offset) {
+ iter->bio = bio;
+ iter->iter = bio->bi_iter;
+ bio_advance_iter(iter->bio, &iter->iter, offset);
+ return true;
+ }
+ offset -= bio->bi_iter.bi_size;
+ }
+ return false;
}
-static int ublk_copy_user_pages(struct ublk_map_data *data, bool to_vm)
+/*
+ * Copy data between request pages and io_iter; 'offset' is the
+ * linear byte offset into the request at which the copy starts.
+ */
+static size_t ublk_copy_user_pages(const struct request *req,
+ unsigned offset, struct iov_iter *uiter, int dir)
{
- const unsigned int gup_flags = to_vm ? FOLL_WRITE : 0;
- const unsigned long start_vm = data->ubuf;
- unsigned int done = 0;
- struct ublk_io_iter iter = {
- .pg_off = start_vm & (PAGE_SIZE - 1),
- .bio = data->rq->bio,
- .iter = data->rq->bio->bi_iter,
- };
- const unsigned int nr_pages = round_up(data->len +
- (start_vm & (PAGE_SIZE - 1)), PAGE_SIZE) >> PAGE_SHIFT;
-
- while (done < nr_pages) {
- const unsigned to_pin = min_t(unsigned, UBLK_MAX_PIN_PAGES,
- nr_pages - done);
- unsigned i, len;
-
- iter.nr_pages = get_user_pages_fast(start_vm +
- (done << PAGE_SHIFT), to_pin, gup_flags,
- iter.pages);
- if (iter.nr_pages <= 0)
- return done == 0 ? iter.nr_pages : done;
- len = ublk_copy_io_pages(&iter, data->len, to_vm);
- for (i = 0; i < iter.nr_pages; i++) {
- if (to_vm)
+ struct ublk_io_iter iter;
+ size_t done = 0;
+
+ if (!ublk_advance_io_iter(req, &iter, offset))
+ return 0;
+
+ while (iov_iter_count(uiter) && iter.bio) {
+ unsigned nr_pages;
+ ssize_t len;
+ size_t off;
+ int i;
+
+ len = iov_iter_get_pages2(uiter, iter.pages,
+ iov_iter_count(uiter),
+ UBLK_MAX_PIN_PAGES, &off);
+ if (len <= 0)
+ return done;
+
+ ublk_copy_io_pages(&iter, len, off, dir);
+ nr_pages = DIV_ROUND_UP(len + off, PAGE_SIZE);
+ for (i = 0; i < nr_pages; i++) {
+ if (dir == ITER_DEST)
set_page_dirty(iter.pages[i]);
put_page(iter.pages[i]);
}
- data->len -= len;
- done += iter.nr_pages;
+ done += len;
}
return done;
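
The rewritten ublk_copy_user_pages() pins user pages straight from the iov_iter instead of via get_user_pages_fast(). A freestanding sketch of that pin/copy/release loop; note that iov_iter_get_pages2() advances the iterator, which is what makes the loop terminate:

#include <linux/uio.h>
#include <linux/mm.h>

static size_t demo_drain_iter(struct iov_iter *uiter, int dir)
{
	struct page *pages[16];
	size_t done = 0, off;
	ssize_t len;
	int i, nr;

	while (iov_iter_count(uiter)) {
		len = iov_iter_get_pages2(uiter, pages, iov_iter_count(uiter),
					  ARRAY_SIZE(pages), &off);
		if (len <= 0)
			break;

		/* ... copy 'len' bytes starting at pages[0] + 'off' ... */

		nr = DIV_ROUND_UP(len + off, PAGE_SIZE);
		for (i = 0; i < nr; i++) {
			/* ITER_DEST: the user pages were written to */
			if (dir == ITER_DEST)
				set_page_dirty(pages[i]);
			put_page(pages[i]);
		}
		done += len;
	}
	return done;
}
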
@@ -532,21 +604,23 @@ static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
{
const unsigned int rq_bytes = blk_rq_bytes(req);
+ if (ublk_support_user_copy(ubq))
+ return rq_bytes;
+
/*
 * no zero copy: we delay copying WRITE request data into the
 * ublksrv context, the big benefit being that pinning pages in the
 * current context is pretty fast, see ublk_copy_user_pages
*/
if (ublk_need_map_req(req)) {
- struct ublk_map_data data = {
- .rq = req,
- .ubuf = io->addr,
- .len = rq_bytes,
- };
+ struct iov_iter iter;
+ struct iovec iov;
+ const int dir = ITER_DEST;
- ublk_copy_user_pages(&data, true);
+ import_single_range(dir, u64_to_user_ptr(io->addr), rq_bytes,
+ &iov, &iter);
- return rq_bytes - data.len;
+ return ublk_copy_user_pages(req, 0, &iter, dir);
}
return rq_bytes;
}
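
import_single_range(), as used in the hunk above (and the matching unmap hunk below), wraps a single user buffer in an iov_iter so one copy path serves both the pre-registered buffer case and the new read()/write() case. A minimal sketch with the 6.5-era signature (the function was later superseded by import_ubuf()):

#include <linux/uio.h>

static ssize_t demo_wrap_ubuf(void __user *ubuf, size_t len)
{
	struct iov_iter iter;
	struct iovec iov;
	int ret;

	ret = import_single_range(ITER_DEST, ubuf, len, &iov, &iter);
	if (ret)
		return ret;

	/* iter is now ready for copy_to_iter()/copy_page_to_iter() */
	return iov_iter_count(&iter);
}
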
@@ -557,18 +631,19 @@ static int ublk_unmap_io(const struct ublk_queue *ubq,
{
const unsigned int rq_bytes = blk_rq_bytes(req);
+ if (ublk_support_user_copy(ubq))
+ return rq_bytes;
+
if (ublk_need_unmap_req(req)) {
- struct ublk_map_data data = {
- .rq = req,
- .ubuf = io->addr,
- .len = io->res,
- };
+ struct iov_iter iter;
+ struct iovec iov;
+ const int dir = ITER_SOURCE;
WARN_ON_ONCE(io->res > rq_bytes);
- ublk_copy_user_pages(&data, false);
-
- return io->res - data.len;
+ import_single_range(dir, u64_to_user_ptr(io->addr), io->res,
+ &iov, &iter);
+ return ublk_copy_user_pages(req, 0, &iter, dir);
}
return rq_bytes;
}
@@ -648,13 +723,19 @@ static inline bool ubq_daemon_is_dying(struct ublk_queue *ubq)
}
/* todo: handle partial completion */
-static void ublk_complete_rq(struct request *req)
+static inline void __ublk_complete_rq(struct request *req)
{
struct ublk_queue *ubq = req->mq_hctx->driver_data;
struct ublk_io *io = &ubq->ios[req->tag];
unsigned int unmapped_bytes;
blk_status_t res = BLK_STS_OK;
+ /* called from ublk_abort_queue() code path */
+ if (io->flags & UBLK_IO_FLAG_ABORTED) {
+ res = BLK_STS_IOERR;
+ goto exit;
+ }
+
/* failed read IO if nothing is read */
if (!io->res && req_op(req) == REQ_OP_READ)
io->res = -EIO;
@@ -694,6 +775,15 @@ exit:
blk_mq_end_request(req, res);
}
+static void ublk_complete_rq(struct kref *ref)
+{
+ struct ublk_rq_data *data = container_of(ref, struct ublk_rq_data,
+ ref);
+ struct request *req = blk_mq_rq_from_pdu(data);
+
+ __ublk_complete_rq(req);
+}
+
/*
* Since __ublk_rq_task_work always fails requests immediately during
* exiting, __ublk_fail_req() is only called from abort context during
@@ -712,7 +802,7 @@ static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
if (ublk_queue_can_use_recovery_reissue(ubq))
blk_mq_requeue_request(req, false);
else
- blk_mq_end_request(req, BLK_STS_IOERR);
+ ublk_put_req_ref(ubq, req);
}
}
@@ -821,6 +911,7 @@ static inline void __ublk_rq_task_work(struct request *req,
mapped_bytes >> 9;
}
+ ublk_init_req_ref(ubq, req);
ubq_complete_io_cmd(io, UBLK_IO_RES_OK, issue_flags);
}
@@ -852,17 +943,6 @@ static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd, unsigned issue_flags)
ublk_forward_io_cmds(ubq, issue_flags);
}
-static void ublk_rq_task_work_fn(struct callback_head *work)
-{
- struct ublk_rq_data *data = container_of(work,
- struct ublk_rq_data, work);
- struct request *req = blk_mq_rq_from_pdu(data);
- struct ublk_queue *ubq = req->mq_hctx->driver_data;
- unsigned issue_flags = IO_URING_F_UNLOCKED;
-
- ublk_forward_io_cmds(ubq, issue_flags);
-}
-
static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
{
struct ublk_rq_data *data = blk_mq_rq_to_pdu(rq);
@@ -886,10 +966,6 @@ static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
*/
if (unlikely(io->flags & UBLK_IO_FLAG_ABORTED)) {
ublk_abort_io_cmds(ubq);
- } else if (ublk_can_use_task_work(ubq)) {
- if (task_work_add(ubq->ubq_daemon, &data->work,
- TWA_SIGNAL_NO_IPI))
- ublk_abort_io_cmds(ubq);
} else {
struct io_uring_cmd *cmd = io->cmd;
struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
@@ -961,19 +1037,9 @@ static int ublk_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
return 0;
}
-static int ublk_init_rq(struct blk_mq_tag_set *set, struct request *req,
- unsigned int hctx_idx, unsigned int numa_node)
-{
- struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
-
- init_task_work(&data->work, ublk_rq_task_work_fn);
- return 0;
-}
-
static const struct blk_mq_ops ublk_mq_ops = {
.queue_rq = ublk_queue_rq,
.init_hctx = ublk_init_hctx,
- .init_request = ublk_init_rq,
.timeout = ublk_timeout,
};
@@ -1050,7 +1116,7 @@ static void ublk_commit_completion(struct ublk_device *ub,
req = blk_mq_tag_to_rq(ub->tag_set.tags[qid], tag);
if (req && likely(!blk_should_fake_timeout(req->q)))
- ublk_complete_rq(req);
+ ublk_put_req_ref(ubq, req);
}
/*
@@ -1120,6 +1186,11 @@ static inline bool ublk_queue_ready(struct ublk_queue *ubq)
return ubq->nr_io_ready == ubq->q_depth;
}
+static void ublk_cmd_cancel_cb(struct io_uring_cmd *cmd, unsigned issue_flags)
+{
+ io_uring_cmd_done(cmd, UBLK_IO_RES_ABORT, 0, issue_flags);
+}
+
static void ublk_cancel_queue(struct ublk_queue *ubq)
{
int i;
@@ -1131,8 +1202,8 @@ static void ublk_cancel_queue(struct ublk_queue *ubq)
struct ublk_io *io = &ubq->ios[i];
if (io->flags & UBLK_IO_FLAG_ACTIVE)
- io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0,
- IO_URING_F_UNLOCKED);
+ io_uring_cmd_complete_in_task(io->cmd,
+ ublk_cmd_cancel_cb);
}
/* all io commands are canceled */
@@ -1290,6 +1361,14 @@ static inline int ublk_check_cmd_op(u32 cmd_op)
return 0;
}
+static inline void ublk_fill_io_cmd(struct ublk_io *io,
+ struct io_uring_cmd *cmd, unsigned long buf_addr)
+{
+ io->cmd = cmd;
+ io->flags |= UBLK_IO_FLAG_ACTIVE;
+ io->addr = buf_addr;
+}
+
static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
unsigned int issue_flags,
const struct ublksrv_io_cmd *ub_cmd)
@@ -1335,6 +1414,11 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
^ (_IOC_NR(cmd_op) == UBLK_IO_NEED_GET_DATA))
goto out;
+ if (ublk_support_user_copy(ubq) && ub_cmd->addr) {
+ ret = -EINVAL;
+ goto out;
+ }
+
ret = ublk_check_cmd_op(cmd_op);
if (ret)
goto out;
@@ -1353,36 +1437,41 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
*/
if (io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)
goto out;
- /* FETCH_RQ has to provide IO buffer if NEED GET DATA is not enabled */
- if (!ub_cmd->addr && !ublk_need_get_data(ubq))
- goto out;
- io->cmd = cmd;
- io->flags |= UBLK_IO_FLAG_ACTIVE;
- io->addr = ub_cmd->addr;
+ if (!ublk_support_user_copy(ubq)) {
+ /*
+ * FETCH_RQ has to provide IO buffer if NEED GET
+ * DATA is not enabled
+ */
+ if (!ub_cmd->addr && !ublk_need_get_data(ubq))
+ goto out;
+ }
+
+ ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
ublk_mark_io_ready(ub, ubq);
break;
case UBLK_IO_COMMIT_AND_FETCH_REQ:
req = blk_mq_tag_to_rq(ub->tag_set.tags[ub_cmd->q_id], tag);
- /*
- * COMMIT_AND_FETCH_REQ has to provide IO buffer if NEED GET DATA is
- * not enabled or it is Read IO.
- */
- if (!ub_cmd->addr && (!ublk_need_get_data(ubq) || req_op(req) == REQ_OP_READ))
- goto out;
+
if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
goto out;
- io->addr = ub_cmd->addr;
- io->flags |= UBLK_IO_FLAG_ACTIVE;
- io->cmd = cmd;
+
+ if (!ublk_support_user_copy(ubq)) {
+ /*
+ * COMMIT_AND_FETCH_REQ has to provide IO buffer if
+ * NEED GET DATA is not enabled or it is Read IO.
+ */
+ if (!ub_cmd->addr && (!ublk_need_get_data(ubq) ||
+ req_op(req) == REQ_OP_READ))
+ goto out;
+ }
+ ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
ublk_commit_completion(ub, ub_cmd);
break;
case UBLK_IO_NEED_GET_DATA:
if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
goto out;
- io->addr = ub_cmd->addr;
- io->cmd = cmd;
- io->flags |= UBLK_IO_FLAG_ACTIVE;
+ ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
ublk_handle_need_get_data(ub, ub_cmd->q_id, ub_cmd->tag);
break;
default:
@@ -1397,6 +1486,36 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
return -EIOCBQUEUED;
}
+static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
+ struct ublk_queue *ubq, int tag, size_t offset)
+{
+ struct request *req;
+
+ if (!ublk_need_req_ref(ubq))
+ return NULL;
+
+ req = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], tag);
+ if (!req)
+ return NULL;
+
+ if (!ublk_get_req_ref(ubq, req))
+ return NULL;
+
+ if (unlikely(!blk_mq_request_started(req) || req->tag != tag))
+ goto fail_put;
+
+ if (!ublk_rq_has_data(req))
+ goto fail_put;
+
+ if (offset > blk_rq_bytes(req))
+ goto fail_put;
+
+ return req;
+fail_put:
+ ublk_put_req_ref(ubq, req);
+ return NULL;
+}
+
static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
/*
@@ -1414,11 +1533,112 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
return __ublk_ch_uring_cmd(cmd, issue_flags, &ub_cmd);
}
+static inline bool ublk_check_ubuf_dir(const struct request *req,
+ int ubuf_dir)
+{
+ /* copy ubuf to request pages */
+ if (req_op(req) == REQ_OP_READ && ubuf_dir == ITER_SOURCE)
+ return true;
+
+ /* copy request pages to ubuf */
+ if (req_op(req) == REQ_OP_WRITE && ubuf_dir == ITER_DEST)
+ return true;
+
+ return false;
+}
+
+static struct request *ublk_check_and_get_req(struct kiocb *iocb,
+ struct iov_iter *iter, size_t *off, int dir)
+{
+ struct ublk_device *ub = iocb->ki_filp->private_data;
+ struct ublk_queue *ubq;
+ struct request *req;
+ size_t buf_off;
+ u16 tag, q_id;
+
+ if (!ub)
+ return ERR_PTR(-EACCES);
+
+ if (!user_backed_iter(iter))
+ return ERR_PTR(-EACCES);
+
+ if (ub->dev_info.state == UBLK_S_DEV_DEAD)
+ return ERR_PTR(-EACCES);
+
+ tag = ublk_pos_to_tag(iocb->ki_pos);
+ q_id = ublk_pos_to_hwq(iocb->ki_pos);
+ buf_off = ublk_pos_to_buf_off(iocb->ki_pos);
+
+ if (q_id >= ub->dev_info.nr_hw_queues)
+ return ERR_PTR(-EINVAL);
+
+ ubq = ublk_get_queue(ub, q_id);
+ if (!ubq)
+ return ERR_PTR(-EINVAL);
+
+ if (tag >= ubq->q_depth)
+ return ERR_PTR(-EINVAL);
+
+ req = __ublk_check_and_get_req(ub, ubq, tag, buf_off);
+ if (!req)
+ return ERR_PTR(-EINVAL);
+
+ if (!req->mq_hctx || !req->mq_hctx->driver_data)
+ goto fail;
+
+ if (!ublk_check_ubuf_dir(req, dir))
+ goto fail;
+
+ *off = buf_off;
+ return req;
+fail:
+ ublk_put_req_ref(ubq, req);
+ return ERR_PTR(-EACCES);
+}
+
+static ssize_t ublk_ch_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+ struct ublk_queue *ubq;
+ struct request *req;
+ size_t buf_off;
+ size_t ret;
+
+ req = ublk_check_and_get_req(iocb, to, &buf_off, ITER_DEST);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ ret = ublk_copy_user_pages(req, buf_off, to, ITER_DEST);
+ ubq = req->mq_hctx->driver_data;
+ ublk_put_req_ref(ubq, req);
+
+ return ret;
+}
+
+static ssize_t ublk_ch_write_iter(struct kiocb *iocb, struct iov_iter *from)
+{
+ struct ublk_queue *ubq;
+ struct request *req;
+ size_t buf_off;
+ size_t ret;
+
+ req = ublk_check_and_get_req(iocb, from, &buf_off, ITER_SOURCE);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ ret = ublk_copy_user_pages(req, buf_off, from, ITER_SOURCE);
+ ubq = req->mq_hctx->driver_data;
+ ublk_put_req_ref(ubq, req);
+
+ return ret;
+}
+
static const struct file_operations ublk_ch_fops = {
.owner = THIS_MODULE,
.open = ublk_ch_open,
.release = ublk_ch_release,
.llseek = no_llseek,
+ .read_iter = ublk_ch_read_iter,
+ .write_iter = ublk_ch_write_iter,
.uring_cmd = ublk_ch_uring_cmd,
.mmap = ublk_ch_mmap,
};
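
With read_iter/write_iter wired into ublk_ch_fops, a ublk server can move I/O payloads with plain pread()/pwrite() on the character device. A hypothetical userspace fragment; per ublk_check_ubuf_dir() above, the data for a READ request is pushed with pwrite(), and the offset encoding mirrors the ublk_pos_to_*() helpers (constants from <linux/ublk_cmd.h>):

#include <stdint.h>
#include <unistd.h>
#include <linux/ublk_cmd.h>

static ssize_t push_read_payload(int char_fd, uint16_t q_id, uint16_t tag,
				 const void *buf, size_t len)
{
	off_t pos = UBLKSRV_IO_BUF_OFFSET +
		    (((uint64_t)q_id << UBLK_QID_OFF) |
		     ((uint64_t)tag << UBLK_TAG_OFF));

	/* the kernel copies 'buf' into the request's bio pages */
	return pwrite(char_fd, buf, len, pos);
}
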
@@ -1542,7 +1762,7 @@ static int ublk_add_chdev(struct ublk_device *ub)
dev->parent = ublk_misc.this_device;
dev->devt = MKDEV(MAJOR(ublk_chr_devt), minor);
- dev->class = ublk_chr_class;
+ dev->class = &ublk_chr_class;
dev->release = ublk_cdev_rel;
device_initialize(dev);
@@ -1813,10 +2033,12 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
*/
ub->dev_info.flags &= UBLK_F_ALL;
- if (!IS_BUILTIN(CONFIG_BLK_DEV_UBLK))
- ub->dev_info.flags |= UBLK_F_URING_CMD_COMP_IN_TASK;
+ ub->dev_info.flags |= UBLK_F_CMD_IOCTL_ENCODE |
+ UBLK_F_URING_CMD_COMP_IN_TASK;
- ub->dev_info.flags |= UBLK_F_CMD_IOCTL_ENCODE;
+ /* GET_DATA isn't needed any more with USER_COPY */
+ if (ub->dev_info.flags & UBLK_F_USER_COPY)
+ ub->dev_info.flags &= ~UBLK_F_NEED_GET_DATA;
/* We are not ready to support zero copy */
ub->dev_info.flags &= ~UBLK_F_SUPPORT_ZERO_COPY;
@@ -2128,6 +2350,21 @@ static int ublk_ctrl_end_recovery(struct ublk_device *ub,
return ret;
}
+static int ublk_ctrl_get_features(struct io_uring_cmd *cmd)
+{
+ const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
+ void __user *argp = (void __user *)(unsigned long)header->addr;
+ u64 features = UBLK_F_ALL & ~UBLK_F_SUPPORT_ZERO_COPY;
+
+ if (header->len != UBLK_FEATURES_LEN || !header->addr)
+ return -EINVAL;
+
+ if (copy_to_user(argp, &features, UBLK_FEATURES_LEN))
+ return -EFAULT;
+
+ return 0;
+}
+
/*
* All control commands are sent via /dev/ublk-control, so we have to check
* the destination device's permission
@@ -2208,6 +2445,7 @@ static int ublk_ctrl_uring_cmd_permission(struct ublk_device *ub,
case UBLK_CMD_GET_DEV_INFO2:
case UBLK_CMD_GET_QUEUE_AFFINITY:
case UBLK_CMD_GET_PARAMS:
+ case (_IOC_NR(UBLK_U_CMD_GET_FEATURES)):
mask = MAY_READ;
break;
case UBLK_CMD_START_DEV:
@@ -2257,6 +2495,11 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
if (ret)
goto out;
+ if (cmd_op == UBLK_U_CMD_GET_FEATURES) {
+ ret = ublk_ctrl_get_features(cmd);
+ goto out;
+ }
+
if (_IOC_NR(cmd_op) != UBLK_CMD_ADD_DEV) {
ret = -ENODEV;
ub = ublk_get_device_from_id(header->dev_id);
@@ -2332,6 +2575,9 @@ static int __init ublk_init(void)
{
int ret;
+ BUILD_BUG_ON((u64)UBLKSRV_IO_BUF_OFFSET +
+ UBLKSRV_IO_BUF_TOTAL_SIZE < UBLKSRV_IO_BUF_OFFSET);
+
init_waitqueue_head(&ublk_idr_wq);
ret = misc_register(&ublk_misc);
@@ -2342,11 +2588,10 @@ static int __init ublk_init(void)
if (ret)
goto unregister_mis;
- ublk_chr_class = class_create("ublk-char");
- if (IS_ERR(ublk_chr_class)) {
- ret = PTR_ERR(ublk_chr_class);
+ ret = class_register(&ublk_chr_class);
+ if (ret)
goto free_chrdev_region;
- }
+
return 0;
free_chrdev_region:
@@ -2364,7 +2609,7 @@ static void __exit ublk_exit(void)
idr_for_each_entry(&ublk_index_idr, ub, id)
ublk_remove(ub);
- class_destroy(ublk_chr_class);
+ class_unregister(&ublk_chr_class);
misc_deregister(&ublk_misc);
idr_destroy(&ublk_index_idr);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 2b918e28acaa..b47358da92a2 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -348,63 +348,33 @@ static inline void virtblk_request_done(struct request *req)
blk_mq_end_request(req, status);
}
-static void virtblk_complete_batch(struct io_comp_batch *iob)
-{
- struct request *req;
-
- rq_list_for_each(&iob->req_list, req) {
- virtblk_unmap_data(req, blk_mq_rq_to_pdu(req));
- virtblk_cleanup_cmd(req);
- }
- blk_mq_end_request_batch(iob);
-}
-
-static int virtblk_handle_req(struct virtio_blk_vq *vq,
- struct io_comp_batch *iob)
-{
- struct virtblk_req *vbr;
- int req_done = 0;
- unsigned int len;
-
- while ((vbr = virtqueue_get_buf(vq->vq, &len)) != NULL) {
- struct request *req = blk_mq_rq_from_pdu(vbr);
-
- if (likely(!blk_should_fake_timeout(req->q)) &&
- !blk_mq_complete_request_remote(req) &&
- !blk_mq_add_to_batch(req, iob, virtblk_vbr_status(vbr),
- virtblk_complete_batch))
- virtblk_request_done(req);
- req_done++;
- }
-
- return req_done;
-}
-
static void virtblk_done(struct virtqueue *vq)
{
struct virtio_blk *vblk = vq->vdev->priv;
- struct virtio_blk_vq *vblk_vq = &vblk->vqs[vq->index];
- int req_done = 0;
+ bool req_done = false;
+ int qid = vq->index;
+ struct virtblk_req *vbr;
unsigned long flags;
- DEFINE_IO_COMP_BATCH(iob);
+ unsigned int len;
- spin_lock_irqsave(&vblk_vq->lock, flags);
+ spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
do {
virtqueue_disable_cb(vq);
- req_done += virtblk_handle_req(vblk_vq, &iob);
+ while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
+ struct request *req = blk_mq_rq_from_pdu(vbr);
+ if (likely(!blk_should_fake_timeout(req->q)))
+ blk_mq_complete_request(req);
+ req_done = true;
+ }
if (unlikely(virtqueue_is_broken(vq)))
break;
} while (!virtqueue_enable_cb(vq));
- if (req_done) {
- if (!rq_list_empty(iob.req_list))
- iob.complete(&iob);
-
- /* In case queue is stopped waiting for more buffers. */
+ /* In case queue is stopped waiting for more buffers. */
+ if (req_done)
blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
- }
- spin_unlock_irqrestore(&vblk_vq->lock, flags);
+ spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
}
static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
@@ -1283,15 +1253,37 @@ static void virtblk_map_queues(struct blk_mq_tag_set *set)
}
}
+static void virtblk_complete_batch(struct io_comp_batch *iob)
+{
+ struct request *req;
+
+ rq_list_for_each(&iob->req_list, req) {
+ virtblk_unmap_data(req, blk_mq_rq_to_pdu(req));
+ virtblk_cleanup_cmd(req);
+ }
+ blk_mq_end_request_batch(iob);
+}
+
static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
struct virtio_blk *vblk = hctx->queue->queuedata;
struct virtio_blk_vq *vq = get_virtio_blk_vq(hctx);
+ struct virtblk_req *vbr;
unsigned long flags;
+ unsigned int len;
int found = 0;
spin_lock_irqsave(&vq->lock, flags);
- found = virtblk_handle_req(vq, iob);
+
+ while ((vbr = virtqueue_get_buf(vq->vq, &len)) != NULL) {
+ struct request *req = blk_mq_rq_from_pdu(vbr);
+
+ found++;
+ if (!blk_mq_complete_request_remote(req) &&
+ !blk_mq_add_to_batch(req, iob, virtblk_vbr_status(vbr),
+ virtblk_complete_batch))
+ virtblk_request_done(req);
+ }
if (found)
blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
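
With the interrupt path reverted to per-request completion above, batching survives only in this poll path. The driver-side contract of the loop, reduced to one helper: try remote completion, then the batch, and only complete directly if both decline (demo_* pieces stand in for driver specifics):

#include <linux/blk-mq.h>

/* driver-specific pieces, declared only to keep the sketch compilable */
static void demo_batch_done(struct io_comp_batch *iob);
static void demo_one_done(struct request *req);

static void demo_try_batch(struct request *req, struct io_comp_batch *iob,
			   int error)
{
	if (!blk_mq_complete_request_remote(req) &&
	    !blk_mq_add_to_batch(req, iob, error, demo_batch_done))
		demo_one_done(req);	/* neither path took it: do it here */
}
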
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 4807af1d5805..bb66178c432b 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -473,7 +473,7 @@ static void xenvbd_sysfs_delif(struct xenbus_device *dev)
static void xen_vbd_free(struct xen_vbd *vbd)
{
if (vbd->bdev)
- blkdev_put(vbd->bdev, vbd->readonly ? FMODE_READ : FMODE_WRITE);
+ blkdev_put(vbd->bdev, NULL);
vbd->bdev = NULL;
}
@@ -492,7 +492,7 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
vbd->pdevice = MKDEV(major, minor);
bdev = blkdev_get_by_dev(vbd->pdevice, vbd->readonly ?
- FMODE_READ : FMODE_WRITE, NULL);
+ BLK_OPEN_READ : BLK_OPEN_WRITE, NULL, NULL);
if (IS_ERR(bdev)) {
pr_warn("xen_vbd_create: device %08x could not be opened\n",
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 23ed258b57f0..434fab306777 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -509,7 +509,7 @@ static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
return 0;
}
-static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
+static int blkif_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned command, unsigned long argument)
{
struct blkfront_info *info = bdev->bd_disk->private_data;
@@ -780,7 +780,8 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
ring_req->u.rw.handle = info->handle;
ring_req->operation = rq_data_dir(req) ?
BLKIF_OP_WRITE : BLKIF_OP_READ;
- if (req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA) {
+ if (req_op(req) == REQ_OP_FLUSH ||
+ (req_op(req) == REQ_OP_WRITE && (req->cmd_flags & REQ_FUA))) {
/*
* Ideally we can do an unordered flush-to-disk.
 * In case the backend only supports barriers, use that.
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index c1e85f356e4d..11493167b0a8 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -140,16 +140,14 @@ static void get_chipram(void)
return;
}
-static int z2_open(struct block_device *bdev, fmode_t mode)
+static int z2_open(struct gendisk *disk, blk_mode_t mode)
{
- int device;
+ int device = disk->first_minor;
int max_z2_map = (Z2RAM_SIZE / Z2RAM_CHUNKSIZE) * sizeof(z2ram_map[0]);
int max_chip_map = (amiga_chip_size / Z2RAM_CHUNKSIZE) *
sizeof(z2ram_map[0]);
int rc = -ENOMEM;
- device = MINOR(bdev->bd_dev);
-
mutex_lock(&z2ram_mutex);
if (current_device != -1 && current_device != device) {
rc = -EBUSY;
@@ -290,7 +288,7 @@ err_out:
return rc;
}
-static void z2_release(struct gendisk *disk, fmode_t mode)
+static void z2_release(struct gendisk *disk)
{
mutex_lock(&z2ram_mutex);
if (current_device == -1) {
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index f6d90f1ba5cf..1867f378b319 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -420,7 +420,7 @@ static void reset_bdev(struct zram *zram)
return;
bdev = zram->bdev;
- blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
+ blkdev_put(bdev, zram);
/* hope filp_close flush all of IO */
filp_close(zram->backing_dev, NULL);
zram->backing_dev = NULL;
@@ -507,8 +507,8 @@ static ssize_t backing_dev_store(struct device *dev,
goto out;
}
- bdev = blkdev_get_by_dev(inode->i_rdev,
- FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram);
+ bdev = blkdev_get_by_dev(inode->i_rdev, BLK_OPEN_READ | BLK_OPEN_WRITE,
+ zram, NULL);
if (IS_ERR(bdev)) {
err = PTR_ERR(bdev);
bdev = NULL;
@@ -539,7 +539,7 @@ out:
kvfree(bitmap);
if (bdev)
- blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
+ blkdev_put(bdev, zram);
if (backing_dev)
filp_close(backing_dev, NULL);
@@ -700,7 +700,7 @@ static ssize_t writeback_store(struct device *dev,
bio_init(&bio, zram->bdev, &bio_vec, 1,
REQ_OP_WRITE | REQ_SYNC);
bio.bi_iter.bi_sector = blk_idx * (PAGE_SIZE >> 9);
- bio_add_page(&bio, page, PAGE_SIZE, 0);
+ __bio_add_page(&bio, page, PAGE_SIZE, 0);
/*
* XXX: A single page IO would be inefficient for write
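
The bio_add_page() -> __bio_add_page() switch above relies on the bio having been bio_init()ed with exactly one bio_vec: when the capacity is known to suffice, the unchecked variant documents that adding the page cannot fail. A sketch of the idiom (demo helper, synchronous write assumed):

#include <linux/bio.h>

static void demo_sync_write_page(struct block_device *bdev,
				 struct page *page, sector_t sector)
{
	struct bio_vec bv;
	struct bio bio;

	bio_init(&bio, bdev, &bv, 1, REQ_OP_WRITE | REQ_SYNC);
	bio.bi_iter.bi_sector = sector;
	__bio_add_page(&bio, page, PAGE_SIZE, 0);	/* cannot fail here */
	submit_bio_wait(&bio);
	bio_uninit(&bio);
}
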
@@ -2097,19 +2097,16 @@ static ssize_t reset_store(struct device *dev,
return len;
}
-static int zram_open(struct block_device *bdev, fmode_t mode)
+static int zram_open(struct gendisk *disk, blk_mode_t mode)
{
- int ret = 0;
- struct zram *zram;
+ struct zram *zram = disk->private_data;
- WARN_ON(!mutex_is_locked(&bdev->bd_disk->open_mutex));
+ WARN_ON(!mutex_is_locked(&disk->open_mutex));
- zram = bdev->bd_disk->private_data;
/* zram was claimed to reset so open request fails */
if (zram->claim)
- ret = -EBUSY;
-
- return ret;
+ return -EBUSY;
+ return 0;
}
static const struct block_device_operations zram_devops = {
diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c
index 3a34d7c1475b..52ef44688d38 100644
--- a/drivers/bluetooth/btnxpuart.c
+++ b/drivers/bluetooth/btnxpuart.c
@@ -1319,17 +1319,17 @@ static void nxp_serdev_remove(struct serdev_device *serdev)
hci_free_dev(hdev);
}
-static struct btnxpuart_data w8987_data = {
+static struct btnxpuart_data w8987_data __maybe_unused = {
.helper_fw_name = NULL,
.fw_name = FIRMWARE_W8987,
};
-static struct btnxpuart_data w8997_data = {
+static struct btnxpuart_data w8997_data __maybe_unused = {
.helper_fw_name = FIRMWARE_HELPER,
.fw_name = FIRMWARE_W8997,
};
-static const struct of_device_id nxpuart_of_match_table[] = {
+static const struct of_device_id nxpuart_of_match_table[] __maybe_unused = {
{ .compatible = "nxp,88w8987-bt", .data = &w8987_data },
{ .compatible = "nxp,88w8997-bt", .data = &w8997_data },
{ }
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 1b064504b388..e30c979535b1 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -78,7 +78,8 @@ enum qca_flags {
QCA_HW_ERROR_EVENT,
QCA_SSR_TRIGGERED,
QCA_BT_OFF,
- QCA_ROM_FW
+ QCA_ROM_FW,
+ QCA_DEBUGFS_CREATED,
};
enum qca_capabilities {
@@ -635,6 +636,9 @@ static void qca_debugfs_init(struct hci_dev *hdev)
if (!hdev->debugfs)
return;
+ if (test_and_set_bit(QCA_DEBUGFS_CREATED, &qca->flags))
+ return;
+
ibs_dir = debugfs_create_dir("ibs", hdev->debugfs);
/* read only */
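
The new QCA_DEBUGFS_CREATED bit makes debugfs creation idempotent when the setup path runs more than once. The guard pattern in isolation; DEMO_CREATED and the flags word are placeholders:

#include <linux/bitops.h>

enum { DEMO_CREATED };	/* placeholder flag bit */

static void demo_init_once(unsigned long *flags)
{
	/* test_and_set_bit() atomically returns the previous bit value */
	if (test_and_set_bit(DEMO_CREATED, flags))
		return;		/* an earlier call already did the setup */

	/* ... one-time initialization ... */
}
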
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 416f723a2dbb..cc2839805983 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -264,6 +264,7 @@
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
+#include <linux/nospec.h>
#include <linux/slab.h>
#include <linux/cdrom.h>
#include <linux/sysctl.h>
@@ -978,15 +979,6 @@ static void cdrom_dvd_rw_close_write(struct cdrom_device_info *cdi)
cdi->media_written = 0;
}
-static int cdrom_close_write(struct cdrom_device_info *cdi)
-{
-#if 0
- return cdrom_flush_cache(cdi);
-#else
- return 0;
-#endif
-}
-
/* badly broken, I know. Is due for a fixup anytime. */
static void cdrom_count_tracks(struct cdrom_device_info *cdi, tracktype *tracks)
{
@@ -1155,8 +1147,7 @@ clean_up_and_return:
* is in their own interest: device control becomes a lot easier
* this way.
*/
-int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev,
- fmode_t mode)
+int cdrom_open(struct cdrom_device_info *cdi, blk_mode_t mode)
{
int ret;
@@ -1165,7 +1156,7 @@ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev,
 /* if this was an O_NONBLOCK open and we should honor the flags,
* do a quick open without drive/disc integrity checks. */
cdi->use_count++;
- if ((mode & FMODE_NDELAY) && (cdi->options & CDO_USE_FFLAGS)) {
+ if ((mode & BLK_OPEN_NDELAY) && (cdi->options & CDO_USE_FFLAGS)) {
ret = cdi->ops->open(cdi, 1);
} else {
ret = open_for_data(cdi);
@@ -1173,7 +1164,7 @@ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev,
goto err;
if (CDROM_CAN(CDC_GENERIC_PACKET))
cdrom_mmc3_profile(cdi);
- if (mode & FMODE_WRITE) {
+ if (mode & BLK_OPEN_WRITE) {
ret = -EROFS;
if (cdrom_open_write(cdi))
goto err_release;
@@ -1182,6 +1173,7 @@ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev,
ret = 0;
cdi->media_written = 0;
}
+ cdi->opened_for_data = true;
}
if (ret)
@@ -1259,10 +1251,9 @@ static int check_for_audio_disc(struct cdrom_device_info *cdi,
return 0;
}
-void cdrom_release(struct cdrom_device_info *cdi, fmode_t mode)
+void cdrom_release(struct cdrom_device_info *cdi)
{
const struct cdrom_device_ops *cdo = cdi->ops;
- int opened_for_data;
cd_dbg(CD_CLOSE, "entering cdrom_release\n");
@@ -1280,20 +1271,12 @@ void cdrom_release(struct cdrom_device_info *cdi, fmode_t mode)
}
}
- opened_for_data = !(cdi->options & CDO_USE_FFLAGS) ||
- !(mode & FMODE_NDELAY);
-
- /*
- * flush cache on last write release
- */
- if (CDROM_CAN(CDC_RAM) && !cdi->use_count && cdi->for_data)
- cdrom_close_write(cdi);
-
cdo->release(cdi);
- if (cdi->use_count == 0) { /* last process that closes dev*/
- if (opened_for_data &&
- cdi->options & CDO_AUTO_EJECT && CDROM_CAN(CDC_OPEN_TRAY))
+
+ if (cdi->use_count == 0 && cdi->opened_for_data) {
+ if (cdi->options & CDO_AUTO_EJECT && CDROM_CAN(CDC_OPEN_TRAY))
cdo->tray_move(cdi, 1);
+ cdi->opened_for_data = false;
}
}
EXPORT_SYMBOL(cdrom_release);
@@ -2329,6 +2312,9 @@ static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi,
if (arg >= cdi->capacity)
return -EINVAL;
+ /* Prevent arg from speculatively bypassing the length check */
+ barrier_nospec();
+
info = kmalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
@@ -3337,7 +3323,7 @@ static int mmc_ioctl(struct cdrom_device_info *cdi, unsigned int cmd,
* ATAPI / SCSI specific code now mainly resides in mmc_ioctl().
*/
int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev,
- fmode_t mode, unsigned int cmd, unsigned long arg)
+ unsigned int cmd, unsigned long arg)
{
void __user *argp = (void __user *)arg;
int ret;
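
barrier_nospec() above closes a Spectre-v1 window: 'arg' has passed the capacity check architecturally, but a speculatively executed path could still use an out-of-bounds value. When the checked value indexes an array, the lighter array_index_nospec() clamp is the usual form, sketched here with demo names (<linux/nospec.h> assumed):

#include <linux/nospec.h>

static int demo_lookup(unsigned long idx, unsigned long cap,
		       const int *table)
{
	if (idx >= cap)
		return -EINVAL;

	idx = array_index_nospec(idx, cap);	/* clamped under speculation */
	return table[idx];
}
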
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index ceded5772aac..3a46e27479ff 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -474,19 +474,19 @@ static const struct cdrom_device_ops gdrom_ops = {
CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
};
-static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
+static int gdrom_bdops_open(struct gendisk *disk, blk_mode_t mode)
{
int ret;
- bdev_check_media_change(bdev);
+ disk_check_media_change(disk);
mutex_lock(&gdrom_mutex);
- ret = cdrom_open(gd.cd_info, bdev, mode);
+ ret = cdrom_open(gd.cd_info, mode);
mutex_unlock(&gdrom_mutex);
return ret;
}
-static void gdrom_bdops_release(struct gendisk *disk, fmode_t mode)
+static void gdrom_bdops_release(struct gendisk *disk)
{
mutex_lock(&gdrom_mutex);
- cdrom_release(gd.cd_info, mode);
+ cdrom_release(gd.cd_info);
@@ -499,13 +499,13 @@ static unsigned int gdrom_bdops_check_events(struct gendisk *disk,
return cdrom_check_events(gd.cd_info, clearing);
}
-static int gdrom_bdops_ioctl(struct block_device *bdev, fmode_t mode,
+static int gdrom_bdops_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned cmd, unsigned long arg)
{
int ret;
mutex_lock(&gdrom_mutex);
- ret = cdrom_ioctl(gd.cd_info, bdev, mode, cmd, arg);
+ ret = cdrom_ioctl(gd.cd_info, bdev, cmd, arg);
mutex_unlock(&gdrom_mutex);
return ret;
diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c
index d68d05d5d383..514f9f287a78 100644
--- a/drivers/char/agp/parisc-agp.c
+++ b/drivers/char/agp/parisc-agp.c
@@ -90,6 +90,9 @@ parisc_agp_tlbflush(struct agp_memory *mem)
{
struct _parisc_agp_info *info = &parisc_agp_info;
+ /* force fdc ops to be visible to IOMMU */
+ asm_io_sync();
+
writeq(info->gart_base | ilog2(info->gart_size), info->ioc_regs+IOC_PCOM);
readq(info->ioc_regs+IOC_PCOM); /* flush */
}
@@ -158,6 +161,7 @@ parisc_agp_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
info->gatt[j] =
parisc_agp_mask_memory(agp_bridge,
paddr, type);
+ asm_io_fdc(&info->gatt[j]);
}
}
@@ -191,7 +195,16 @@ static unsigned long
parisc_agp_mask_memory(struct agp_bridge_data *bridge, dma_addr_t addr,
int type)
{
- return SBA_PDIR_VALID_BIT | addr;
+ unsigned ci; /* coherent index */
+ dma_addr_t pa;
+
+ pa = addr & IOVP_MASK;
+ asm("lci 0(%1), %0" : "=r" (ci) : "r" (phys_to_virt(pa)));
+
+ pa |= (ci >> PAGE_SHIFT) & 0xff;/* move CI (8 bits) into lowest byte */
+ pa |= SBA_PDIR_VALID_BIT; /* set "valid" bit */
+
+ return cpu_to_le64(pa);
}
static void
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 253f2ddb8913..3cb37760dfec 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1546,7 +1546,7 @@ const struct file_operations random_fops = {
.compat_ioctl = compat_ptr_ioctl,
.fasync = random_fasync,
.llseek = noop_llseek,
- .splice_read = generic_file_splice_read,
+ .splice_read = copy_splice_read,
.splice_write = iter_file_splice_write,
};
@@ -1557,7 +1557,7 @@ const struct file_operations urandom_fops = {
.compat_ioctl = compat_ptr_ioctl,
.fasync = random_fasync,
.llseek = noop_llseek,
- .splice_read = generic_file_splice_read,
+ .splice_read = copy_splice_read,
.splice_write = iter_file_splice_write,
};
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index c10a4aa97373..cd48033b804a 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -571,6 +571,10 @@ static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
struct tpm_chip *chip = container_of(rng, struct tpm_chip, hwrng);
+ /* Give back zero bytes, as TPM chip has not yet fully resumed: */
+ if (chip->flags & TPM_CHIP_FLAG_SUSPENDED)
+ return 0;
+
return tpm_get_random(chip, data, max);
}
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 4463d0018290..586ca10b0d72 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -412,6 +412,8 @@ int tpm_pm_suspend(struct device *dev)
}
suspended:
+ chip->flags |= TPM_CHIP_FLAG_SUSPENDED;
+
if (rc)
dev_err(dev, "Ignoring error %d while suspending\n", rc);
return 0;
@@ -429,6 +431,14 @@ int tpm_pm_resume(struct device *dev)
if (chip == NULL)
return -ENODEV;
+ chip->flags &= ~TPM_CHIP_FLAG_SUSPENDED;
+
+ /*
+ * Guarantee that SUSPENDED is written last, so that hwrng does not
+ * activate before the chip has been fully resumed.
+ */
+ wmb();
+
return 0;
}
EXPORT_SYMBOL_GPL(tpm_pm_resume);
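
Together with the tpm-chip.c hunk above, this forms a simple gate: the
hwrng reader hands back zero bytes while TPM_CHIP_FLAG_SUSPENDED is set,
and resume clears the flag last, behind a write barrier. A userspace
analogue of the same shape using C11 release/acquire ordering (an
illustration only; the kernel code uses a plain flags word plus wmb()):

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_bool suspended;

	void dev_suspend(void)
	{
		atomic_store_explicit(&suspended, true, memory_order_relaxed);
	}

	void dev_resume(void)
	{
		/* clear the gate last, so a reader that sees "not suspended"
		 * also sees every write the resume path did before this */
		atomic_store_explicit(&suspended, false, memory_order_release);
	}

	bool dev_try_read(void)
	{
		if (atomic_load_explicit(&suspended, memory_order_acquire))
			return false;	/* still suspended: return no bytes */
		return true;		/* safe to touch the device */
	}
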
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 7af389806643..7db3593941ea 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -122,6 +122,29 @@ static const struct dmi_system_id tpm_tis_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T490s"),
},
},
+ {
+ .callback = tpm_tis_disable_irq,
+ .ident = "ThinkStation P360 Tiny",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkStation P360 Tiny"),
+ },
+ },
+ {
+ .callback = tpm_tis_disable_irq,
+ .ident = "ThinkPad L490",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L490"),
+ },
+ },
+ {
+ .callback = tpm_tis_disable_irq,
+ .ident = "UPX-TGL",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "AAEON"),
+ },
+ },
{}
};
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index 02945d53fcef..558144fa707a 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -1209,25 +1209,20 @@ static void tpm_tis_reenable_interrupts(struct tpm_chip *chip)
u32 intmask;
int rc;
- if (chip->ops->clk_enable != NULL)
- chip->ops->clk_enable(chip, true);
-
- /* reenable interrupts that device may have lost or
- * BIOS/firmware may have disabled
+ /*
+ * Re-enable interrupts that device may have lost or BIOS/firmware may
+ * have disabled.
*/
rc = tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality), priv->irq);
- if (rc < 0)
- goto out;
+ if (rc < 0) {
+ dev_err(&chip->dev, "Setting IRQ failed.\n");
+ return;
+ }
intmask = priv->int_mask | TPM_GLOBAL_INT_ENABLE;
-
- tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask);
-
-out:
- if (chip->ops->clk_enable != NULL)
- chip->ops->clk_enable(chip, false);
-
- return;
+ rc = tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask);
+ if (rc < 0)
+ dev_err(&chip->dev, "Enabling interrupts failed.\n");
}
int tpm_tis_resume(struct device *dev)
@@ -1235,27 +1230,27 @@ int tpm_tis_resume(struct device *dev)
struct tpm_chip *chip = dev_get_drvdata(dev);
int ret;
- ret = tpm_tis_request_locality(chip, 0);
- if (ret < 0)
+ ret = tpm_chip_start(chip);
+ if (ret)
return ret;
if (chip->flags & TPM_CHIP_FLAG_IRQ)
tpm_tis_reenable_interrupts(chip);
- ret = tpm_pm_resume(dev);
- if (ret)
- goto out;
-
/*
* TPM 1.2 requires self-test on resume. This function actually returns
* an error code but for unknown reason it isn't handled.
*/
if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
tpm1_do_selftest(chip);
-out:
- tpm_tis_relinquish_locality(chip, 0);
- return ret;
+ tpm_chip_stop(chip);
+
+ ret = tpm_pm_resume(dev);
+ if (ret)
+ return ret;
+
+ return 0;
}
EXPORT_SYMBOL_GPL(tpm_tis_resume);
#endif
diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
index e978f457fd4d..610bfadb6acf 100644
--- a/drivers/char/tpm/tpm_tis_core.h
+++ b/drivers/char/tpm/tpm_tis_core.h
@@ -84,10 +84,10 @@ enum tis_defaults {
#define ILB_REMAP_SIZE 0x100
enum tpm_tis_flags {
- TPM_TIS_ITPM_WORKAROUND = BIT(0),
- TPM_TIS_INVALID_STATUS = BIT(1),
- TPM_TIS_DEFAULT_CANCELLATION = BIT(2),
- TPM_TIS_IRQ_TESTED = BIT(3),
+ TPM_TIS_ITPM_WORKAROUND = 0,
+ TPM_TIS_INVALID_STATUS = 1,
+ TPM_TIS_DEFAULT_CANCELLATION = 2,
+ TPM_TIS_IRQ_TESTED = 3,
};
struct tpm_tis_data {
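
These values feed set_bit()/test_bit(), which take bit numbers rather
than masks, so the old BIT(n) definitions silently addressed the wrong
bits. A compact userspace demonstration of the off-by-a-power-of-two
this hunk fixes:

	#include <stdio.h>

	#define BIT(n)	(1UL << (n))

	/* same contract as the kernel's test_bit(): nr is a bit number */
	static int test_bit_demo(unsigned int nr, unsigned long word)
	{
		return (word >> nr) & 1;
	}

	int main(void)
	{
		unsigned long flags = BIT(3);	/* bit 3 set */

		printf("test_bit(3)      -> %d\n", test_bit_demo(3, flags));      /* 1 */
		printf("test_bit(BIT(3)) -> %d\n", test_bit_demo(BIT(3), flags)); /* 0: tests bit 8 */
		return 0;
	}
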
diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
index edfa94641bbf..66759fe28fad 100644
--- a/drivers/clk/clk-composite.c
+++ b/drivers/clk/clk-composite.c
@@ -119,7 +119,10 @@ static int clk_composite_determine_rate(struct clk_hw *hw,
if (ret)
continue;
- rate_diff = abs(req->rate - tmp_req.rate);
+ if (req->rate >= tmp_req.rate)
+ rate_diff = req->rate - tmp_req.rate;
+ else
+ rate_diff = tmp_req.rate - req->rate;
if (!rate_diff || !req->best_parent_hw
|| best_rate_diff > rate_diff) {
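
The rates here are unsigned, so abs(req->rate - tmp_req.rate) wraps
before abs() can act whenever the candidate rate exceeds the requested
one, making a close candidate look maximally far away. A two-line
illustration of the wraparound the explicit branch avoids:

	#include <stdio.h>

	int main(void)
	{
		unsigned long a = 100, b = 150;

		printf("a - b          = %lu\n", a - b);	/* wraps near ULONG_MAX */
		printf("branching diff = %lu\n", a >= b ? a - b : b - a);	/* 50 */
		return 0;
	}
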
diff --git a/drivers/clk/clk-loongson2.c b/drivers/clk/clk-loongson2.c
index 70ae1dd2e474..bacdcbb287ac 100644
--- a/drivers/clk/clk-loongson2.c
+++ b/drivers/clk/clk-loongson2.c
@@ -40,7 +40,7 @@ static struct clk_hw *loongson2_clk_register(struct device *dev,
{
int ret;
struct clk_hw *hw;
- struct clk_init_data init;
+ struct clk_init_data init = { };
hw = devm_kzalloc(dev, sizeof(*hw), GFP_KERNEL);
if (!hw)
diff --git a/drivers/clk/imx/clk-imx1.c b/drivers/clk/imx/clk-imx1.c
index 22fc7491ba00..f6ea7e5052d5 100644
--- a/drivers/clk/imx/clk-imx1.c
+++ b/drivers/clk/imx/clk-imx1.c
@@ -10,7 +10,6 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <dt-bindings/clock/imx1-clock.h>
-#include <soc/imx/timer.h>
#include <asm/irq.h>
#include "clk.h"
diff --git a/drivers/clk/imx/clk-imx27.c b/drivers/clk/imx/clk-imx27.c
index 5d177125728d..99618ded0939 100644
--- a/drivers/clk/imx/clk-imx27.c
+++ b/drivers/clk/imx/clk-imx27.c
@@ -8,7 +8,6 @@
#include <linux/of_address.h>
#include <dt-bindings/clock/imx27-clock.h>
#include <soc/imx/revision.h>
-#include <soc/imx/timer.h>
#include <asm/irq.h>
#include "clk.h"
diff --git a/drivers/clk/imx/clk-imx31.c b/drivers/clk/imx/clk-imx31.c
index c44e18c6f63f..4c8d9ff0b2ad 100644
--- a/drivers/clk/imx/clk-imx31.c
+++ b/drivers/clk/imx/clk-imx31.c
@@ -11,7 +11,6 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <soc/imx/revision.h>
-#include <soc/imx/timer.h>
#include <asm/irq.h>
#include "clk.h"
diff --git a/drivers/clk/imx/clk-imx35.c b/drivers/clk/imx/clk-imx35.c
index 7dcbaea3fea3..3b6fdb4e0be7 100644
--- a/drivers/clk/imx/clk-imx35.c
+++ b/drivers/clk/imx/clk-imx35.c
@@ -10,7 +10,6 @@
#include <linux/of.h>
#include <linux/err.h>
#include <soc/imx/revision.h>
-#include <soc/imx/timer.h>
#include <asm/irq.h>
#include "clk.h"
diff --git a/drivers/clk/mediatek/clk-mt8365.c b/drivers/clk/mediatek/clk-mt8365.c
index 6b4e193f648d..c87a6c4a7967 100644
--- a/drivers/clk/mediatek/clk-mt8365.c
+++ b/drivers/clk/mediatek/clk-mt8365.c
@@ -23,6 +23,7 @@
static DEFINE_SPINLOCK(mt8365_clk_lock);
static const struct mtk_fixed_clk top_fixed_clks[] = {
+ FIXED_CLK(CLK_TOP_CLK_NULL, "clk_null", NULL, 0),
FIXED_CLK(CLK_TOP_I2S0_BCK, "i2s0_bck", NULL, 26000000),
FIXED_CLK(CLK_TOP_DSI0_LNTC_DSICK, "dsi0_lntc_dsick", "clk26m",
75000000),
@@ -559,6 +560,14 @@ static const struct mtk_clk_divider top_adj_divs[] = {
0x324, 16, 8, CLK_DIVIDER_ROUND_CLOSEST),
DIV_ADJ_F(CLK_TOP_APLL12_CK_DIV3, "apll12_ck_div3", "apll_i2s3_sel",
0x324, 24, 8, CLK_DIVIDER_ROUND_CLOSEST),
+ DIV_ADJ_F(CLK_TOP_APLL12_CK_DIV4, "apll12_ck_div4", "apll_tdmout_sel",
+ 0x328, 0, 8, CLK_DIVIDER_ROUND_CLOSEST),
+ DIV_ADJ_F(CLK_TOP_APLL12_CK_DIV4B, "apll12_ck_div4b", "apll_tdmout_sel",
+ 0x328, 8, 8, CLK_DIVIDER_ROUND_CLOSEST),
+ DIV_ADJ_F(CLK_TOP_APLL12_CK_DIV5, "apll12_ck_div5", "apll_tdmin_sel",
+ 0x328, 16, 8, CLK_DIVIDER_ROUND_CLOSEST),
+ DIV_ADJ_F(CLK_TOP_APLL12_CK_DIV5B, "apll12_ck_div5b", "apll_tdmin_sel",
+ 0x328, 24, 8, CLK_DIVIDER_ROUND_CLOSEST),
DIV_ADJ_F(CLK_TOP_APLL12_CK_DIV6, "apll12_ck_div6", "apll_spdif_sel",
0x32c, 0, 8, CLK_DIVIDER_ROUND_CLOSEST),
};
@@ -583,15 +592,15 @@ static const struct mtk_gate_regs top2_cg_regs = {
#define GATE_TOP0(_id, _name, _parent, _shift) \
GATE_MTK(_id, _name, _parent, &top0_cg_regs, \
- _shift, &mtk_clk_gate_ops_no_setclr_inv)
+ _shift, &mtk_clk_gate_ops_no_setclr)
#define GATE_TOP1(_id, _name, _parent, _shift) \
GATE_MTK(_id, _name, _parent, &top1_cg_regs, \
- _shift, &mtk_clk_gate_ops_no_setclr)
+ _shift, &mtk_clk_gate_ops_no_setclr_inv)
#define GATE_TOP2(_id, _name, _parent, _shift) \
GATE_MTK(_id, _name, _parent, &top2_cg_regs, \
- _shift, &mtk_clk_gate_ops_no_setclr)
+ _shift, &mtk_clk_gate_ops_no_setclr_inv)
static const struct mtk_gate top_clk_gates[] = {
GATE_TOP0(CLK_TOP_CONN_32K, "conn_32k", "clk32k", 10),
@@ -696,6 +705,7 @@ static const struct mtk_gate ifr_clks[] = {
GATE_IFR3(CLK_IFR_GCPU, "ifr_gcpu", "axi_sel", 8),
GATE_IFR3(CLK_IFR_TRNG, "ifr_trng", "axi_sel", 9),
GATE_IFR3(CLK_IFR_AUXADC, "ifr_auxadc", "clk26m", 10),
+ GATE_IFR3(CLK_IFR_CPUM, "ifr_cpum", "clk26m", 11),
GATE_IFR3(CLK_IFR_AUXADC_MD, "ifr_auxadc_md", "clk26m", 14),
GATE_IFR3(CLK_IFR_AP_DMA, "ifr_ap_dma", "axi_sel", 18),
GATE_IFR3(CLK_IFR_DEBUGSYS, "ifr_debugsys", "axi_sel", 24),
@@ -717,6 +727,8 @@ static const struct mtk_gate ifr_clks[] = {
GATE_IFR5(CLK_IFR_PWRAP_TMR, "ifr_pwrap_tmr", "clk26m", 12),
GATE_IFR5(CLK_IFR_PWRAP_SPI, "ifr_pwrap_spi", "clk26m", 13),
GATE_IFR5(CLK_IFR_PWRAP_SYS, "ifr_pwrap_sys", "clk26m", 14),
+ GATE_MTK_FLAGS(CLK_IFR_MCU_PM_BK, "ifr_mcu_pm_bk", NULL, &ifr5_cg_regs,
+ 17, &mtk_clk_gate_ops_setclr, CLK_IGNORE_UNUSED),
GATE_IFR5(CLK_IFR_IRRX_26M, "ifr_irrx_26m", "clk26m", 22),
GATE_IFR5(CLK_IFR_IRRX_32K, "ifr_irrx_32k", "clk32k", 23),
GATE_IFR5(CLK_IFR_I2C0_AXI, "ifr_i2c0_axi", "i2c_sel", 24),
diff --git a/drivers/clk/pxa/clk-pxa3xx.c b/drivers/clk/pxa/clk-pxa3xx.c
index 42958a542662..621e298f101a 100644
--- a/drivers/clk/pxa/clk-pxa3xx.c
+++ b/drivers/clk/pxa/clk-pxa3xx.c
@@ -164,7 +164,7 @@ void pxa3xx_clk_update_accr(u32 disable, u32 enable, u32 xclkcfg, u32 mask)
accr &= ~disable;
accr |= enable;
- writel(accr, ACCR);
+ writel(accr, clk_regs + ACCR);
if (xclkcfg)
__asm__("mcr p14, 0, %0, c6, c0, 0\n" : : "r"(xclkcfg));
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 526382dc7482..c4d671a5a13d 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -612,6 +612,15 @@ config TIMER_IMX_SYS_CTR
Enable this option to use i.MX system counter timer as a
clockevent.
+config CLKSRC_LOONGSON1_PWM
+ bool "Clocksource using Loongson1 PWM"
+ depends on MACH_LOONGSON32 || COMPILE_TEST
+ select MIPS_EXTERNAL_TIMER
+ select TIMER_OF
+ help
+ Enable this option to use Loongson1 PWM timer as clocksource
+ instead of the performance counter.
+
config CLKSRC_ST_LPC
bool "Low power clocksource found in the LPC" if COMPILE_TEST
select TIMER_OF if OF
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index f12d3987a960..5d93c9e3fc55 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -89,3 +89,4 @@ obj-$(CONFIG_MICROCHIP_PIT64B) += timer-microchip-pit64b.o
obj-$(CONFIG_MSC313E_TIMER) += timer-msc313e.o
obj-$(CONFIG_GOLDFISH_TIMER) += timer-goldfish.o
obj-$(CONFIG_GXP_TIMER) += timer-gxp.o
+obj-$(CONFIG_CLKSRC_LOONGSON1_PWM) += timer-loongson1-pwm.o
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index e09d4427f604..e733a2a1927a 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -191,22 +191,40 @@ u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
return val;
}
-static notrace u64 arch_counter_get_cntpct_stable(void)
+static noinstr u64 raw_counter_get_cntpct_stable(void)
{
return __arch_counter_get_cntpct_stable();
}
-static notrace u64 arch_counter_get_cntpct(void)
+static notrace u64 arch_counter_get_cntpct_stable(void)
+{
+ u64 val;
+ preempt_disable_notrace();
+ val = __arch_counter_get_cntpct_stable();
+ preempt_enable_notrace();
+ return val;
+}
+
+static noinstr u64 arch_counter_get_cntpct(void)
{
return __arch_counter_get_cntpct();
}
-static notrace u64 arch_counter_get_cntvct_stable(void)
+static noinstr u64 raw_counter_get_cntvct_stable(void)
{
return __arch_counter_get_cntvct_stable();
}
-static notrace u64 arch_counter_get_cntvct(void)
+static notrace u64 arch_counter_get_cntvct_stable(void)
+{
+ u64 val;
+ preempt_disable_notrace();
+ val = __arch_counter_get_cntvct_stable();
+ preempt_enable_notrace();
+ return val;
+}
+
+static noinstr u64 arch_counter_get_cntvct(void)
{
return __arch_counter_get_cntvct();
}
@@ -753,14 +771,14 @@ static int arch_timer_set_next_event_phys(unsigned long evt,
return 0;
}
-static u64 arch_counter_get_cnt_mem(struct arch_timer *t, int offset_lo)
+static noinstr u64 arch_counter_get_cnt_mem(struct arch_timer *t, int offset_lo)
{
u32 cnt_lo, cnt_hi, tmp_hi;
do {
- cnt_hi = readl_relaxed(t->base + offset_lo + 4);
- cnt_lo = readl_relaxed(t->base + offset_lo);
- tmp_hi = readl_relaxed(t->base + offset_lo + 4);
+ cnt_hi = __le32_to_cpu((__le32 __force)__raw_readl(t->base + offset_lo + 4));
+ cnt_lo = __le32_to_cpu((__le32 __force)__raw_readl(t->base + offset_lo));
+ tmp_hi = __le32_to_cpu((__le32 __force)__raw_readl(t->base + offset_lo + 4));
} while (cnt_hi != tmp_hi);
return ((u64) cnt_hi << 32) | cnt_lo;
@@ -1060,7 +1078,7 @@ bool arch_timer_evtstrm_available(void)
return cpumask_test_cpu(raw_smp_processor_id(), &evtstrm_available);
}
-static u64 arch_counter_get_cntvct_mem(void)
+static noinstr u64 arch_counter_get_cntvct_mem(void)
{
return arch_counter_get_cnt_mem(arch_timer_mem, CNTVCT_LO);
}
@@ -1074,6 +1092,7 @@ struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
static void __init arch_counter_register(unsigned type)
{
+ u64 (*scr)(void);
u64 start_count;
int width;
@@ -1083,21 +1102,28 @@ static void __init arch_counter_register(unsigned type)
if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) ||
arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) {
- if (arch_timer_counter_has_wa())
+ if (arch_timer_counter_has_wa()) {
rd = arch_counter_get_cntvct_stable;
- else
+ scr = raw_counter_get_cntvct_stable;
+ } else {
rd = arch_counter_get_cntvct;
+ scr = arch_counter_get_cntvct;
+ }
} else {
- if (arch_timer_counter_has_wa())
+ if (arch_timer_counter_has_wa()) {
rd = arch_counter_get_cntpct_stable;
- else
+ scr = raw_counter_get_cntpct_stable;
+ } else {
rd = arch_counter_get_cntpct;
+ scr = arch_counter_get_cntpct;
+ }
}
arch_timer_read_counter = rd;
clocksource_counter.vdso_clock_mode = vdso_default;
} else {
arch_timer_read_counter = arch_counter_get_cntvct_mem;
+ scr = arch_counter_get_cntvct_mem;
}
width = arch_counter_get_width();
@@ -1113,7 +1139,7 @@ static void __init arch_counter_register(unsigned type)
timecounter_init(&arch_timer_kvm_info.timecounter,
&cyclecounter, start_count);
- sched_clock_register(arch_timer_read_counter, width, arch_timer_rate);
+ sched_clock_register(scr, width, arch_timer_rate);
}
static void arch_timer_stop(struct clock_event_device *clk)
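
The accessor split above follows one pattern throughout: a noinstr raw
variant for sched_clock(), whose callers already run with preemption
disabled and where no instrumentation may land, and a notrace wrapper
that disables preemption itself for ordinary clocksource reads.
Schematically (__read_counter() is a stand-in name, not a real symbol):

	static noinstr u64 raw_read_counter(void)
	{
		/* sched_clock path: caller guarantees no preemption */
		return __read_counter();
	}

	static notrace u64 read_counter(void)
	{
		u64 val;

		/* clocksource path: make the workaround read preempt-safe */
		preempt_disable_notrace();
		val = __read_counter();
		preempt_enable_notrace();
		return val;
	}
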
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
index bcd9042a0c9f..e56307a81f4d 100644
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -365,6 +365,20 @@ void hv_stimer_global_cleanup(void)
}
EXPORT_SYMBOL_GPL(hv_stimer_global_cleanup);
+static __always_inline u64 read_hv_clock_msr(void)
+{
+ /*
+ * Read the partition counter to get the current tick count. This count
+ * is set to 0 when the partition is created and is incremented in 100
+ * nanosecond units.
+ *
+ * Use hv_raw_get_register() because this function is used from
+	 * noinstr. Notably, while HV_REGISTER_TIME_REF_COUNT is a synthetic
+	 * register, it doesn't need the GHCB path.
+ */
+ return hv_raw_get_register(HV_REGISTER_TIME_REF_COUNT);
+}
+
/*
* Code and definitions for the Hyper-V clocksources. Two
* clocksources are defined: one that reads the Hyper-V defined MSR, and
@@ -393,14 +407,20 @@ struct ms_hyperv_tsc_page *hv_get_tsc_page(void)
}
EXPORT_SYMBOL_GPL(hv_get_tsc_page);
-static u64 notrace read_hv_clock_tsc(void)
+static __always_inline u64 read_hv_clock_tsc(void)
{
- u64 current_tick = hv_read_tsc_page(hv_get_tsc_page());
+ u64 cur_tsc, time;
- if (current_tick == U64_MAX)
- current_tick = hv_get_register(HV_REGISTER_TIME_REF_COUNT);
+ /*
+ * The Hyper-V Top-Level Function Spec (TLFS), section Timers,
+	 * subsection Reference Counter, guarantees that the TSC and MSR
+ * times are in sync and monotonic. Therefore we can fall back
+ * to the MSR in case the TSC page indicates unavailability.
+ */
+ if (!hv_read_tsc_page_tsc(tsc_page, &cur_tsc, &time))
+ time = read_hv_clock_msr();
- return current_tick;
+ return time;
}
static u64 notrace read_hv_clock_tsc_cs(struct clocksource *arg)
@@ -408,7 +428,7 @@ static u64 notrace read_hv_clock_tsc_cs(struct clocksource *arg)
return read_hv_clock_tsc();
}
-static u64 notrace read_hv_sched_clock_tsc(void)
+static u64 noinstr read_hv_sched_clock_tsc(void)
{
return (read_hv_clock_tsc() - hv_sched_clock_offset) *
(NSEC_PER_SEC / HV_CLOCK_HZ);
@@ -460,30 +480,14 @@ static struct clocksource hyperv_cs_tsc = {
#endif
};
-static u64 notrace read_hv_clock_msr(void)
-{
- /*
- * Read the partition counter to get the current tick count. This count
- * is set to 0 when the partition is created and is incremented in
- * 100 nanosecond units.
- */
- return hv_get_register(HV_REGISTER_TIME_REF_COUNT);
-}
-
static u64 notrace read_hv_clock_msr_cs(struct clocksource *arg)
{
return read_hv_clock_msr();
}
-static u64 notrace read_hv_sched_clock_msr(void)
-{
- return (read_hv_clock_msr() - hv_sched_clock_offset) *
- (NSEC_PER_SEC / HV_CLOCK_HZ);
-}
-
static struct clocksource hyperv_cs_msr = {
.name = "hyperv_clocksource_msr",
- .rating = 500,
+ .rating = 495,
.read = read_hv_clock_msr_cs,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
@@ -513,7 +517,7 @@ static __always_inline void hv_setup_sched_clock(void *sched_clock)
static __always_inline void hv_setup_sched_clock(void *sched_clock) {}
#endif /* CONFIG_GENERIC_SCHED_CLOCK */
-static bool __init hv_init_tsc_clocksource(void)
+static void __init hv_init_tsc_clocksource(void)
{
union hv_reference_tsc_msr tsc_msr;
@@ -524,17 +528,14 @@ static bool __init hv_init_tsc_clocksource(void)
* Hyper-V Reference TSC rating, causing the generic TSC to be used.
* TSC_INVARIANT is not offered on ARM64, so the Hyper-V Reference
* TSC will be preferred over the virtualized ARM64 arch counter.
- * While the Hyper-V MSR clocksource won't be used since the
- * Reference TSC clocksource is present, change its rating as
- * well for consistency.
*/
if (ms_hyperv.features & HV_ACCESS_TSC_INVARIANT) {
hyperv_cs_tsc.rating = 250;
- hyperv_cs_msr.rating = 250;
+ hyperv_cs_msr.rating = 245;
}
if (!(ms_hyperv.features & HV_MSR_REFERENCE_TSC_AVAILABLE))
- return false;
+ return;
hv_read_reference_counter = read_hv_clock_tsc;
@@ -565,33 +566,34 @@ static bool __init hv_init_tsc_clocksource(void)
clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100);
- hv_sched_clock_offset = hv_read_reference_counter();
- hv_setup_sched_clock(read_hv_sched_clock_tsc);
-
- return true;
+ /*
+ * If TSC is invariant, then let it stay as the sched clock since it
+ * will be faster than reading the TSC page. But if not invariant, use
+ * the TSC page so that live migrations across hosts with different
+ * frequencies is handled correctly.
+ */
+ if (!(ms_hyperv.features & HV_ACCESS_TSC_INVARIANT)) {
+ hv_sched_clock_offset = hv_read_reference_counter();
+ hv_setup_sched_clock(read_hv_sched_clock_tsc);
+ }
}
void __init hv_init_clocksource(void)
{
/*
- * Try to set up the TSC page clocksource. If it succeeds, we're
- * done. Otherwise, set up the MSR clocksource. At least one of
- * these will always be available except on very old versions of
- * Hyper-V on x86. In that case we won't have a Hyper-V
+ * Try to set up the TSC page clocksource, then the MSR clocksource.
+ * At least one of these will always be available except on very old
+ * versions of Hyper-V on x86. In that case we won't have a Hyper-V
* clocksource, but Linux will still run with a clocksource based
* on the emulated PIT or LAPIC timer.
+ *
+ * Never use the MSR clocksource as sched clock. It's too slow.
+ * Better to use the native sched clock as the fallback.
*/
- if (hv_init_tsc_clocksource())
- return;
-
- if (!(ms_hyperv.features & HV_MSR_TIME_REF_COUNT_AVAILABLE))
- return;
-
- hv_read_reference_counter = read_hv_clock_msr;
- clocksource_register_hz(&hyperv_cs_msr, NSEC_PER_SEC/100);
+ hv_init_tsc_clocksource();
- hv_sched_clock_offset = hv_read_reference_counter();
- hv_setup_sched_clock(read_hv_sched_clock_msr);
+ if (ms_hyperv.features & HV_MSR_TIME_REF_COUNT_AVAILABLE)
+ clocksource_register_hz(&hyperv_cs_msr, NSEC_PER_SEC/100);
}
void __init hv_remap_tsc_clocksource(void)
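
A note on the sched clock math above: the reference counter ticks in
100 ns units, so with HV_CLOCK_HZ at 10 MHz the factor
NSEC_PER_SEC / HV_CLOCK_HZ is exactly 100 and the conversion is a plain
multiply. A worked check with illustrative numbers:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t ticks = 123456;	/* counter minus boot offset */
		uint64_t ns = ticks * (1000000000ULL / 10000000ULL);	/* * 100 */

		printf("%llu ticks = %llu ns\n",
		       (unsigned long long)ticks, (unsigned long long)ns);
		return 0;
	}
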
diff --git a/drivers/clocksource/ingenic-timer.c b/drivers/clocksource/ingenic-timer.c
index 089ce64b1c3f..154ee5f7954a 100644
--- a/drivers/clocksource/ingenic-timer.c
+++ b/drivers/clocksource/ingenic-timer.c
@@ -369,7 +369,7 @@ static int __init ingenic_tcu_probe(struct platform_device *pdev)
return 0;
}
-static int __maybe_unused ingenic_tcu_suspend(struct device *dev)
+static int ingenic_tcu_suspend(struct device *dev)
{
struct ingenic_tcu *tcu = dev_get_drvdata(dev);
unsigned int cpu;
@@ -382,7 +382,7 @@ static int __maybe_unused ingenic_tcu_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused ingenic_tcu_resume(struct device *dev)
+static int ingenic_tcu_resume(struct device *dev)
{
struct ingenic_tcu *tcu = dev_get_drvdata(dev);
unsigned int cpu;
@@ -406,7 +406,7 @@ err_timer_clk_disable:
return ret;
}
-static const struct dev_pm_ops __maybe_unused ingenic_tcu_pm_ops = {
+static const struct dev_pm_ops ingenic_tcu_pm_ops = {
/* _noirq: We want the TCU clocks to be gated last / ungated first */
.suspend_noirq = ingenic_tcu_suspend,
.resume_noirq = ingenic_tcu_resume,
@@ -415,9 +415,7 @@ static const struct dev_pm_ops __maybe_unused ingenic_tcu_pm_ops = {
static struct platform_driver ingenic_tcu_driver = {
.driver = {
.name = "ingenic-tcu-timer",
-#ifdef CONFIG_PM_SLEEP
- .pm = &ingenic_tcu_pm_ops,
-#endif
+ .pm = pm_sleep_ptr(&ingenic_tcu_pm_ops),
.of_match_table = ingenic_tcu_of_match,
},
};
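
pm_sleep_ptr() replaces the CONFIG_PM_SLEEP #ifdef: it evaluates to the
ops pointer when PM sleep support is built in and to NULL otherwise,
while keeping the symbol referenced so the compiler can discard the
callbacks without any __maybe_unused annotations. Roughly (see PTR_IF()
in <linux/pm.h> for the real definition):

	/* approximation of pm_sleep_ptr(); illustrative only */
	#define pm_sleep_ptr_sketch(_ptr) \
		(IS_ENABLED(CONFIG_PM_SLEEP) ? (_ptr) : NULL)
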
diff --git a/drivers/clocksource/timer-cadence-ttc.c b/drivers/clocksource/timer-cadence-ttc.c
index 4efd0cf3b602..0d52e28fea4d 100644
--- a/drivers/clocksource/timer-cadence-ttc.c
+++ b/drivers/clocksource/timer-cadence-ttc.c
@@ -486,10 +486,10 @@ static int __init ttc_timer_probe(struct platform_device *pdev)
* and use it. Note that the event timer uses the interrupt and it's the
* 2nd TTC hence the irq_of_parse_and_map(,1)
*/
- timer_baseaddr = of_iomap(timer, 0);
- if (!timer_baseaddr) {
+ timer_baseaddr = devm_of_iomap(&pdev->dev, timer, 0, NULL);
+ if (IS_ERR(timer_baseaddr)) {
pr_err("ERROR: invalid timer base address\n");
- return -ENXIO;
+ return PTR_ERR(timer_baseaddr);
}
irq = irq_of_parse_and_map(timer, 1);
@@ -513,20 +513,27 @@ static int __init ttc_timer_probe(struct platform_device *pdev)
clk_ce = of_clk_get(timer, clksel);
if (IS_ERR(clk_ce)) {
pr_err("ERROR: timer input clock not found\n");
- return PTR_ERR(clk_ce);
+ ret = PTR_ERR(clk_ce);
+ goto put_clk_cs;
}
ret = ttc_setup_clocksource(clk_cs, timer_baseaddr, timer_width);
if (ret)
- return ret;
+ goto put_clk_ce;
ret = ttc_setup_clockevent(clk_ce, timer_baseaddr + 4, irq);
if (ret)
- return ret;
+ goto put_clk_ce;
pr_info("%pOFn #0 at %p, irq=%d\n", timer, timer_baseaddr, irq);
return 0;
+
+put_clk_ce:
+ clk_put(clk_ce);
+put_clk_cs:
+ clk_put(clk_cs);
+ return ret;
}
static const struct of_device_id ttc_timer_of_match[] = {
diff --git a/drivers/clocksource/timer-imx-gpt.c b/drivers/clocksource/timer-imx-gpt.c
index ca3e4cbc80c6..28ab4f1a7c71 100644
--- a/drivers/clocksource/timer-imx-gpt.c
+++ b/drivers/clocksource/timer-imx-gpt.c
@@ -16,7 +16,6 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <soc/imx/timer.h>
/*
* There are 4 versions of the timer hardware on Freescale MXC hardware.
@@ -25,6 +24,12 @@
* - MX25, MX31, MX35, MX37, MX51, MX6Q(rev1.0)
* - MX6DL, MX6SX, MX6Q(rev1.1+)
*/
+enum imx_gpt_type {
+ GPT_TYPE_IMX1, /* i.MX1 */
+ GPT_TYPE_IMX21, /* i.MX21/27 */
+ GPT_TYPE_IMX31, /* i.MX31/35/25/37/51/6Q */
+ GPT_TYPE_IMX6DL, /* i.MX6DL/SX/SL */
+};
/* defines common for all i.MX */
#define MXC_TCTL 0x00
@@ -93,13 +98,11 @@ static void imx1_gpt_irq_disable(struct imx_timer *imxtm)
tmp = readl_relaxed(imxtm->base + MXC_TCTL);
writel_relaxed(tmp & ~MX1_2_TCTL_IRQEN, imxtm->base + MXC_TCTL);
}
-#define imx21_gpt_irq_disable imx1_gpt_irq_disable
static void imx31_gpt_irq_disable(struct imx_timer *imxtm)
{
writel_relaxed(0, imxtm->base + V2_IR);
}
-#define imx6dl_gpt_irq_disable imx31_gpt_irq_disable
static void imx1_gpt_irq_enable(struct imx_timer *imxtm)
{
@@ -108,13 +111,11 @@ static void imx1_gpt_irq_enable(struct imx_timer *imxtm)
tmp = readl_relaxed(imxtm->base + MXC_TCTL);
writel_relaxed(tmp | MX1_2_TCTL_IRQEN, imxtm->base + MXC_TCTL);
}
-#define imx21_gpt_irq_enable imx1_gpt_irq_enable
static void imx31_gpt_irq_enable(struct imx_timer *imxtm)
{
writel_relaxed(1<<0, imxtm->base + V2_IR);
}
-#define imx6dl_gpt_irq_enable imx31_gpt_irq_enable
static void imx1_gpt_irq_acknowledge(struct imx_timer *imxtm)
{
@@ -131,7 +132,6 @@ static void imx31_gpt_irq_acknowledge(struct imx_timer *imxtm)
{
writel_relaxed(V2_TSTAT_OF1, imxtm->base + V2_TSTAT);
}
-#define imx6dl_gpt_irq_acknowledge imx31_gpt_irq_acknowledge
static void __iomem *sched_clock_reg;
@@ -296,7 +296,6 @@ static void imx1_gpt_setup_tctl(struct imx_timer *imxtm)
tctl_val = MX1_2_TCTL_FRR | MX1_2_TCTL_CLK_PCLK1 | MXC_TCTL_TEN;
writel_relaxed(tctl_val, imxtm->base + MXC_TCTL);
}
-#define imx21_gpt_setup_tctl imx1_gpt_setup_tctl
static void imx31_gpt_setup_tctl(struct imx_timer *imxtm)
{
@@ -343,10 +342,10 @@ static const struct imx_gpt_data imx21_gpt_data = {
.reg_tstat = MX1_2_TSTAT,
.reg_tcn = MX1_2_TCN,
.reg_tcmp = MX1_2_TCMP,
- .gpt_irq_enable = imx21_gpt_irq_enable,
- .gpt_irq_disable = imx21_gpt_irq_disable,
+ .gpt_irq_enable = imx1_gpt_irq_enable,
+ .gpt_irq_disable = imx1_gpt_irq_disable,
.gpt_irq_acknowledge = imx21_gpt_irq_acknowledge,
- .gpt_setup_tctl = imx21_gpt_setup_tctl,
+ .gpt_setup_tctl = imx1_gpt_setup_tctl,
.set_next_event = mx1_2_set_next_event,
};
@@ -365,9 +364,9 @@ static const struct imx_gpt_data imx6dl_gpt_data = {
.reg_tstat = V2_TSTAT,
.reg_tcn = V2_TCN,
.reg_tcmp = V2_TCMP,
- .gpt_irq_enable = imx6dl_gpt_irq_enable,
- .gpt_irq_disable = imx6dl_gpt_irq_disable,
- .gpt_irq_acknowledge = imx6dl_gpt_irq_acknowledge,
+ .gpt_irq_enable = imx31_gpt_irq_enable,
+ .gpt_irq_disable = imx31_gpt_irq_disable,
+ .gpt_irq_acknowledge = imx31_gpt_irq_acknowledge,
.gpt_setup_tctl = imx6dl_gpt_setup_tctl,
.set_next_event = v2_set_next_event,
};
diff --git a/drivers/clocksource/timer-loongson1-pwm.c b/drivers/clocksource/timer-loongson1-pwm.c
new file mode 100644
index 000000000000..6335fee03017
--- /dev/null
+++ b/drivers/clocksource/timer-loongson1-pwm.c
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Clocksource driver for Loongson-1 SoC
+ *
+ * Copyright (c) 2023 Keguang Zhang <keguang.zhang@gmail.com>
+ */
+
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/sizes.h>
+#include "timer-of.h"
+
+/* Loongson-1 PWM Timer Register Definitions */
+#define PWM_CNTR 0x0
+#define PWM_HRC 0x4
+#define PWM_LRC 0x8
+#define PWM_CTRL 0xc
+
+/* PWM Control Register Bits */
+#define INT_LRC_EN BIT(11)
+#define INT_HRC_EN BIT(10)
+#define CNTR_RST BIT(7)
+#define INT_SR BIT(6)
+#define INT_EN BIT(5)
+#define PWM_SINGLE BIT(4)
+#define PWM_OE BIT(3)
+#define CNT_EN BIT(0)
+
+#define CNTR_WIDTH 24
+
+DEFINE_RAW_SPINLOCK(ls1x_timer_lock);
+
+struct ls1x_clocksource {
+ void __iomem *reg_base;
+ unsigned long ticks_per_jiffy;
+ struct clocksource clksrc;
+};
+
+static inline struct ls1x_clocksource *to_ls1x_clksrc(struct clocksource *c)
+{
+ return container_of(c, struct ls1x_clocksource, clksrc);
+}
+
+static inline void ls1x_pwmtimer_set_period(unsigned int period,
+ struct timer_of *to)
+{
+ writel(period, timer_of_base(to) + PWM_LRC);
+ writel(period, timer_of_base(to) + PWM_HRC);
+}
+
+static inline void ls1x_pwmtimer_clear(struct timer_of *to)
+{
+ writel(0, timer_of_base(to) + PWM_CNTR);
+}
+
+static inline void ls1x_pwmtimer_start(struct timer_of *to)
+{
+ writel((INT_EN | PWM_OE | CNT_EN), timer_of_base(to) + PWM_CTRL);
+}
+
+static inline void ls1x_pwmtimer_stop(struct timer_of *to)
+{
+ writel(0, timer_of_base(to) + PWM_CTRL);
+}
+
+static inline void ls1x_pwmtimer_irq_ack(struct timer_of *to)
+{
+ int val;
+
+ val = readl(timer_of_base(to) + PWM_CTRL);
+ val |= INT_SR;
+ writel(val, timer_of_base(to) + PWM_CTRL);
+}
+
+static irqreturn_t ls1x_clockevent_isr(int irq, void *dev_id)
+{
+ struct clock_event_device *clkevt = dev_id;
+ struct timer_of *to = to_timer_of(clkevt);
+
+ ls1x_pwmtimer_irq_ack(to);
+ ls1x_pwmtimer_clear(to);
+ ls1x_pwmtimer_start(to);
+
+ clkevt->event_handler(clkevt);
+
+ return IRQ_HANDLED;
+}
+
+static int ls1x_clockevent_set_state_periodic(struct clock_event_device *clkevt)
+{
+ struct timer_of *to = to_timer_of(clkevt);
+
+ raw_spin_lock(&ls1x_timer_lock);
+ ls1x_pwmtimer_set_period(timer_of_period(to), to);
+ ls1x_pwmtimer_clear(to);
+ ls1x_pwmtimer_start(to);
+ raw_spin_unlock(&ls1x_timer_lock);
+
+ return 0;
+}
+
+static int ls1x_clockevent_tick_resume(struct clock_event_device *clkevt)
+{
+ raw_spin_lock(&ls1x_timer_lock);
+ ls1x_pwmtimer_start(to_timer_of(clkevt));
+ raw_spin_unlock(&ls1x_timer_lock);
+
+ return 0;
+}
+
+static int ls1x_clockevent_set_state_shutdown(struct clock_event_device *clkevt)
+{
+ raw_spin_lock(&ls1x_timer_lock);
+ ls1x_pwmtimer_stop(to_timer_of(clkevt));
+ raw_spin_unlock(&ls1x_timer_lock);
+
+ return 0;
+}
+
+static int ls1x_clockevent_set_next(unsigned long evt,
+ struct clock_event_device *clkevt)
+{
+ struct timer_of *to = to_timer_of(clkevt);
+
+ raw_spin_lock(&ls1x_timer_lock);
+ ls1x_pwmtimer_set_period(evt, to);
+ ls1x_pwmtimer_clear(to);
+ ls1x_pwmtimer_start(to);
+ raw_spin_unlock(&ls1x_timer_lock);
+
+ return 0;
+}
+
+static struct timer_of ls1x_to = {
+ .flags = TIMER_OF_IRQ | TIMER_OF_BASE | TIMER_OF_CLOCK,
+ .clkevt = {
+ .name = "ls1x-pwmtimer",
+ .features = CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_ONESHOT,
+ .rating = 300,
+ .set_next_event = ls1x_clockevent_set_next,
+ .set_state_periodic = ls1x_clockevent_set_state_periodic,
+ .set_state_oneshot = ls1x_clockevent_set_state_shutdown,
+ .set_state_shutdown = ls1x_clockevent_set_state_shutdown,
+ .tick_resume = ls1x_clockevent_tick_resume,
+ },
+ .of_irq = {
+ .handler = ls1x_clockevent_isr,
+ .flags = IRQF_TIMER,
+ },
+};
+
+/*
+ * Since the PWM timer overflows every two ticks, it's not very useful
+ * to read by itself, so use jiffies to emulate a free-running
+ * counter:
+ */
+static u64 ls1x_clocksource_read(struct clocksource *cs)
+{
+ struct ls1x_clocksource *ls1x_cs = to_ls1x_clksrc(cs);
+ unsigned long flags;
+ int count;
+ u32 jifs;
+ static int old_count;
+ static u32 old_jifs;
+
+ raw_spin_lock_irqsave(&ls1x_timer_lock, flags);
+ /*
+ * Although our caller may have the read side of xtime_lock,
+ * this is now a seqlock, and we are cheating in this routine
+ * by having side effects on state that we cannot undo if
+ * there is a collision on the seqlock and our caller has to
+ * retry. (Namely, old_jifs and old_count.) So we must treat
+ * jiffies as volatile despite the lock. We read jiffies
+ * before latching the timer count to guarantee that although
+ * the jiffies value might be older than the count (that is,
+ * the counter may underflow between the last point where
+ * jiffies was incremented and the point where we latch the
+ * count), it cannot be newer.
+ */
+ jifs = jiffies;
+ /* read the count */
+ count = readl(ls1x_cs->reg_base + PWM_CNTR);
+
+ /*
+ * It's possible for count to appear to go the wrong way for this
+ * reason:
+ *
+ * The timer counter underflows, but we haven't handled the resulting
+ * interrupt and incremented jiffies yet.
+ *
+ * Previous attempts to handle these cases intelligently were buggy, so
+ * we just do the simple thing now.
+ */
+ if (count < old_count && jifs == old_jifs)
+ count = old_count;
+
+ old_count = count;
+ old_jifs = jifs;
+
+ raw_spin_unlock_irqrestore(&ls1x_timer_lock, flags);
+
+ return (u64)(jifs * ls1x_cs->ticks_per_jiffy) + count;
+}
+
+static struct ls1x_clocksource ls1x_clocksource = {
+ .clksrc = {
+ .name = "ls1x-pwmtimer",
+ .rating = 300,
+ .read = ls1x_clocksource_read,
+ .mask = CLOCKSOURCE_MASK(CNTR_WIDTH),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ },
+};
+
+static int __init ls1x_pwm_clocksource_init(struct device_node *np)
+{
+ struct timer_of *to = &ls1x_to;
+ int ret;
+
+ ret = timer_of_init(np, to);
+ if (ret)
+ return ret;
+
+ clockevents_config_and_register(&to->clkevt, timer_of_rate(to),
+ 0x1, GENMASK(CNTR_WIDTH - 1, 0));
+
+ ls1x_clocksource.reg_base = timer_of_base(to);
+ ls1x_clocksource.ticks_per_jiffy = timer_of_period(to);
+
+ return clocksource_register_hz(&ls1x_clocksource.clksrc,
+ timer_of_rate(to));
+}
+
+TIMER_OF_DECLARE(ls1x_pwm_clocksource, "loongson,ls1b-pwmtimer",
+ ls1x_pwm_clocksource_init);
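
The read path above composes a 64-bit timeline from two parts: jiffies
supplies the coarse count of elapsed timer periods, and the 24-bit PWM
counter supplies the position within the current one. A userspace model
with illustrative numbers:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t ticks_per_jiffy = 12000;	/* e.g. 12 MHz clock, HZ=1000 */
		uint64_t jifs = 5;			/* jiffies read under the lock */
		uint64_t count = 3400;			/* PWM_CNTR latched afterwards */

		printf("clocksource cycles = %llu\n",
		       (unsigned long long)(jifs * ticks_per_jiffy + count));
		return 0;
	}
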
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 2c839bd2b051..a1c51abddbc5 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -38,7 +38,7 @@ choice
prompt "Default CPUFreq governor"
default CPU_FREQ_DEFAULT_GOV_USERSPACE if ARM_SA1110_CPUFREQ
default CPU_FREQ_DEFAULT_GOV_SCHEDUTIL if ARM64 || ARM
- default CPU_FREQ_DEFAULT_GOV_SCHEDUTIL if X86_INTEL_PSTATE && SMP
+ default CPU_FREQ_DEFAULT_GOV_SCHEDUTIL if (X86_INTEL_PSTATE || X86_AMD_PSTATE) && SMP
default CPU_FREQ_DEFAULT_GOV_PERFORMANCE
help
This option sets which CPUFreq governor shall be loaded at
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index 00476e94db90..438c9e75a04d 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -51,6 +51,23 @@ config X86_AMD_PSTATE
If in doubt, say N.
+config X86_AMD_PSTATE_DEFAULT_MODE
+ int "AMD Processor P-State default mode"
+ depends on X86_AMD_PSTATE
+ default 3 if X86_AMD_PSTATE
+ range 1 4
+ help
+ Select the default mode the amd-pstate driver will use on
+ supported hardware.
+ The value set has the following meanings:
+ 1 -> Disabled
+ 2 -> Passive
+ 3 -> Active (EPP)
+ 4 -> Guided
+
+ For details, take a look at:
+ <file:Documentation/admin-guide/pm/amd-pstate.rst>.
+
config X86_AMD_PSTATE_UT
tristate "selftest for AMD Processor P-State driver"
depends on X86 && ACPI_PROCESSOR
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 29904395e95f..b2f05d27167e 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -975,7 +975,7 @@ static int __init acpi_cpufreq_probe(struct platform_device *pdev)
/* don't keep reloading if cpufreq_driver exists */
if (cpufreq_get_current_driver())
- return -EEXIST;
+ return -ENODEV;
pr_debug("%s\n", __func__);
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 5a3d4aa0f45a..81fba0dcbee9 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -62,7 +62,8 @@
static struct cpufreq_driver *current_pstate_driver;
static struct cpufreq_driver amd_pstate_driver;
static struct cpufreq_driver amd_pstate_epp_driver;
-static int cppc_state = AMD_PSTATE_DISABLE;
+static int cppc_state = AMD_PSTATE_UNDEFINED;
+static bool cppc_enabled;
/*
* AMD Energy Preference Performance (EPP)
@@ -228,7 +229,28 @@ static int amd_pstate_set_energy_pref_index(struct amd_cpudata *cpudata,
static inline int pstate_enable(bool enable)
{
- return wrmsrl_safe(MSR_AMD_CPPC_ENABLE, enable);
+ int ret, cpu;
+ unsigned long logical_proc_id_mask = 0;
+
+ if (enable == cppc_enabled)
+ return 0;
+
+ for_each_present_cpu(cpu) {
+ unsigned long logical_id = topology_logical_die_id(cpu);
+
+ if (test_bit(logical_id, &logical_proc_id_mask))
+ continue;
+
+ set_bit(logical_id, &logical_proc_id_mask);
+
+ ret = wrmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_ENABLE,
+ enable);
+ if (ret)
+ return ret;
+ }
+
+ cppc_enabled = enable;
+ return 0;
}
static int cppc_enable(bool enable)
@@ -236,6 +258,9 @@ static int cppc_enable(bool enable)
int cpu, ret = 0;
struct cppc_perf_ctrls perf_ctrls;
+ if (enable == cppc_enabled)
+ return 0;
+
for_each_present_cpu(cpu) {
ret = cppc_set_enable(cpu, enable);
if (ret)
@@ -251,6 +276,7 @@ static int cppc_enable(bool enable)
}
}
+ cppc_enabled = enable;
return ret;
}
@@ -444,9 +470,8 @@ static int amd_pstate_verify(struct cpufreq_policy_data *policy)
return 0;
}
-static int amd_pstate_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+static int amd_pstate_update_freq(struct cpufreq_policy *policy,
+ unsigned int target_freq, bool fast_switch)
{
struct cpufreq_freqs freqs;
struct amd_cpudata *cpudata = policy->driver_data;
@@ -465,26 +490,51 @@ static int amd_pstate_target(struct cpufreq_policy *policy,
des_perf = DIV_ROUND_CLOSEST(target_freq * cap_perf,
cpudata->max_freq);
- cpufreq_freq_transition_begin(policy, &freqs);
+ WARN_ON(fast_switch && !policy->fast_switch_enabled);
+ /*
+ * If fast_switch is desired, then there aren't any registered
+ * transition notifiers. See comment for
+ * cpufreq_enable_fast_switch().
+ */
+ if (!fast_switch)
+ cpufreq_freq_transition_begin(policy, &freqs);
+
amd_pstate_update(cpudata, min_perf, des_perf,
- max_perf, false, policy->governor->flags);
- cpufreq_freq_transition_end(policy, &freqs, false);
+ max_perf, fast_switch, policy->governor->flags);
+
+ if (!fast_switch)
+ cpufreq_freq_transition_end(policy, &freqs, false);
return 0;
}
+static int amd_pstate_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ return amd_pstate_update_freq(policy, target_freq, false);
+}
+
+static unsigned int amd_pstate_fast_switch(struct cpufreq_policy *policy,
+ unsigned int target_freq)
+{
+ return amd_pstate_update_freq(policy, target_freq, true);
+}
+
static void amd_pstate_adjust_perf(unsigned int cpu,
unsigned long _min_perf,
unsigned long target_perf,
unsigned long capacity)
{
unsigned long max_perf, min_perf, des_perf,
- cap_perf, lowest_nonlinear_perf;
+ cap_perf, lowest_nonlinear_perf, max_freq;
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
struct amd_cpudata *cpudata = policy->driver_data;
+ unsigned int target_freq;
cap_perf = READ_ONCE(cpudata->highest_perf);
lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
+ max_freq = READ_ONCE(cpudata->max_freq);
des_perf = cap_perf;
if (target_perf < capacity)
@@ -501,6 +551,10 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
if (max_perf < min_perf)
max_perf = min_perf;
+ des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
+ target_freq = div_u64(des_perf * max_freq, max_perf);
+ policy->cur = target_freq;
+
amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true,
policy->governor->flags);
cpufreq_cpu_put(policy);
@@ -715,6 +769,7 @@ static int amd_pstate_cpu_exit(struct cpufreq_policy *policy)
freq_qos_remove_request(&cpudata->req[1]);
freq_qos_remove_request(&cpudata->req[0]);
+ policy->fast_switch_possible = false;
kfree(cpudata);
return 0;
@@ -1016,6 +1071,26 @@ static const struct attribute_group amd_pstate_global_attr_group = {
.attrs = pstate_global_attributes,
};
+static bool amd_pstate_acpi_pm_profile_server(void)
+{
+ switch (acpi_gbl_FADT.preferred_profile) {
+ case PM_ENTERPRISE_SERVER:
+ case PM_SOHO_SERVER:
+ case PM_PERFORMANCE_SERVER:
+ return true;
+ }
+ return false;
+}
+
+static bool amd_pstate_acpi_pm_profile_undefined(void)
+{
+ if (acpi_gbl_FADT.preferred_profile == PM_UNSPECIFIED)
+ return true;
+ if (acpi_gbl_FADT.preferred_profile >= NR_PM_PROFILES)
+ return true;
+ return false;
+}
+
static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
{
int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret;
@@ -1073,13 +1148,16 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
policy->max = policy->cpuinfo.max_freq;
/*
- * Set the policy to powersave to provide a valid fallback value in case
+ * Set the policy to provide a valid fallback value in case
* the default cpufreq governor is neither powersave nor performance.
*/
- policy->policy = CPUFREQ_POLICY_POWERSAVE;
+ if (amd_pstate_acpi_pm_profile_server() ||
+ amd_pstate_acpi_pm_profile_undefined())
+ policy->policy = CPUFREQ_POLICY_PERFORMANCE;
+ else
+ policy->policy = CPUFREQ_POLICY_POWERSAVE;
if (boot_cpu_has(X86_FEATURE_CPPC)) {
- policy->fast_switch_possible = true;
ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
if (ret)
return ret;
@@ -1102,7 +1180,6 @@ free_cpudata1:
static int amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
{
pr_debug("CPU %d exiting\n", policy->cpu);
- policy->fast_switch_possible = false;
return 0;
}
@@ -1309,6 +1386,7 @@ static struct cpufreq_driver amd_pstate_driver = {
.flags = CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_UPDATE_LIMITS,
.verify = amd_pstate_verify,
.target = amd_pstate_target,
+ .fast_switch = amd_pstate_fast_switch,
.init = amd_pstate_cpu_init,
.exit = amd_pstate_cpu_exit,
.suspend = amd_pstate_cpu_suspend,
@@ -1328,10 +1406,29 @@ static struct cpufreq_driver amd_pstate_epp_driver = {
.online = amd_pstate_epp_cpu_online,
.suspend = amd_pstate_epp_suspend,
.resume = amd_pstate_epp_resume,
- .name = "amd_pstate_epp",
+ .name = "amd-pstate-epp",
.attr = amd_pstate_epp_attr,
};
+static int __init amd_pstate_set_driver(int mode_idx)
+{
+ if (mode_idx >= AMD_PSTATE_DISABLE && mode_idx < AMD_PSTATE_MAX) {
+ cppc_state = mode_idx;
+ if (cppc_state == AMD_PSTATE_DISABLE)
+ pr_info("driver is explicitly disabled\n");
+
+ if (cppc_state == AMD_PSTATE_ACTIVE)
+ current_pstate_driver = &amd_pstate_epp_driver;
+
+ if (cppc_state == AMD_PSTATE_PASSIVE || cppc_state == AMD_PSTATE_GUIDED)
+ current_pstate_driver = &amd_pstate_driver;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
static int __init amd_pstate_init(void)
{
struct device *dev_root;
@@ -1339,15 +1436,6 @@ static int __init amd_pstate_init(void)
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
return -ENODEV;
- /*
- * by default the pstate driver is disabled to load
- * enable the amd_pstate passive mode driver explicitly
- * with amd_pstate=passive or other modes in kernel command line
- */
- if (cppc_state == AMD_PSTATE_DISABLE) {
- pr_info("driver load is disabled, boot with specific mode to enable this\n");
- return -ENODEV;
- }
if (!acpi_cpc_valid()) {
pr_warn_once("the _CPC object is not present in SBIOS or ACPI disabled\n");
@@ -1358,6 +1446,33 @@ static int __init amd_pstate_init(void)
if (cpufreq_get_current_driver())
return -EEXIST;
+ switch (cppc_state) {
+ case AMD_PSTATE_UNDEFINED:
+ /* Disable on the following configs by default:
+ * 1. Undefined platforms
+ * 2. Server platforms
+ * 3. Shared memory designs
+ */
+ if (amd_pstate_acpi_pm_profile_undefined() ||
+ amd_pstate_acpi_pm_profile_server() ||
+ !boot_cpu_has(X86_FEATURE_CPPC)) {
+ pr_info("driver load is disabled, boot with specific mode to enable this\n");
+ return -ENODEV;
+ }
+ ret = amd_pstate_set_driver(CONFIG_X86_AMD_PSTATE_DEFAULT_MODE);
+ if (ret)
+ return ret;
+ break;
+ case AMD_PSTATE_DISABLE:
+	  Enable this option to use the Loongson1 PWM timer as the
+	  clocksource instead of the performance counter.
+ case AMD_PSTATE_ACTIVE:
+ case AMD_PSTATE_GUIDED:
+ break;
+ default:
+ return -EINVAL;
+ }
+
/* capability check */
if (boot_cpu_has(X86_FEATURE_CPPC)) {
pr_debug("AMD CPPC MSR based functionality is supported\n");
@@ -1410,21 +1525,7 @@ static int __init amd_pstate_param(char *str)
size = strlen(str);
mode_idx = get_mode_idx_from_str(str, size);
- if (mode_idx >= AMD_PSTATE_DISABLE && mode_idx < AMD_PSTATE_MAX) {
- cppc_state = mode_idx;
- if (cppc_state == AMD_PSTATE_DISABLE)
- pr_info("driver is explicitly disabled\n");
-
- if (cppc_state == AMD_PSTATE_ACTIVE)
- current_pstate_driver = &amd_pstate_epp_driver;
-
- if (cppc_state == AMD_PSTATE_PASSIVE || cppc_state == AMD_PSTATE_GUIDED)
- current_pstate_driver = &amd_pstate_driver;
-
- return 0;
- }
-
- return -EINVAL;
+ return amd_pstate_set_driver(mode_idx);
}
early_param("amd_pstate", amd_pstate_param);
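
pstate_enable() above writes MSR_AMD_CPPC_ENABLE once per logical die
rather than once per CPU, deduplicating through a bitmask of die IDs. A
userspace model of that loop (die IDs are invented here; the kernel
takes them from topology_logical_die_id()):

	#include <stdio.h>

	#define BIT(n)	(1UL << (n))

	int main(void)
	{
		int die_of_cpu[] = { 0, 0, 1, 1 };	/* 4 CPUs on 2 dies */
		unsigned long seen = 0;
		int cpu;

		for (cpu = 0; cpu < 4; cpu++) {
			int die = die_of_cpu[cpu];

			if (seen & BIT(die))
				continue;	/* this die's MSR already written */
			seen |= BIT(die);
			printf("write MSR on cpu %d (die %d)\n", cpu, die);
		}
		return 0;
	}
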
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 6b52ebe5a890..50bbc969ffe5 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -2828,7 +2828,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
(driver_data->setpolicy && (driver_data->target_index ||
driver_data->target)) ||
(!driver_data->get_intermediate != !driver_data->target_intermediate) ||
- (!driver_data->online != !driver_data->offline))
+ (!driver_data->online != !driver_data->offline) ||
+ (driver_data->adjust_perf && !driver_data->fast_switch))
return -EINVAL;
pr_debug("trying to register driver %s\n", driver_data->name);
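
The new clause rejects drivers that implement ->adjust_perf without
->fast_switch, since the fast-switch path is the required fallback when
the adjust_perf interface cannot be used. The shape being enforced, with
hypothetical example_* callbacks (amd-pstate above is the motivating
real case):

	static struct cpufreq_driver example_driver = {
		.name		= "example",
		.verify		= example_verify,
		.target		= example_target,
		.fast_switch	= example_fast_switch,	/* now required by... */
		.adjust_perf	= example_adjust_perf,	/* ...this callback */
		.init		= example_init,
	};
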
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 2548ec92faa2..f29182512b98 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -824,6 +824,8 @@ static ssize_t store_energy_performance_preference(
err = cpufreq_start_governor(policy);
if (!ret)
ret = err;
+ } else {
+ ret = 0;
}
}
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 1d2cfea9858a..73efbcf5513b 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -583,7 +583,7 @@ static int __init pcc_cpufreq_probe(struct platform_device *pdev)
/* Skip initialization if another cpufreq driver is there. */
if (cpufreq_get_current_driver())
- return -EEXIST;
+ return -ENODEV;
if (acpi_disabled)
return -ENODEV;
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 8e929f6602ce..737a026ef58a 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -145,7 +145,7 @@ static noinstr void enter_s2idle_proper(struct cpuidle_driver *drv,
instrumentation_begin();
- time_start = ns_to_ktime(local_clock());
+ time_start = ns_to_ktime(local_clock_noinstr());
tick_freeze();
/*
@@ -169,7 +169,7 @@ static noinstr void enter_s2idle_proper(struct cpuidle_driver *drv,
tick_unfreeze();
start_critical_timings();
- time_end = ns_to_ktime(local_clock());
+ time_end = ns_to_ktime(local_clock_noinstr());
dev->states_usage[index].s2idle_time += ktime_us_delta(time_end, time_start);
dev->states_usage[index].s2idle_usage++;
@@ -243,7 +243,7 @@ noinstr int cpuidle_enter_state(struct cpuidle_device *dev,
sched_idle_set_state(target_state);
trace_cpu_idle(index, dev->cpu);
- time_start = ns_to_ktime(local_clock());
+ time_start = ns_to_ktime(local_clock_noinstr());
stop_critical_timings();
if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE)) {
@@ -276,7 +276,7 @@ noinstr int cpuidle_enter_state(struct cpuidle_device *dev,
start_critical_timings();
sched_clock_idle_wakeup_event();
- time_end = ns_to_ktime(local_clock());
+ time_end = ns_to_ktime(local_clock_noinstr());
trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu);
/* The cpu is no longer idle or about to enter idle. */
diff --git a/drivers/cpuidle/poll_state.c b/drivers/cpuidle/poll_state.c
index bdcfeaecd228..9b6d90a72601 100644
--- a/drivers/cpuidle/poll_state.c
+++ b/drivers/cpuidle/poll_state.c
@@ -15,7 +15,7 @@ static int __cpuidle poll_idle(struct cpuidle_device *dev,
{
u64 time_start;
- time_start = local_clock();
+ time_start = local_clock_noinstr();
dev->poll_time_limit = false;
@@ -32,7 +32,7 @@ static int __cpuidle poll_idle(struct cpuidle_device *dev,
continue;
loop_count = 0;
- if (local_clock() - time_start > limit) {
+ if (local_clock_noinstr() - time_start > limit) {
dev->poll_time_limit = true;
break;
}
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
index 10fe9f73a5fb..f2dd66706c10 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
@@ -8,7 +8,7 @@
* keysize in CBC and ECB mode.
* Add support also for DES and 3DES in CBC and ECB mode.
*
- * You could find the datasheet in Documentation/arm/sunxi.rst
+ * You could find the datasheet in Documentation/arch/arm/sunxi.rst
*/
#include "sun4i-ss.h"
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
index 006e40133c28..51a3a7b5b985 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
@@ -6,7 +6,7 @@
*
* Core file which registers crypto algorithms supported by the SS.
*
- * You could find a link for the datasheet in Documentation/arm/sunxi.rst
+ * You could find a link for the datasheet in Documentation/arch/arm/sunxi.rst
*/
#include <linux/clk.h>
#include <linux/crypto.h>
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
index d28292762b32..f7893e4ac59d 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
@@ -6,7 +6,7 @@
*
* This file add support for MD5 and SHA1.
*
- * You could find the datasheet in Documentation/arm/sunxi.rst
+ * You could find the datasheet in Documentation/arch/arm/sunxi.rst
*/
#include "sun4i-ss.h"
#include <asm/unaligned.h>
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h
index ba59c7a48825..6c5d4aa6453c 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h
@@ -8,7 +8,7 @@
* Support MD5 and SHA1 hash algorithms.
* Support DES and 3DES
*
- * You could find the datasheet in Documentation/arm/sunxi.rst
+ * You could find the datasheet in Documentation/arch/arm/sunxi.rst
*/
#include <linux/clk.h>
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
index 74b4e910a38d..c13550090785 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
@@ -8,7 +8,7 @@
* This file add support for AES cipher with 128,192,256 bits keysize in
* CBC and ECB mode.
*
- * You could find a link for the datasheet in Documentation/arm/sunxi.rst
+ * You could find a link for the datasheet in Documentation/arch/arm/sunxi.rst
*/
#include <linux/bottom_half.h>
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
index a6865ff4d400..07ea0cc82b16 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
@@ -7,7 +7,7 @@
*
* Core file which registers crypto algorithms supported by the CryptoEngine.
*
- * You could find a link for the datasheet in Documentation/arm/sunxi.rst
+ * You could find a link for the datasheet in Documentation/arch/arm/sunxi.rst
*/
#include <linux/clk.h>
#include <linux/crypto.h>
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
index 8b5b9b9d04c3..930ad157004c 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
@@ -7,7 +7,7 @@
*
* This file add support for MD5 and SHA1/SHA224/SHA256/SHA384/SHA512.
*
- * You could find the datasheet in Documentation/arm/sunxi.rst
+ * You could find the datasheet in Documentation/arch/arm/sunxi.rst
*/
#include <linux/bottom_half.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
index b3cc43ea6c8a..80815379f6fc 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
@@ -7,7 +7,7 @@
*
* This file handle the PRNG
*
- * You could find a link for the datasheet in Documentation/arm/sunxi.rst
+ * You could find a link for the datasheet in Documentation/arch/arm/sunxi.rst
*/
#include "sun8i-ce.h"
#include <linux/dma-mapping.h>
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
index e2b9b9104694..9c35f2a83eda 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
@@ -7,7 +7,7 @@
*
* This file handle the TRNG
*
- * You could find a link for the datasheet in Documentation/arm/sunxi.rst
+ * You could find a link for the datasheet in Documentation/arch/arm/sunxi.rst
*/
#include "sun8i-ce.h"
#include <linux/dma-mapping.h>
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
index 16966cc94e24..381a90fbeaff 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
@@ -8,7 +8,7 @@
* This file add support for AES cipher with 128,192,256 bits keysize in
* CBC and ECB mode.
*
- * You could find a link for the datasheet in Documentation/arm/sunxi.rst
+ * You could find a link for the datasheet in Documentation/arch/arm/sunxi.rst
*/
#include <linux/bottom_half.h>
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
index c9dc06f97857..3dd844b40ff7 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
@@ -7,7 +7,7 @@
*
* Core file which registers crypto algorithms supported by the SecuritySystem
*
- * You could find a link for the datasheet in Documentation/arm/sunxi.rst
+ * You could find a link for the datasheet in Documentation/arch/arm/sunxi.rst
*/
#include <linux/clk.h>
#include <linux/crypto.h>
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
index 577bf636f7fb..a4b67d130d11 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
@@ -7,7 +7,7 @@
*
* This file add support for MD5 and SHA1/SHA224/SHA256.
*
- * You could find the datasheet in Documentation/arm/sunxi.rst
+ * You could find the datasheet in Documentation/arch/arm/sunxi.rst
*/
#include <linux/bottom_half.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
index 70c7b5d571b8..a923cfc6553f 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
@@ -7,7 +7,7 @@
*
* This file handle the PRNG found in the SS
*
- * You could find a link for the datasheet in Documentation/arm/sunxi.rst
+ * You could find a link for the datasheet in Documentation/arch/arm/sunxi.rst
*/
#include "sun8i-ss.h"
#include <linux/dma-mapping.h>
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index 23b9ff920d7e..bea9cf31a12d 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -1028,7 +1028,7 @@ static int cxl_mem_get_partition_info(struct cxl_dev_state *cxlds)
* cxl_dev_state_identify() - Send the IDENTIFY command to the device.
* @cxlds: The device data for the operation
*
- * Return: 0 if identify was executed successfully.
+ * Return: 0 if identify was executed successfully or media not ready.
*
* This will dispatch the identify command to the device and on success populate
* structures to be exported to sysfs.
@@ -1041,6 +1041,9 @@ int cxl_dev_state_identify(struct cxl_dev_state *cxlds)
u32 val;
int rc;
+ if (!cxlds->media_ready)
+ return 0;
+
mbox_cmd = (struct cxl_mbox_cmd) {
.opcode = CXL_MBOX_OP_IDENTIFY,
.size_out = sizeof(id),
@@ -1102,6 +1105,13 @@ int cxl_mem_create_range_info(struct cxl_dev_state *cxlds)
struct device *dev = cxlds->dev;
int rc;
+ if (!cxlds->media_ready) {
+ cxlds->dpa_res = DEFINE_RES_MEM(0, 0);
+ cxlds->ram_res = DEFINE_RES_MEM(0, 0);
+ cxlds->pmem_res = DEFINE_RES_MEM(0, 0);
+ return 0;
+ }
+
cxlds->dpa_res =
(struct resource)DEFINE_RES_MEM(0, cxlds->total_bytes);
diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index f332fe7af92b..67f4ab6daa34 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -101,23 +101,57 @@ int devm_cxl_port_enumerate_dports(struct cxl_port *port)
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_port_enumerate_dports, CXL);
-/*
- * Wait up to @media_ready_timeout for the device to report memory
- * active.
- */
-int cxl_await_media_ready(struct cxl_dev_state *cxlds)
+static int cxl_dvsec_mem_range_valid(struct cxl_dev_state *cxlds, int id)
+{
+ struct pci_dev *pdev = to_pci_dev(cxlds->dev);
+ int d = cxlds->cxl_dvsec;
+ bool valid = false;
+ int rc, i;
+ u32 temp;
+
+ if (id > CXL_DVSEC_RANGE_MAX)
+ return -EINVAL;
+
+ /* Check MEM INFO VALID bit first, give up after 1s */
+ i = 1;
+ do {
+ rc = pci_read_config_dword(pdev,
+ d + CXL_DVSEC_RANGE_SIZE_LOW(id),
+ &temp);
+ if (rc)
+ return rc;
+
+ valid = FIELD_GET(CXL_DVSEC_MEM_INFO_VALID, temp);
+ if (valid)
+ break;
+ msleep(1000);
+ } while (i--);
+
+ if (!valid) {
+ dev_err(&pdev->dev,
+ "Timeout awaiting memory range %d valid after 1s.\n",
+ id);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int cxl_dvsec_mem_range_active(struct cxl_dev_state *cxlds, int id)
{
struct pci_dev *pdev = to_pci_dev(cxlds->dev);
int d = cxlds->cxl_dvsec;
bool active = false;
- u64 md_status;
int rc, i;
+ u32 temp;
- for (i = media_ready_timeout; i; i--) {
- u32 temp;
+ if (id > CXL_DVSEC_RANGE_MAX)
+ return -EINVAL;
+ /* Check MEM ACTIVE bit, up to 60s timeout by default */
+ for (i = media_ready_timeout; i; i--) {
rc = pci_read_config_dword(
- pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &temp);
+ pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(id), &temp);
if (rc)
return rc;
@@ -134,6 +168,39 @@ int cxl_await_media_ready(struct cxl_dev_state *cxlds)
return -ETIMEDOUT;
}
+ return 0;
+}
+
+/*
+ * Wait up to @media_ready_timeout for the device to report memory
+ * active.
+ */
+int cxl_await_media_ready(struct cxl_dev_state *cxlds)
+{
+ struct pci_dev *pdev = to_pci_dev(cxlds->dev);
+ int d = cxlds->cxl_dvsec;
+ int rc, i, hdm_count;
+ u64 md_status;
+ u16 cap;
+
+ rc = pci_read_config_word(pdev,
+ d + CXL_DVSEC_CAP_OFFSET, &cap);
+ if (rc)
+ return rc;
+
+ hdm_count = FIELD_GET(CXL_DVSEC_HDM_COUNT_MASK, cap);
+ for (i = 0; i < hdm_count; i++) {
+ rc = cxl_dvsec_mem_range_valid(cxlds, i);
+ if (rc)
+ return rc;
+ }
+
+ for (i = 0; i < hdm_count; i++) {
+ rc = cxl_dvsec_mem_range_active(cxlds, i);
+ if (rc)
+ return rc;
+ }
+
md_status = readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET);
if (!CXLMDEV_READY(md_status))
return -EIO;
@@ -241,17 +308,36 @@ static void disable_hdm(void *_cxlhdm)
hdm + CXL_HDM_DECODER_CTRL_OFFSET);
}
-static int devm_cxl_enable_hdm(struct device *host, struct cxl_hdm *cxlhdm)
+int devm_cxl_enable_hdm(struct cxl_port *port, struct cxl_hdm *cxlhdm)
{
- void __iomem *hdm = cxlhdm->regs.hdm_decoder;
+ void __iomem *hdm;
u32 global_ctrl;
+ /*
+ * If the hdm capability was not mapped there is nothing to enable and
+ * the caller is responsible for what happens next, for example
+ * emulating a passthrough decoder.
+ */
+ if (IS_ERR(cxlhdm))
+ return 0;
+
+ hdm = cxlhdm->regs.hdm_decoder;
global_ctrl = readl(hdm + CXL_HDM_DECODER_CTRL_OFFSET);
+
+ /*
+ * If the HDM decoder capability was enabled on entry, skip
+ * registering disable_hdm() since this decode capability may be
+ * owned by platform firmware.
+ */
+ if (global_ctrl & CXL_HDM_DECODER_ENABLE)
+ return 0;
+
writel(global_ctrl | CXL_HDM_DECODER_ENABLE,
hdm + CXL_HDM_DECODER_CTRL_OFFSET);
- return devm_add_action_or_reset(host, disable_hdm, cxlhdm);
+ return devm_add_action_or_reset(&port->dev, disable_hdm, cxlhdm);
}
+EXPORT_SYMBOL_NS_GPL(devm_cxl_enable_hdm, CXL);
int cxl_dvsec_rr_decode(struct device *dev, int d,
struct cxl_endpoint_dvsec_info *info)
@@ -425,7 +511,7 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
if (info->mem_enabled)
return 0;
- rc = devm_cxl_enable_hdm(&port->dev, cxlhdm);
+ rc = devm_cxl_enable_hdm(port, cxlhdm);
if (rc)
return rc;
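
The two helpers above share a poll-with-timeout shape: read a config dword, extract a status bit with FIELD_GET(), and retry until the budget runs out. Below is a minimal standalone sketch of that shape, assuming simplified GENMASK()/FIELD_GET() stand-ins and a simulated register read; the bit position and the retry budget are illustrative, not the real DVSEC layout.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for <linux/bits.h> and <linux/bitfield.h>. */
#define GENMASK(h, l)        (((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_GET(mask, reg) (((reg) & (mask)) / ((mask) & -(mask)))

#define MEM_INFO_VALID GENMASK(1, 1) /* illustrative bit position */

/* Simulated pci_read_config_dword(): the valid bit appears on the 3rd read. */
static int read_range_size_low(int id, uint32_t *val)
{
	static int calls;

	(void)id;
	*val = (++calls >= 3) ? MEM_INFO_VALID : 0;
	return 0;
}

/* Same shape as cxl_dvsec_mem_range_valid(): poll, then time out. */
static int mem_range_valid(int id, int budget_secs)
{
	uint32_t temp;

	do {
		if (read_range_size_low(id, &temp))
			return -5;          /* -EIO */
		if (FIELD_GET(MEM_INFO_VALID, temp))
			return 0;
		/* msleep(1000) in the kernel; elided in the sketch */
	} while (budget_secs--);

	return -110;                        /* -ETIMEDOUT */
}

int main(void)
{
	printf("range 0 valid: %d\n", mem_range_valid(0, 3));
	return 0;
}
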
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index da2068475fa2..e7c284c890bc 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -750,11 +750,10 @@ struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
parent_port = parent_dport ? parent_dport->port : NULL;
if (IS_ERR(port)) {
- dev_dbg(uport, "Failed to add %s%s%s%s: %ld\n",
- dev_name(&port->dev),
- parent_port ? " to " : "",
+ dev_dbg(uport, "Failed to add%s%s%s: %ld\n",
+ parent_port ? " port to " : "",
parent_port ? dev_name(&parent_port->dev) : "",
- parent_port ? "" : " (root port)",
+ parent_port ? "" : " root port",
PTR_ERR(port));
} else {
dev_dbg(uport, "%s added%s%s%s\n",
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index 044a92d9813e..f93a28538962 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -710,6 +710,7 @@ struct cxl_endpoint_dvsec_info {
struct cxl_hdm;
struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
struct cxl_endpoint_dvsec_info *info);
+int devm_cxl_enable_hdm(struct cxl_port *port, struct cxl_hdm *cxlhdm);
int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
struct cxl_endpoint_dvsec_info *info);
int devm_cxl_add_passthrough_decoder(struct cxl_port *port);
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index db12b6313afb..a2845a7a69d8 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -266,6 +266,7 @@ struct cxl_poison_state {
* @regs: Parsed register blocks
* @cxl_dvsec: Offset to the PCIe device DVSEC
* @rcd: operating in RCD mode (CXL 3.0 9.11.8 CXL Devices Attached to an RCH)
+ * @media_ready: Indicate whether the device media is usable
* @payload_size: Size of space for payload
* (CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register)
* @lsa_size: Size of Label Storage Area
@@ -303,6 +304,7 @@ struct cxl_dev_state {
int cxl_dvsec;
bool rcd;
+ bool media_ready;
size_t payload_size;
size_t lsa_size;
struct mutex mbox_mutex; /* Protects device mailbox and firmware */
diff --git a/drivers/cxl/cxlpci.h b/drivers/cxl/cxlpci.h
index 0465ef963cd6..7c02e55b8042 100644
--- a/drivers/cxl/cxlpci.h
+++ b/drivers/cxl/cxlpci.h
@@ -31,6 +31,8 @@
#define CXL_DVSEC_RANGE_BASE_LOW(i) (0x24 + (i * 0x10))
#define CXL_DVSEC_MEM_BASE_LOW_MASK GENMASK(31, 28)
+#define CXL_DVSEC_RANGE_MAX 2
+
/* CXL 2.0 8.1.4: Non-CXL Function Map DVSEC */
#define CXL_DVSEC_FUNCTION_MAP 2
diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c
index 10caf180b3fa..519edd0eb196 100644
--- a/drivers/cxl/mem.c
+++ b/drivers/cxl/mem.c
@@ -124,6 +124,9 @@ static int cxl_mem_probe(struct device *dev)
struct dentry *dentry;
int rc;
+ if (!cxlds->media_ready)
+ return -EBUSY;
+
/*
* Someone is trying to reattach this device after it lost its port
* connection (an endpoint port previously registered by this memdev was
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index f7a5b8e9c102..0872f2233ed0 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -708,6 +708,12 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (rc)
dev_dbg(&pdev->dev, "Failed to map RAS capability.\n");
+ rc = cxl_await_media_ready(cxlds);
+ if (rc == 0)
+ cxlds->media_ready = true;
+ else
+ dev_warn(&pdev->dev, "Media not active (%d)\n", rc);
+
rc = cxl_pci_setup_mailbox(cxlds);
if (rc)
return rc;
diff --git a/drivers/cxl/port.c b/drivers/cxl/port.c
index eb57324c4ad4..c23b6164e1c0 100644
--- a/drivers/cxl/port.c
+++ b/drivers/cxl/port.c
@@ -60,13 +60,17 @@ static int discover_region(struct device *dev, void *root)
static int cxl_switch_port_probe(struct cxl_port *port)
{
struct cxl_hdm *cxlhdm;
- int rc;
+ int rc, nr_dports;
- rc = devm_cxl_port_enumerate_dports(port);
- if (rc < 0)
- return rc;
+ nr_dports = devm_cxl_port_enumerate_dports(port);
+ if (nr_dports < 0)
+ return nr_dports;
cxlhdm = devm_cxl_setup_hdm(port, NULL);
+ rc = devm_cxl_enable_hdm(port, cxlhdm);
+ if (rc)
+ return rc;
+
if (!IS_ERR(cxlhdm))
return devm_cxl_enumerate_decoders(cxlhdm, NULL);
@@ -75,7 +79,7 @@ static int cxl_switch_port_probe(struct cxl_port *port)
return PTR_ERR(cxlhdm);
}
- if (rc == 1) {
+ if (nr_dports == 1) {
dev_dbg(&port->dev, "Fallback to passthrough decoder\n");
return devm_cxl_add_passthrough_decoder(port);
}
@@ -113,12 +117,6 @@ static int cxl_endpoint_port_probe(struct cxl_port *port)
if (rc)
return rc;
- rc = cxl_await_media_ready(cxlds);
- if (rc) {
- dev_err(&port->dev, "Media not active (%d)\n", rc);
- return rc;
- }
-
rc = devm_cxl_enumerate_decoders(cxlhdm, &info);
if (rc)
return rc;
diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c
index 88414445adf3..245898f1a88e 100644
--- a/drivers/devfreq/exynos-bus.c
+++ b/drivers/devfreq/exynos-bus.c
@@ -518,6 +518,7 @@ static struct platform_driver exynos_bus_platdrv = {
};
module_platform_driver(exynos_bus_platdrv);
+MODULE_SOFTDEP("pre: exynos_ppmu");
MODULE_DESCRIPTION("Generic Exynos Bus frequency driver");
MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/devfreq/mtk-cci-devfreq.c b/drivers/devfreq/mtk-cci-devfreq.c
index e5458ada5197..6354622eda65 100644
--- a/drivers/devfreq/mtk-cci-devfreq.c
+++ b/drivers/devfreq/mtk-cci-devfreq.c
@@ -127,7 +127,7 @@ static int mtk_ccifreq_target(struct device *dev, unsigned long *freq,
u32 flags)
{
struct mtk_ccifreq_drv *drv = dev_get_drvdata(dev);
- struct clk *cci_pll = clk_get_parent(drv->cci_clk);
+ struct clk *cci_pll;
struct dev_pm_opp *opp;
unsigned long opp_rate;
int voltage, pre_voltage, inter_voltage, target_voltage, ret;
@@ -139,6 +139,7 @@ static int mtk_ccifreq_target(struct device *dev, unsigned long *freq,
return 0;
inter_voltage = drv->inter_voltage;
+ cci_pll = clk_get_parent(drv->cci_clk);
opp_rate = *freq;
opp = devfreq_recommended_opp(dev, &opp_rate, 1);
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index 01f2e86f3f7c..12cf6bb2e3ce 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -12,7 +12,6 @@
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/udmabuf.h>
-#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/iosys-map.h>
@@ -207,9 +206,7 @@ static long udmabuf_create(struct miscdevice *device,
struct udmabuf *ubuf;
struct dma_buf *buf;
pgoff_t pgoff, pgcnt, pgidx, pgbuf = 0, pglimit;
- struct page *page, *hpage = NULL;
- pgoff_t subpgoff, maxsubpgs;
- struct hstate *hpstate;
+ struct page *page;
int seals, ret = -EINVAL;
u32 i, flags;
@@ -245,7 +242,7 @@ static long udmabuf_create(struct miscdevice *device,
if (!memfd)
goto err;
mapping = memfd->f_mapping;
- if (!shmem_mapping(mapping) && !is_file_hugepages(memfd))
+ if (!shmem_mapping(mapping))
goto err;
seals = memfd_fcntl(memfd, F_GET_SEALS, 0);
if (seals == -EINVAL)
@@ -256,48 +253,16 @@ static long udmabuf_create(struct miscdevice *device,
goto err;
pgoff = list[i].offset >> PAGE_SHIFT;
pgcnt = list[i].size >> PAGE_SHIFT;
- if (is_file_hugepages(memfd)) {
- hpstate = hstate_file(memfd);
- pgoff = list[i].offset >> huge_page_shift(hpstate);
- subpgoff = (list[i].offset &
- ~huge_page_mask(hpstate)) >> PAGE_SHIFT;
- maxsubpgs = huge_page_size(hpstate) >> PAGE_SHIFT;
- }
for (pgidx = 0; pgidx < pgcnt; pgidx++) {
- if (is_file_hugepages(memfd)) {
- if (!hpage) {
- hpage = find_get_page_flags(mapping, pgoff,
- FGP_ACCESSED);
- if (!hpage) {
- ret = -EINVAL;
- goto err;
- }
- }
- page = hpage + subpgoff;
- get_page(page);
- subpgoff++;
- if (subpgoff == maxsubpgs) {
- put_page(hpage);
- hpage = NULL;
- subpgoff = 0;
- pgoff++;
- }
- } else {
- page = shmem_read_mapping_page(mapping,
- pgoff + pgidx);
- if (IS_ERR(page)) {
- ret = PTR_ERR(page);
- goto err;
- }
+ page = shmem_read_mapping_page(mapping, pgoff + pgidx);
+ if (IS_ERR(page)) {
+ ret = PTR_ERR(page);
+ goto err;
}
ubuf->pages[pgbuf++] = page;
}
fput(memfd);
memfd = NULL;
- if (hpage) {
- put_page(hpage);
- hpage = NULL;
- }
}
exp_info.ops = &udmabuf_ops;
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 8858470246e1..ee3a219e3a89 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -132,7 +132,7 @@
#define ATC_DST_PIP BIT(12) /* Destination Picture-in-Picture enabled */
#define ATC_SRC_DSCR_DIS BIT(16) /* Src Descriptor fetch disable */
#define ATC_DST_DSCR_DIS BIT(20) /* Dst Descriptor fetch disable */
-#define ATC_FC GENMASK(22, 21) /* Choose Flow Controller */
+#define ATC_FC GENMASK(23, 21) /* Choose Flow Controller */
#define ATC_FC_MEM2MEM 0x0 /* Mem-to-Mem (DMA) */
#define ATC_FC_MEM2PER 0x1 /* Mem-to-Periph (DMA) */
#define ATC_FC_PER2MEM 0x2 /* Periph-to-Mem (DMA) */
@@ -153,8 +153,6 @@
#define ATC_AUTO BIT(31) /* Auto multiple buffer tx enable */
/* Bitfields in CFG */
-#define ATC_PER_MSB(h) ((0x30U & (h)) >> 4) /* Extract most significant bits of a handshaking identifier */
-
#define ATC_SRC_PER GENMASK(3, 0) /* Channel src rq associated with periph handshaking ifc h */
#define ATC_DST_PER GENMASK(7, 4) /* Channel dst rq associated with periph handshaking ifc h */
#define ATC_SRC_REP BIT(8) /* Source Replay Mod */
@@ -181,10 +179,15 @@
#define ATC_DPIP_HOLE GENMASK(15, 0)
#define ATC_DPIP_BOUNDARY GENMASK(25, 16)
-#define ATC_SRC_PER_ID(id) (FIELD_PREP(ATC_SRC_PER_MSB, (id)) | \
- FIELD_PREP(ATC_SRC_PER, (id)))
-#define ATC_DST_PER_ID(id) (FIELD_PREP(ATC_DST_PER_MSB, (id)) | \
- FIELD_PREP(ATC_DST_PER, (id)))
+#define ATC_PER_MSB GENMASK(5, 4) /* Extract MSBs of a handshaking identifier */
+#define ATC_SRC_PER_ID(id) \
+ ({ typeof(id) _id = (id); \
+ FIELD_PREP(ATC_SRC_PER_MSB, FIELD_GET(ATC_PER_MSB, _id)) | \
+ FIELD_PREP(ATC_SRC_PER, _id); })
+#define ATC_DST_PER_ID(id) \
+ ({ typeof(id) _id = (id); \
+ FIELD_PREP(ATC_DST_PER_MSB, FIELD_GET(ATC_PER_MSB, _id)) | \
+ FIELD_PREP(ATC_DST_PER, _id); })
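
The macro rework matters because the kernel's FIELD_PREP() shifts a value into a field and masks it to the field width: passing the full 6-bit handshaking id straight into the 2-bit ATC_SRC_PER_MSB field stored the id's low bits there, not bits 5:4. The new form first extracts bits 5:4 with FIELD_GET(ATC_PER_MSB, _id). A standalone sketch under assumed field positions (ATC_SRC_PER_MSB at bits 11:10 is illustrative, not the real register layout):

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for <linux/bits.h> and <linux/bitfield.h>. */
#define GENMASK(h, l)     (((~0u) << (l)) & (~0u >> (31 - (h))))
#define LOW_BIT(m)        ((m) & -(m))
#define FIELD_PREP(m, v)  (((uint32_t)(v) * LOW_BIT(m)) & (m))
#define FIELD_GET(m, v)   (((v) & (m)) / LOW_BIT(m))

#define ATC_SRC_PER      GENMASK(3, 0)   /* low 4 bits of the ID */
#define ATC_SRC_PER_MSB  GENMASK(11, 10) /* assumed field position */
#define ATC_PER_MSB      GENMASK(5, 4)   /* MSBs within the 6-bit ID */

/* Fixed form: route bits 5:4 of the ID into the 2-bit MSB field. */
static uint32_t src_per_id(uint32_t id)
{
	return FIELD_PREP(ATC_SRC_PER_MSB, FIELD_GET(ATC_PER_MSB, id)) |
	       FIELD_PREP(ATC_SRC_PER, id);
}

/* Broken form: FIELD_PREP() masks the raw ID to 2 bits, keeping bits 1:0. */
static uint32_t src_per_id_broken(uint32_t id)
{
	return FIELD_PREP(ATC_SRC_PER_MSB, id) | FIELD_PREP(ATC_SRC_PER, id);
}

int main(void)
{
	/* ID 0x31 = 0b110001: bits 5:4 = 0b11, bits 1:0 = 0b01 */
	printf("fixed:  0x%x\n", src_per_id(0x31));        /* 0xc01 */
	printf("broken: 0x%x\n", src_per_id_broken(0x31)); /* 0x401 */
	return 0;
}
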
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 7da6d9b6098e..c3b37168b21f 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -1102,6 +1102,8 @@ at_xdmac_prep_interleaved(struct dma_chan *chan,
NULL,
src_addr, dst_addr,
xt, xt->sgl);
+ if (!first)
+ return NULL;
/* Length of the block is (BLEN+1) microblocks. */
for (i = 0; i < xt->numf - 1; i++)
@@ -1132,8 +1134,9 @@ at_xdmac_prep_interleaved(struct dma_chan *chan,
src_addr, dst_addr,
xt, chunk);
if (!desc) {
- list_splice_tail_init(&first->descs_list,
- &atchan->free_descs_list);
+ if (first)
+ list_splice_tail_init(&first->descs_list,
+ &atchan->free_descs_list);
return NULL;
}
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index ecbf67c2ad2b..d32deb9b4e3d 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -277,7 +277,6 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
if (wq_dedicated(wq)) {
rc = idxd_wq_set_pasid(wq, pasid);
if (rc < 0) {
- iommu_sva_unbind_device(sva);
dev_err(dev, "wq set pasid failed: %d\n", rc);
goto failed_set_pasid;
}
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 0d9257fbdfb0..b4731fe6bbc1 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -1050,7 +1050,7 @@ static bool _trigger(struct pl330_thread *thrd)
return true;
}
-static bool _start(struct pl330_thread *thrd)
+static bool pl330_start_thread(struct pl330_thread *thrd)
{
switch (_state(thrd)) {
case PL330_STATE_FAULT_COMPLETING:
@@ -1702,7 +1702,7 @@ static int pl330_update(struct pl330_dmac *pl330)
thrd->req_running = -1;
/* Get going again ASAP */
- _start(thrd);
+ pl330_start_thread(thrd);
/* For now, just make a list of callbacks to be done */
list_add_tail(&descdone->rqd, &pl330->req_done);
@@ -2089,7 +2089,7 @@ static void pl330_tasklet(struct tasklet_struct *t)
} else {
/* Make sure the PL330 Channel thread is active */
spin_lock(&pch->thread->dmac->lock);
- _start(pch->thread);
+ pl330_start_thread(pch->thread);
spin_unlock(&pch->thread->dmac->lock);
}
@@ -2107,7 +2107,7 @@ static void pl330_tasklet(struct tasklet_struct *t)
if (power_down) {
pch->active = true;
spin_lock(&pch->thread->dmac->lock);
- _start(pch->thread);
+ pl330_start_thread(pch->thread);
spin_unlock(&pch->thread->dmac->lock);
power_down = false;
}
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index fc3a2a05ab7b..b8329a23728d 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -5527,7 +5527,7 @@ static int udma_probe(struct platform_device *pdev)
return ret;
}
-static int udma_pm_suspend(struct device *dev)
+static int __maybe_unused udma_pm_suspend(struct device *dev)
{
struct udma_dev *ud = dev_get_drvdata(dev);
struct dma_device *dma_dev = &ud->ddev;
@@ -5549,7 +5549,7 @@ static int udma_pm_suspend(struct device *dev)
return 0;
}
-static int udma_pm_resume(struct device *dev)
+static int __maybe_unused udma_pm_resume(struct device *dev)
{
struct udma_dev *ud = dev_get_drvdata(dev);
struct dma_device *dma_dev = &ud->ddev;
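
Annotating the PM callbacks __maybe_unused silences -Wunused-function in builds where the PM ops referencing them are compiled out (for example !CONFIG_PM_SLEEP), without resorting to #ifdef around the functions. A tiny demonstration of the attribute; CONFIG_PM_SLEEP_DEMO is a made-up macro for the sketch:

#include <stdio.h>

#define __maybe_unused __attribute__((unused))

/* Referenced only when sleep support is configured in. */
static int __maybe_unused udma_pm_suspend_demo(void)
{
	return 0;
}

int main(void)
{
#ifdef CONFIG_PM_SLEEP_DEMO
	printf("suspend -> %d\n", udma_pm_suspend_demo());
#else
	puts("PM disabled: callback unreferenced, yet no -Wunused warning");
#endif
	return 0;
}
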
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 68f576700911..110e99b86a66 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -550,4 +550,15 @@ config EDAC_ZYNQMP
Xilinx ZynqMP OCM (On Chip Memory) controller. It can also be
built as a module. In that case it will be called zynqmp_edac.
+config EDAC_NPCM
+ tristate "Nuvoton NPCM DDR Memory Controller"
+ depends on (ARCH_NPCM || COMPILE_TEST)
+ help
+ Support for error detection and correction on the Nuvoton NPCM DDR
+ memory controller.
+
+ The memory controller supports single-bit error correction and double-bit
+ error detection (in-line ECC, in which one-eighth of the memory device
+ is reserved for ECC storage).
+
endif # EDAC
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 9b025c5b3061..61945d3113cc 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -84,4 +84,5 @@ obj-$(CONFIG_EDAC_QCOM) += qcom_edac.o
obj-$(CONFIG_EDAC_ASPEED) += aspeed_edac.o
obj-$(CONFIG_EDAC_BLUEFIELD) += bluefield_edac.o
obj-$(CONFIG_EDAC_DMC520) += dmc520_edac.o
+obj-$(CONFIG_EDAC_NPCM) += npcm_edac.o
obj-$(CONFIG_EDAC_ZYNQMP) += zynqmp_edac.o
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 5c4292e65b96..597dae7692b1 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -975,6 +975,74 @@ static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
return csrow;
}
+/*
+ * See AMD PPR DF::LclNodeTypeMap
+ *
+ * This register gives information for nodes of the same type within a system.
+ *
+ * Reading this register from a GPU node reports how many GPU nodes are in the
+ * system and the lowest AMD Node ID value for the GPU nodes. Use this info to
+ * fix up the Linux logical "Node ID" value set in the AMD NB code and EDAC.
+ */
+static struct local_node_map {
+ u16 node_count;
+ u16 base_node_id;
+} gpu_node_map;
+
+#define PCI_DEVICE_ID_AMD_MI200_DF_F1 0x14d1
+#define REG_LOCAL_NODE_TYPE_MAP 0x144
+
+/* Local Node Type Map (LNTM) fields */
+#define LNTM_NODE_COUNT GENMASK(27, 16)
+#define LNTM_BASE_NODE_ID GENMASK(11, 0)
+
+static int gpu_get_node_map(void)
+{
+ struct pci_dev *pdev;
+ int ret;
+ u32 tmp;
+
+ /*
+ * Node ID 0 is reserved for CPUs.
+ * Therefore, a non-zero Node ID means we've already cached the values.
+ */
+ if (gpu_node_map.base_node_id)
+ return 0;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_DF_F1, NULL);
+ if (!pdev) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ ret = pci_read_config_dword(pdev, REG_LOCAL_NODE_TYPE_MAP, &tmp);
+ if (ret)
+ goto out;
+
+ gpu_node_map.node_count = FIELD_GET(LNTM_NODE_COUNT, tmp);
+ gpu_node_map.base_node_id = FIELD_GET(LNTM_BASE_NODE_ID, tmp);
+
+out:
+ pci_dev_put(pdev);
+ return ret;
+}
+
+static int fixup_node_id(int node_id, struct mce *m)
+{
+ /* MCA_IPID[InstanceIdHi] gives the AMD Node ID for the bank. */
+ u8 nid = (m->ipid >> 44) & 0xF;
+
+ if (smca_get_bank_type(m->extcpu, m->bank) != SMCA_UMC_V2)
+ return node_id;
+
+ /* Nodes below the GPU base node are CPU nodes and don't need a fixup. */
+ if (nid < gpu_node_map.base_node_id)
+ return node_id;
+
+ /* Convert the hardware-provided AMD Node ID to a Linux logical one. */
+ return nid - gpu_node_map.base_node_id + 1;
+}
+
/* Protect the PCI config register pairs used for DF indirect access. */
static DEFINE_MUTEX(df_indirect_mutex);
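
The fixup above rests on two facts from this hunk: MCA_IPID[47:44] (InstanceIdHi) carries the hardware AMD Node ID, and Linux logical node 0 stays reserved for CPUs, so GPU nodes are renumbered from 1. A standalone sketch of the arithmetic; base_node_id = 8 is an assumed LNTM value, not read from hardware:

#include <stdint.h>
#include <stdio.h>

static uint16_t base_node_id = 8; /* assumed LNTM_BASE_NODE_ID for GPU nodes */

/* Mirror fixup_node_id(): map a hardware AMD Node ID to a Linux logical one. */
static int fixup_node_id(int node_id, uint64_t ipid, int is_umc_v2)
{
	uint8_t nid = (ipid >> 44) & 0xF; /* MCA_IPID[InstanceIdHi] */

	if (!is_umc_v2)           /* CPU UMC banks keep their node_id */
		return node_id;
	if (nid < base_node_id)   /* nodes below the GPU base are CPUs */
		return node_id;

	/* GPU base node becomes logical node 1; node 0 stays with the CPUs. */
	return nid - base_node_id + 1;
}

int main(void)
{
	uint64_t ipid = (uint64_t)9 << 44;                       /* hw node 9 */
	printf("logical node: %d\n", fixup_node_id(0, ipid, 1)); /* -> 2 */
	return 0;
}
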
@@ -1426,12 +1494,47 @@ static int umc_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
return cs_mode;
}
+static int __addr_mask_to_cs_size(u32 addr_mask_orig, unsigned int cs_mode,
+ int csrow_nr, int dimm)
+{
+ u32 msb, weight, num_zero_bits;
+ u32 addr_mask_deinterleaved;
+ int size = 0;
+
+ /*
+ * The number of zero bits in the mask is equal to the number of bits
+ * in a full mask minus the number of bits in the current mask.
+ *
+ * The MSB is the number of bits in the full mask because BIT[0] is
+ * always 0.
+ *
+ * In the special 3 Rank interleaving case, a single bit is flipped
+ * without swapping with the most significant bit. This can be handled
+ * by keeping the MSB where it is and ignoring the single zero bit.
+ */
+ msb = fls(addr_mask_orig) - 1;
+ weight = hweight_long(addr_mask_orig);
+ num_zero_bits = msb - weight - !!(cs_mode & CS_3R_INTERLEAVE);
+
+ /* Take the number of zero bits off from the top of the mask. */
+ addr_mask_deinterleaved = GENMASK_ULL(msb - num_zero_bits, 1);
+
+ edac_dbg(1, "CS%d DIMM%d AddrMasks:\n", csrow_nr, dimm);
+ edac_dbg(1, " Original AddrMask: 0x%x\n", addr_mask_orig);
+ edac_dbg(1, " Deinterleaved AddrMask: 0x%x\n", addr_mask_deinterleaved);
+
+ /* Register [31:1] = Address [39:9]. Size is in kBs here. */
+ size = (addr_mask_deinterleaved >> 2) + 1;
+
+ /* Return size in MBs. */
+ return size >> 10;
+}
+
static int umc_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
unsigned int cs_mode, int csrow_nr)
{
- u32 addr_mask_orig, addr_mask_deinterleaved;
- u32 msb, weight, num_zero_bits;
int cs_mask_nr = csrow_nr;
+ u32 addr_mask_orig;
int dimm, size = 0;
/* No Chip Selects are enabled. */
@@ -1475,33 +1578,7 @@ static int umc_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
else
addr_mask_orig = pvt->csels[umc].csmasks[cs_mask_nr];
- /*
- * The number of zero bits in the mask is equal to the number of bits
- * in a full mask minus the number of bits in the current mask.
- *
- * The MSB is the number of bits in the full mask because BIT[0] is
- * always 0.
- *
- * In the special 3 Rank interleaving case, a single bit is flipped
- * without swapping with the most significant bit. This can be handled
- * by keeping the MSB where it is and ignoring the single zero bit.
- */
- msb = fls(addr_mask_orig) - 1;
- weight = hweight_long(addr_mask_orig);
- num_zero_bits = msb - weight - !!(cs_mode & CS_3R_INTERLEAVE);
-
- /* Take the number of zero bits off from the top of the mask. */
- addr_mask_deinterleaved = GENMASK_ULL(msb - num_zero_bits, 1);
-
- edac_dbg(1, "CS%d DIMM%d AddrMasks:\n", csrow_nr, dimm);
- edac_dbg(1, " Original AddrMask: 0x%x\n", addr_mask_orig);
- edac_dbg(1, " Deinterleaved AddrMask: 0x%x\n", addr_mask_deinterleaved);
-
- /* Register [31:1] = Address [39:9]. Size is in kBs here. */
- size = (addr_mask_deinterleaved >> 2) + 1;
-
- /* Return size in MBs. */
- return size >> 10;
+ return __addr_mask_to_cs_size(addr_mask_orig, cs_mode, csrow_nr, dimm);
}
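
A worked instance of the mask arithmetic factored into __addr_mask_to_cs_size(): count the zero bits above bit 0, peel them off the top of the mask, then convert register bits [31:1] (address bits [39:9]) into a size. The sketch below uses an assumed mask covering bits [26:1] with bit 20 cleared by interleaving, which works out to 16384 MB:

#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

static int fls32(uint32_t x)     { int n = 0; while (x) { n++; x >>= 1; } return n; }
static int hweight32(uint32_t x) { int n = 0; while (x) { n += x & 1; x >>= 1; } return n; }

/* Mirror __addr_mask_to_cs_size(); cs_3r is the CS_3R_INTERLEAVE special case. */
static int addr_mask_to_mb(uint32_t addr_mask, int cs_3r)
{
	uint32_t msb = fls32(addr_mask) - 1;
	uint32_t weight = hweight32(addr_mask);
	uint32_t num_zero_bits = msb - weight - !!cs_3r;
	uint64_t deinterleaved = GENMASK_ULL(msb - num_zero_bits, 1);

	/* Register [31:1] = Address [39:9]: (mask >> 2) + 1 is the size in kB. */
	return (int)(((deinterleaved >> 2) + 1) >> 10);
}

int main(void)
{
	/* assumed mask: bits [26:1] set, bit 20 cleared by 2-way interleave */
	printf("%d MB\n", addr_mask_to_mb(0x07effffe, 0)); /* -> 16384 */
	return 0;
}
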
static void umc_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
@@ -2992,6 +3069,8 @@ static void decode_umc_error(int node_id, struct mce *m)
struct err_info err;
u64 sys_addr;
+ node_id = fixup_node_id(node_id, m);
+
mci = edac_mc_find(node_id);
if (!mci)
return;
@@ -3675,6 +3754,227 @@ static int umc_hw_info_get(struct amd64_pvt *pvt)
return 0;
}
+/*
+ * The CPUs have one channel per UMC, so UMC number is equivalent to a
+ * channel number. The GPUs have 8 channels per UMC, so the UMC number no
+ * longer works as a channel number.
+ *
+ * The channel number within a GPU UMC is given in MCA_IPID[15:12].
+ * However, the IDs are split such that two UMC values map to one UMC, and
+ * the channel numbers are split into two groups of four.
+ *
+ * Refer to the comment on gpu_get_umc_base().
+ *
+ * For example,
+ * UMC0 CH[3:0] = 0x0005[3:0]000
+ * UMC0 CH[7:4] = 0x0015[3:0]000
+ * UMC1 CH[3:0] = 0x0025[3:0]000
+ * UMC1 CH[7:4] = 0x0035[3:0]000
+ */
+static void gpu_get_err_info(struct mce *m, struct err_info *err)
+{
+ u8 ch = (m->ipid & GENMASK(31, 0)) >> 20;
+ u8 phy = ((m->ipid >> 12) & 0xf);
+
+ err->channel = ch % 2 ? phy + 4 : phy;
+ err->csrow = phy;
+}
+
+static int gpu_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
+ unsigned int cs_mode, int csrow_nr)
+{
+ u32 addr_mask_orig = pvt->csels[umc].csmasks[csrow_nr];
+
+ return __addr_mask_to_cs_size(addr_mask_orig, cs_mode, csrow_nr, csrow_nr >> 1);
+}
+
+static void gpu_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
+{
+ int size, cs_mode, cs = 0;
+
+ edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl);
+
+ cs_mode = CS_EVEN_PRIMARY | CS_ODD_PRIMARY;
+
+ for_each_chip_select(cs, ctrl, pvt) {
+ size = gpu_addr_mask_to_cs_size(pvt, ctrl, cs_mode, cs);
+ amd64_info(EDAC_MC ": %d: %5dMB\n", cs, size);
+ }
+}
+
+static void gpu_dump_misc_regs(struct amd64_pvt *pvt)
+{
+ struct amd64_umc *umc;
+ u32 i;
+
+ for_each_umc(i) {
+ umc = &pvt->umc[i];
+
+ edac_dbg(1, "UMC%d UMC cfg: 0x%x\n", i, umc->umc_cfg);
+ edac_dbg(1, "UMC%d SDP ctrl: 0x%x\n", i, umc->sdp_ctrl);
+ edac_dbg(1, "UMC%d ECC ctrl: 0x%x\n", i, umc->ecc_ctrl);
+ edac_dbg(1, "UMC%d All HBMs support ECC: yes\n", i);
+
+ gpu_debug_display_dimm_sizes(pvt, i);
+ }
+}
+
+static u32 gpu_get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
+{
+ u32 nr_pages;
+ int cs_mode = CS_EVEN_PRIMARY | CS_ODD_PRIMARY;
+
+ nr_pages = gpu_addr_mask_to_cs_size(pvt, dct, cs_mode, csrow_nr);
+ nr_pages <<= 20 - PAGE_SHIFT;
+
+ edac_dbg(0, "csrow: %d, channel: %d\n", csrow_nr, dct);
+ edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);
+
+ return nr_pages;
+}
+
+static void gpu_init_csrows(struct mem_ctl_info *mci)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+ struct dimm_info *dimm;
+ u8 umc, cs;
+
+ for_each_umc(umc) {
+ for_each_chip_select(cs, umc, pvt) {
+ if (!csrow_enabled(cs, umc, pvt))
+ continue;
+
+ dimm = mci->csrows[umc]->channels[cs]->dimm;
+
+ edac_dbg(1, "MC node: %d, csrow: %d\n",
+ pvt->mc_node_id, cs);
+
+ dimm->nr_pages = gpu_get_csrow_nr_pages(pvt, umc, cs);
+ dimm->edac_mode = EDAC_SECDED;
+ dimm->mtype = MEM_HBM2;
+ dimm->dtype = DEV_X16;
+ dimm->grain = 64;
+ }
+ }
+}
+
+static void gpu_setup_mci_misc_attrs(struct mem_ctl_info *mci)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+
+ mci->mtype_cap = MEM_FLAG_HBM2;
+ mci->edac_ctl_cap = EDAC_FLAG_SECDED;
+
+ mci->edac_cap = EDAC_FLAG_EC;
+ mci->mod_name = EDAC_MOD_STR;
+ mci->ctl_name = pvt->ctl_name;
+ mci->dev_name = pci_name(pvt->F3);
+ mci->ctl_page_to_phys = NULL;
+
+ gpu_init_csrows(mci);
+}
+
+/* ECC is enabled by default on GPU nodes */
+static bool gpu_ecc_enabled(struct amd64_pvt *pvt)
+{
+ return true;
+}
+
+static inline u32 gpu_get_umc_base(u8 umc, u8 channel)
+{
+ /*
+ * On CPUs, there is one channel per UMC, so UMC numbering equals
+ * channel numbering. On GPUs, there are eight channels per UMC,
+ * so the channel numbering is different from UMC numbering.
+ *
+ * On CPU nodes, channels are selected in the 6th nibble:
+ * UMC chY[3:0]= [(chY*2 + 1) : (chY*2)]50000;
+ *
+ * On GPU nodes, channels are selected in the 3rd nibble:
+ * HBM chX[3:0]= [Y ]5X[3:0]000;
+ * HBM chX[7:4]= [Y+1]5X[3:0]000
+ */
+ umc *= 2;
+
+ if (channel >= 4)
+ umc++;
+
+ return 0x50000 + (umc << 20) + ((channel % 4) << 12);
+}
+
+static void gpu_read_mc_regs(struct amd64_pvt *pvt)
+{
+ u8 nid = pvt->mc_node_id;
+ struct amd64_umc *umc;
+ u32 i, umc_base;
+
+ /* Read registers from each UMC */
+ for_each_umc(i) {
+ umc_base = gpu_get_umc_base(i, 0);
+ umc = &pvt->umc[i];
+
+ amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg);
+ amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl);
+ amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl);
+ }
+}
+
+static void gpu_read_base_mask(struct amd64_pvt *pvt)
+{
+ u32 base_reg, mask_reg;
+ u32 *base, *mask;
+ int umc, cs;
+
+ for_each_umc(umc) {
+ for_each_chip_select(cs, umc, pvt) {
+ base_reg = gpu_get_umc_base(umc, cs) + UMCCH_BASE_ADDR;
+ base = &pvt->csels[umc].csbases[cs];
+
+ if (!amd_smn_read(pvt->mc_node_id, base_reg, base)) {
+ edac_dbg(0, " DCSB%d[%d]=0x%08x reg: 0x%x\n",
+ umc, cs, *base, base_reg);
+ }
+
+ mask_reg = gpu_get_umc_base(umc, cs) + UMCCH_ADDR_MASK;
+ mask = &pvt->csels[umc].csmasks[cs];
+
+ if (!amd_smn_read(pvt->mc_node_id, mask_reg, mask)) {
+ edac_dbg(0, " DCSM%d[%d]=0x%08x reg: 0x%x\n",
+ umc, cs, *mask, mask_reg);
+ }
+ }
+ }
+}
+
+static void gpu_prep_chip_selects(struct amd64_pvt *pvt)
+{
+ int umc;
+
+ for_each_umc(umc) {
+ pvt->csels[umc].b_cnt = 8;
+ pvt->csels[umc].m_cnt = 8;
+ }
+}
+
+static int gpu_hw_info_get(struct amd64_pvt *pvt)
+{
+ int ret;
+
+ ret = gpu_get_node_map();
+ if (ret)
+ return ret;
+
+ pvt->umc = kcalloc(pvt->max_mcs, sizeof(struct amd64_umc), GFP_KERNEL);
+ if (!pvt->umc)
+ return -ENOMEM;
+
+ gpu_prep_chip_selects(pvt);
+ gpu_read_base_mask(pvt);
+ gpu_read_mc_regs(pvt);
+
+ return 0;
+}
+
static void hw_info_put(struct amd64_pvt *pvt)
{
pci_dev_put(pvt->F1);
@@ -3690,6 +3990,14 @@ static struct low_ops umc_ops = {
.get_err_info = umc_get_err_info,
};
+static struct low_ops gpu_ops = {
+ .hw_info_get = gpu_hw_info_get,
+ .ecc_enabled = gpu_ecc_enabled,
+ .setup_mci_misc_attrs = gpu_setup_mci_misc_attrs,
+ .dump_misc_regs = gpu_dump_misc_regs,
+ .get_err_info = gpu_get_err_info,
+};
+
/* Use Family 16h versions for defaults and adjust as needed below. */
static struct low_ops dct_ops = {
.map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
@@ -3813,9 +4121,27 @@ static int per_family_init(struct amd64_pvt *pvt)
case 0x20 ... 0x2f:
pvt->ctl_name = "F19h_M20h";
break;
+ case 0x30 ... 0x3f:
+ if (pvt->F3->device == PCI_DEVICE_ID_AMD_MI200_DF_F3) {
+ pvt->ctl_name = "MI200";
+ pvt->max_mcs = 4;
+ pvt->ops = &gpu_ops;
+ } else {
+ pvt->ctl_name = "F19h_M30h";
+ pvt->max_mcs = 8;
+ }
+ break;
case 0x50 ... 0x5f:
pvt->ctl_name = "F19h_M50h";
break;
+ case 0x60 ... 0x6f:
+ pvt->ctl_name = "F19h_M60h";
+ pvt->flags.zn_regs_v2 = 1;
+ break;
+ case 0x70 ... 0x7f:
+ pvt->ctl_name = "F19h_M70h";
+ pvt->flags.zn_regs_v2 = 1;
+ break;
case 0xa0 ... 0xaf:
pvt->ctl_name = "F19h_MA0h";
pvt->max_mcs = 12;
@@ -3846,11 +4172,17 @@ static int init_one_instance(struct amd64_pvt *pvt)
struct edac_mc_layer layers[2];
int ret = -ENOMEM;
+ /*
+ * For heterogeneous (GPU) family nodes, the EDAC CHIP_SELECT and CHANNEL
+ * layer sizes must be swapped so the GPU topology fits the two layers.
+ */
layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
- layers[0].size = pvt->csels[0].b_cnt;
+ layers[0].size = (pvt->F3->device == PCI_DEVICE_ID_AMD_MI200_DF_F3) ?
+ pvt->max_mcs : pvt->csels[0].b_cnt;
layers[0].is_virt_csrow = true;
layers[1].type = EDAC_MC_LAYER_CHANNEL;
- layers[1].size = pvt->max_mcs;
+ layers[1].size = (pvt->F3->device == PCI_DEVICE_ID_AMD_MI200_DF_F3) ?
+ pvt->csels[0].b_cnt : pvt->max_mcs;
layers[1].is_virt_csrow = false;
mci = edac_mc_alloc(pvt->mc_node_id, ARRAY_SIZE(layers), layers, 0);
@@ -4074,8 +4406,6 @@ static int __init amd64_edac_init(void)
amd64_err("%s on 32-bit is unsupported. USE AT YOUR OWN RISK!\n", EDAC_MOD_STR);
#endif
- printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);
-
return 0;
err_pci:
@@ -4121,7 +4451,7 @@ module_exit(amd64_edac_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, Dave Peterson, Thayne Harbaugh; AMD");
-MODULE_DESCRIPTION("MC support for AMD64 memory controllers - " EDAC_AMD64_VERSION);
+MODULE_DESCRIPTION("MC support for AMD64 memory controllers");
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
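
The nibble layout that gpu_get_umc_base() encodes is easiest to verify numerically: each UMC owns two register instances, channels 4-7 select the odd one, and the channel index lands in the third nibble. This standalone sketch reproduces the address table from the function's comment:

#include <stdint.h>
#include <stdio.h>

/*
 * Mirror gpu_get_umc_base(): two register instances per UMC, eight channels
 * per UMC, channel selected in the 3rd nibble, odd instance for channels 4-7.
 */
static uint32_t gpu_get_umc_base(uint8_t umc, uint8_t channel)
{
	umc *= 2;
	if (channel >= 4)
		umc++;

	return 0x50000 + ((uint32_t)umc << 20) + ((uint32_t)(channel % 4) << 12);
}

int main(void)
{
	for (int umc = 0; umc < 2; umc++)
		for (int ch = 0; ch < 8; ch++)
			printf("UMC%d CH%d -> 0x%07x\n", umc, ch,
			       gpu_get_umc_base(umc, ch));
	/*
	 * Prints 0x0050000..0x0053000 and 0x0150000..0x0153000 for UMC0,
	 * then 0x0250000.. and 0x0350000.. for UMC1, matching the comment.
	 */
	return 0;
}
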
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index e84fe0d4120a..5a4e4a59682b 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -16,6 +16,7 @@
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/edac.h>
+#include <linux/bitfield.h>
#include <asm/cpu_device_id.h>
#include <asm/msr.h>
#include "edac_module.h"
@@ -85,7 +86,6 @@
* sections 3.5.4 and 3.5.5 for more information.
*/
-#define EDAC_AMD64_VERSION "3.5.0"
#define EDAC_MOD_STR "amd64_edac"
/* Extended Model from CPUID, for CPU Revision numbers */
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index cc5c63feb26a..9215c06783df 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -1186,7 +1186,8 @@ static void decode_smca_error(struct mce *m)
if (xec < smca_mce_descs[bank_type].num_descs)
pr_cont(", %s.\n", smca_mce_descs[bank_type].descs[xec]);
- if (bank_type == SMCA_UMC && xec == 0 && decode_dram_ecc)
+ if ((bank_type == SMCA_UMC || bank_type == SMCA_UMC_V2) &&
+ xec == 0 && decode_dram_ecc)
decode_dram_ecc(topology_die_id(m->extcpu), m);
}
diff --git a/drivers/edac/npcm_edac.c b/drivers/edac/npcm_edac.c
new file mode 100644
index 000000000000..12b95be3e989
--- /dev/null
+++ b/drivers/edac/npcm_edac.c
@@ -0,0 +1,543 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (c) 2022 Nuvoton Technology Corporation
+
+#include <linux/debugfs.h>
+#include <linux/iopoll.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include "edac_module.h"
+
+#define EDAC_MOD_NAME "npcm-edac"
+#define EDAC_MSG_SIZE 256
+
+/* chip serials */
+#define NPCM7XX_CHIP BIT(0)
+#define NPCM8XX_CHIP BIT(1)
+
+/* syndrome values */
+#define UE_SYNDROME 0x03
+
+/* error injection */
+#define ERROR_TYPE_CORRECTABLE 0
+#define ERROR_TYPE_UNCORRECTABLE 1
+#define ERROR_LOCATION_DATA 0
+#define ERROR_LOCATION_CHECKCODE 1
+#define ERROR_BIT_DATA_MAX 63
+#define ERROR_BIT_CHECKCODE_MAX 7
+
+static char data_synd[] = {
+ 0xf4, 0xf1, 0xec, 0xea, 0xe9, 0xe6, 0xe5, 0xe3,
+ 0xdc, 0xda, 0xd9, 0xd6, 0xd5, 0xd3, 0xce, 0xcb,
+ 0xb5, 0xb0, 0xad, 0xab, 0xa8, 0xa7, 0xa4, 0xa2,
+ 0x9d, 0x9b, 0x98, 0x97, 0x94, 0x92, 0x8f, 0x8a,
+ 0x75, 0x70, 0x6d, 0x6b, 0x68, 0x67, 0x64, 0x62,
+ 0x5e, 0x5b, 0x58, 0x57, 0x54, 0x52, 0x4f, 0x4a,
+ 0x34, 0x31, 0x2c, 0x2a, 0x29, 0x26, 0x25, 0x23,
+ 0x1c, 0x1a, 0x19, 0x16, 0x15, 0x13, 0x0e, 0x0b
+};
+
+static struct regmap *npcm_regmap;
+
+struct npcm_platform_data {
+ /* chip serials */
+ int chip;
+
+ /* memory controller registers */
+ u32 ctl_ecc_en;
+ u32 ctl_int_status;
+ u32 ctl_int_ack;
+ u32 ctl_int_mask_master;
+ u32 ctl_int_mask_ecc;
+ u32 ctl_ce_addr_l;
+ u32 ctl_ce_addr_h;
+ u32 ctl_ce_data_l;
+ u32 ctl_ce_data_h;
+ u32 ctl_ce_synd;
+ u32 ctl_ue_addr_l;
+ u32 ctl_ue_addr_h;
+ u32 ctl_ue_data_l;
+ u32 ctl_ue_data_h;
+ u32 ctl_ue_synd;
+ u32 ctl_source_id;
+ u32 ctl_controller_busy;
+ u32 ctl_xor_check_bits;
+
+ /* masks and shifts */
+ u32 ecc_en_mask;
+ u32 int_status_ce_mask;
+ u32 int_status_ue_mask;
+ u32 int_ack_ce_mask;
+ u32 int_ack_ue_mask;
+ u32 int_mask_master_non_ecc_mask;
+ u32 int_mask_master_global_mask;
+ u32 int_mask_ecc_non_event_mask;
+ u32 ce_addr_h_mask;
+ u32 ce_synd_mask;
+ u32 ce_synd_shift;
+ u32 ue_addr_h_mask;
+ u32 ue_synd_mask;
+ u32 ue_synd_shift;
+ u32 source_id_ce_mask;
+ u32 source_id_ce_shift;
+ u32 source_id_ue_mask;
+ u32 source_id_ue_shift;
+ u32 controller_busy_mask;
+ u32 xor_check_bits_mask;
+ u32 xor_check_bits_shift;
+ u32 writeback_en_mask;
+ u32 fwc_mask;
+};
+
+struct priv_data {
+ void __iomem *reg;
+ char message[EDAC_MSG_SIZE];
+ const struct npcm_platform_data *pdata;
+
+ /* error injection */
+ struct dentry *debugfs;
+ u8 error_type;
+ u8 location;
+ u8 bit;
+};
+
+static void handle_ce(struct mem_ctl_info *mci)
+{
+ struct priv_data *priv = mci->pvt_info;
+ const struct npcm_platform_data *pdata;
+ u32 val_h = 0, val_l, id, synd;
+ u64 addr = 0, data = 0;
+
+ pdata = priv->pdata;
+ regmap_read(npcm_regmap, pdata->ctl_ce_addr_l, &val_l);
+ if (pdata->chip == NPCM8XX_CHIP) {
+ regmap_read(npcm_regmap, pdata->ctl_ce_addr_h, &val_h);
+ val_h &= pdata->ce_addr_h_mask;
+ }
+ addr = ((addr | val_h) << 32) | val_l;
+
+ regmap_read(npcm_regmap, pdata->ctl_ce_data_l, &val_l);
+ if (pdata->chip == NPCM8XX_CHIP)
+ regmap_read(npcm_regmap, pdata->ctl_ce_data_h, &val_h);
+ data = ((data | val_h) << 32) | val_l;
+
+ regmap_read(npcm_regmap, pdata->ctl_source_id, &id);
+ id = (id & pdata->source_id_ce_mask) >> pdata->source_id_ce_shift;
+
+ regmap_read(npcm_regmap, pdata->ctl_ce_synd, &synd);
+ synd = (synd & pdata->ce_synd_mask) >> pdata->ce_synd_shift;
+
+ snprintf(priv->message, EDAC_MSG_SIZE,
+ "addr = 0x%llx, data = 0x%llx, id = 0x%x", addr, data, id);
+
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, addr >> PAGE_SHIFT,
+ addr & ~PAGE_MASK, synd, 0, 0, -1, priv->message, "");
+}
+
+static void handle_ue(struct mem_ctl_info *mci)
+{
+ struct priv_data *priv = mci->pvt_info;
+ const struct npcm_platform_data *pdata;
+ u32 val_h = 0, val_l, id, synd;
+ u64 addr = 0, data = 0;
+
+ pdata = priv->pdata;
+ regmap_read(npcm_regmap, pdata->ctl_ue_addr_l, &val_l);
+ if (pdata->chip == NPCM8XX_CHIP) {
+ regmap_read(npcm_regmap, pdata->ctl_ue_addr_h, &val_h);
+ val_h &= pdata->ue_addr_h_mask;
+ }
+ addr = ((addr | val_h) << 32) | val_l;
+
+ regmap_read(npcm_regmap, pdata->ctl_ue_data_l, &val_l);
+ if (pdata->chip == NPCM8XX_CHIP)
+ regmap_read(npcm_regmap, pdata->ctl_ue_data_h, &val_h);
+ data = ((data | val_h) << 32) | val_l;
+
+ regmap_read(npcm_regmap, pdata->ctl_source_id, &id);
+ id = (id & pdata->source_id_ue_mask) >> pdata->source_id_ue_shift;
+
+ regmap_read(npcm_regmap, pdata->ctl_ue_synd, &synd);
+ synd = (synd & pdata->ue_synd_mask) >> pdata->ue_synd_shift;
+
+ snprintf(priv->message, EDAC_MSG_SIZE,
+ "addr = 0x%llx, data = 0x%llx, id = 0x%x", addr, data, id);
+
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, addr >> PAGE_SHIFT,
+ addr & ~PAGE_MASK, synd, 0, 0, -1, priv->message, "");
+}
+
+static irqreturn_t edac_ecc_isr(int irq, void *dev_id)
+{
+ const struct npcm_platform_data *pdata;
+ struct mem_ctl_info *mci = dev_id;
+ u32 status;
+
+ pdata = ((struct priv_data *)mci->pvt_info)->pdata;
+ regmap_read(npcm_regmap, pdata->ctl_int_status, &status);
+ if (status & pdata->int_status_ce_mask) {
+ handle_ce(mci);
+
+ /* acknowledge the CE interrupt */
+ regmap_write(npcm_regmap, pdata->ctl_int_ack,
+ pdata->int_ack_ce_mask);
+ return IRQ_HANDLED;
+ } else if (status & pdata->int_status_ue_mask) {
+ handle_ue(mci);
+
+ /* acknowledge the UE interrupt */
+ regmap_write(npcm_regmap, pdata->ctl_int_ack,
+ pdata->int_ack_ue_mask);
+ return IRQ_HANDLED;
+ }
+
+ WARN_ON_ONCE(1);
+ return IRQ_NONE;
+}
+
+static ssize_t force_ecc_error(struct file *file, const char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct device *dev = file->private_data;
+ struct mem_ctl_info *mci = to_mci(dev);
+ struct priv_data *priv = mci->pvt_info;
+ const struct npcm_platform_data *pdata;
+ u32 val, syndrome;
+ int ret;
+
+ pdata = priv->pdata;
+ edac_printk(KERN_INFO, EDAC_MOD_NAME,
+ "force an ECC error, type = %d, location = %d, bit = %d\n",
+ priv->error_type, priv->location, priv->bit);
+
+ /* ensure no pending writes */
+ ret = regmap_read_poll_timeout(npcm_regmap, pdata->ctl_controller_busy,
+ val, !(val & pdata->controller_busy_mask),
+ 1000, 10000);
+ if (ret) {
+ edac_printk(KERN_INFO, EDAC_MOD_NAME,
+ "wait pending writes timeout\n");
+ return count;
+ }
+
+ regmap_read(npcm_regmap, pdata->ctl_xor_check_bits, &val);
+ val &= ~pdata->xor_check_bits_mask;
+
+ /* write syndrome to XOR_CHECK_BITS */
+ if (priv->error_type == ERROR_TYPE_CORRECTABLE) {
+ if (priv->location == ERROR_LOCATION_DATA &&
+ priv->bit > ERROR_BIT_DATA_MAX) {
+ edac_printk(KERN_INFO, EDAC_MOD_NAME,
+ "data bit should not exceed %d (%d)\n",
+ ERROR_BIT_DATA_MAX, priv->bit);
+ return count;
+ }
+
+ if (priv->location == ERROR_LOCATION_CHECKCODE &&
+ priv->bit > ERROR_BIT_CHECKCODE_MAX) {
+ edac_printk(KERN_INFO, EDAC_MOD_NAME,
+ "checkcode bit should not exceed %d (%d)\n",
+ ERROR_BIT_CHECKCODE_MAX, priv->bit);
+ return count;
+ }
+
+ syndrome = priv->location ? 1 << priv->bit
+ : data_synd[priv->bit];
+
+ regmap_write(npcm_regmap, pdata->ctl_xor_check_bits,
+ val | (syndrome << pdata->xor_check_bits_shift) |
+ pdata->writeback_en_mask);
+ } else if (priv->error_type == ERROR_TYPE_UNCORRECTABLE) {
+ regmap_write(npcm_regmap, pdata->ctl_xor_check_bits,
+ val | (UE_SYNDROME << pdata->xor_check_bits_shift));
+ }
+
+ /* force write check */
+ regmap_update_bits(npcm_regmap, pdata->ctl_xor_check_bits,
+ pdata->fwc_mask, pdata->fwc_mask);
+
+ return count;
+}
+
+static const struct file_operations force_ecc_error_fops = {
+ .open = simple_open,
+ .write = force_ecc_error,
+ .llseek = generic_file_llseek,
+};
+
+/*
+ * Setup debugfs for error injection.
+ *
+ * Nodes:
+ * error_type - 0: CE, 1: UE
+ * location - 0: data, 1: checkcode
+ * bit - 0 ~ 63 for data and 0 ~ 7 for checkcode
+ * force_ecc_error - trigger
+ *
+ * Examples:
+ * 1. Inject a correctable error (CE) at checkcode bit 7.
+ * ~# echo 0 > /sys/kernel/debug/edac/npcm-edac/error_type
+ * ~# echo 1 > /sys/kernel/debug/edac/npcm-edac/location
+ * ~# echo 7 > /sys/kernel/debug/edac/npcm-edac/bit
+ * ~# echo 1 > /sys/kernel/debug/edac/npcm-edac/force_ecc_error
+ *
+ * 2. Inject an uncorrectable error (UE).
+ * ~# echo 1 > /sys/kernel/debug/edac/npcm-edac/error_type
+ * ~# echo 1 > /sys/kernel/debug/edac/npcm-edac/force_ecc_error
+ */
+static void setup_debugfs(struct mem_ctl_info *mci)
+{
+ struct priv_data *priv = mci->pvt_info;
+
+ priv->debugfs = edac_debugfs_create_dir(mci->mod_name);
+ if (!priv->debugfs)
+ return;
+
+ edac_debugfs_create_x8("error_type", 0644, priv->debugfs, &priv->error_type);
+ edac_debugfs_create_x8("location", 0644, priv->debugfs, &priv->location);
+ edac_debugfs_create_x8("bit", 0644, priv->debugfs, &priv->bit);
+ edac_debugfs_create_file("force_ecc_error", 0200, priv->debugfs,
+ &mci->dev, &force_ecc_error_fops);
+}
+
+static int setup_irq(struct mem_ctl_info *mci, struct platform_device *pdev)
+{
+ const struct npcm_platform_data *pdata;
+ int ret, irq;
+
+ pdata = ((struct priv_data *)mci->pvt_info)->pdata;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ edac_printk(KERN_ERR, EDAC_MOD_NAME, "IRQ not defined in DTS\n");
+ return irq;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, edac_ecc_isr, 0,
+ dev_name(&pdev->dev), mci);
+ if (ret < 0) {
+ edac_printk(KERN_ERR, EDAC_MOD_NAME, "failed to request IRQ\n");
+ return ret;
+ }
+
+ /* enable the functional group of ECC and mask the others */
+ regmap_write(npcm_regmap, pdata->ctl_int_mask_master,
+ pdata->int_mask_master_non_ecc_mask);
+
+ if (pdata->chip == NPCM8XX_CHIP)
+ regmap_write(npcm_regmap, pdata->ctl_int_mask_ecc,
+ pdata->int_mask_ecc_non_event_mask);
+
+ return 0;
+}
+
+static const struct regmap_config npcm_regmap_cfg = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+};
+
+static int edac_probe(struct platform_device *pdev)
+{
+ const struct npcm_platform_data *pdata;
+ struct device *dev = &pdev->dev;
+ struct edac_mc_layer layers[1];
+ struct mem_ctl_info *mci;
+ struct priv_data *priv;
+ void __iomem *reg;
+ u32 val;
+ int rc;
+
+ reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ npcm_regmap = devm_regmap_init_mmio(dev, reg, &npcm_regmap_cfg);
+ if (IS_ERR(npcm_regmap))
+ return PTR_ERR(npcm_regmap);
+
+ pdata = of_device_get_match_data(dev);
+ if (!pdata)
+ return -EINVAL;
+
+ /* bail out if ECC is not enabled */
+ regmap_read(npcm_regmap, pdata->ctl_ecc_en, &val);
+ if (!(val & pdata->ecc_en_mask)) {
+ edac_printk(KERN_ERR, EDAC_MOD_NAME, "ECC is not enabled\n");
+ return -EPERM;
+ }
+
+ edac_op_state = EDAC_OPSTATE_INT;
+
+ layers[0].type = EDAC_MC_LAYER_ALL_MEM;
+ layers[0].size = 1;
+
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
+ sizeof(struct priv_data));
+ if (!mci)
+ return -ENOMEM;
+
+ mci->pdev = &pdev->dev;
+ priv = mci->pvt_info;
+ priv->reg = reg;
+ priv->pdata = pdata;
+ platform_set_drvdata(pdev, mci);
+
+ mci->mtype_cap = MEM_FLAG_DDR4;
+ mci->edac_ctl_cap = EDAC_FLAG_SECDED;
+ mci->scrub_cap = SCRUB_FLAG_HW_SRC;
+ mci->scrub_mode = SCRUB_HW_SRC;
+ mci->edac_cap = EDAC_FLAG_SECDED;
+ mci->ctl_name = "npcm_ddr_controller";
+ mci->dev_name = dev_name(&pdev->dev);
+ mci->mod_name = EDAC_MOD_NAME;
+ mci->ctl_page_to_phys = NULL;
+
+ rc = setup_irq(mci, pdev);
+ if (rc)
+ goto free_edac_mc;
+
+ rc = edac_mc_add_mc(mci);
+ if (rc)
+ goto free_edac_mc;
+
+ if (IS_ENABLED(CONFIG_EDAC_DEBUG) && pdata->chip == NPCM8XX_CHIP)
+ setup_debugfs(mci);
+
+ return rc;
+
+free_edac_mc:
+ edac_mc_free(mci);
+ return rc;
+}
+
+static int edac_remove(struct platform_device *pdev)
+{
+ struct mem_ctl_info *mci = platform_get_drvdata(pdev);
+ struct priv_data *priv = mci->pvt_info;
+ const struct npcm_platform_data *pdata;
+
+ pdata = priv->pdata;
+ if (IS_ENABLED(CONFIG_EDAC_DEBUG) && pdata->chip == NPCM8XX_CHIP)
+ edac_debugfs_remove_recursive(priv->debugfs);
+
+ edac_mc_del_mc(&pdev->dev);
+ edac_mc_free(mci);
+
+ regmap_write(npcm_regmap, pdata->ctl_int_mask_master,
+ pdata->int_mask_master_global_mask);
+ regmap_update_bits(npcm_regmap, pdata->ctl_ecc_en, pdata->ecc_en_mask, 0);
+
+ return 0;
+}
+
+static const struct npcm_platform_data npcm750_edac = {
+ .chip = NPCM7XX_CHIP,
+
+ /* memory controller registers */
+ .ctl_ecc_en = 0x174,
+ .ctl_int_status = 0x1d0,
+ .ctl_int_ack = 0x1d4,
+ .ctl_int_mask_master = 0x1d8,
+ .ctl_ce_addr_l = 0x188,
+ .ctl_ce_data_l = 0x190,
+ .ctl_ce_synd = 0x18c,
+ .ctl_ue_addr_l = 0x17c,
+ .ctl_ue_data_l = 0x184,
+ .ctl_ue_synd = 0x180,
+ .ctl_source_id = 0x194,
+
+ /* masks and shifts */
+ .ecc_en_mask = BIT(24),
+ .int_status_ce_mask = GENMASK(4, 3),
+ .int_status_ue_mask = GENMASK(6, 5),
+ .int_ack_ce_mask = GENMASK(4, 3),
+ .int_ack_ue_mask = GENMASK(6, 5),
+ .int_mask_master_non_ecc_mask = GENMASK(30, 7) | GENMASK(2, 0),
+ .int_mask_master_global_mask = BIT(31),
+ .ce_synd_mask = GENMASK(6, 0),
+ .ce_synd_shift = 0,
+ .ue_synd_mask = GENMASK(6, 0),
+ .ue_synd_shift = 0,
+ .source_id_ce_mask = GENMASK(29, 16),
+ .source_id_ce_shift = 16,
+ .source_id_ue_mask = GENMASK(13, 0),
+ .source_id_ue_shift = 0,
+};
+
+static const struct npcm_platform_data npcm845_edac = {
+ .chip = NPCM8XX_CHIP,
+
+ /* memory controller registers */
+ .ctl_ecc_en = 0x16c,
+ .ctl_int_status = 0x228,
+ .ctl_int_ack = 0x244,
+ .ctl_int_mask_master = 0x220,
+ .ctl_int_mask_ecc = 0x260,
+ .ctl_ce_addr_l = 0x18c,
+ .ctl_ce_addr_h = 0x190,
+ .ctl_ce_data_l = 0x194,
+ .ctl_ce_data_h = 0x198,
+ .ctl_ce_synd = 0x190,
+ .ctl_ue_addr_l = 0x17c,
+ .ctl_ue_addr_h = 0x180,
+ .ctl_ue_data_l = 0x184,
+ .ctl_ue_data_h = 0x188,
+ .ctl_ue_synd = 0x180,
+ .ctl_source_id = 0x19c,
+ .ctl_controller_busy = 0x20c,
+ .ctl_xor_check_bits = 0x174,
+
+ /* masks and shifts */
+ .ecc_en_mask = GENMASK(17, 16),
+ .int_status_ce_mask = GENMASK(1, 0),
+ .int_status_ue_mask = GENMASK(3, 2),
+ .int_ack_ce_mask = GENMASK(1, 0),
+ .int_ack_ue_mask = GENMASK(3, 2),
+ .int_mask_master_non_ecc_mask = GENMASK(30, 3) | GENMASK(1, 0),
+ .int_mask_master_global_mask = BIT(31),
+ .int_mask_ecc_non_event_mask = GENMASK(8, 4),
+ .ce_addr_h_mask = GENMASK(1, 0),
+ .ce_synd_mask = GENMASK(15, 8),
+ .ce_synd_shift = 8,
+ .ue_addr_h_mask = GENMASK(1, 0),
+ .ue_synd_mask = GENMASK(15, 8),
+ .ue_synd_shift = 8,
+ .source_id_ce_mask = GENMASK(29, 16),
+ .source_id_ce_shift = 16,
+ .source_id_ue_mask = GENMASK(13, 0),
+ .source_id_ue_shift = 0,
+ .controller_busy_mask = BIT(0),
+ .xor_check_bits_mask = GENMASK(23, 16),
+ .xor_check_bits_shift = 16,
+ .writeback_en_mask = BIT(24),
+ .fwc_mask = BIT(8),
+};
+
+static const struct of_device_id npcm_edac_of_match[] = {
+ {
+ .compatible = "nuvoton,npcm750-memory-controller",
+ .data = &npcm750_edac
+ },
+ {
+ .compatible = "nuvoton,npcm845-memory-controller",
+ .data = &npcm845_edac
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, npcm_edac_of_match);
+
+static struct platform_driver npcm_edac_driver = {
+ .driver = {
+ .name = "npcm-edac",
+ .of_match_table = npcm_edac_of_match,
+ },
+ .probe = edac_probe,
+ .remove = edac_remove,
+};
+
+module_platform_driver(npcm_edac_driver);
+
+MODULE_AUTHOR("Medad CChien <medadyoung@gmail.com>");
+MODULE_AUTHOR("Marvin Lin <kflin@nuvoton.com>");
+MODULE_DESCRIPTION("Nuvoton NPCM EDAC Driver");
+MODULE_LICENSE("GPL");
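
handle_ce() and handle_ue() assemble a 64-bit error address from two 32-bit registers, masking the high word on NPCM8XX where only bits [1:0] are implemented. A standalone sketch of that assembly with made-up register values:

#include <stdint.h>
#include <stdio.h>

#define CE_ADDR_H_MASK 0x3u /* ce_addr_h_mask = GENMASK(1, 0) on NPCM8XX */

static uint64_t assemble_addr(uint32_t val_h, uint32_t val_l, int is_npcm8xx)
{
	uint64_t addr = 0;

	if (is_npcm8xx)
		addr |= val_h & CE_ADDR_H_MASK; /* high word exists only here */

	/* Same shape as the driver: addr = ((addr | val_h) << 32) | val_l */
	return (addr << 32) | val_l;
}

int main(void)
{
	printf("0x%llx\n",
	       (unsigned long long)assemble_addr(0x2, 0xdeadb000, 1));
	return 0; /* -> 0x2deadb000 */
}
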
diff --git a/drivers/edac/qcom_edac.c b/drivers/edac/qcom_edac.c
index 265e0fb39bc7..b2db545c6810 100644
--- a/drivers/edac/qcom_edac.c
+++ b/drivers/edac/qcom_edac.c
@@ -21,30 +21,9 @@
#define TRP_SYN_REG_CNT 6
#define DRP_SYN_REG_CNT 8
-#define LLCC_COMMON_STATUS0 0x0003000c
#define LLCC_LB_CNT_MASK GENMASK(31, 28)
#define LLCC_LB_CNT_SHIFT 28
-/* Single & double bit syndrome register offsets */
-#define TRP_ECC_SB_ERR_SYN0 0x0002304c
-#define TRP_ECC_DB_ERR_SYN0 0x00020370
-#define DRP_ECC_SB_ERR_SYN0 0x0004204c
-#define DRP_ECC_DB_ERR_SYN0 0x00042070
-
-/* Error register offsets */
-#define TRP_ECC_ERROR_STATUS1 0x00020348
-#define TRP_ECC_ERROR_STATUS0 0x00020344
-#define DRP_ECC_ERROR_STATUS1 0x00042048
-#define DRP_ECC_ERROR_STATUS0 0x00042044
-
-/* TRP, DRP interrupt register offsets */
-#define DRP_INTERRUPT_STATUS 0x00041000
-#define TRP_INTERRUPT_0_STATUS 0x00020480
-#define DRP_INTERRUPT_CLEAR 0x00041008
-#define DRP_ECC_ERROR_CNTR_CLEAR 0x00040004
-#define TRP_INTERRUPT_0_CLEAR 0x00020484
-#define TRP_ECC_ERROR_CNTR_CLEAR 0x00020440
-
/* Mask and shift macros */
#define ECC_DB_ERR_COUNT_MASK GENMASK(4, 0)
#define ECC_DB_ERR_WAYS_MASK GENMASK(31, 16)
@@ -60,15 +39,6 @@
#define DRP_TRP_INT_CLEAR GENMASK(1, 0)
#define DRP_TRP_CNT_CLEAR GENMASK(1, 0)
-/* Config registers offsets*/
-#define DRP_ECC_ERROR_CFG 0x00040000
-
-/* Tag RAM, Data RAM interrupt register offsets */
-#define CMN_INTERRUPT_0_ENABLE 0x0003001c
-#define CMN_INTERRUPT_2_ENABLE 0x0003003c
-#define TRP_INTERRUPT_0_ENABLE 0x00020488
-#define DRP_INTERRUPT_ENABLE 0x0004100c
-
#define SB_ERROR_THRESHOLD 0x1
#define SB_ERROR_THRESHOLD_SHIFT 24
#define SB_DB_TRP_INTERRUPT_ENABLE 0x3
@@ -88,9 +58,6 @@ enum {
static const struct llcc_edac_reg_data edac_reg_data[] = {
[LLCC_DRAM_CE] = {
.name = "DRAM Single-bit",
- .synd_reg = DRP_ECC_SB_ERR_SYN0,
- .count_status_reg = DRP_ECC_ERROR_STATUS1,
- .ways_status_reg = DRP_ECC_ERROR_STATUS0,
.reg_cnt = DRP_SYN_REG_CNT,
.count_mask = ECC_SB_ERR_COUNT_MASK,
.ways_mask = ECC_SB_ERR_WAYS_MASK,
@@ -98,9 +65,6 @@ static const struct llcc_edac_reg_data edac_reg_data[] = {
},
[LLCC_DRAM_UE] = {
.name = "DRAM Double-bit",
- .synd_reg = DRP_ECC_DB_ERR_SYN0,
- .count_status_reg = DRP_ECC_ERROR_STATUS1,
- .ways_status_reg = DRP_ECC_ERROR_STATUS0,
.reg_cnt = DRP_SYN_REG_CNT,
.count_mask = ECC_DB_ERR_COUNT_MASK,
.ways_mask = ECC_DB_ERR_WAYS_MASK,
@@ -108,9 +72,6 @@ static const struct llcc_edac_reg_data edac_reg_data[] = {
},
[LLCC_TRAM_CE] = {
.name = "TRAM Single-bit",
- .synd_reg = TRP_ECC_SB_ERR_SYN0,
- .count_status_reg = TRP_ECC_ERROR_STATUS1,
- .ways_status_reg = TRP_ECC_ERROR_STATUS0,
.reg_cnt = TRP_SYN_REG_CNT,
.count_mask = ECC_SB_ERR_COUNT_MASK,
.ways_mask = ECC_SB_ERR_WAYS_MASK,
@@ -118,9 +79,6 @@ static const struct llcc_edac_reg_data edac_reg_data[] = {
},
[LLCC_TRAM_UE] = {
.name = "TRAM Double-bit",
- .synd_reg = TRP_ECC_DB_ERR_SYN0,
- .count_status_reg = TRP_ECC_ERROR_STATUS1,
- .ways_status_reg = TRP_ECC_ERROR_STATUS0,
.reg_cnt = TRP_SYN_REG_CNT,
.count_mask = ECC_DB_ERR_COUNT_MASK,
.ways_mask = ECC_DB_ERR_WAYS_MASK,
@@ -128,7 +86,7 @@ static const struct llcc_edac_reg_data edac_reg_data[] = {
},
};
-static int qcom_llcc_core_setup(struct regmap *llcc_bcast_regmap)
+static int qcom_llcc_core_setup(struct llcc_drv_data *drv, struct regmap *llcc_bcast_regmap)
{
u32 sb_err_threshold;
int ret;
@@ -137,31 +95,31 @@ static int qcom_llcc_core_setup(struct regmap *llcc_bcast_regmap)
* Configure interrupt enable registers such that Tag, Data RAM related
* interrupts are propagated to interrupt controller for servicing
*/
- ret = regmap_update_bits(llcc_bcast_regmap, CMN_INTERRUPT_2_ENABLE,
+ ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->cmn_interrupt_2_enable,
TRP0_INTERRUPT_ENABLE,
TRP0_INTERRUPT_ENABLE);
if (ret)
return ret;
- ret = regmap_update_bits(llcc_bcast_regmap, TRP_INTERRUPT_0_ENABLE,
+ ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->trp_interrupt_0_enable,
SB_DB_TRP_INTERRUPT_ENABLE,
SB_DB_TRP_INTERRUPT_ENABLE);
if (ret)
return ret;
sb_err_threshold = (SB_ERROR_THRESHOLD << SB_ERROR_THRESHOLD_SHIFT);
- ret = regmap_write(llcc_bcast_regmap, DRP_ECC_ERROR_CFG,
+ ret = regmap_write(llcc_bcast_regmap, drv->edac_reg_offset->drp_ecc_error_cfg,
sb_err_threshold);
if (ret)
return ret;
- ret = regmap_update_bits(llcc_bcast_regmap, CMN_INTERRUPT_2_ENABLE,
+ ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->cmn_interrupt_2_enable,
DRP0_INTERRUPT_ENABLE,
DRP0_INTERRUPT_ENABLE);
if (ret)
return ret;
- ret = regmap_write(llcc_bcast_regmap, DRP_INTERRUPT_ENABLE,
+ ret = regmap_write(llcc_bcast_regmap, drv->edac_reg_offset->drp_interrupt_enable,
SB_DB_DRP_INTERRUPT_ENABLE);
return ret;
}
@@ -170,29 +128,33 @@ static int qcom_llcc_core_setup(struct regmap *llcc_bcast_regmap)
static int
qcom_llcc_clear_error_status(int err_type, struct llcc_drv_data *drv)
{
- int ret = 0;
+ int ret;
switch (err_type) {
case LLCC_DRAM_CE:
case LLCC_DRAM_UE:
- ret = regmap_write(drv->bcast_regmap, DRP_INTERRUPT_CLEAR,
+ ret = regmap_write(drv->bcast_regmap,
+ drv->edac_reg_offset->drp_interrupt_clear,
DRP_TRP_INT_CLEAR);
if (ret)
return ret;
- ret = regmap_write(drv->bcast_regmap, DRP_ECC_ERROR_CNTR_CLEAR,
+ ret = regmap_write(drv->bcast_regmap,
+ drv->edac_reg_offset->drp_ecc_error_cntr_clear,
DRP_TRP_CNT_CLEAR);
if (ret)
return ret;
break;
case LLCC_TRAM_CE:
case LLCC_TRAM_UE:
- ret = regmap_write(drv->bcast_regmap, TRP_INTERRUPT_0_CLEAR,
+ ret = regmap_write(drv->bcast_regmap,
+ drv->edac_reg_offset->trp_interrupt_0_clear,
DRP_TRP_INT_CLEAR);
if (ret)
return ret;
- ret = regmap_write(drv->bcast_regmap, TRP_ECC_ERROR_CNTR_CLEAR,
+ ret = regmap_write(drv->bcast_regmap,
+ drv->edac_reg_offset->trp_ecc_error_cntr_clear,
DRP_TRP_CNT_CLEAR);
if (ret)
return ret;
@@ -205,16 +167,54 @@ qcom_llcc_clear_error_status(int err_type, struct llcc_drv_data *drv)
return ret;
}
+struct qcom_llcc_syn_regs {
+ u32 synd_reg;
+ u32 count_status_reg;
+ u32 ways_status_reg;
+};
+
+static void get_reg_offsets(struct llcc_drv_data *drv, int err_type,
+ struct qcom_llcc_syn_regs *syn_regs)
+{
+ const struct llcc_edac_reg_offset *edac_reg_offset = drv->edac_reg_offset;
+
+ switch (err_type) {
+ case LLCC_DRAM_CE:
+ syn_regs->synd_reg = edac_reg_offset->drp_ecc_sb_err_syn0;
+ syn_regs->count_status_reg = edac_reg_offset->drp_ecc_error_status1;
+ syn_regs->ways_status_reg = edac_reg_offset->drp_ecc_error_status0;
+ break;
+ case LLCC_DRAM_UE:
+ syn_regs->synd_reg = edac_reg_offset->drp_ecc_db_err_syn0;
+ syn_regs->count_status_reg = edac_reg_offset->drp_ecc_error_status1;
+ syn_regs->ways_status_reg = edac_reg_offset->drp_ecc_error_status0;
+ break;
+ case LLCC_TRAM_CE:
+ syn_regs->synd_reg = edac_reg_offset->trp_ecc_sb_err_syn0;
+ syn_regs->count_status_reg = edac_reg_offset->trp_ecc_error_status1;
+ syn_regs->ways_status_reg = edac_reg_offset->trp_ecc_error_status0;
+ break;
+ case LLCC_TRAM_UE:
+ syn_regs->synd_reg = edac_reg_offset->trp_ecc_db_err_syn0;
+ syn_regs->count_status_reg = edac_reg_offset->trp_ecc_error_status1;
+ syn_regs->ways_status_reg = edac_reg_offset->trp_ecc_error_status0;
+ break;
+ }
+}
+
/* Dump syndrome register data for Tag RAM, Data RAM bit errors */
static int
dump_syn_reg_values(struct llcc_drv_data *drv, u32 bank, int err_type)
{
struct llcc_edac_reg_data reg_data = edac_reg_data[err_type];
+ struct qcom_llcc_syn_regs regs = { };
int err_cnt, err_ways, ret, i;
u32 synd_reg, synd_val;
+ get_reg_offsets(drv, err_type, &regs);
+
for (i = 0; i < reg_data.reg_cnt; i++) {
- synd_reg = reg_data.synd_reg + (i * 4);
+ synd_reg = regs.synd_reg + (i * 4);
ret = regmap_read(drv->regmaps[bank], synd_reg,
&synd_val);
if (ret)
@@ -224,7 +224,7 @@ dump_syn_reg_values(struct llcc_drv_data *drv, u32 bank, int err_type)
reg_data.name, i, synd_val);
}
- ret = regmap_read(drv->regmaps[bank], reg_data.count_status_reg,
+ ret = regmap_read(drv->regmaps[bank], regs.count_status_reg,
&err_cnt);
if (ret)
goto clear;
@@ -234,7 +234,7 @@ dump_syn_reg_values(struct llcc_drv_data *drv, u32 bank, int err_type)
edac_printk(KERN_CRIT, EDAC_LLCC, "%s: Error count: 0x%4x\n",
reg_data.name, err_cnt);
- ret = regmap_read(drv->regmaps[bank], reg_data.ways_status_reg,
+ ret = regmap_read(drv->regmaps[bank], regs.ways_status_reg,
&err_ways);
if (ret)
goto clear;
@@ -295,7 +295,7 @@ static irqreturn_t llcc_ecc_irq_handler(int irq, void *edev_ctl)
/* Iterate over the banks and look for Tag RAM or Data RAM errors */
for (i = 0; i < drv->num_banks; i++) {
- ret = regmap_read(drv->regmaps[i], DRP_INTERRUPT_STATUS,
+ ret = regmap_read(drv->regmaps[i], drv->edac_reg_offset->drp_interrupt_status,
&drp_error);
if (!ret && (drp_error & SB_ECC_ERROR)) {
@@ -310,7 +310,7 @@ static irqreturn_t llcc_ecc_irq_handler(int irq, void *edev_ctl)
if (!ret)
irq_rc = IRQ_HANDLED;
- ret = regmap_read(drv->regmaps[i], TRP_INTERRUPT_0_STATUS,
+ ret = regmap_read(drv->regmaps[i], drv->edac_reg_offset->trp_interrupt_0_status,
&trp_error);
if (!ret && (trp_error & SB_ECC_ERROR)) {
@@ -342,7 +342,7 @@ static int qcom_llcc_edac_probe(struct platform_device *pdev)
int ecc_irq;
int rc;
- rc = qcom_llcc_core_setup(llcc_driv_data->bcast_regmap);
+ rc = qcom_llcc_core_setup(llcc_driv_data, llcc_driv_data->bcast_regmap);
if (rc)
return rc;
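
The rework replaces fixed register #defines with offsets looked up in a per-SoC llcc_edac_reg_offset table handed over by the LLCC driver, so one EDAC module can serve SoCs whose LLCC register maps differ. A reduced sketch of the pattern; the struct below is a stand-in, and the two offsets reuse the values from the #defines deleted above:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the per-SoC llcc_edac_reg_offset table. */
struct edac_reg_offset {
	uint32_t drp_interrupt_status;
	uint32_t trp_interrupt_0_status;
};

/* Values taken from the fixed #defines this patch deletes. */
static const struct edac_reg_offset legacy_offsets = {
	.drp_interrupt_status   = 0x00041000,
	.trp_interrupt_0_status = 0x00020480,
};

struct llcc_drv {
	const struct edac_reg_offset *edac_reg_offset;
};

/* Fake regmap read: just echoes the offset so the flow is visible. */
static uint32_t regmap_read_sim(uint32_t off)
{
	return off;
}

int main(void)
{
	struct llcc_drv drv = { .edac_reg_offset = &legacy_offsets };

	/* Handlers index through drv.edac_reg_offset instead of macros. */
	printf("drp status read at 0x%x\n",
	       regmap_read_sim(drv.edac_reg_offset->drp_interrupt_status));
	return 0;
}
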
diff --git a/drivers/edac/thunderx_edac.c b/drivers/edac/thunderx_edac.c
index 0bcd9f02c84a..b9c5772da959 100644
--- a/drivers/edac/thunderx_edac.c
+++ b/drivers/edac/thunderx_edac.c
@@ -481,7 +481,7 @@ static int thunderx_create_debugfs_nodes(struct dentry *parent,
ent = edac_debugfs_create_file(attrs[i]->name, attrs[i]->mode,
parent, data, &attrs[i]->fops);
- if (!ent)
+ if (IS_ERR(ent))
break;
}
diff --git a/drivers/firmware/arm_ffa/bus.c b/drivers/firmware/arm_ffa/bus.c
index f29d77ecf72d..2b8bfcd010f5 100644
--- a/drivers/firmware/arm_ffa/bus.c
+++ b/drivers/firmware/arm_ffa/bus.c
@@ -15,6 +15,8 @@
#include "common.h"
+static DEFINE_IDA(ffa_bus_id);
+
static int ffa_device_match(struct device *dev, struct device_driver *drv)
{
const struct ffa_device_id *id_table;
@@ -53,7 +55,8 @@ static void ffa_device_remove(struct device *dev)
{
struct ffa_driver *ffa_drv = to_ffa_driver(dev->driver);
- ffa_drv->remove(to_ffa_dev(dev));
+ if (ffa_drv->remove)
+ ffa_drv->remove(to_ffa_dev(dev));
}
static int ffa_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
@@ -130,6 +133,7 @@ static void ffa_release_device(struct device *dev)
{
struct ffa_device *ffa_dev = to_ffa_dev(dev);
+ ida_free(&ffa_bus_id, ffa_dev->id);
kfree(ffa_dev);
}
@@ -170,18 +174,24 @@ bool ffa_device_is_valid(struct ffa_device *ffa_dev)
struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id,
const struct ffa_ops *ops)
{
- int ret;
+ int id, ret;
struct device *dev;
struct ffa_device *ffa_dev;
+ id = ida_alloc_min(&ffa_bus_id, 1, GFP_KERNEL);
+ if (id < 0)
+ return NULL;
+
ffa_dev = kzalloc(sizeof(*ffa_dev), GFP_KERNEL);
- if (!ffa_dev)
+ if (!ffa_dev) {
+ ida_free(&ffa_bus_id, id);
return NULL;
+ }
dev = &ffa_dev->dev;
dev->bus = &ffa_bus_type;
dev->release = ffa_release_device;
- dev_set_name(&ffa_dev->dev, "arm-ffa-%04x", vm_id);
+ dev_set_name(&ffa_dev->dev, "arm-ffa-%d", id);
ffa_dev->vm_id = vm_id;
ffa_dev->ops = ops;
@@ -217,4 +227,5 @@ void arm_ffa_bus_exit(void)
{
ffa_devices_unregister();
bus_unregister(&ffa_bus_type);
+ ida_destroy(&ffa_bus_id);
}
diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
index fa85c64d3ded..2109cd178ff7 100644
--- a/drivers/firmware/arm_ffa/driver.c
+++ b/drivers/firmware/arm_ffa/driver.c
@@ -193,7 +193,8 @@ __ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
int idx, count, flags = 0, sz, buf_sz;
ffa_value_t partition_info;
- if (!buffer || !num_partitions) /* Just get the count for now */
+ if (drv_info->version > FFA_VERSION_1_0 &&
+ (!buffer || !num_partitions)) /* Just get the count for now */
flags = PARTITION_INFO_GET_RETURN_COUNT_ONLY;
mutex_lock(&drv_info->rx_lock);
@@ -420,12 +421,18 @@ ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
ep_mem_access->receiver = args->attrs[idx].receiver;
ep_mem_access->attrs = args->attrs[idx].attrs;
ep_mem_access->composite_off = COMPOSITE_OFFSET(args->nattrs);
+ ep_mem_access->flag = 0;
+ ep_mem_access->reserved = 0;
}
+ mem_region->handle = 0;
+ mem_region->reserved_0 = 0;
+ mem_region->reserved_1 = 0;
mem_region->ep_count = args->nattrs;
composite = buffer + COMPOSITE_OFFSET(args->nattrs);
composite->total_pg_cnt = ffa_get_num_pages_sg(args->sg);
composite->addr_range_cnt = num_entries;
+ composite->reserved = 0;
length = COMPOSITE_CONSTITUENTS_OFFSET(args->nattrs, num_entries);
frag_len = COMPOSITE_CONSTITUENTS_OFFSET(args->nattrs, 0);
@@ -460,6 +467,7 @@ ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
constituents->address = sg_phys(args->sg);
constituents->pg_cnt = args->sg->length / FFA_PAGE_SIZE;
+ constituents->reserved = 0;
constituents++;
frag_len += sizeof(struct ffa_mem_region_addr_range);
} while ((args->sg = sg_next(args->sg)));
diff --git a/drivers/firmware/arm_scmi/raw_mode.c b/drivers/firmware/arm_scmi/raw_mode.c
index d40df099fd51..6971dcf72fb9 100644
--- a/drivers/firmware/arm_scmi/raw_mode.c
+++ b/drivers/firmware/arm_scmi/raw_mode.c
@@ -1066,7 +1066,7 @@ static int scmi_xfer_raw_worker_init(struct scmi_raw_mode_info *raw)
raw->wait_wq = alloc_workqueue("scmi-raw-wait-wq-%d",
WQ_UNBOUND | WQ_FREEZABLE |
- WQ_HIGHPRI, WQ_SYSFS, raw->id);
+ WQ_HIGHPRI | WQ_SYSFS, 0, raw->id);
if (!raw->wait_wq)
return -ENOMEM;
diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c
index e4ccfb6a8fa5..ec056f6f40ce 100644
--- a/drivers/firmware/cirrus/cs_dsp.c
+++ b/drivers/firmware/cirrus/cs_dsp.c
@@ -2124,6 +2124,7 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
file, blocks, le32_to_cpu(blk->len),
type, le32_to_cpu(blk->id));
+ region_name = cs_dsp_mem_region_name(type);
mem = cs_dsp_find_region(dsp, type);
if (!mem) {
cs_dsp_err(dsp, "No base for region %x\n", type);
@@ -2147,8 +2148,8 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
reg = dsp->ops->region_to_reg(mem, reg);
reg += offset;
} else {
- cs_dsp_err(dsp, "No %x for algorithm %x\n",
- type, le32_to_cpu(blk->id));
+ cs_dsp_err(dsp, "No %s for algorithm %x\n",
+ region_name, le32_to_cpu(blk->id));
}
break;
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 043ca31c114e..231f1c70d1db 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -269,6 +269,20 @@ config EFI_COCO_SECRET
virt/coco/efi_secret module to access the secrets, which in turn
allows userspace programs to access the injected secrets.
+config UNACCEPTED_MEMORY
+ bool
+ depends on EFI_STUB
+ help
+ Some Virtual Machine platforms, such as Intel TDX, require
+ some memory to be "accepted" by the guest before it can be used.
+ This mechanism helps prevent malicious hosts from making changes
+ to guest memory.
+
+	  The UEFI specification v2.9 introduced the EFI_UNACCEPTED_MEMORY
+	  memory type.
+
+ This option adds support for unaccepted memory and makes such memory
+ usable by the kernel.
+
config EFI_EMBEDDED_FIRMWARE
bool
select CRYPTO_LIB_SHA256
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
index b51f2a4c821e..e489fefd23da 100644
--- a/drivers/firmware/efi/Makefile
+++ b/drivers/firmware/efi/Makefile
@@ -41,3 +41,4 @@ obj-$(CONFIG_EFI_CAPSULE_LOADER) += capsule-loader.o
obj-$(CONFIG_EFI_EARLYCON) += earlycon.o
obj-$(CONFIG_UEFI_CPER_ARM) += cper-arm.o
obj-$(CONFIG_UEFI_CPER_X86) += cper-x86.o
+obj-$(CONFIG_UNACCEPTED_MEMORY) += unaccepted_memory.o
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index abeff7dc0b58..3a6ee7bb06f1 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -50,6 +50,9 @@ struct efi __read_mostly efi = {
#ifdef CONFIG_EFI_COCO_SECRET
.coco_secret = EFI_INVALID_TABLE_ADDR,
#endif
+#ifdef CONFIG_UNACCEPTED_MEMORY
+ .unaccepted = EFI_INVALID_TABLE_ADDR,
+#endif
};
EXPORT_SYMBOL(efi);
@@ -361,24 +364,6 @@ static void __init efi_debugfs_init(void)
static inline void efi_debugfs_init(void) {}
#endif
-static void refresh_nv_rng_seed(struct work_struct *work)
-{
- u8 seed[EFI_RANDOM_SEED_SIZE];
-
- get_random_bytes(seed, sizeof(seed));
- efi.set_variable(L"RandomSeed", &LINUX_EFI_RANDOM_SEED_TABLE_GUID,
- EFI_VARIABLE_NON_VOLATILE | EFI_VARIABLE_BOOTSERVICE_ACCESS |
- EFI_VARIABLE_RUNTIME_ACCESS, sizeof(seed), seed);
- memzero_explicit(seed, sizeof(seed));
-}
-static int refresh_nv_rng_seed_notification(struct notifier_block *nb, unsigned long action, void *data)
-{
- static DECLARE_WORK(work, refresh_nv_rng_seed);
- schedule_work(&work);
- return NOTIFY_DONE;
-}
-static struct notifier_block refresh_nv_rng_seed_nb = { .notifier_call = refresh_nv_rng_seed_notification };
-
/*
* We register the efi subsystem with the firmware subsystem and the
* efivars subsystem with the efi subsystem, if the system was booted with
@@ -451,9 +436,6 @@ static int __init efisubsys_init(void)
platform_device_register_simple("efi_secret", 0, NULL, 0);
#endif
- if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE))
- execute_with_initialized_rng(&refresh_nv_rng_seed_nb);
-
return 0;
err_remove_group:
@@ -605,6 +587,9 @@ static const efi_config_table_type_t common_tables[] __initconst = {
#ifdef CONFIG_EFI_COCO_SECRET
{LINUX_EFI_COCO_SECRET_AREA_GUID, &efi.coco_secret, "CocoSecret" },
#endif
+#ifdef CONFIG_UNACCEPTED_MEMORY
+ {LINUX_EFI_UNACCEPTED_MEM_TABLE_GUID, &efi.unaccepted, "Unaccepted" },
+#endif
#ifdef CONFIG_EFI_GENERIC_STUB
{LINUX_EFI_SCREEN_INFO_TABLE_GUID, &screen_info_table },
#endif
@@ -759,6 +744,25 @@ int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
}
}
+ if (IS_ENABLED(CONFIG_UNACCEPTED_MEMORY) &&
+ efi.unaccepted != EFI_INVALID_TABLE_ADDR) {
+ struct efi_unaccepted_memory *unaccepted;
+
+ unaccepted = early_memremap(efi.unaccepted, sizeof(*unaccepted));
+ if (unaccepted) {
+ unsigned long size;
+
+ if (unaccepted->version == 1) {
+ size = sizeof(*unaccepted) + unaccepted->size;
+ memblock_reserve(efi.unaccepted, size);
+ } else {
+ efi.unaccepted = EFI_INVALID_TABLE_ADDR;
+ }
+
+ early_memunmap(unaccepted, sizeof(*unaccepted));
+ }
+ }
+
return 0;
}
@@ -843,6 +847,7 @@ static __initdata char memory_type_name[][13] = {
"MMIO Port",
"PAL Code",
"Persistent",
+ "Unaccepted",
};
char * __init efi_md_typeattr_format(char *buf, size_t size,
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index 3abb2b357482..16d64a34d1e1 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -96,6 +96,8 @@ CFLAGS_arm32-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
zboot-obj-$(CONFIG_RISCV) := lib-clz_ctz.o lib-ashldi3.o
lib-$(CONFIG_EFI_ZBOOT) += zboot.o $(zboot-obj-y)
+lib-$(CONFIG_UNACCEPTED_MEMORY) += unaccepted_memory.o bitmap.o find.o
+
extra-y := $(lib-y)
lib-y := $(patsubst %.o,%.stub.o,$(lib-y))
diff --git a/drivers/firmware/efi/libstub/Makefile.zboot b/drivers/firmware/efi/libstub/Makefile.zboot
index 89ef820f3b34..2c489627a807 100644
--- a/drivers/firmware/efi/libstub/Makefile.zboot
+++ b/drivers/firmware/efi/libstub/Makefile.zboot
@@ -32,7 +32,8 @@ zboot-size-len-$(CONFIG_KERNEL_GZIP) := 0
$(obj)/vmlinuz: $(obj)/vmlinux.bin FORCE
$(call if_changed,$(zboot-method-y))
-OBJCOPYFLAGS_vmlinuz.o := -I binary -O $(EFI_ZBOOT_BFD_TARGET) $(EFI_ZBOOT_OBJCOPY_FLAGS) \
+# avoid eager evaluation to prevent references to non-existent build artifacts
+OBJCOPYFLAGS_vmlinuz.o = -I binary -O $(EFI_ZBOOT_BFD_TARGET) $(EFI_ZBOOT_OBJCOPY_FLAGS) \
--rename-section .data=.gzdata,load,alloc,readonly,contents
$(obj)/vmlinuz.o: $(obj)/vmlinuz FORCE
$(call if_changed,objcopy)
diff --git a/drivers/firmware/efi/libstub/bitmap.c b/drivers/firmware/efi/libstub/bitmap.c
new file mode 100644
index 000000000000..5c9bba0d549b
--- /dev/null
+++ b/drivers/firmware/efi/libstub/bitmap.c
@@ -0,0 +1,41 @@
+#include <linux/bitmap.h>
+
+void __bitmap_set(unsigned long *map, unsigned int start, int len)
+{
+ unsigned long *p = map + BIT_WORD(start);
+ const unsigned int size = start + len;
+ int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
+ unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);
+
+ while (len - bits_to_set >= 0) {
+ *p |= mask_to_set;
+ len -= bits_to_set;
+ bits_to_set = BITS_PER_LONG;
+ mask_to_set = ~0UL;
+ p++;
+ }
+ if (len) {
+ mask_to_set &= BITMAP_LAST_WORD_MASK(size);
+ *p |= mask_to_set;
+ }
+}
+
+void __bitmap_clear(unsigned long *map, unsigned int start, int len)
+{
+ unsigned long *p = map + BIT_WORD(start);
+ const unsigned int size = start + len;
+ int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
+ unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
+
+ while (len - bits_to_clear >= 0) {
+ *p &= ~mask_to_clear;
+ len -= bits_to_clear;
+ bits_to_clear = BITS_PER_LONG;
+ mask_to_clear = ~0UL;
+ p++;
+ }
+ if (len) {
+ mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
+ *p &= ~mask_to_clear;
+ }
+}
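+
+/*
+ * Illustrative semantics (a sketch, not part of the interface): starting
+ * from an all-zero single-word map, __bitmap_set(map, 3, 10) sets bits
+ * 3..12 inclusive (map[0] == 0x1FF8), and a subsequent
+ * __bitmap_clear(map, 3, 10) restores the map to all zeroes.
+ */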
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index 67d5a20802e0..6aa38a1bf126 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -1133,4 +1133,13 @@ const u8 *__efi_get_smbios_string(const struct efi_smbios_record *record,
void efi_remap_image(unsigned long image_base, unsigned alloc_size,
unsigned long code_size);
+asmlinkage efi_status_t __efiapi
+efi_zboot_entry(efi_handle_t handle, efi_system_table_t *systab);
+
+efi_status_t allocate_unaccepted_bitmap(__u32 nr_desc,
+ struct efi_boot_memmap *map);
+void process_unaccepted_memory(u64 start, u64 end);
+void accept_memory(phys_addr_t start, phys_addr_t end);
+void arch_accept_memory(phys_addr_t start, phys_addr_t end);
+
#endif
diff --git a/drivers/firmware/efi/libstub/find.c b/drivers/firmware/efi/libstub/find.c
new file mode 100644
index 000000000000..4e7740d28987
--- /dev/null
+++ b/drivers/firmware/efi/libstub/find.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/bitmap.h>
+#include <linux/math.h>
+#include <linux/minmax.h>
+
+/*
+ * Common helper for find_next_bit() function family
+ * @FETCH: The expression that fetches and pre-processes each word of bitmap(s)
+ * @MUNGE: The expression that post-processes a word containing found bit (may be empty)
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ */
+#define FIND_NEXT_BIT(FETCH, MUNGE, size, start) \
+({ \
+ unsigned long mask, idx, tmp, sz = (size), __start = (start); \
+ \
+ if (unlikely(__start >= sz)) \
+ goto out; \
+ \
+ mask = MUNGE(BITMAP_FIRST_WORD_MASK(__start)); \
+ idx = __start / BITS_PER_LONG; \
+ \
+ for (tmp = (FETCH) & mask; !tmp; tmp = (FETCH)) { \
+ if ((idx + 1) * BITS_PER_LONG >= sz) \
+ goto out; \
+ idx++; \
+ } \
+ \
+ sz = min(idx * BITS_PER_LONG + __ffs(MUNGE(tmp)), sz); \
+out: \
+ sz; \
+})
+
+unsigned long _find_next_bit(const unsigned long *addr, unsigned long nbits, unsigned long start)
+{
+ return FIND_NEXT_BIT(addr[idx], /* nop */, nbits, start);
+}
+
+unsigned long _find_next_zero_bit(const unsigned long *addr, unsigned long nbits,
+ unsigned long start)
+{
+ return FIND_NEXT_BIT(~addr[idx], /* nop */, nbits, start);
+}
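+
+/*
+ * Example (illustrative only): for a single-word bitmap with value 0b100,
+ * _find_next_bit(&map, BITS_PER_LONG, 0) returns 2, while a search past
+ * the last set bit returns the full size, e.g.
+ * _find_next_bit(&map, BITS_PER_LONG, 3) == BITS_PER_LONG.
+ */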
diff --git a/drivers/firmware/efi/libstub/unaccepted_memory.c b/drivers/firmware/efi/libstub/unaccepted_memory.c
new file mode 100644
index 000000000000..ca61f4733ea5
--- /dev/null
+++ b/drivers/firmware/efi/libstub/unaccepted_memory.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/efi.h>
+#include <asm/efi.h>
+#include "efistub.h"
+
+struct efi_unaccepted_memory *unaccepted_table;
+
+efi_status_t allocate_unaccepted_bitmap(__u32 nr_desc,
+ struct efi_boot_memmap *map)
+{
+ efi_guid_t unaccepted_table_guid = LINUX_EFI_UNACCEPTED_MEM_TABLE_GUID;
+ u64 unaccepted_start = ULLONG_MAX, unaccepted_end = 0, bitmap_size;
+ efi_status_t status;
+ int i;
+
+ /* Check if the table is already installed */
+ unaccepted_table = get_efi_config_table(unaccepted_table_guid);
+ if (unaccepted_table) {
+ if (unaccepted_table->version != 1) {
+ efi_err("Unknown version of unaccepted memory table\n");
+ return EFI_UNSUPPORTED;
+ }
+ return EFI_SUCCESS;
+ }
+
+ /* Check if there's any unaccepted memory and find the max address */
+ for (i = 0; i < nr_desc; i++) {
+ efi_memory_desc_t *d;
+ unsigned long m = (unsigned long)map->map;
+
+ d = efi_early_memdesc_ptr(m, map->desc_size, i);
+ if (d->type != EFI_UNACCEPTED_MEMORY)
+ continue;
+
+ unaccepted_start = min(unaccepted_start, d->phys_addr);
+ unaccepted_end = max(unaccepted_end,
+ d->phys_addr + d->num_pages * PAGE_SIZE);
+ }
+
+ if (unaccepted_start == ULLONG_MAX)
+ return EFI_SUCCESS;
+
+ unaccepted_start = round_down(unaccepted_start,
+ EFI_UNACCEPTED_UNIT_SIZE);
+ unaccepted_end = round_up(unaccepted_end, EFI_UNACCEPTED_UNIT_SIZE);
+
+ /*
+ * If unaccepted memory is present, allocate a bitmap to track what
+ * memory has to be accepted before access.
+ *
+ * One bit in the bitmap represents 2MiB in the address space:
+ * A 4k bitmap can track 64GiB of physical address space.
+ *
+	 * In the worst-case scenario -- a huge hole in the middle of the
+	 * address space -- it needs 256MiB to handle 4PiB of the address
+ * space.
+ *
+ * The bitmap will be populated in setup_e820() according to the memory
+ * map after efi_exit_boot_services().
+ */
+ bitmap_size = DIV_ROUND_UP(unaccepted_end - unaccepted_start,
+ EFI_UNACCEPTED_UNIT_SIZE * BITS_PER_BYTE);
+
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA,
+ sizeof(*unaccepted_table) + bitmap_size,
+ (void **)&unaccepted_table);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to allocate unaccepted memory config table\n");
+ return status;
+ }
+
+ unaccepted_table->version = 1;
+ unaccepted_table->unit_size = EFI_UNACCEPTED_UNIT_SIZE;
+ unaccepted_table->phys_base = unaccepted_start;
+ unaccepted_table->size = bitmap_size;
+ memset(unaccepted_table->bitmap, 0, bitmap_size);
+
+ status = efi_bs_call(install_configuration_table,
+ &unaccepted_table_guid, unaccepted_table);
+ if (status != EFI_SUCCESS) {
+ efi_bs_call(free_pool, unaccepted_table);
+ efi_err("Failed to install unaccepted memory config table!\n");
+ }
+
+ return status;
+}
+
+/*
+ * The accepted memory bitmap only works at unit_size granularity. Take
+ * unaligned start/end addresses and either:
+ *  1. Accept the memory immediately and in its entirety, or
+ *  2. Accept the unaligned parts and mark *some* aligned part unaccepted.
+ *
+ * The function will never reach bitmap_set() with zero bits to set.
+ */
+void process_unaccepted_memory(u64 start, u64 end)
+{
+ u64 unit_size = unaccepted_table->unit_size;
+ u64 unit_mask = unaccepted_table->unit_size - 1;
+ u64 bitmap_size = unaccepted_table->size;
+
+ /*
+ * Ensure that at least one bit will be set in the bitmap by
+ * immediately accepting all regions under 2*unit_size. This is
+ * imprecise and may immediately accept some areas that could
+	 * have been represented in the bitmap, but it results in simpler
+	 * code below.
+ *
+ * Consider case like this (assuming unit_size == 2MB):
+ *
+ * | 4k | 2044k | 2048k |
+ * ^ 0x0 ^ 2MB ^ 4MB
+ *
+	 * Only the first 4k has been accepted. The 0MB->2MB region cannot be
+	 * represented in the bitmap. The 2MB->4MB region can be represented in
+	 * the bitmap. But the 0MB->4MB region is <2*unit_size and will be
+ * immediately accepted in its entirety.
+ */
+ if (end - start < 2 * unit_size) {
+ arch_accept_memory(start, end);
+ return;
+ }
+
+ /*
+ * No matter how the start and end are aligned, at least one unaccepted
+ * unit_size area will remain to be marked in the bitmap.
+ */
+
+ /* Immediately accept a <unit_size piece at the start: */
+ if (start & unit_mask) {
+ arch_accept_memory(start, round_up(start, unit_size));
+ start = round_up(start, unit_size);
+ }
+
+ /* Immediately accept a <unit_size piece at the end: */
+ if (end & unit_mask) {
+ arch_accept_memory(round_down(end, unit_size), end);
+ end = round_down(end, unit_size);
+ }
+
+ /*
+	 * Accept the part of the range that is before phys_base and cannot be
+	 * recorded in the bitmap.
+ */
+ if (start < unaccepted_table->phys_base) {
+ arch_accept_memory(start,
+ min(unaccepted_table->phys_base, end));
+ start = unaccepted_table->phys_base;
+ }
+
+ /* Nothing to record */
+ if (end < unaccepted_table->phys_base)
+ return;
+
+ /* Translate to offsets from the beginning of the bitmap */
+ start -= unaccepted_table->phys_base;
+ end -= unaccepted_table->phys_base;
+
+	/* Accept memory that doesn't fit into the bitmap */
+ if (end > bitmap_size * unit_size * BITS_PER_BYTE) {
+ unsigned long phys_start, phys_end;
+
+ phys_start = bitmap_size * unit_size * BITS_PER_BYTE +
+ unaccepted_table->phys_base;
+ phys_end = end + unaccepted_table->phys_base;
+
+ arch_accept_memory(phys_start, phys_end);
+ end = bitmap_size * unit_size * BITS_PER_BYTE;
+ }
+
+ /*
+ * 'start' and 'end' are now both unit_size-aligned.
+ * Record the range as being unaccepted:
+ */
+ bitmap_set(unaccepted_table->bitmap,
+ start / unit_size, (end - start) / unit_size);
+}
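+/*
+ * A worked example of the above (illustrative, assuming phys_base == 0 and
+ * unit_size == 2MB): process_unaccepted_memory(1MB, 9MB) accepts
+ * [1MB, 2MB) and [8MB, 9MB) immediately, then sets bitmap bits 1..3,
+ * marking the aligned middle [2MB, 8MB) as unaccepted.
+ */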
+
+void accept_memory(phys_addr_t start, phys_addr_t end)
+{
+ unsigned long range_start, range_end;
+ unsigned long bitmap_size;
+ u64 unit_size;
+
+ if (!unaccepted_table)
+ return;
+
+ unit_size = unaccepted_table->unit_size;
+
+ /*
+ * Only care for the part of the range that is represented
+ * in the bitmap.
+ */
+ if (start < unaccepted_table->phys_base)
+ start = unaccepted_table->phys_base;
+ if (end < unaccepted_table->phys_base)
+ return;
+
+ /* Translate to offsets from the beginning of the bitmap */
+ start -= unaccepted_table->phys_base;
+ end -= unaccepted_table->phys_base;
+
+ /* Make sure not to overrun the bitmap */
+ if (end > unaccepted_table->size * unit_size * BITS_PER_BYTE)
+ end = unaccepted_table->size * unit_size * BITS_PER_BYTE;
+
+ range_start = start / unit_size;
+ bitmap_size = DIV_ROUND_UP(end, unit_size);
+
+ for_each_set_bitrange_from(range_start, range_end,
+ unaccepted_table->bitmap, bitmap_size) {
+ unsigned long phys_start, phys_end;
+
+ phys_start = range_start * unit_size + unaccepted_table->phys_base;
+ phys_end = range_end * unit_size + unaccepted_table->phys_base;
+
+ arch_accept_memory(phys_start, phys_end);
+ bitmap_clear(unaccepted_table->bitmap,
+ range_start, range_end - range_start);
+ }
+}
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
index a0bfd31358ba..220be75a5cdc 100644
--- a/drivers/firmware/efi/libstub/x86-stub.c
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -26,6 +26,17 @@ const efi_dxe_services_table_t *efi_dxe_table;
u32 image_offset __section(".data");
static efi_loaded_image_t *image = NULL;
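+/*
+ * OVMF's SEV memory acceptance protocol. The second view presumably
+ * mirrors the first for EFI mixed mode, where a 64-bit kernel runs on
+ * 32-bit firmware and protocol function pointers are 32 bits wide.
+ */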
+typedef union sev_memory_acceptance_protocol sev_memory_acceptance_protocol_t;
+union sev_memory_acceptance_protocol {
+ struct {
+ efi_status_t (__efiapi * allow_unaccepted_memory)(
+ sev_memory_acceptance_protocol_t *);
+ };
+ struct {
+ u32 allow_unaccepted_memory;
+ } mixed_mode;
+};
+
static efi_status_t
preserve_pci_rom_image(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
{
@@ -310,6 +321,29 @@ setup_memory_protection(unsigned long image_base, unsigned long image_size)
#endif
}
+static void setup_unaccepted_memory(void)
+{
+ efi_guid_t mem_acceptance_proto = OVMF_SEV_MEMORY_ACCEPTANCE_PROTOCOL_GUID;
+ sev_memory_acceptance_protocol_t *proto;
+ efi_status_t status;
+
+ if (!IS_ENABLED(CONFIG_UNACCEPTED_MEMORY))
+ return;
+
+ /*
+	 * Enable unaccepted memory before calling exit boot services so that
+	 * UEFI does not accept all memory on EBS.
+ */
+ status = efi_bs_call(locate_protocol, &mem_acceptance_proto, NULL,
+ (void **)&proto);
+ if (status != EFI_SUCCESS)
+ return;
+
+ status = efi_call_proto(proto, allow_unaccepted_memory);
+ if (status != EFI_SUCCESS)
+ efi_err("Memory acceptance protocol failed\n");
+}
+
static const efi_char16_t apple[] = L"Apple";
static void setup_quirks(struct boot_params *boot_params,
@@ -613,6 +647,16 @@ setup_e820(struct boot_params *params, struct setup_data *e820ext, u32 e820ext_s
e820_type = E820_TYPE_PMEM;
break;
+ case EFI_UNACCEPTED_MEMORY:
+ if (!IS_ENABLED(CONFIG_UNACCEPTED_MEMORY)) {
+ efi_warn_once(
+"The system has unaccepted memory, but kernel does not support it\nConsider enabling CONFIG_UNACCEPTED_MEMORY\n");
+ continue;
+ }
+ e820_type = E820_TYPE_RAM;
+ process_unaccepted_memory(d->phys_addr,
+ d->phys_addr + PAGE_SIZE * d->num_pages);
+ break;
default:
continue;
}
@@ -681,28 +725,27 @@ static efi_status_t allocate_e820(struct boot_params *params,
struct setup_data **e820ext,
u32 *e820ext_size)
{
- unsigned long map_size, desc_size, map_key;
+ struct efi_boot_memmap *map;
efi_status_t status;
- __u32 nr_desc, desc_version;
-
- /* Only need the size of the mem map and size of each mem descriptor */
- map_size = 0;
- status = efi_bs_call(get_memory_map, &map_size, NULL, &map_key,
- &desc_size, &desc_version);
- if (status != EFI_BUFFER_TOO_SMALL)
- return (status != EFI_SUCCESS) ? status : EFI_UNSUPPORTED;
+ __u32 nr_desc;
- nr_desc = map_size / desc_size + EFI_MMAP_NR_SLACK_SLOTS;
+ status = efi_get_memory_map(&map, false);
+ if (status != EFI_SUCCESS)
+ return status;
- if (nr_desc > ARRAY_SIZE(params->e820_table)) {
- u32 nr_e820ext = nr_desc - ARRAY_SIZE(params->e820_table);
+ nr_desc = map->map_size / map->desc_size;
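+	/*
+	 * For example (illustrative numbers only): if the firmware reports
+	 * 150 descriptors and the in-params e820 table holds 128 entries,
+	 * the extended table must carry 150 - 128 entries plus the slack
+	 * slots, presumably reserved for memory map growth around
+	 * exit_boot_services().
+	 */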
+ if (nr_desc > ARRAY_SIZE(params->e820_table) - EFI_MMAP_NR_SLACK_SLOTS) {
+ u32 nr_e820ext = nr_desc - ARRAY_SIZE(params->e820_table) +
+ EFI_MMAP_NR_SLACK_SLOTS;
status = alloc_e820ext(nr_e820ext, e820ext, e820ext_size);
- if (status != EFI_SUCCESS)
- return status;
}
- return EFI_SUCCESS;
+ if (IS_ENABLED(CONFIG_UNACCEPTED_MEMORY) && status == EFI_SUCCESS)
+ status = allocate_unaccepted_bitmap(nr_desc, map);
+
+ efi_bs_call(free_pool, map);
+ return status;
}
struct exit_boot_struct {
@@ -899,6 +942,8 @@ asmlinkage unsigned long efi_main(efi_handle_t handle,
setup_quirks(boot_params, bzimage_addr, buffer_end - buffer_start);
+ setup_unaccepted_memory();
+
status = exit_boot(boot_params, handle);
if (status != EFI_SUCCESS) {
efi_err("exit_boot() failed!\n");
diff --git a/drivers/firmware/efi/unaccepted_memory.c b/drivers/firmware/efi/unaccepted_memory.c
new file mode 100644
index 000000000000..853f7dc3c21d
--- /dev/null
+++ b/drivers/firmware/efi/unaccepted_memory.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/efi.h>
+#include <linux/memblock.h>
+#include <linux/spinlock.h>
+#include <asm/unaccepted_memory.h>
+
+/* Protects unaccepted memory bitmap */
+static DEFINE_SPINLOCK(unaccepted_memory_lock);
+
+/*
+ * accept_memory() -- Consult bitmap and accept the memory if needed.
+ *
+ * Only memory that is explicitly marked as unaccepted in the bitmap requires
+ * an action. All the remaining memory is implicitly accepted and doesn't need
+ * acceptance.
+ *
+ * No need to accept:
+ * - anything if the system has no unaccepted table;
+ * - memory that is below phys_base;
+ *  - memory that is above the range addressable by the bitmap.
+ */
+void accept_memory(phys_addr_t start, phys_addr_t end)
+{
+ struct efi_unaccepted_memory *unaccepted;
+ unsigned long range_start, range_end;
+ unsigned long flags;
+ u64 unit_size;
+
+ unaccepted = efi_get_unaccepted_table();
+ if (!unaccepted)
+ return;
+
+ unit_size = unaccepted->unit_size;
+
+ /*
+ * Only care for the part of the range that is represented
+ * in the bitmap.
+ */
+ if (start < unaccepted->phys_base)
+ start = unaccepted->phys_base;
+ if (end < unaccepted->phys_base)
+ return;
+
+ /* Translate to offsets from the beginning of the bitmap */
+ start -= unaccepted->phys_base;
+ end -= unaccepted->phys_base;
+
+ /*
+ * load_unaligned_zeropad() can lead to unwanted loads across page
+ * boundaries. The unwanted loads are typically harmless. But, they
+ * might be made to totally unrelated or even unmapped memory.
+ * load_unaligned_zeropad() relies on exception fixup (#PF, #GP and now
+ * #VE) to recover from these unwanted loads.
+ *
+ * But, this approach does not work for unaccepted memory. For TDX, a
+ * load from unaccepted memory will not lead to a recoverable exception
+ * within the guest. The guest will exit to the VMM where the only
+ * recourse is to terminate the guest.
+ *
+ * There are two parts to fix this issue and comprehensively avoid
+ * access to unaccepted memory. Together these ensure that an extra
+ * "guard" page is accepted in addition to the memory that needs to be
+ * used:
+ *
+ * 1. Implicitly extend the range_contains_unaccepted_memory(start, end)
+ * checks up to end+unit_size if 'end' is aligned on a unit_size
+ * boundary.
+ *
+ * 2. Implicitly extend accept_memory(start, end) to end+unit_size if
+ * 'end' is aligned on a unit_size boundary. (immediately following
+ * this comment)
+ */
+ if (!(end % unit_size))
+ end += unit_size;
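+	/*
+	 * E.g. (illustrative, unit_size == 2MB, phys_base == 0):
+	 * accept_memory(0, 4MB) is widened here to cover [0, 6MB) so the
+	 * unit following the request serves as the accepted "guard" against
+	 * load_unaligned_zeropad().
+	 */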
+
+ /* Make sure not to overrun the bitmap */
+ if (end > unaccepted->size * unit_size * BITS_PER_BYTE)
+ end = unaccepted->size * unit_size * BITS_PER_BYTE;
+
+ range_start = start / unit_size;
+
+ spin_lock_irqsave(&unaccepted_memory_lock, flags);
+ for_each_set_bitrange_from(range_start, range_end, unaccepted->bitmap,
+ DIV_ROUND_UP(end, unit_size)) {
+ unsigned long phys_start, phys_end;
+ unsigned long len = range_end - range_start;
+
+ phys_start = range_start * unit_size + unaccepted->phys_base;
+ phys_end = range_end * unit_size + unaccepted->phys_base;
+
+ arch_accept_memory(phys_start, phys_end);
+ bitmap_clear(unaccepted->bitmap, range_start, len);
+ }
+ spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
+}
+
+bool range_contains_unaccepted_memory(phys_addr_t start, phys_addr_t end)
+{
+ struct efi_unaccepted_memory *unaccepted;
+ unsigned long flags;
+ bool ret = false;
+ u64 unit_size;
+
+ unaccepted = efi_get_unaccepted_table();
+ if (!unaccepted)
+ return false;
+
+ unit_size = unaccepted->unit_size;
+
+ /*
+ * Only care for the part of the range that is represented
+ * in the bitmap.
+ */
+ if (start < unaccepted->phys_base)
+ start = unaccepted->phys_base;
+ if (end < unaccepted->phys_base)
+ return false;
+
+ /* Translate to offsets from the beginning of the bitmap */
+ start -= unaccepted->phys_base;
+ end -= unaccepted->phys_base;
+
+ /*
+ * Also consider the unaccepted state of the *next* page. See fix #1 in
+ * the comment on load_unaligned_zeropad() in accept_memory().
+ */
+ if (!(end % unit_size))
+ end += unit_size;
+
+ /* Make sure not to overrun the bitmap */
+ if (end > unaccepted->size * unit_size * BITS_PER_BYTE)
+ end = unaccepted->size * unit_size * BITS_PER_BYTE;
+
+ spin_lock_irqsave(&unaccepted_memory_lock, flags);
+ while (start < end) {
+ if (test_bit(start / unit_size, unaccepted->bitmap)) {
+ ret = true;
+ break;
+ }
+
+ start += unit_size;
+ }
+ spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
+
+ return ret;
+}
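+
+/*
+ * Illustrative check (assuming phys_base == 0, unit_size == 2MB, and only
+ * bit 1 set, i.e. [2MB, 4MB) unaccepted): a query for [0, 2MB) is extended
+ * to [0, 4MB) by the guard logic above and therefore returns true, even
+ * though the queried range itself is fully accepted.
+ */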
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 5521f060d58e..f45c6a36551c 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -897,7 +897,7 @@ config GPIO_F7188X
help
This option enables support for GPIOs found on Fintek Super-I/O
chips F71869, F71869A, F71882FG, F71889F and F81866.
- As well as Nuvoton Super-I/O chip NCT6116D.
+	  It also supports the Nuvoton Super-I/O chip NCT6126D.
To compile this driver as a module, choose M here: the module will
be called f7188x-gpio.
diff --git a/drivers/gpio/gpio-f7188x.c b/drivers/gpio/gpio-f7188x.c
index 9effa7769bef..f54ca5a1775e 100644
--- a/drivers/gpio/gpio-f7188x.c
+++ b/drivers/gpio/gpio-f7188x.c
@@ -48,7 +48,7 @@
/*
* Nuvoton devices.
*/
-#define SIO_NCT6116D_ID 0xD283 /* NCT6116D chipset ID */
+#define SIO_NCT6126D_ID 0xD283 /* NCT6126D chipset ID */
#define SIO_LD_GPIO_NUVOTON 0x07 /* GPIO logical device */
@@ -62,7 +62,7 @@ enum chips {
f81866,
f81804,
f81865,
- nct6116d,
+ nct6126d,
};
static const char * const f7188x_names[] = {
@@ -74,7 +74,7 @@ static const char * const f7188x_names[] = {
"f81866",
"f81804",
"f81865",
- "nct6116d",
+ "nct6126d",
};
struct f7188x_sio {
@@ -187,8 +187,8 @@ static int f7188x_gpio_set_config(struct gpio_chip *chip, unsigned offset,
/* Output mode register (0:open drain 1:push-pull). */
#define f7188x_gpio_out_mode(base) ((base) + 3)
-#define f7188x_gpio_dir_invert(type) ((type) == nct6116d)
-#define f7188x_gpio_data_single(type) ((type) == nct6116d)
+#define f7188x_gpio_dir_invert(type) ((type) == nct6126d)
+#define f7188x_gpio_data_single(type) ((type) == nct6126d)
static struct f7188x_gpio_bank f71869_gpio_bank[] = {
F7188X_GPIO_BANK(0, 6, 0xF0, DRVNAME "-0"),
@@ -274,7 +274,7 @@ static struct f7188x_gpio_bank f81865_gpio_bank[] = {
F7188X_GPIO_BANK(60, 5, 0x90, DRVNAME "-6"),
};
-static struct f7188x_gpio_bank nct6116d_gpio_bank[] = {
+static struct f7188x_gpio_bank nct6126d_gpio_bank[] = {
F7188X_GPIO_BANK(0, 8, 0xE0, DRVNAME "-0"),
F7188X_GPIO_BANK(10, 8, 0xE4, DRVNAME "-1"),
F7188X_GPIO_BANK(20, 8, 0xE8, DRVNAME "-2"),
@@ -282,7 +282,7 @@ static struct f7188x_gpio_bank nct6116d_gpio_bank[] = {
F7188X_GPIO_BANK(40, 8, 0xF0, DRVNAME "-4"),
F7188X_GPIO_BANK(50, 8, 0xF4, DRVNAME "-5"),
F7188X_GPIO_BANK(60, 8, 0xF8, DRVNAME "-6"),
- F7188X_GPIO_BANK(70, 1, 0xFC, DRVNAME "-7"),
+ F7188X_GPIO_BANK(70, 8, 0xFC, DRVNAME "-7"),
};
static int f7188x_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
@@ -490,9 +490,9 @@ static int f7188x_gpio_probe(struct platform_device *pdev)
data->nr_bank = ARRAY_SIZE(f81865_gpio_bank);
data->bank = f81865_gpio_bank;
break;
- case nct6116d:
- data->nr_bank = ARRAY_SIZE(nct6116d_gpio_bank);
- data->bank = nct6116d_gpio_bank;
+ case nct6126d:
+ data->nr_bank = ARRAY_SIZE(nct6126d_gpio_bank);
+ data->bank = nct6126d_gpio_bank;
break;
default:
return -ENODEV;
@@ -559,9 +559,9 @@ static int __init f7188x_find(int addr, struct f7188x_sio *sio)
case SIO_F81865_ID:
sio->type = f81865;
break;
- case SIO_NCT6116D_ID:
+ case SIO_NCT6126D_ID:
sio->device = SIO_LD_GPIO_NUVOTON;
- sio->type = nct6116d;
+ sio->type = nct6126d;
break;
default:
pr_info("Unsupported Fintek device 0x%04x\n", devid);
@@ -569,7 +569,7 @@ static int __init f7188x_find(int addr, struct f7188x_sio *sio)
}
/* double check manufacturer where possible */
- if (sio->type != nct6116d) {
+ if (sio->type != nct6126d) {
manid = superio_inw(addr, SIO_FINTEK_MANID);
if (manid != SIO_FINTEK_ID) {
pr_debug("Not a Fintek device at 0x%08x\n", addr);
@@ -581,7 +581,7 @@ static int __init f7188x_find(int addr, struct f7188x_sio *sio)
err = 0;
pr_info("Found %s at %#x\n", f7188x_names[sio->type], (unsigned int)addr);
- if (sio->type != nct6116d)
+ if (sio->type != nct6126d)
pr_info(" revision %d\n", superio_inb(addr, SIO_FINTEK_DEVREV));
err:
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index e6a7049bef64..b32063ac845a 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -369,7 +369,7 @@ static void gpio_mockup_debugfs_setup(struct device *dev,
priv->offset = i;
priv->desc = gpiochip_get_desc(gc, i);
- debugfs_create_file(name, 0200, chip->dbg_dir, priv,
+ debugfs_create_file(name, 0600, chip->dbg_dir, priv,
&gpio_mockup_debugfs_ops);
}
}
diff --git a/drivers/gpio/gpio-sifive.c b/drivers/gpio/gpio-sifive.c
index 98939cd4a71e..745e5f67254e 100644
--- a/drivers/gpio/gpio-sifive.c
+++ b/drivers/gpio/gpio-sifive.c
@@ -221,8 +221,12 @@ static int sifive_gpio_probe(struct platform_device *pdev)
return -ENODEV;
}
- for (i = 0; i < ngpio; i++)
- chip->irq_number[i] = platform_get_irq(pdev, i);
+ for (i = 0; i < ngpio; i++) {
+ ret = platform_get_irq(pdev, i);
+ if (ret < 0)
+ return ret;
+ chip->irq_number[i] = ret;
+ }
ret = bgpio_init(&chip->gc, dev, 4,
chip->base + SIFIVE_GPIO_INPUT_VAL,
diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c
index a1c8702f362c..8b49b0abacd5 100644
--- a/drivers/gpio/gpio-sim.c
+++ b/drivers/gpio/gpio-sim.c
@@ -696,6 +696,9 @@ static char **gpio_sim_make_line_names(struct gpio_sim_bank *bank,
char **line_names;
list_for_each_entry(line, &bank->line_list, siblings) {
+ if (line->offset >= bank->num_lines)
+ continue;
+
if (line->name) {
if (line->offset > max_offset)
max_offset = line->offset;
@@ -721,8 +724,13 @@ static char **gpio_sim_make_line_names(struct gpio_sim_bank *bank,
if (!line_names)
return ERR_PTR(-ENOMEM);
- list_for_each_entry(line, &bank->line_list, siblings)
- line_names[line->offset] = line->name;
+ list_for_each_entry(line, &bank->line_list, siblings) {
+ if (line->offset >= bank->num_lines)
+ continue;
+
+ if (line->name && (line->offset <= max_offset))
+ line_names[line->offset] = line->name;
+ }
return line_names;
}
@@ -754,6 +762,9 @@ static int gpio_sim_add_hogs(struct gpio_sim_device *dev)
list_for_each_entry(bank, &dev->bank_list, siblings) {
list_for_each_entry(line, &bank->line_list, siblings) {
+ if (line->offset >= bank->num_lines)
+ continue;
+
if (line->hog)
num_hogs++;
}
@@ -769,6 +780,9 @@ static int gpio_sim_add_hogs(struct gpio_sim_device *dev)
list_for_each_entry(bank, &dev->bank_list, siblings) {
list_for_each_entry(line, &bank->line_list, siblings) {
+ if (line->offset >= bank->num_lines)
+ continue;
+
if (!line->hog)
continue;
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 04fb05df805b..5be8ad61523e 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -209,6 +209,8 @@ static int gpiochip_find_base(int ngpio)
break;
/* nope, check the space right after the chip */
base = gdev->base + gdev->ngpio;
+ if (base < GPIO_DYNAMIC_BASE)
+ base = GPIO_DYNAMIC_BASE;
}
if (gpio_is_valid(base)) {
@@ -1743,7 +1745,7 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gc)
}
/* Remove all IRQ mappings and delete the domain */
- if (gc->irq.domain) {
+ if (!gc->irq.domain_is_allocated_externally && gc->irq.domain) {
unsigned int irq;
for (offset = 0; offset < gc->ngpio; offset++) {
@@ -1789,6 +1791,15 @@ int gpiochip_irqchip_add_domain(struct gpio_chip *gc,
gc->to_irq = gpiochip_to_irq;
gc->irq.domain = domain;
+ gc->irq.domain_is_allocated_externally = true;
+
+ /*
+	 * Using barrier() here to prevent the compiler from reordering
+	 * the store to gc->irq.initialized before the irqdomain is added.
+ */
+ barrier();
+
+ gc->irq.initialized = true;
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index aeeec211861c..fd6e83795873 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -1092,16 +1092,20 @@ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
* S0ix even though the system is suspending to idle, so return false
* in that case.
*/
- if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0))
- dev_warn_once(adev->dev,
+ if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) {
+ dev_err_once(adev->dev,
"Power consumption will be higher as BIOS has not been configured for suspend-to-idle.\n"
"To use suspend-to-idle change the sleep mode in BIOS setup.\n");
+ return false;
+ }
#if !IS_ENABLED(CONFIG_AMD_PMC)
- dev_warn_once(adev->dev,
+ dev_err_once(adev->dev,
"Power consumption will be higher as the kernel has not been compiled with CONFIG_AMD_PMC.\n");
-#endif /* CONFIG_AMD_PMC */
+ return false;
+#else
return true;
+#endif /* CONFIG_AMD_PMC */
}
#endif /* CONFIG_SUSPEND */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index b1ca1ab6d6ad..393b6fb7a71d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -1615,6 +1615,7 @@ static const u16 amdgpu_unsupported_pciidlist[] = {
0x5874,
0x5940,
0x5941,
+ 0x5b70,
0x5b72,
0x5b73,
0x5b74,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index f52d0ba91a77..a7d250809da9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -582,7 +582,8 @@ void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
if (r)
amdgpu_fence_driver_force_completion(ring);
- if (ring->fence_drv.irq_src)
+ if (!drm_dev_is_unplugged(adev_to_drm(adev)) &&
+ ring->fence_drv.irq_src)
amdgpu_irq_put(adev, ring->fence_drv.irq_src,
ring->fence_drv.irq_type);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 4e2531758866..95b0f984acbf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -593,6 +593,8 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
case IP_VERSION(9, 3, 0):
/* GC 10.3.7 */
case IP_VERSION(10, 3, 7):
+ /* GC 11.0.1 */
+ case IP_VERSION(11, 0, 1):
if (amdgpu_tmz == 0) {
adev->gmc.tmz_enabled = false;
dev_info(adev->dev,
@@ -616,7 +618,6 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
case IP_VERSION(10, 3, 1):
/* YELLOW_CARP*/
case IP_VERSION(10, 3, 3):
- case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 4):
/* Don't enable it by default yet.
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
index b07c000fc8ba..4fa019c8aefc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
@@ -241,6 +241,31 @@ int amdgpu_jpeg_process_poison_irq(struct amdgpu_device *adev,
return 0;
}
+int amdgpu_jpeg_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
+{
+ int r, i;
+
+ r = amdgpu_ras_block_late_init(adev, ras_block);
+ if (r)
+ return r;
+
+ if (amdgpu_ras_is_supported(adev, ras_block->block)) {
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+ r = amdgpu_irq_get(adev, &adev->jpeg.inst[i].ras_poison_irq, 0);
+ if (r)
+ goto late_fini;
+ }
+ }
+ return 0;
+
+late_fini:
+ amdgpu_ras_block_late_fini(adev, ras_block);
+ return r;
+}
+
int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev)
{
int err;
@@ -262,7 +287,7 @@ int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev)
adev->jpeg.ras_if = &ras->ras_block.ras_comm;
if (!ras->ras_block.ras_late_init)
- ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
+ ras->ras_block.ras_late_init = amdgpu_jpeg_ras_late_init;
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
index 0ca76f0f23e9..1471a1ebb034 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
@@ -38,6 +38,7 @@ struct amdgpu_jpeg_reg{
struct amdgpu_jpeg_inst {
struct amdgpu_ring ring_dec;
struct amdgpu_irq_src irq;
+ struct amdgpu_irq_src ras_poison_irq;
struct amdgpu_jpeg_reg external;
};
@@ -72,6 +73,8 @@ int amdgpu_jpeg_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout);
int amdgpu_jpeg_process_poison_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry);
+int amdgpu_jpeg_ras_late_init(struct amdgpu_device *adev,
+ struct ras_common_if *ras_block);
int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev);
#endif /*__AMDGPU_JPEG_H__*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 2bd1a54ee866..a70103ac0026 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -79,9 +79,10 @@ static void amdgpu_bo_user_destroy(struct ttm_buffer_object *tbo)
static void amdgpu_bo_vm_destroy(struct ttm_buffer_object *tbo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
- struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
+ struct amdgpu_bo *shadow_bo = ttm_to_amdgpu_bo(tbo), *bo;
struct amdgpu_bo_vm *vmbo;
+ bo = shadow_bo->parent;
vmbo = to_amdgpu_bo_vm(bo);
/* in case amdgpu_device_recover_vram() got a NULL bo->parent */
if (!list_empty(&vmbo->shadow_list)) {
@@ -139,7 +140,7 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
places[c].lpfn = visible_pfn;
- else if (adev->gmc.real_vram_size != adev->gmc.visible_vram_size)
+ else
places[c].flags |= TTM_PL_FLAG_TOPDOWN;
if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
@@ -694,11 +695,6 @@ int amdgpu_bo_create_vm(struct amdgpu_device *adev,
return r;
*vmbo_ptr = to_amdgpu_bo_vm(bo_ptr);
- INIT_LIST_HEAD(&(*vmbo_ptr)->shadow_list);
- /* Set destroy callback to amdgpu_bo_vm_destroy after vmbo->shadow_list
- * is initialized.
- */
- bo_ptr->tbo.destroy = &amdgpu_bo_vm_destroy;
return r;
}
@@ -715,6 +711,8 @@ void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo)
mutex_lock(&adev->shadow_list_lock);
list_add_tail(&vmbo->shadow_list, &adev->shadow_list);
+ vmbo->shadow->parent = amdgpu_bo_ref(&vmbo->bo);
+ vmbo->shadow->tbo.destroy = &amdgpu_bo_vm_destroy;
mutex_unlock(&adev->shadow_list_lock);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 9d7e6e0e73ed..a150b7a4b4aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -3548,6 +3548,9 @@ static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj,
void *fw_pri_cpu_addr;
int ret;
+ if (adev->psp.vbflash_image_size == 0)
+ return -EINVAL;
+
dev_info(adev->dev, "VBIOS flash to PSP started");
ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size,
@@ -3599,13 +3602,13 @@ static ssize_t amdgpu_psp_vbflash_status(struct device *dev,
}
static const struct bin_attribute psp_vbflash_bin_attr = {
- .attr = {.name = "psp_vbflash", .mode = 0664},
+ .attr = {.name = "psp_vbflash", .mode = 0660},
.size = 0,
.write = amdgpu_psp_vbflash_write,
.read = amdgpu_psp_vbflash_read,
};
-static DEVICE_ATTR(psp_vbflash_status, 0444, amdgpu_psp_vbflash_status, NULL);
+static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL);
int amdgpu_psp_sysfs_init(struct amdgpu_device *adev)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index dc474b809604..49de3a3eebc7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -581,3 +581,21 @@ void amdgpu_ring_ib_end(struct amdgpu_ring *ring)
if (ring->is_sw_ring)
amdgpu_sw_ring_ib_end(ring);
}
+
+void amdgpu_ring_ib_on_emit_cntl(struct amdgpu_ring *ring)
+{
+ if (ring->is_sw_ring)
+ amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_CONTROL);
+}
+
+void amdgpu_ring_ib_on_emit_ce(struct amdgpu_ring *ring)
+{
+ if (ring->is_sw_ring)
+ amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_CE);
+}
+
+void amdgpu_ring_ib_on_emit_de(struct amdgpu_ring *ring)
+{
+ if (ring->is_sw_ring)
+ amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_DE);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index d8749444b689..2474cb71e476 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -227,6 +227,9 @@ struct amdgpu_ring_funcs {
int (*preempt_ib)(struct amdgpu_ring *ring);
void (*emit_mem_sync)(struct amdgpu_ring *ring);
void (*emit_wave_limit)(struct amdgpu_ring *ring, bool enable);
+ void (*patch_cntl)(struct amdgpu_ring *ring, unsigned offset);
+ void (*patch_ce)(struct amdgpu_ring *ring, unsigned offset);
+ void (*patch_de)(struct amdgpu_ring *ring, unsigned offset);
};
struct amdgpu_ring {
@@ -318,10 +321,16 @@ struct amdgpu_ring {
#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
#define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
#define amdgpu_ring_preempt_ib(r) (r)->funcs->preempt_ib(r)
+#define amdgpu_ring_patch_cntl(r, o) ((r)->funcs->patch_cntl((r), (o)))
+#define amdgpu_ring_patch_ce(r, o) ((r)->funcs->patch_ce((r), (o)))
+#define amdgpu_ring_patch_de(r, o) ((r)->funcs->patch_de((r), (o)))
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
void amdgpu_ring_ib_begin(struct amdgpu_ring *ring);
void amdgpu_ring_ib_end(struct amdgpu_ring *ring);
+void amdgpu_ring_ib_on_emit_cntl(struct amdgpu_ring *ring);
+void amdgpu_ring_ib_on_emit_ce(struct amdgpu_ring *ring);
+void amdgpu_ring_ib_on_emit_de(struct amdgpu_ring *ring);
void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c
index 62079f0e3ee8..73516abef662 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c
@@ -105,6 +105,16 @@ static void amdgpu_mux_resubmit_chunks(struct amdgpu_ring_mux *mux)
amdgpu_fence_update_start_timestamp(e->ring,
chunk->sync_seq,
ktime_get());
+ if (chunk->sync_seq ==
+ le32_to_cpu(*(e->ring->fence_drv.cpu_addr + 2))) {
+ if (chunk->cntl_offset <= e->ring->buf_mask)
+ amdgpu_ring_patch_cntl(e->ring,
+ chunk->cntl_offset);
+ if (chunk->ce_offset <= e->ring->buf_mask)
+ amdgpu_ring_patch_ce(e->ring, chunk->ce_offset);
+ if (chunk->de_offset <= e->ring->buf_mask)
+ amdgpu_ring_patch_de(e->ring, chunk->de_offset);
+ }
amdgpu_ring_mux_copy_pkt_from_sw_ring(mux, e->ring,
chunk->start,
chunk->end);
@@ -407,6 +417,17 @@ void amdgpu_sw_ring_ib_end(struct amdgpu_ring *ring)
amdgpu_ring_mux_end_ib(mux, ring);
}
+void amdgpu_sw_ring_ib_mark_offset(struct amdgpu_ring *ring, enum amdgpu_ring_mux_offset_type type)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_ring_mux *mux = &adev->gfx.muxer;
+ unsigned offset;
+
+ offset = ring->wptr & ring->buf_mask;
+
+ amdgpu_ring_mux_ib_mark_offset(mux, ring, offset, type);
+}
+
void amdgpu_ring_mux_start_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
{
struct amdgpu_mux_entry *e;
@@ -429,6 +450,10 @@ void amdgpu_ring_mux_start_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *r
}
chunk->start = ring->wptr;
+	/* sentinel values (buf_mask + 1) used to check whether the IB submission set the offsets */
+ chunk->cntl_offset = ring->buf_mask + 1;
+ chunk->de_offset = ring->buf_mask + 1;
+ chunk->ce_offset = ring->buf_mask + 1;
list_add_tail(&chunk->entry, &e->list);
}
@@ -454,6 +479,41 @@ static void scan_and_remove_signaled_chunk(struct amdgpu_ring_mux *mux, struct a
}
}
+void amdgpu_ring_mux_ib_mark_offset(struct amdgpu_ring_mux *mux,
+ struct amdgpu_ring *ring, u64 offset,
+ enum amdgpu_ring_mux_offset_type type)
+{
+ struct amdgpu_mux_entry *e;
+ struct amdgpu_mux_chunk *chunk;
+
+ e = amdgpu_ring_mux_sw_entry(mux, ring);
+ if (!e) {
+ DRM_ERROR("cannot find entry!\n");
+ return;
+ }
+
+ chunk = list_last_entry(&e->list, struct amdgpu_mux_chunk, entry);
+ if (!chunk) {
+ DRM_ERROR("cannot find chunk!\n");
+ return;
+ }
+
+ switch (type) {
+ case AMDGPU_MUX_OFFSET_TYPE_CONTROL:
+ chunk->cntl_offset = offset;
+ break;
+ case AMDGPU_MUX_OFFSET_TYPE_DE:
+ chunk->de_offset = offset;
+ break;
+ case AMDGPU_MUX_OFFSET_TYPE_CE:
+ chunk->ce_offset = offset;
+ break;
+ default:
+ DRM_ERROR("invalid type (%d)\n", type);
+ break;
+ }
+}
+
void amdgpu_ring_mux_end_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
{
struct amdgpu_mux_entry *e;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.h
index 4be45fc14954..b22d4fb2a847 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.h
@@ -50,6 +50,12 @@ struct amdgpu_mux_entry {
struct list_head list;
};
+enum amdgpu_ring_mux_offset_type {
+ AMDGPU_MUX_OFFSET_TYPE_CONTROL,
+ AMDGPU_MUX_OFFSET_TYPE_DE,
+ AMDGPU_MUX_OFFSET_TYPE_CE,
+};
+
struct amdgpu_ring_mux {
struct amdgpu_ring *real_ring;
@@ -72,12 +78,18 @@ struct amdgpu_ring_mux {
* @sync_seq: the fence seqno related with the saved IB.
* @start:- start location on the software ring.
* @end:- end location on the software ring.
+ * @cntl_offset:- the PRE_RESUME bit position used for resubmission.
+ * @de_offset:- the anchor in write_data for de meta of resubmission.
+ * @ce_offset:- the anchor in write_data for ce meta of resubmission.
*/
struct amdgpu_mux_chunk {
struct list_head entry;
uint32_t sync_seq;
u64 start;
u64 end;
+ u64 cntl_offset;
+ u64 de_offset;
+ u64 ce_offset;
};
int amdgpu_ring_mux_init(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring,
@@ -89,6 +101,8 @@ u64 amdgpu_ring_mux_get_wptr(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ri
u64 amdgpu_ring_mux_get_rptr(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring);
void amdgpu_ring_mux_start_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring);
void amdgpu_ring_mux_end_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring);
+void amdgpu_ring_mux_ib_mark_offset(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring,
+ u64 offset, enum amdgpu_ring_mux_offset_type type);
bool amdgpu_mcbp_handle_trailing_fence_irq(struct amdgpu_ring_mux *mux);
u64 amdgpu_sw_ring_get_rptr_gfx(struct amdgpu_ring *ring);
@@ -97,6 +111,7 @@ void amdgpu_sw_ring_set_wptr_gfx(struct amdgpu_ring *ring);
void amdgpu_sw_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
void amdgpu_sw_ring_ib_begin(struct amdgpu_ring *ring);
void amdgpu_sw_ring_ib_end(struct amdgpu_ring *ring);
+void amdgpu_sw_ring_ib_mark_offset(struct amdgpu_ring *ring, enum amdgpu_ring_mux_offset_type type);
const char *amdgpu_sw_ring_name(int idx);
unsigned int amdgpu_sw_ring_priority(int idx);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index e63fcc58e8e0..2d94f1b63bd6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -1181,6 +1181,31 @@ int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
return 0;
}
+int amdgpu_vcn_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
+{
+ int r, i;
+
+ r = amdgpu_ras_block_late_init(adev, ras_block);
+ if (r)
+ return r;
+
+ if (amdgpu_ras_is_supported(adev, ras_block->block)) {
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ r = amdgpu_irq_get(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
+ if (r)
+ goto late_fini;
+ }
+ }
+ return 0;
+
+late_fini:
+ amdgpu_ras_block_late_fini(adev, ras_block);
+ return r;
+}
+
int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev)
{
int err;
@@ -1202,7 +1227,7 @@ int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev)
adev->vcn.ras_if = &ras->ras_block.ras_comm;
if (!ras->ras_block.ras_late_init)
- ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
+ ras->ras_block.ras_late_init = amdgpu_vcn_ras_late_init;
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index c730949ece7d..f1397ef66fd7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -234,6 +234,7 @@ struct amdgpu_vcn_inst {
struct amdgpu_ring ring_enc[AMDGPU_VCN_MAX_ENC_RINGS];
atomic_t sched_score;
struct amdgpu_irq_src irq;
+ struct amdgpu_irq_src ras_poison_irq;
struct amdgpu_vcn_reg external;
struct amdgpu_bo *dpg_sram_bo;
struct dpg_pause_state pause_state;
@@ -400,6 +401,8 @@ void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev,
int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry);
+int amdgpu_vcn_ras_late_init(struct amdgpu_device *adev,
+ struct ras_common_if *ras_block);
int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
index df63dc3bca18..051c7194ab4f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -564,7 +564,6 @@ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
return r;
}
- (*vmbo)->shadow->parent = amdgpu_bo_ref(bo);
amdgpu_bo_add_to_shadow_list(*vmbo);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 43d6a9d6a538..afacfb9b5bf6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -800,7 +800,7 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
{
struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
struct drm_buddy *mm = &mgr->mm;
- struct drm_buddy_block *block;
+ struct amdgpu_vram_reservation *rsv;
drm_printf(printer, " vis usage:%llu\n",
amdgpu_vram_mgr_vis_usage(mgr));
@@ -812,8 +812,9 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
drm_buddy_print(mm, printer);
drm_printf(printer, "reserved:\n");
- list_for_each_entry(block, &mgr->reserved_pages, link)
- drm_buddy_block_print(mm, block, printer);
+ list_for_each_entry(rsv, &mgr->reserved_pages, blocks)
+ drm_printf(printer, "%#018llx-%#018llx: %llu\n",
+ rsv->start, rsv->start + rsv->size, rsv->size);
mutex_unlock(&mgr->lock);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index f5b5ce1051a2..ab44c1391d52 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -6892,8 +6892,10 @@ static int gfx_v10_0_kiq_resume(struct amdgpu_device *adev)
return r;
r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
- if (unlikely(r != 0))
+ if (unlikely(r != 0)) {
+ amdgpu_bo_unreserve(ring->mqd_obj);
return r;
+ }
gfx_v10_0_kiq_init_queue(ring);
amdgpu_bo_kunmap(ring->mqd_obj);
@@ -8152,8 +8154,14 @@ static int gfx_v10_0_set_powergating_state(void *handle,
case IP_VERSION(10, 3, 3):
case IP_VERSION(10, 3, 6):
case IP_VERSION(10, 3, 7):
+ if (!enable)
+ amdgpu_gfx_off_ctrl(adev, false);
+
gfx_v10_cntl_pg(adev, enable);
- amdgpu_gfx_off_ctrl(adev, enable);
+
+ if (enable)
+ amdgpu_gfx_off_ctrl(adev, true);
+
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index f5c376276984..c4940b6ea1c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -4667,24 +4667,27 @@ static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev)
uint64_t clock;
uint64_t clock_counter_lo, clock_counter_hi_pre, clock_counter_hi_after;
- amdgpu_gfx_off_ctrl(adev, false);
- mutex_lock(&adev->gfx.gpu_clock_mutex);
if (amdgpu_sriov_vf(adev)) {
+ amdgpu_gfx_off_ctrl(adev, false);
+ mutex_lock(&adev->gfx.gpu_clock_mutex);
clock_counter_hi_pre = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_HI);
clock_counter_lo = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_LO);
clock_counter_hi_after = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_HI);
if (clock_counter_hi_pre != clock_counter_hi_after)
clock_counter_lo = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_LO);
+ mutex_unlock(&adev->gfx.gpu_clock_mutex);
+ amdgpu_gfx_off_ctrl(adev, true);
} else {
+ preempt_disable();
clock_counter_hi_pre = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER);
clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER);
clock_counter_hi_after = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER);
if (clock_counter_hi_pre != clock_counter_hi_after)
clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER);
+ preempt_enable();
}
clock = clock_counter_lo | (clock_counter_hi_after << 32ULL);
- mutex_unlock(&adev->gfx.gpu_clock_mutex);
- amdgpu_gfx_off_ctrl(adev, true);
+
return clock;
}
@@ -5150,8 +5153,14 @@ static int gfx_v11_0_set_powergating_state(void *handle,
break;
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 4):
+ if (!enable)
+ amdgpu_gfx_off_ctrl(adev, false);
+
gfx_v11_cntl_pg(adev, enable);
- amdgpu_gfx_off_ctrl(adev, enable);
+
+ if (enable)
+ amdgpu_gfx_off_ctrl(adev, true);
+
break;
default:
break;
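The first gfx_v11_0.c hunk narrows the locking in gfx_v11_0_get_gpu_clock_counter(): the SR-IOV path keeps the GFXOFF/mutex protection, while the bare-metal path only needs a preempt-disabled window. Both paths rely on the same hi/lo/hi sequence to read a 64-bit counter from two 32-bit registers without tearing across a low-word wrap. A self-contained sketch of that sequence, with a simulated counter standing in for the RREG32_SOC15() accessors (the start value is chosen so the wrap path actually triggers):

#include <stdint.h>
#include <stdio.h>

/* Simulated free-running counter standing in for the MTIME/GOLDEN_TSC
 * register pair; each low-word read advances it slightly. */
static uint64_t sim = 0x00000002fffffffcULL;

static uint32_t read_hi(void) { return (uint32_t)(sim >> 32); }
static uint32_t read_lo(void) { uint32_t lo = (uint32_t)sim; sim += 8; return lo; }

static uint64_t read_counter64(void)
{
	uint64_t hi_pre, lo, hi_after;

	hi_pre   = read_hi();
	lo       = read_lo();
	hi_after = read_hi();
	/* The low word wrapped between the two high reads: re-read it
	 * so it pairs consistently with hi_after. */
	if (hi_pre != hi_after)
		lo = read_lo();

	return lo | (hi_after << 32);
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)read_counter64());
	return 0;
}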
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index f46d4b18a3fa..a674c8a58dc2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -149,16 +149,6 @@ MODULE_FIRMWARE("amdgpu/aldebaran_sjt_mec2.bin");
#define mmGOLDEN_TSC_COUNT_LOWER_Renoir 0x0026
#define mmGOLDEN_TSC_COUNT_LOWER_Renoir_BASE_IDX 1
-#define mmGOLDEN_TSC_COUNT_UPPER_Raven 0x007a
-#define mmGOLDEN_TSC_COUNT_UPPER_Raven_BASE_IDX 0
-#define mmGOLDEN_TSC_COUNT_LOWER_Raven 0x007b
-#define mmGOLDEN_TSC_COUNT_LOWER_Raven_BASE_IDX 0
-
-#define mmGOLDEN_TSC_COUNT_UPPER_Raven2 0x0068
-#define mmGOLDEN_TSC_COUNT_UPPER_Raven2_BASE_IDX 0
-#define mmGOLDEN_TSC_COUNT_LOWER_Raven2 0x0069
-#define mmGOLDEN_TSC_COUNT_LOWER_Raven2_BASE_IDX 0
-
enum ta_ras_gfx_subblock {
/*CPC*/
TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0,
@@ -765,7 +755,7 @@ static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
-static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
+static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume, bool usegds);
static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring);
static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status);
@@ -3617,8 +3607,10 @@ static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
return r;
r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
- if (unlikely(r != 0))
+ if (unlikely(r != 0)) {
+ amdgpu_bo_unreserve(ring->mqd_obj);
return r;
+ }
gfx_v9_0_kiq_init_queue(ring);
amdgpu_bo_kunmap(ring->mqd_obj);
@@ -4002,36 +3994,6 @@ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
preempt_enable();
clock = clock_lo | (clock_hi << 32ULL);
break;
- case IP_VERSION(9, 1, 0):
- preempt_disable();
- clock_hi = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven);
- clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven);
- hi_check = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven);
- /* The PWR TSC clock frequency is 100MHz, which sets 32-bit carry over
- * roughly every 42 seconds.
- */
- if (hi_check != clock_hi) {
- clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven);
- clock_hi = hi_check;
- }
- preempt_enable();
- clock = clock_lo | (clock_hi << 32ULL);
- break;
- case IP_VERSION(9, 2, 2):
- preempt_disable();
- clock_hi = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven2);
- clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven2);
- hi_check = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven2);
- /* The PWR TSC clock frequency is 100MHz, which sets 32-bit carry over
- * roughly every 42 seconds.
- */
- if (hi_check != clock_hi) {
- clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven2);
- clock_hi = hi_check;
- }
- preempt_enable();
- clock = clock_lo | (clock_hi << 32ULL);
- break;
default:
amdgpu_gfx_off_ctrl(adev, false);
mutex_lock(&adev->gfx.gpu_clock_mutex);
@@ -5165,7 +5127,8 @@ static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
gfx_v9_0_ring_emit_de_meta(ring,
(!amdgpu_sriov_vf(ring->adev) &&
flags & AMDGPU_IB_PREEMPTED) ?
- true : false);
+ true : false,
+ job->gds_size > 0 && job->gds_base != 0);
}
amdgpu_ring_write(ring, header);
@@ -5176,9 +5139,83 @@ static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
#endif
lower_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+ amdgpu_ring_ib_on_emit_cntl(ring);
amdgpu_ring_write(ring, control);
}
+static void gfx_v9_0_ring_patch_cntl(struct amdgpu_ring *ring,
+ unsigned offset)
+{
+ u32 control = ring->ring[offset];
+
+ control |= INDIRECT_BUFFER_PRE_RESUME(1);
+ ring->ring[offset] = control;
+}
+
+static void gfx_v9_0_ring_patch_ce_meta(struct amdgpu_ring *ring,
+ unsigned offset)
+{
+ struct amdgpu_device *adev = ring->adev;
+ void *ce_payload_cpu_addr;
+ uint64_t payload_offset, payload_size;
+
+ payload_size = sizeof(struct v9_ce_ib_state);
+
+ if (ring->is_mes_queue) {
+ payload_offset = offsetof(struct amdgpu_mes_ctx_meta_data,
+ gfx[0].gfx_meta_data) +
+ offsetof(struct v9_gfx_meta_data, ce_payload);
+ ce_payload_cpu_addr =
+ amdgpu_mes_ctx_get_offs_cpu_addr(ring, payload_offset);
+ } else {
+ payload_offset = offsetof(struct v9_gfx_meta_data, ce_payload);
+ ce_payload_cpu_addr = adev->virt.csa_cpu_addr + payload_offset;
+ }
+
+ if (offset + (payload_size >> 2) <= ring->buf_mask + 1) {
+ memcpy((void *)&ring->ring[offset], ce_payload_cpu_addr, payload_size);
+ } else {
+ memcpy((void *)&ring->ring[offset], ce_payload_cpu_addr,
+ (ring->buf_mask + 1 - offset) << 2);
+ payload_size -= (ring->buf_mask + 1 - offset) << 2;
+ memcpy((void *)&ring->ring[0],
+ ce_payload_cpu_addr + ((ring->buf_mask + 1 - offset) << 2),
+ payload_size);
+ }
+}
+
+static void gfx_v9_0_ring_patch_de_meta(struct amdgpu_ring *ring,
+ unsigned offset)
+{
+ struct amdgpu_device *adev = ring->adev;
+ void *de_payload_cpu_addr;
+ uint64_t payload_offset, payload_size;
+
+ payload_size = sizeof(struct v9_de_ib_state);
+
+ if (ring->is_mes_queue) {
+ payload_offset = offsetof(struct amdgpu_mes_ctx_meta_data,
+ gfx[0].gfx_meta_data) +
+ offsetof(struct v9_gfx_meta_data, de_payload);
+ de_payload_cpu_addr =
+ amdgpu_mes_ctx_get_offs_cpu_addr(ring, payload_offset);
+ } else {
+ payload_offset = offsetof(struct v9_gfx_meta_data, de_payload);
+ de_payload_cpu_addr = adev->virt.csa_cpu_addr + payload_offset;
+ }
+
+ if (offset + (payload_size >> 2) <= ring->buf_mask + 1) {
+ memcpy((void *)&ring->ring[offset], de_payload_cpu_addr, payload_size);
+ } else {
+ memcpy((void *)&ring->ring[offset], de_payload_cpu_addr,
+ (ring->buf_mask + 1 - offset) << 2);
+ payload_size -= (ring->buf_mask + 1 - offset) << 2;
+ memcpy((void *)&ring->ring[0],
+ de_payload_cpu_addr + ((ring->buf_mask + 1 - offset) << 2),
+ payload_size);
+ }
+}
+
static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
struct amdgpu_job *job,
struct amdgpu_ib *ib,
@@ -5374,6 +5411,8 @@ static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume)
amdgpu_ring_write(ring, lower_32_bits(ce_payload_gpu_addr));
amdgpu_ring_write(ring, upper_32_bits(ce_payload_gpu_addr));
+ amdgpu_ring_ib_on_emit_ce(ring);
+
if (resume)
amdgpu_ring_write_multiple(ring, ce_payload_cpu_addr,
sizeof(ce_payload) >> 2);
@@ -5407,10 +5446,6 @@ static int gfx_v9_0_ring_preempt_ib(struct amdgpu_ring *ring)
amdgpu_ring_alloc(ring, 13);
gfx_v9_0_ring_emit_fence(ring, ring->trail_fence_gpu_addr,
ring->trail_seq, AMDGPU_FENCE_FLAG_EXEC | AMDGPU_FENCE_FLAG_INT);
- /*reset the CP_VMID_PREEMPT after trailing fence*/
- amdgpu_ring_emit_wreg(ring,
- SOC15_REG_OFFSET(GC, 0, mmCP_VMID_PREEMPT),
- 0x0);
/* assert IB preemption, emit the trailing fence */
kiq->pmf->kiq_unmap_queues(kiq_ring, ring, PREEMPT_QUEUES_NO_UNMAP,
@@ -5433,6 +5468,10 @@ static int gfx_v9_0_ring_preempt_ib(struct amdgpu_ring *ring)
DRM_WARN("ring %d timeout to preempt ib\n", ring->idx);
}
+ /*reset the CP_VMID_PREEMPT after trailing fence*/
+ amdgpu_ring_emit_wreg(ring,
+ SOC15_REG_OFFSET(GC, 0, mmCP_VMID_PREEMPT),
+ 0x0);
amdgpu_ring_commit(ring);
/* deassert preemption condition */
@@ -5440,7 +5479,7 @@ static int gfx_v9_0_ring_preempt_ib(struct amdgpu_ring *ring)
return r;
}
-static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume)
+static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume, bool usegds)
{
struct amdgpu_device *adev = ring->adev;
struct v9_de_ib_state de_payload = {0};
@@ -5471,8 +5510,10 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume)
PAGE_SIZE);
}
- de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
- de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
+ if (usegds) {
+ de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
+ de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
+ }
cnt = (sizeof(de_payload) >> 2) + 4 - 2;
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
@@ -5483,6 +5524,7 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume)
amdgpu_ring_write(ring, lower_32_bits(de_payload_gpu_addr));
amdgpu_ring_write(ring, upper_32_bits(de_payload_gpu_addr));
+ amdgpu_ring_ib_on_emit_de(ring);
if (resume)
amdgpu_ring_write_multiple(ring, de_payload_cpu_addr,
sizeof(de_payload) >> 2);
@@ -6893,6 +6935,9 @@ static const struct amdgpu_ring_funcs gfx_v9_0_sw_ring_funcs_gfx = {
.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
.soft_recovery = gfx_v9_0_ring_soft_recovery,
.emit_mem_sync = gfx_v9_0_emit_mem_sync,
+ .patch_cntl = gfx_v9_0_ring_patch_cntl,
+ .patch_de = gfx_v9_0_ring_patch_de_meta,
+ .patch_ce = gfx_v9_0_ring_patch_ce_meta,
};
static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
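The new gfx_v9_0_ring_patch_ce_meta()/_de_meta() helpers above end in the same split-copy idiom: a payload written at a dword offset near the end of the power-of-two ring may straddle the wrap point, so the memcpy is split at buf_mask + 1 and the remainder continues at offset 0. A freestanding sketch of that idiom (ring size and payload contents are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RING_DWORDS 16 /* power of two, so buf_mask = RING_DWORDS - 1 */

/* Copy payload_size bytes into a dword ring at dword offset 'offset',
 * wrapping to the start of the ring if the payload crosses the end. */
static void ring_write_wrapped(uint32_t *ring, unsigned int offset,
			       const void *payload, size_t payload_size)
{
	const unsigned int buf_mask = RING_DWORDS - 1;

	if (offset + (payload_size >> 2) <= buf_mask + 1) {
		memcpy(&ring[offset], payload, payload_size);
	} else {
		size_t first = (size_t)(buf_mask + 1 - offset) << 2;

		memcpy(&ring[offset], payload, first);
		memcpy(&ring[0], (const uint8_t *)payload + first,
		       payload_size - first);
	}
}

int main(void)
{
	uint32_t ring[RING_DWORDS] = {0};
	uint32_t payload[6] = {1, 2, 3, 4, 5, 6};

	ring_write_wrapped(ring, 13, payload, sizeof(payload));
	printf("%u %u %u\n", ring[13], ring[15], ring[0]); /* 1 3 4 */
	return 0;
}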
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index d95f9fe8f1c5..4116c112e8a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -31,6 +31,8 @@
#include "umc_v8_10.h"
#include "athub/athub_3_0_0_sh_mask.h"
#include "athub/athub_3_0_0_offset.h"
+#include "dcn/dcn_3_2_0_offset.h"
+#include "dcn/dcn_3_2_0_sh_mask.h"
#include "oss/osssys_6_0_0_offset.h"
#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
#include "navi10_enum.h"
@@ -546,7 +548,24 @@ static void gmc_v11_0_get_vm_pte(struct amdgpu_device *adev,
static unsigned gmc_v11_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
- return 0;
+ u32 d1vga_control = RREG32_SOC15(DCE, 0, regD1VGA_CONTROL);
+ unsigned size;
+
+ if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
+ size = AMDGPU_VBIOS_VGA_ALLOCATION;
+ } else {
+ u32 viewport;
+ u32 pitch;
+
+ viewport = RREG32_SOC15(DCE, 0, regHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
+ pitch = RREG32_SOC15(DCE, 0, regHUBPREQ0_DCSURF_SURFACE_PITCH);
+ size = (REG_GET_FIELD(viewport,
+ HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
+ REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) *
+ 4);
+ }
+
+ return size;
}
static const struct amdgpu_gmc_funcs gmc_v11_0_gmc_funcs = {
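gmc_v11_0_get_vbios_fb_size() previously returned 0; it now mirrors the older DCN paths: reserve the fixed VGA allocation when VGA mode is enabled, otherwise derive the pre-OS framebuffer size from the primary viewport height times the surface pitch times 4 bytes per pixel. The arithmetic, with the register reads replaced by plain parameters (the 9 MiB constant is illustrative):

#include <stdio.h>

#define VBIOS_VGA_ALLOCATION (9 * 1024 * 1024) /* illustrative constant */

/* Pre-OS framebuffer size: viewport height x pitch x 4 bytes/pixel. */
static unsigned int vbios_fb_size(int vga_mode_enabled,
				  unsigned int viewport_height,
				  unsigned int pitch)
{
	if (vga_mode_enabled)
		return VBIOS_VGA_ALLOCATION;
	return viewport_height * pitch * 4;
}

int main(void)
{
	/* e.g. a 1080-line viewport with a 1920-pixel pitch -> ~7.9 MiB */
	printf("%u bytes\n", vbios_fb_size(0, 1080, 1920));
	return 0;
}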
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
index b040f51d9aa9..73e0dc5a10cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -102,13 +102,13 @@ static int jpeg_v2_5_sw_init(void *handle)
/* JPEG DJPEG POISON EVENT */
r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i],
- VCN_2_6__SRCID_DJPEG0_POISON, &adev->jpeg.inst[i].irq);
+ VCN_2_6__SRCID_DJPEG0_POISON, &adev->jpeg.inst[i].ras_poison_irq);
if (r)
return r;
/* JPEG EJPEG POISON EVENT */
r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i],
- VCN_2_6__SRCID_EJPEG0_POISON, &adev->jpeg.inst[i].irq);
+ VCN_2_6__SRCID_EJPEG0_POISON, &adev->jpeg.inst[i].ras_poison_irq);
if (r)
return r;
}
@@ -221,6 +221,9 @@ static int jpeg_v2_5_hw_fini(void *handle)
if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS))
jpeg_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
+
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG))
+ amdgpu_irq_put(adev, &adev->jpeg.inst[i].ras_poison_irq, 0);
}
return 0;
@@ -569,6 +572,14 @@ static int jpeg_v2_5_set_interrupt_state(struct amdgpu_device *adev,
return 0;
}
+static int jpeg_v2_6_set_ras_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned int type,
+ enum amdgpu_interrupt_state state)
+{
+ return 0;
+}
+
static int jpeg_v2_5_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -593,10 +604,6 @@ static int jpeg_v2_5_process_interrupt(struct amdgpu_device *adev,
case VCN_2_0__SRCID__JPEG_DECODE:
amdgpu_fence_process(&adev->jpeg.inst[ip_instance].ring_dec);
break;
- case VCN_2_6__SRCID_DJPEG0_POISON:
- case VCN_2_6__SRCID_EJPEG0_POISON:
- amdgpu_jpeg_process_poison_irq(adev, source, entry);
- break;
default:
DRM_ERROR("Unhandled interrupt: %d %d\n",
entry->src_id, entry->src_data[0]);
@@ -725,6 +732,11 @@ static const struct amdgpu_irq_src_funcs jpeg_v2_5_irq_funcs = {
.process = jpeg_v2_5_process_interrupt,
};
+static const struct amdgpu_irq_src_funcs jpeg_v2_6_ras_irq_funcs = {
+ .set = jpeg_v2_6_set_ras_interrupt_state,
+ .process = amdgpu_jpeg_process_poison_irq,
+};
+
static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev)
{
int i;
@@ -735,6 +747,9 @@ static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev)
adev->jpeg.inst[i].irq.num_types = 1;
adev->jpeg.inst[i].irq.funcs = &jpeg_v2_5_irq_funcs;
+
+ adev->jpeg.inst[i].ras_poison_irq.num_types = 1;
+ adev->jpeg.inst[i].ras_poison_irq.funcs = &jpeg_v2_6_ras_irq_funcs;
}
}
@@ -800,6 +815,7 @@ const struct amdgpu_ras_block_hw_ops jpeg_v2_6_ras_hw_ops = {
static struct amdgpu_jpeg_ras jpeg_v2_6_ras = {
.ras_block = {
.hw_ops = &jpeg_v2_6_ras_hw_ops,
+ .ras_late_init = amdgpu_jpeg_ras_late_init,
},
};
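This diff and the jpeg_v4_0/vcn_v2_5/vcn_v4_0 diffs below all make the same change: DJPEG/EJPEG/UVD poison events move off the functional interrupt source onto a dedicated ras_poison_irq whose .set callback is a no-op, and the RAS block gains a ras_late_init hook so the poison interrupt's refcount is managed independently of the decode/encode interrupts. A pared-down sketch of the shape of such an interrupt-source table (the two callbacks are all that is kept):

#include <stdio.h>

/* Pared-down interrupt-source table: a state setter and a handler. */
struct irq_src_funcs {
	int (*set)(int state);
	int (*process)(void);
};

/* Poison interrupts have no enable/disable knob, so .set is a no-op. */
static int ras_set_state(int state) { (void)state; return 0; }
static int ras_process(void) { printf("poison event consumed\n"); return 0; }

static const struct irq_src_funcs ras_irq_funcs = {
	.set = ras_set_state,
	.process = ras_process,
};

int main(void)
{
	ras_irq_funcs.set(1);
	return ras_irq_funcs.process();
}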
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
index 77e1e64aa1d1..a3d83c9f2c9a 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
@@ -87,13 +87,13 @@ static int jpeg_v4_0_sw_init(void *handle)
/* JPEG DJPEG POISON EVENT */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
- VCN_4_0__SRCID_DJPEG0_POISON, &adev->jpeg.inst->irq);
+ VCN_4_0__SRCID_DJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq);
if (r)
return r;
/* JPEG EJPEG POISON EVENT */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
- VCN_4_0__SRCID_EJPEG0_POISON, &adev->jpeg.inst->irq);
+ VCN_4_0__SRCID_EJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq);
if (r)
return r;
@@ -202,7 +202,8 @@ static int jpeg_v4_0_hw_fini(void *handle)
RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS))
jpeg_v4_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
}
- amdgpu_irq_put(adev, &adev->jpeg.inst->irq, 0);
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG))
+ amdgpu_irq_put(adev, &adev->jpeg.inst->ras_poison_irq, 0);
return 0;
}
@@ -670,6 +671,14 @@ static int jpeg_v4_0_set_interrupt_state(struct amdgpu_device *adev,
return 0;
}
+static int jpeg_v4_0_set_ras_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned int type,
+ enum amdgpu_interrupt_state state)
+{
+ return 0;
+}
+
static int jpeg_v4_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -680,10 +689,6 @@ static int jpeg_v4_0_process_interrupt(struct amdgpu_device *adev,
case VCN_4_0__SRCID__JPEG_DECODE:
amdgpu_fence_process(&adev->jpeg.inst->ring_dec);
break;
- case VCN_4_0__SRCID_DJPEG0_POISON:
- case VCN_4_0__SRCID_EJPEG0_POISON:
- amdgpu_jpeg_process_poison_irq(adev, source, entry);
- break;
default:
DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
entry->src_id, entry->src_data[0]);
@@ -753,10 +758,18 @@ static const struct amdgpu_irq_src_funcs jpeg_v4_0_irq_funcs = {
.process = jpeg_v4_0_process_interrupt,
};
+static const struct amdgpu_irq_src_funcs jpeg_v4_0_ras_irq_funcs = {
+ .set = jpeg_v4_0_set_ras_interrupt_state,
+ .process = amdgpu_jpeg_process_poison_irq,
+};
+
static void jpeg_v4_0_set_irq_funcs(struct amdgpu_device *adev)
{
adev->jpeg.inst->irq.num_types = 1;
adev->jpeg.inst->irq.funcs = &jpeg_v4_0_irq_funcs;
+
+ adev->jpeg.inst->ras_poison_irq.num_types = 1;
+ adev->jpeg.inst->ras_poison_irq.funcs = &jpeg_v4_0_ras_irq_funcs;
}
const struct amdgpu_ip_block_version jpeg_v4_0_ip_block = {
@@ -811,6 +824,7 @@ const struct amdgpu_ras_block_hw_ops jpeg_v4_0_ras_hw_ops = {
static struct amdgpu_jpeg_ras jpeg_v4_0_ras = {
.ras_block = {
.hw_ops = &jpeg_v4_0_ras_hw_ops,
+ .ras_late_init = amdgpu_jpeg_ras_late_init,
},
};
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index e1b7fca09666..5f10883da6a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -57,7 +57,13 @@ static int psp_v10_0_init_microcode(struct psp_context *psp)
if (err)
return err;
- return psp_init_ta_microcode(psp, ucode_prefix);
+ err = psp_init_ta_microcode(psp, ucode_prefix);
+ if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 1, 0)) &&
+ (adev->pdev->revision == 0xa1) &&
+ (psp->securedisplay_context.context.bin_desc.fw_version >= 0x27000008)) {
+ adev->psp.securedisplay_context.context.bin_desc.size_bytes = 0;
+ }
+ return err;
}
static int psp_v10_0_ring_create(struct psp_context *psp,
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 6d15d5cd9e07..a2fd1ffadb59 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -301,10 +301,11 @@ static u32 soc15_get_xclk(struct amdgpu_device *adev)
u32 reference_clock = adev->clock.spll.reference_freq;
if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 0) ||
- adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 1) ||
- adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 0) ||
- adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 1))
+ adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 1))
return 10000;
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 0) ||
+ adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 1))
+ return reference_clock / 4;
return reference_clock;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index ab0b45d0ead1..515681c88dcb 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -143,7 +143,7 @@ static int vcn_v2_5_sw_init(void *handle)
/* VCN POISON TRAP */
r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
- VCN_2_6__SRCID_UVD_POISON, &adev->vcn.inst[j].irq);
+ VCN_2_6__SRCID_UVD_POISON, &adev->vcn.inst[j].ras_poison_irq);
if (r)
return r;
}
@@ -354,6 +354,9 @@ static int vcn_v2_5_hw_fini(void *handle)
(adev->vcn.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(VCN, i, mmUVD_STATUS)))
vcn_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
+
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
+ amdgpu_irq_put(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
}
return 0;
@@ -1807,6 +1810,14 @@ static int vcn_v2_5_set_interrupt_state(struct amdgpu_device *adev,
return 0;
}
+static int vcn_v2_6_set_ras_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned int type,
+ enum amdgpu_interrupt_state state)
+{
+ return 0;
+}
+
static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -1837,9 +1848,6 @@ static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev,
case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]);
break;
- case VCN_2_6__SRCID_UVD_POISON:
- amdgpu_vcn_process_poison_irq(adev, source, entry);
- break;
default:
DRM_ERROR("Unhandled interrupt: %d %d\n",
entry->src_id, entry->src_data[0]);
@@ -1854,6 +1862,11 @@ static const struct amdgpu_irq_src_funcs vcn_v2_5_irq_funcs = {
.process = vcn_v2_5_process_interrupt,
};
+static const struct amdgpu_irq_src_funcs vcn_v2_6_ras_irq_funcs = {
+ .set = vcn_v2_6_set_ras_interrupt_state,
+ .process = amdgpu_vcn_process_poison_irq,
+};
+
static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
{
int i;
@@ -1863,6 +1876,9 @@ static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
continue;
adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
adev->vcn.inst[i].irq.funcs = &vcn_v2_5_irq_funcs;
+
+ adev->vcn.inst[i].ras_poison_irq.num_types = adev->vcn.num_enc_rings + 1;
+ adev->vcn.inst[i].ras_poison_irq.funcs = &vcn_v2_6_ras_irq_funcs;
}
}
@@ -1965,6 +1981,7 @@ const struct amdgpu_ras_block_hw_ops vcn_v2_6_ras_hw_ops = {
static struct amdgpu_vcn_ras vcn_v2_6_ras = {
.ras_block = {
.hw_ops = &vcn_v2_6_ras_hw_ops,
+ .ras_late_init = amdgpu_vcn_ras_late_init,
},
};
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index bf0674039598..da126ff8bcbc 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -129,7 +129,11 @@ static int vcn_v4_0_sw_init(void *handle)
if (adev->vcn.harvest_config & (1 << i))
continue;
- atomic_set(&adev->vcn.inst[i].sched_score, 0);
+ /* Init instance 0 sched_score to 1, so it's scheduled after other instances */
+ if (i == 0)
+ atomic_set(&adev->vcn.inst[i].sched_score, 1);
+ else
+ atomic_set(&adev->vcn.inst[i].sched_score, 0);
/* VCN UNIFIED TRAP */
r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
@@ -139,7 +143,7 @@ static int vcn_v4_0_sw_init(void *handle)
/* VCN POISON TRAP */
r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
- VCN_4_0__SRCID_UVD_POISON, &adev->vcn.inst[i].irq);
+ VCN_4_0__SRCID_UVD_POISON, &adev->vcn.inst[i].ras_poison_irq);
if (r)
return r;
@@ -305,8 +309,8 @@ static int vcn_v4_0_hw_fini(void *handle)
vcn_v4_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
}
}
-
- amdgpu_irq_put(adev, &adev->vcn.inst[i].irq, 0);
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
+ amdgpu_irq_put(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
}
return 0;
@@ -1976,6 +1980,24 @@ static int vcn_v4_0_set_interrupt_state(struct amdgpu_device *adev, struct amdgp
}
/**
+ * vcn_v4_0_set_ras_interrupt_state - set VCN block RAS interrupt state
+ *
+ * @adev: amdgpu_device pointer
+ * @source: interrupt sources
+ * @type: interrupt types
+ * @state: interrupt states
+ *
+ * Set VCN block RAS interrupt state
+ */
+static int vcn_v4_0_set_ras_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned int type,
+ enum amdgpu_interrupt_state state)
+{
+ return 0;
+}
+
+/**
* vcn_v4_0_process_interrupt - process VCN block interrupt
*
* @adev: amdgpu_device pointer
@@ -2007,9 +2029,6 @@ static int vcn_v4_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_
case VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
break;
- case VCN_4_0__SRCID_UVD_POISON:
- amdgpu_vcn_process_poison_irq(adev, source, entry);
- break;
default:
DRM_ERROR("Unhandled interrupt: %d %d\n",
entry->src_id, entry->src_data[0]);
@@ -2024,6 +2043,11 @@ static const struct amdgpu_irq_src_funcs vcn_v4_0_irq_funcs = {
.process = vcn_v4_0_process_interrupt,
};
+static const struct amdgpu_irq_src_funcs vcn_v4_0_ras_irq_funcs = {
+ .set = vcn_v4_0_set_ras_interrupt_state,
+ .process = amdgpu_vcn_process_poison_irq,
+};
+
/**
* vcn_v4_0_set_irq_funcs - set VCN block interrupt irq functions
*
@@ -2041,6 +2065,9 @@ static void vcn_v4_0_set_irq_funcs(struct amdgpu_device *adev)
adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
adev->vcn.inst[i].irq.funcs = &vcn_v4_0_irq_funcs;
+
+ adev->vcn.inst[i].ras_poison_irq.num_types = adev->vcn.num_enc_rings + 1;
+ adev->vcn.inst[i].ras_poison_irq.funcs = &vcn_v4_0_ras_irq_funcs;
}
}
@@ -2114,6 +2141,7 @@ const struct amdgpu_ras_block_hw_ops vcn_v4_0_ras_hw_ops = {
static struct amdgpu_vcn_ras vcn_v4_0_ras = {
.ras_block = {
.hw_ops = &vcn_v4_0_ras_hw_ops,
+ .ras_late_init = amdgpu_vcn_ras_late_init,
},
};
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 531f173ade2d..c0360dbebd4b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -542,8 +542,15 @@ static u32 vi_get_xclk(struct amdgpu_device *adev)
u32 reference_clock = adev->clock.spll.reference_freq;
u32 tmp;
- if (adev->flags & AMD_IS_APU)
- return reference_clock;
+ if (adev->flags & AMD_IS_APU) {
+ switch (adev->asic_type) {
+ case CHIP_STONEY:
+ /* vbios says 48MHz, but the actual frequency is 100MHz */

+ return 10000;
+ default:
+ return reference_clock;
+ }
+ }
tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 8b4b186c57f5..7acd73e5004f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2479,20 +2479,25 @@ static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
if (acrtc && state->stream_status[i].plane_count != 0) {
irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
- DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
- acrtc->crtc_id, enable ? "en" : "dis", rc);
if (rc)
DRM_WARN("Failed to %s pflip interrupts\n",
enable ? "enable" : "disable");
if (enable) {
- rc = amdgpu_dm_crtc_enable_vblank(&acrtc->base);
- if (rc)
- DRM_WARN("Failed to enable vblank interrupts\n");
- } else {
- amdgpu_dm_crtc_disable_vblank(&acrtc->base);
- }
+ if (amdgpu_dm_crtc_vrr_active(to_dm_crtc_state(acrtc->base.state)))
+ rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, true);
+ } else
+ rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, false);
+
+ if (rc)
+ DRM_WARN("Failed to %sable vupdate interrupt\n", enable ? "en" : "dis");
+ irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
+ /* During gpu-reset we disable and then enable vblank irq, so
+ * don't use amdgpu_irq_get/put() to avoid refcount change.
+ */
+ if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
+ DRM_WARN("Failed to %sable vblank interrupt\n", enable ? "en" : "dis");
}
}
@@ -2852,7 +2857,7 @@ static int dm_resume(void *handle)
* this is the case when traversing through already created
* MST connectors, should be skipped
*/
- if (aconnector->dc_link->type == dc_connection_mst_branch)
+ if (aconnector && aconnector->mst_root)
continue;
mutex_lock(&aconnector->hpd_lock);
@@ -6737,7 +6742,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
int clock, bpp = 0;
bool is_y420 = false;
- if (!aconnector->mst_output_port || !aconnector->dc_sink)
+ if (!aconnector->mst_output_port)
return 0;
mst_port = aconnector->mst_output_port;
@@ -7191,7 +7196,13 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
drm_add_modes_noedid(connector, 1920, 1080);
} else {
amdgpu_dm_connector_ddc_get_modes(connector, edid);
- amdgpu_dm_connector_add_common_modes(encoder, connector);
+ /* most eDP supports only timings from its edid,
+ * usually only detailed timings are available
+ * from eDP edid. timings which are not from edid
+ * may damage eDP
+ */
+ if (connector->connector_type != DRM_MODE_CONNECTOR_eDP)
+ amdgpu_dm_connector_add_common_modes(encoder, connector);
amdgpu_dm_connector_add_freesync_modes(connector, edid);
}
amdgpu_dm_fbc_init(connector);
@@ -8193,6 +8204,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
bundle->stream_update.abm_level = &acrtc_state->abm_level;
+ mutex_lock(&dm->dc_lock);
+ if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
+ acrtc_state->stream->link->psr_settings.psr_allow_active)
+ amdgpu_dm_psr_disable(acrtc_state->stream);
+ mutex_unlock(&dm->dc_lock);
+
/*
* If FreeSync state on the stream has changed then we need to
* re-adjust the min/max bounds now that DC doesn't handle this
@@ -8206,10 +8223,6 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
}
mutex_lock(&dm->dc_lock);
- if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
- acrtc_state->stream->link->psr_settings.psr_allow_active)
- amdgpu_dm_psr_disable(acrtc_state->stream);
-
update_planes_and_stream_adapter(dm->dc,
acrtc_state->update_type,
planes_count,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index e3762e806617..440fc0869a34 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -146,7 +146,6 @@ static void vblank_control_worker(struct work_struct *work)
static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
{
- enum dc_irq_source irq_source;
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
struct amdgpu_device *adev = drm_to_adev(crtc->dev);
struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
@@ -169,18 +168,9 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
if (rc)
return rc;
- if (amdgpu_in_reset(adev)) {
- irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
- /* During gpu-reset we disable and then enable vblank irq, so
- * don't use amdgpu_irq_get/put() to avoid refcount change.
- */
- if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
- rc = -EBUSY;
- } else {
- rc = (enable)
- ? amdgpu_irq_get(adev, &adev->crtc_irq, acrtc->crtc_id)
- : amdgpu_irq_put(adev, &adev->crtc_irq, acrtc->crtc_id);
- }
+ rc = (enable)
+ ? amdgpu_irq_get(adev, &adev->crtc_irq, acrtc->crtc_id)
+ : amdgpu_irq_put(adev, &adev->crtc_irq, acrtc->crtc_id);
if (rc)
return rc;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 52564b93f7eb..7cde67b7f0c3 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1981,6 +1981,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
return result;
}
+static bool commit_minimal_transition_state(struct dc *dc,
+ struct dc_state *transition_base_context);
+
/**
* dc_commit_streams - Commit current stream state
*
@@ -2002,6 +2005,8 @@ enum dc_status dc_commit_streams(struct dc *dc,
struct dc_state *context;
enum dc_status res = DC_OK;
struct dc_validation_set set[MAX_STREAMS] = {0};
+ struct pipe_ctx *pipe;
+ bool handle_exit_odm2to1 = false;
if (dc->ctx->dce_environment == DCE_ENV_VIRTUAL_HW)
return res;
@@ -2026,6 +2031,22 @@ enum dc_status dc_commit_streams(struct dc *dc,
}
}
+ /* Check for the case where we are going from ODM 2:1 to the
+ * max-pipe scenario. In that case, call
+ * commit_minimal_transition_state() to exit ODM 2:1 first,
+ * before processing the new streams.
+ */
+ if (stream_count == dc->res_pool->pipe_count) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe->next_odm_pipe)
+ handle_exit_odm2to1 = true;
+ }
+ }
+
+ if (handle_exit_odm2to1)
+ res = commit_minimal_transition_state(dc, dc->current_state);
+
context = dc_create_state(dc);
if (!context)
goto context_alloc_fail;
@@ -3872,6 +3893,7 @@ static bool commit_minimal_transition_state(struct dc *dc,
unsigned int i, j;
unsigned int pipe_in_use = 0;
bool subvp_in_use = false;
+ bool odm_in_use = false;
if (!transition_context)
return false;
@@ -3900,6 +3922,18 @@ static bool commit_minimal_transition_state(struct dc *dc,
}
}
+ /* If ODM is enabled and we are adding or removing planes from any ODM
+ * pipe, we must use the minimal transition.
+ */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream && pipe->next_odm_pipe) {
+ odm_in_use = true;
+ break;
+ }
+ }
+
/* When the OS adds a new surface while all of the pipes are in use by the
* odm combine and mpc split features, it needs commit_minimal_transition_state
* to transition safely. After the OS exits MPO, it goes back to using odm and mpc split with all of the pipes; we need
@@ -3908,7 +3942,7 @@ static bool commit_minimal_transition_state(struct dc *dc,
* Reduce the scenarios to use dc_commit_state_no_check in the stage of flip. Especially
* enter/exit MPO when DCN still have enough resources.
*/
- if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use) {
+ if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use && !odm_in_use) {
dc_release_state(transition_context);
return true;
}
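Both additions to dc.c detect active ODM 2:1 the same way: walk the pipe contexts and look for a non-NULL next_odm_pipe (dc_commit_streams() additionally requires stream_count to equal pipe_count before forcing the minimal transition). A sketch of that scan over a simplified pipe array, with the struct pared down to the two fields the check needs:

#include <stdbool.h>
#include <stdio.h>

struct pipe_ctx {
	void *stream;                   /* non-NULL when the pipe is in use */
	struct pipe_ctx *next_odm_pipe; /* non-NULL when ODM-combined */
};

static bool odm_in_use(const struct pipe_ctx *pipes, int pipe_count)
{
	for (int i = 0; i < pipe_count; i++)
		if (pipes[i].stream && pipes[i].next_odm_pipe)
			return true;
	return false;
}

int main(void)
{
	int dummy;
	struct pipe_ctx pipes[4] = {0};

	pipes[0].stream = &dummy;
	pipes[0].next_odm_pipe = &pipes[1];
	printf("odm in use: %d\n", odm_in_use(pipes, 4)); /* 1 */
	return 0;
}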
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 117d80cb36fb..fe1551393b26 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1446,6 +1446,26 @@ static int acquire_first_split_pipe(
split_pipe->stream = stream;
return i;
+ } else if (split_pipe->prev_odm_pipe &&
+ split_pipe->prev_odm_pipe->plane_state == split_pipe->plane_state) {
+ split_pipe->prev_odm_pipe->next_odm_pipe = split_pipe->next_odm_pipe;
+ if (split_pipe->next_odm_pipe)
+ split_pipe->next_odm_pipe->prev_odm_pipe = split_pipe->prev_odm_pipe;
+
+ if (split_pipe->prev_odm_pipe->plane_state)
+ resource_build_scaling_params(split_pipe->prev_odm_pipe);
+
+ memset(split_pipe, 0, sizeof(*split_pipe));
+ split_pipe->stream_res.tg = pool->timing_generators[i];
+ split_pipe->plane_res.hubp = pool->hubps[i];
+ split_pipe->plane_res.ipp = pool->ipps[i];
+ split_pipe->plane_res.dpp = pool->dpps[i];
+ split_pipe->stream_res.opp = pool->opps[i];
+ split_pipe->plane_res.mpcc_inst = pool->dpps[i]->inst;
+ split_pipe->pipe_idx = i;
+
+ split_pipe->stream = stream;
+ return i;
}
}
return -1;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 422fbf79da64..5403e9399a46 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -2113,15 +2113,6 @@ void dcn20_optimize_bandwidth(
if (hubbub->funcs->program_compbuf_size)
hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, true);
- if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
- dc_dmub_srv_p_state_delegate(dc,
- true, context);
- context->bw_ctx.bw.dcn.clk.p_state_change_support = true;
- dc->clk_mgr->clks.fw_based_mclk_switching = true;
- } else {
- dc->clk_mgr->clks.fw_based_mclk_switching = false;
- }
-
dc->clk_mgr->funcs->update_clocks(
dc->clk_mgr,
context,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
index 8263a07f265f..32121db2851e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
@@ -983,36 +983,13 @@ void dcn30_set_disp_pattern_generator(const struct dc *dc,
}
void dcn30_prepare_bandwidth(struct dc *dc,
- struct dc_state *context)
+ struct dc_state *context)
{
- bool p_state_change_support = context->bw_ctx.bw.dcn.clk.p_state_change_support;
- /* Any transition into an FPO config should disable MCLK switching first to avoid
- * driver and FW P-State synchronization issues.
- */
- if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) {
- dc->optimized_required = true;
- context->bw_ctx.bw.dcn.clk.p_state_change_support = false;
- }
-
if (dc->clk_mgr->dc_mode_softmax_enabled)
if (dc->clk_mgr->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 &&
context->bw_ctx.bw.dcn.clk.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz);
dcn20_prepare_bandwidth(dc, context);
- /*
- * enabled -> enabled: do not disable
- * enabled -> disabled: disable
- * disabled -> enabled: don't care
- * disabled -> disabled: don't care
- */
- if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching)
- dc_dmub_srv_p_state_delegate(dc, false, context);
-
- if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) {
- /* After disabling P-State, restore the original value to ensure we get the correct P-State
- * on the next optimize. */
- context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support;
- }
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index 47beb4ea779d..0c4c3208def1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -138,7 +138,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc = {
.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
- .pct_ideal_sdp_bw_after_urgent = 100.0,
+ .pct_ideal_sdp_bw_after_urgent = 90.0,
.pct_ideal_fabric_bw_after_urgent = 67.0,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 20.0,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0, // N/A, for now keep as is until DML implemented
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
index a131e30fd7d6..d471d58aba92 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
@@ -980,6 +980,11 @@ static bool detect_link_and_local_sink(struct dc_link *link,
(link->dpcd_caps.dongle_type !=
DISPLAY_DONGLE_DP_HDMI_CONVERTER))
converter_disable_audio = true;
+
+ /* Limit the link rate to HBR3 for DPIA until USB4 V2 is implemented */
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
+ link->reported_link_cap.link_rate > LINK_RATE_HIGH3)
+ link->reported_link_cap.link_rate = LINK_RATE_HIGH3;
break;
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
index d4b7da526f0a..e8b2fc4002a5 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_validation.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
@@ -359,5 +359,8 @@ bool link_validate_dpia_bandwidth(const struct dc_stream_state *stream, const un
link[i] = stream[i].link;
bw_needed[i] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing);
}
+
+ ret = dpia_validate_usb4_bw(link, bw_needed, num_streams);
+
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 58c2246918fd..f4f40459f22b 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -871,13 +871,11 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
}
if (ret == -ENOENT) {
size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
- if (size > 0) {
- size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf + size);
- size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf + size);
- size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf + size);
- size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf + size);
- size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf + size);
- }
+ size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf + size);
+ size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf + size);
+ size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf + size);
+ size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf + size);
+ size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf + size);
}
if (size == 0)
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
index d6d9e3b1b2c0..02e69ccff3ba 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
@@ -6925,23 +6925,6 @@ static int si_dpm_enable(struct amdgpu_device *adev)
return 0;
}
-static int si_set_temperature_range(struct amdgpu_device *adev)
-{
- int ret;
-
- ret = si_thermal_enable_alert(adev, false);
- if (ret)
- return ret;
- ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
- if (ret)
- return ret;
- ret = si_thermal_enable_alert(adev, true);
- if (ret)
- return ret;
-
- return ret;
-}
-
static void si_dpm_disable(struct amdgpu_device *adev)
{
struct rv7xx_power_info *pi = rv770_get_pi(adev);
@@ -7626,18 +7609,6 @@ static int si_dpm_process_interrupt(struct amdgpu_device *adev,
static int si_dpm_late_init(void *handle)
{
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (!adev->pm.dpm_enabled)
- return 0;
-
- ret = si_set_temperature_range(adev);
- if (ret)
- return ret;
-#if 0 //TODO ?
- si_dpm_powergate_uvd(adev, true);
-#endif
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 5633c5797e85..2ddf5198e5c4 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -733,6 +733,24 @@ static int smu_late_init(void *handle)
return ret;
}
+ /*
+ * Explicitly notify PMFW of the power mode the system is in,
+ * since PMFW may have booted the ASIC in a different mode.
+ * For ASICs supporting AC/DC switching via GPIO, PMFW handles
+ * the switch automatically; driver involvement is unnecessary.
+ */
+ if (!smu->dc_controlled_by_gpio) {
+ ret = smu_set_power_source(smu,
+ adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
+ SMU_POWER_SOURCE_DC);
+ if (ret) {
+ dev_err(adev->dev, "Failed to switch to %s mode!\n",
+ adev->pm.ac_power ? "AC" : "DC");
+ return ret;
+ }
+ }
+
if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 1)) ||
(adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 3)))
return 0;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index c4000518dc56..275f708db636 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -3413,26 +3413,8 @@ static int navi10_post_smu_init(struct smu_context *smu)
return 0;
ret = navi10_run_umc_cdr_workaround(smu);
- if (ret) {
+ if (ret)
dev_err(adev->dev, "Failed to apply umc cdr workaround!\n");
- return ret;
- }
-
- if (!smu->dc_controlled_by_gpio) {
- /*
- * For Navi1X, manually switch it to AC mode as PMFW
- * may boot it with DC mode.
- */
- ret = smu_v11_0_set_power_source(smu,
- adev->pm.ac_power ?
- SMU_POWER_SOURCE_AC :
- SMU_POWER_SOURCE_DC);
- if (ret) {
- dev_err(adev->dev, "Failed to switch to %s mode!\n",
- adev->pm.ac_power ? "AC" : "DC");
- return ret;
- }
- }
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 75f18681e984..85d53597eb07 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -2067,33 +2067,94 @@ static int sienna_cichlid_display_disable_memory_clock_switch(struct smu_context
return ret;
}
+static void sienna_cichlid_get_override_pcie_settings(struct smu_context *smu,
+ uint32_t *gen_speed_override,
+ uint32_t *lane_width_override)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ *gen_speed_override = 0xff;
+ *lane_width_override = 0xff;
+
+ switch (adev->pdev->device) {
+ case 0x73A0:
+ case 0x73A1:
+ case 0x73A2:
+ case 0x73A3:
+ case 0x73AB:
+ case 0x73AE:
+ /* Bits 7:0: PCIe lane width; values 1 to 7 correspond to x1 to x32 */
+ *lane_width_override = 6;
+ break;
+ case 0x73E0:
+ case 0x73E1:
+ case 0x73E3:
+ *lane_width_override = 4;
+ break;
+ case 0x7420:
+ case 0x7421:
+ case 0x7422:
+ case 0x7423:
+ case 0x7424:
+ *lane_width_override = 3;
+ break;
+ default:
+ break;
+ }
+}
+
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+
static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
uint32_t pcie_gen_cap,
uint32_t pcie_width_cap)
{
struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
-
- uint32_t smu_pcie_arg;
+ struct smu_11_0_pcie_table *pcie_table = &dpm_context->dpm_tables.pcie_table;
+ uint32_t gen_speed_override, lane_width_override;
uint8_t *table_member1, *table_member2;
+ uint32_t min_gen_speed, max_gen_speed;
+ uint32_t min_lane_width, max_lane_width;
+ uint32_t smu_pcie_arg;
int ret, i;
GET_PPTABLE_MEMBER(PcieGenSpeed, &table_member1);
GET_PPTABLE_MEMBER(PcieLaneCount, &table_member2);
- /* lclk dpm table setup */
- for (i = 0; i < MAX_PCIE_CONF; i++) {
- dpm_context->dpm_tables.pcie_table.pcie_gen[i] = table_member1[i];
- dpm_context->dpm_tables.pcie_table.pcie_lane[i] = table_member2[i];
+ sienna_cichlid_get_override_pcie_settings(smu,
+ &gen_speed_override,
+ &lane_width_override);
+
+ /* PCIE gen speed override */
+ if (gen_speed_override != 0xff) {
+ min_gen_speed = MIN(pcie_gen_cap, gen_speed_override);
+ max_gen_speed = MIN(pcie_gen_cap, gen_speed_override);
+ } else {
+ min_gen_speed = MAX(0, table_member1[0]);
+ max_gen_speed = MIN(pcie_gen_cap, table_member1[1]);
+ min_gen_speed = min_gen_speed > max_gen_speed ?
+ max_gen_speed : min_gen_speed;
}
+ pcie_table->pcie_gen[0] = min_gen_speed;
+ pcie_table->pcie_gen[1] = max_gen_speed;
+
+ /* PCIE lane width override */
+ if (lane_width_override != 0xff) {
+ min_lane_width = MIN(pcie_width_cap, lane_width_override);
+ max_lane_width = MIN(pcie_width_cap, lane_width_override);
+ } else {
+ min_lane_width = MAX(1, table_member2[0]);
+ max_lane_width = MIN(pcie_width_cap, table_member2[1]);
+ min_lane_width = min_lane_width > max_lane_width ?
+ max_lane_width : min_lane_width;
+ }
+ pcie_table->pcie_lane[0] = min_lane_width;
+ pcie_table->pcie_lane[1] = max_lane_width;
for (i = 0; i < NUM_LINK_LEVELS; i++) {
- smu_pcie_arg = (i << 16) |
- ((table_member1[i] <= pcie_gen_cap) ?
- (table_member1[i] << 8) :
- (pcie_gen_cap << 8)) |
- ((table_member2[i] <= pcie_width_cap) ?
- table_member2[i] :
- pcie_width_cap);
+ smu_pcie_arg = (i << 16 |
+ pcie_table->pcie_gen[i] << 8 |
+ pcie_table->pcie_lane[i]);
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_OverridePcieParameters,
@@ -2101,11 +2162,6 @@ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
NULL);
if (ret)
return ret;
-
- if (table_member1[i] > pcie_gen_cap)
- dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pcie_gen_cap;
- if (table_member2[i] > pcie_width_cap)
- dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pcie_width_cap;
}
return 0;
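The rewritten sienna_cichlid_update_pcie_parameters() clamps the pptable's [min, max] gen speed and lane width against the platform caps and, for the listed device IDs, a hard per-SKU override, then packs each link level for SMU_MSG_OverridePcieParameters as (level << 16 | gen << 8 | lane). A sketch of the clamp-and-pack logic with illustrative values:

#include <stdint.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

/* Clamp a [min, max] range from the pptable against a platform cap and
 * an optional per-SKU override (0xff means "no override"). */
static void clamp_range(uint32_t cap, uint32_t override,
			uint32_t tbl_min, uint32_t tbl_max,
			uint32_t *out_min, uint32_t *out_max)
{
	if (override != 0xff) {
		*out_min = MIN(cap, override);
		*out_max = MIN(cap, override);
	} else {
		*out_min = MAX(0, tbl_min);
		*out_max = MIN(cap, tbl_max);
		if (*out_min > *out_max)
			*out_min = *out_max;
	}
}

/* Pack one link level: level << 16 | gen << 8 | lane. */
static uint32_t pack_pcie_arg(uint32_t level, uint32_t gen, uint32_t lane)
{
	return (level << 16) | (gen << 8) | lane;
}

int main(void)
{
	uint32_t gmin, gmax;

	clamp_range(3 /* cap: gen4 */, 0xff, 0, 3, &gmin, &gmax);
	printf("gen range %u-%u, arg %#x\n", gmin, gmax,
	       pack_pcie_arg(1, gmax, 6));
	return 0;
}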
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index 7433dcaa16e0..067b4e0b026c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -582,7 +582,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
DpmClocks_t *clk_table = smu->smu_table.clocks_table;
SmuMetrics_legacy_t metrics;
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
- int i, size = 0, ret = 0;
+ int i, idx, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0;
bool cur_value_match_level = false;
@@ -656,7 +656,8 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
case SMU_MCLK:
case SMU_FCLK:
for (i = 0; i < count; i++) {
- ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value);
+ idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, idx, &value);
if (ret)
return ret;
if (!value)
@@ -683,7 +684,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
DpmClocks_t *clk_table = smu->smu_table.clocks_table;
SmuMetrics_t metrics;
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
- int i, size = 0, ret = 0;
+ int i, idx, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0;
bool cur_value_match_level = false;
uint32_t min, max;
@@ -765,7 +766,8 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
case SMU_MCLK:
case SMU_FCLK:
for (i = 0; i < count; i++) {
- ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value);
+ idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, idx, &value);
if (ret)
return ret;
if (!value)
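This hunk and the renoir/smu_v13_0_4/smu_v13_0_5/yellow_carp hunks below are the same one-line fix: the firmware reports the MCLK/FCLK DPM tables highest-frequency-first, so printing them in ascending sysfs order requires walking the table backwards. A sketch with an illustrative table:

#include <stdio.h>

int main(void)
{
	/* DPM table as the firmware reports it: highest frequency first. */
	const unsigned int fclk_mhz[] = {1600, 1200, 800, 400};
	const int count = 4;

	/* Print lowest-to-highest, as expected in pp_dpm_fclk. */
	for (int i = 0; i < count; i++) {
		int idx = count - i - 1; /* the fix: walk the table backwards */
		printf("%d: %u MHz\n", i, fclk_mhz[idx]);
	}
	return 0;
}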
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index 5cdc07165480..8a8ba25c9ad7 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -494,7 +494,7 @@ static int renoir_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
static int renoir_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
- int i, size = 0, ret = 0;
+ int i, idx, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0;
SmuMetrics_t metrics;
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
@@ -594,7 +594,8 @@ static int renoir_print_clk_levels(struct smu_context *smu,
case SMU_VCLK:
case SMU_DCLK:
for (i = 0; i < count; i++) {
- ret = renoir_get_dpm_clk_limited(smu, clk_type, i, &value);
+ idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
+ ret = renoir_get_dpm_clk_limited(smu, clk_type, idx, &value);
if (ret)
return ret;
if (!value)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index 393c6a7b9609..ca379181081c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -573,11 +573,11 @@ int smu_v13_0_init_power(struct smu_context *smu)
if (smu_power->power_context || smu_power->power_context_size != 0)
return -EINVAL;
- smu_power->power_context = kzalloc(sizeof(struct smu_13_0_dpm_context),
+ smu_power->power_context = kzalloc(sizeof(struct smu_13_0_power_context),
GFP_KERNEL);
if (!smu_power->power_context)
return -ENOMEM;
- smu_power->power_context_size = sizeof(struct smu_13_0_dpm_context);
+ smu_power->power_context_size = sizeof(struct smu_13_0_power_context);
return 0;
}
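The smu_v13_0_init_power() change fixes a sizeof mismatch: the buffer holds a struct smu_13_0_power_context but was being allocated (and size-recorded) as a struct smu_13_0_dpm_context. The kernel's preferred kzalloc(sizeof(*ptr)) spelling makes this class of bug impossible, since the size always tracks the pointer's type. A plain-C sketch of the idiom, with malloc/free standing in for kzalloc/kfree:

#include <stdlib.h>
#include <string.h>

struct power_context {
	unsigned int power_source;
	unsigned int power_gate_flags;
};

int init_power(struct power_context **out)
{
	/* sizeof(*ctx) always tracks the pointee type, so renaming or
	 * retyping the pointer can never leave a stale struct name behind. */
	struct power_context *ctx = malloc(sizeof(*ctx));

	if (!ctx)
		return -1;
	memset(ctx, 0, sizeof(*ctx));
	*out = ctx;
	return 0;
}

int main(void)
{
	struct power_context *ctx = NULL;

	if (init_power(&ctx))
		return 1;
	free(ctx);
	return 0;
}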
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 09405ef1e3c8..08577d1b84ec 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -1696,10 +1696,39 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
}
}
- /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
- workload_type = smu_cmn_to_asic_specific_index(smu,
+ if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE &&
+ (((smu->adev->pdev->device == 0x744C) && (smu->adev->pdev->revision == 0xC8)) ||
+ ((smu->adev->pdev->device == 0x744C) && (smu->adev->pdev->revision == 0xCC)))) {
+ ret = smu_cmn_update_table(smu,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+ WORKLOAD_PPLIB_COMPUTE_BIT,
+ (void *)(&activity_monitor_external),
+ false);
+ if (ret) {
+ dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
+ return ret;
+ }
+
+ ret = smu_cmn_update_table(smu,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+ WORKLOAD_PPLIB_CUSTOM_BIT,
+ (void *)(&activity_monitor_external),
+ true);
+ if (ret) {
+ dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
+ return ret;
+ }
+
+ workload_type = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_WORKLOAD,
+ PP_SMC_POWER_PROFILE_CUSTOM);
+ } else {
+ /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
+ workload_type = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_WORKLOAD,
smu->power_profile_mode);
+ }
+
if (workload_type < 0)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
index 8fa9a36c38b6..6d9760eac16d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
@@ -478,7 +478,7 @@ static int smu_v13_0_4_get_dpm_level_count(struct smu_context *smu,
static int smu_v13_0_4_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
- int i, size = 0, ret = 0;
+ int i, idx, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0;
uint32_t min, max;
@@ -512,7 +512,8 @@ static int smu_v13_0_4_print_clk_levels(struct smu_context *smu,
break;
for (i = 0; i < count; i++) {
- ret = smu_v13_0_4_get_dpm_freq_by_index(smu, clk_type, i, &value);
+ idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
+ ret = smu_v13_0_4_get_dpm_freq_by_index(smu, clk_type, idx, &value);
if (ret)
break;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
index 66445964efbd..0081fa607e02 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
@@ -866,7 +866,7 @@ out:
static int smu_v13_0_5_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
- int i, size = 0, ret = 0;
+ int i, idx, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0;
uint32_t min = 0, max = 0;
@@ -898,7 +898,8 @@ static int smu_v13_0_5_print_clk_levels(struct smu_context *smu,
goto print_clk_out;
for (i = 0; i < count; i++) {
- ret = smu_v13_0_5_get_dpm_freq_by_index(smu, clk_type, i, &value);
+ idx = (clk_type == SMU_MCLK) ? (count - i - 1) : i;
+ ret = smu_v13_0_5_get_dpm_freq_by_index(smu, clk_type, idx, &value);
if (ret)
goto print_clk_out;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index 3d9ff46706fb..bba621615abf 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -125,6 +125,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0),
MSG_MAP(AllowGpo, PPSMC_MSG_SetGpoAllow, 0),
MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0),
+ MSG_MAP(NotifyPowerSource, PPSMC_MSG_NotifyPowerSource, 0),
};
static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = {
@@ -1770,6 +1771,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.enable_mgpu_fan_boost = smu_v13_0_7_enable_mgpu_fan_boost,
.get_power_limit = smu_v13_0_7_get_power_limit,
.set_power_limit = smu_v13_0_set_power_limit,
+ .set_power_source = smu_v13_0_set_power_source,
.get_power_profile_mode = smu_v13_0_7_get_power_profile_mode,
.set_power_profile_mode = smu_v13_0_7_set_power_profile_mode,
.set_tool_table_location = smu_v13_0_set_tool_table_location,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
index 04e56b0b3033..798f36cfcebd 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
@@ -1000,7 +1000,7 @@ out:
static int yellow_carp_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
- int i, size = 0, ret = 0;
+ int i, idx, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0;
uint32_t min, max;
@@ -1033,7 +1033,8 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu,
goto print_clk_out;
for (i = 0; i < count; i++) {
- ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, i, &value);
+ idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
+ ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, idx, &value);
if (ret)
goto print_clk_out;
diff --git a/drivers/gpu/drm/ast/ast_dp.c b/drivers/gpu/drm/ast/ast_dp.c
index fbb070f63e36..6dc1a09504e1 100644
--- a/drivers/gpu/drm/ast/ast_dp.c
+++ b/drivers/gpu/drm/ast/ast_dp.c
@@ -119,53 +119,32 @@ err_astdp_edid_not_ready:
/*
* Launch Aspeed DP
*/
-void ast_dp_launch(struct drm_device *dev, u8 bPower)
+void ast_dp_launch(struct drm_device *dev)
{
- u32 i = 0, j = 0, WaitCount = 1;
- u8 bDPTX = 0;
+ u32 i = 0;
u8 bDPExecute = 1;
-
struct ast_device *ast = to_ast_device(dev);
- // S3 come back, need more time to wait BMC ready.
- if (bPower)
- WaitCount = 300;
-
-
- // Wait total count by different condition.
- for (j = 0; j < WaitCount; j++) {
- bDPTX = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD1, TX_TYPE_MASK);
-
- if (bDPTX)
- break;
+ // Wait up to one second, then time out.
+ while (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD1, ASTDP_MCU_FW_EXECUTING) !=
+ ASTDP_MCU_FW_EXECUTING) {
+ i++;
+ // wait 100 ms
msleep(100);
- }
- // 0xE : ASTDP with DPMCU FW handling
- if (bDPTX == ASTDP_DPMCU_TX) {
- // Wait one second then timeout.
- i = 0;
-
- while (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD1, COPROCESSOR_LAUNCH) !=
- COPROCESSOR_LAUNCH) {
- i++;
- // wait 100 ms
- msleep(100);
-
- if (i >= 10) {
- // DP would not be ready.
- bDPExecute = 0;
- break;
- }
+ if (i >= 10) {
+ // DP would not be ready.
+ bDPExecute = 0;
+ break;
}
+ }
- if (bDPExecute)
- ast->tx_chip_types |= BIT(AST_TX_ASTDP);
+ if (!bDPExecute)
+ drm_err(dev, "Timed out waiting for DPMCU to execute\n");
- ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE5,
- (u8) ~ASTDP_HOST_EDID_READ_DONE_MASK,
- ASTDP_HOST_EDID_READ_DONE);
- }
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE5,
+ (u8) ~ASTDP_HOST_EDID_READ_DONE_MASK,
+ ASTDP_HOST_EDID_READ_DONE);
}
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index a501169cddad..5498a6676f2e 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -350,9 +350,6 @@ int ast_mode_config_init(struct ast_device *ast);
#define AST_DP501_LINKRATE 0xf014
#define AST_DP501_EDID_DATA 0xf020
-/* Define for Soc scratched reg */
-#define COPROCESSOR_LAUNCH BIT(5)
-
/*
* Display Transmitter Type:
*/
@@ -480,7 +477,7 @@ struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev);
/* aspeed DP */
int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata);
-void ast_dp_launch(struct drm_device *dev, u8 bPower);
+void ast_dp_launch(struct drm_device *dev);
void ast_dp_power_on_off(struct drm_device *dev, bool no);
void ast_dp_set_on_off(struct drm_device *dev, bool no);
void ast_dp_set_mode(struct drm_crtc *crtc, struct ast_vbios_mode_info *vbios_mode);
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index f32ce29edba7..1f35438f614a 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -254,8 +254,13 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
case 0x0c:
ast->tx_chip_types = AST_TX_DP501_BIT;
}
- } else if (ast->chip == AST2600)
- ast_dp_launch(&ast->base, 0);
+ } else if (ast->chip == AST2600) {
+ if (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD1, TX_TYPE_MASK) ==
+ ASTDP_DPMCU_TX) {
+ ast->tx_chip_types = AST_TX_ASTDP_BIT;
+ ast_dp_launch(&ast->base);
+ }
+ }
/* Print stuff for diagnostic purposes */
if (ast->tx_chip_types & AST_TX_NONE_BIT)
@@ -264,6 +269,8 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
drm_info(dev, "Using Sil164 TMDS transmitter\n");
if (ast->tx_chip_types & AST_TX_DP501_BIT)
drm_info(dev, "Using DP501 DisplayPort transmitter\n");
+ if (ast->tx_chip_types & AST_TX_ASTDP_BIT)
+ drm_info(dev, "Using ASPEED DisplayPort transmitter\n");
return 0;
}
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 36374828f6c8..b3c670af6ef2 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -1647,6 +1647,8 @@ static int ast_dp501_output_init(struct ast_device *ast)
static int ast_astdp_connector_helper_get_modes(struct drm_connector *connector)
{
void *edid;
+ struct drm_device *dev = connector->dev;
+ struct ast_device *ast = to_ast_device(dev);
int succ;
int count;
@@ -1655,9 +1657,17 @@ static int ast_astdp_connector_helper_get_modes(struct drm_connector *connector)
if (!edid)
goto err_drm_connector_update_edid_property;
+ /*
+ * Protect access to I/O registers from concurrent modesetting
+ * by acquiring the I/O-register lock.
+ */
+ mutex_lock(&ast->ioregs_lock);
+
succ = ast_astdp_read_edid(connector->dev, edid);
if (succ < 0)
- goto err_kfree;
+ goto err_mutex_unlock;
+
+ mutex_unlock(&ast->ioregs_lock);
drm_connector_update_edid_property(connector, edid);
count = drm_add_edid_modes(connector, edid);
@@ -1665,7 +1675,8 @@ static int ast_astdp_connector_helper_get_modes(struct drm_connector *connector)
return count;
-err_kfree:
+err_mutex_unlock:
+ mutex_unlock(&ast->ioregs_lock);
kfree(edid);
err_drm_connector_update_edid_property:
drm_connector_update_edid_property(connector, NULL);
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index 71bb36b865fd..a005aec18a02 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -380,7 +380,8 @@ void ast_post_gpu(struct drm_device *dev)
ast_set_def_ext_reg(dev);
if (ast->chip == AST2600) {
- ast_dp_launch(dev, 1);
+ if (ast->tx_chip_types & AST_TX_ASTDP_BIT)
+ ast_dp_launch(dev);
} else if (ast->config_mode == ast_use_p2a) {
if (ast->chip == AST2500)
ast_post_chip_2500(dev);
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index 7a748785c545..4676cf2900df 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -298,6 +298,10 @@ static void ti_sn_bridge_set_refclk_freq(struct ti_sn65dsi86 *pdata)
if (refclk_lut[i] == refclk_rate)
break;
+ /* Avoid a buffer overflow; "1" is the default rate in the datasheet. */
+ if (i >= refclk_lut_size)
+ i = 1;
+
regmap_update_bits(pdata->regmap, SN_DPPLL_SRC_REG, REFCLK_FREQ_MASK,
REFCLK_FREQ(i));
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index 38dab76ae69e..e2e21ce79510 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -3404,7 +3404,7 @@ int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
/* Skip failed payloads */
if (payload->vc_start_slot == -1) {
- drm_dbg_kms(state->dev, "Part 1 of payload creation for %s failed, skipping part 2\n",
+ drm_dbg_kms(mgr->dev, "Part 1 of payload creation for %s failed, skipping part 2\n",
payload->port->connector->name);
return -EIO;
}
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 6bb1b8b27d7a..fd27f1978635 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1545,17 +1545,19 @@ static void drm_fb_helper_fill_pixel_fmt(struct fb_var_screeninfo *var,
}
}
-static void __fill_var(struct fb_var_screeninfo *var,
+static void __fill_var(struct fb_var_screeninfo *var, struct fb_info *info,
struct drm_framebuffer *fb)
{
int i;
var->xres_virtual = fb->width;
var->yres_virtual = fb->height;
- var->accel_flags = FB_ACCELF_TEXT;
+ var->accel_flags = 0;
var->bits_per_pixel = drm_format_info_bpp(fb->format, 0);
- var->height = var->width = 0;
+ var->height = info->var.height;
+ var->width = info->var.width;
+
var->left_margin = var->right_margin = 0;
var->upper_margin = var->lower_margin = 0;
var->hsync_len = var->vsync_len = 0;
@@ -1618,7 +1620,7 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
return -EINVAL;
}
- __fill_var(var, fb);
+ __fill_var(var, info, fb);
/*
* fb_pan_display() validates this, but fb_set_par() doesn't and just
@@ -2074,7 +2076,7 @@ static void drm_fb_helper_fill_var(struct fb_info *info,
info->pseudo_palette = fb_helper->pseudo_palette;
info->var.xoffset = 0;
info->var.yoffset = 0;
- __fill_var(&info->var, fb);
+ __fill_var(&info->var, info, fb);
info->var.activate = FB_ACTIVATE_NOW;
drm_fb_helper_fill_pixel_fmt(&info->var, format);
diff --git a/drivers/gpu/drm/drm_managed.c b/drivers/gpu/drm/drm_managed.c
index 4cf214de50c4..c21c3f623033 100644
--- a/drivers/gpu/drm/drm_managed.c
+++ b/drivers/gpu/drm/drm_managed.c
@@ -264,28 +264,10 @@ void drmm_kfree(struct drm_device *dev, void *data)
}
EXPORT_SYMBOL(drmm_kfree);
-static void drmm_mutex_release(struct drm_device *dev, void *res)
+void __drmm_mutex_release(struct drm_device *dev, void *res)
{
struct mutex *lock = res;
mutex_destroy(lock);
}
-
-/**
- * drmm_mutex_init - &drm_device-managed mutex_init()
- * @dev: DRM device
- * @lock: lock to be initialized
- *
- * Returns:
- * 0 on success, or a negative errno code otherwise.
- *
- * This is a &drm_device-managed version of mutex_init(). The initialized
- * lock is automatically destroyed on the final drm_dev_put().
- */
-int drmm_mutex_init(struct drm_device *dev, struct mutex *lock)
-{
- mutex_init(lock);
-
- return drmm_add_action_or_reset(dev, drmm_mutex_release, lock);
-}
-EXPORT_SYMBOL(drmm_mutex_init);
+EXPORT_SYMBOL(__drmm_mutex_release);
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index b1a38e6ce2f8..0cb646cb04ee 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -179,7 +179,7 @@ static const struct dmi_system_id orientation_data[] = {
}, { /* AYA NEO AIR */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"),
- DMI_MATCH(DMI_BOARD_NAME, "AIR"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "AIR"),
},
.driver_data = (void *)&lcd1080x1920_leftside_up,
}, { /* AYA NEO NEXT */
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index ec784e58da5c..414e585ec7dd 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -1335,7 +1335,7 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
/* Let the runqueue know that there is work to do. */
queue_work(g2d->g2d_workq, &g2d->runqueue_work);
- if (runqueue_node->async)
+ if (req->async)
goto out;
wait_for_completion(&runqueue_node->complete);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.h b/drivers/gpu/drm/exynos/exynos_drm_g2d.h
index 74ea3c26dead..1a5ae781b56c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.h
@@ -34,11 +34,11 @@ static inline int exynos_g2d_exec_ioctl(struct drm_device *dev, void *data,
return -ENODEV;
}
-int g2d_open(struct drm_device *drm_dev, struct drm_file *file)
+static inline int g2d_open(struct drm_device *drm_dev, struct drm_file *file)
{
return 0;
}
-void g2d_close(struct drm_device *drm_dev, struct drm_file *file)
+static inline void g2d_close(struct drm_device *drm_dev, struct drm_file *file)
{ }
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 4d56c8c799c5..f5e1adfcaa51 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -469,8 +469,6 @@ static int vidi_remove(struct platform_device *pdev)
if (ctx->raw_edid != (struct edid *)fake_edid_info) {
kfree(ctx->raw_edid);
ctx->raw_edid = NULL;
-
- return -EINVAL;
}
component_del(&pdev->dev, &vidi_component_ops);
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 084a483f9776..2aaaba090cc0 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -1453,6 +1453,18 @@ static u8 tgl_calc_voltage_level(int cdclk)
return 0;
}
+static u8 rplu_calc_voltage_level(int cdclk)
+{
+ if (cdclk > 556800)
+ return 3;
+ else if (cdclk > 480000)
+ return 2;
+ else if (cdclk > 312000)
+ return 1;
+ else
+ return 0;
+}
+
static void icl_readout_refclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_config *cdclk_config)
{
@@ -3242,6 +3254,13 @@ static const struct intel_cdclk_funcs mtl_cdclk_funcs = {
.calc_voltage_level = tgl_calc_voltage_level,
};
+static const struct intel_cdclk_funcs rplu_cdclk_funcs = {
+ .get_cdclk = bxt_get_cdclk,
+ .set_cdclk = bxt_set_cdclk,
+ .modeset_calc_cdclk = bxt_modeset_calc_cdclk,
+ .calc_voltage_level = rplu_calc_voltage_level,
+};
+
static const struct intel_cdclk_funcs tgl_cdclk_funcs = {
.get_cdclk = bxt_get_cdclk,
.set_cdclk = bxt_set_cdclk,
@@ -3384,14 +3403,17 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs;
dev_priv->display.cdclk.table = dg2_cdclk_table;
} else if (IS_ALDERLAKE_P(dev_priv)) {
- dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs;
/* Wa_22011320316:adl-p[a0] */
- if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
+ if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
dev_priv->display.cdclk.table = adlp_a_step_cdclk_table;
- else if (IS_ADLP_RPLU(dev_priv))
+ dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs;
+ } else if (IS_ADLP_RPLU(dev_priv)) {
dev_priv->display.cdclk.table = rplu_cdclk_table;
- else
+ dev_priv->display.funcs.cdclk = &rplu_cdclk_funcs;
+ } else {
dev_priv->display.cdclk.table = adlp_cdclk_table;
+ dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs;
+ }
} else if (IS_ROCKETLAKE(dev_priv)) {
dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs;
dev_priv->display.cdclk.table = rkl_cdclk_table;
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 3c29792137a5..0aae9a1eb3d5 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -1851,9 +1851,17 @@ static void hsw_crtc_disable(struct intel_atomic_state *state,
intel_disable_shared_dpll(old_crtc_state);
- intel_encoders_post_pll_disable(state, crtc);
+ if (!intel_crtc_is_bigjoiner_slave(old_crtc_state)) {
+ struct intel_crtc *slave_crtc;
+
+ intel_encoders_post_pll_disable(state, crtc);
- intel_dmc_disable_pipe(i915, crtc->pipe);
+ intel_dmc_disable_pipe(i915, crtc->pipe);
+
+ for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc,
+ intel_crtc_bigjoiner_slave_pipes(old_crtc_state))
+ intel_dmc_disable_pipe(i915, slave_crtc->pipe);
+ }
}
static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
index 705915d50565..524bd6da260c 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
@@ -129,7 +129,7 @@ static int intel_dp_aux_sync_len(void)
static int intel_dp_aux_fw_sync_len(void)
{
- int precharge = 16; /* 10-16 */
+ int precharge = 10; /* 10-16 */
int preamble = 8;
return precharge + preamble;
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 650232c4892b..b183efab04a1 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -204,8 +204,6 @@ bool intel_hdcp2_capable(struct intel_connector *connector)
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
- struct intel_gt *gt = dev_priv->media_gt;
- struct intel_gsc_uc *gsc = &gt->uc.gsc;
bool capable = false;
/* I915 support for HDCP2.2 */
@@ -213,9 +211,13 @@ bool intel_hdcp2_capable(struct intel_connector *connector)
return false;
/* If MTL+ make sure gsc is loaded and proxy is setup */
- if (intel_hdcp_gsc_cs_required(dev_priv))
- if (!intel_uc_fw_is_running(&gsc->fw))
+ if (intel_hdcp_gsc_cs_required(dev_priv)) {
+ struct intel_gt *gt = dev_priv->media_gt;
+ struct intel_gsc_uc *gsc = gt ? &gt->uc.gsc : NULL;
+
+ if (!gsc || !intel_uc_fw_is_running(&gsc->fw))
return false;
+ }
/* MEI/GSC interface is solid depending on which is used */
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index a81fa6a20f5a..7b516b1a4915 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -346,8 +346,10 @@ static int live_parallel_switch(void *arg)
continue;
ce = intel_context_create(data[m].ce[0]->engine);
- if (IS_ERR(ce))
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
goto out;
+ }
err = intel_context_pin(ce);
if (err) {
@@ -367,8 +369,10 @@ static int live_parallel_switch(void *arg)
worker = kthread_create_worker(0, "igt/parallel:%s",
data[n].ce[0]->engine->name);
- if (IS_ERR(worker))
+ if (IS_ERR(worker)) {
+ err = PTR_ERR(worker);
goto out;
+ }
data[n].worker = worker;
}
@@ -397,8 +401,10 @@ static int live_parallel_switch(void *arg)
}
}
- if (igt_live_test_end(&t))
- err = -EIO;
+ if (igt_live_test_end(&t)) {
+ err = err ?: -EIO;
+ break;
+ }
}
out:
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
index 736b89a8ecf5..4202df5b8c12 100644
--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -1530,8 +1530,8 @@ static int live_busywait_preempt(void *arg)
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
enum intel_engine_id id;
- int err = -ENOMEM;
u32 *map;
+ int err;
/*
* Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can
@@ -1539,13 +1539,17 @@ static int live_busywait_preempt(void *arg)
*/
ctx_hi = kernel_context(gt->i915, NULL);
- if (!ctx_hi)
- return -ENOMEM;
+ if (IS_ERR(ctx_hi))
+ return PTR_ERR(ctx_hi);
+
ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
ctx_lo = kernel_context(gt->i915, NULL);
- if (!ctx_lo)
+ if (IS_ERR(ctx_lo)) {
+ err = PTR_ERR(ctx_lo);
goto err_ctx_hi;
+ }
+
ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 050b8ae7b8e7..3035cba2c6a2 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -877,12 +877,17 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
stream->oa_buffer.last_ctx_id = ctx_id;
}
- /*
- * Clear out the report id and timestamp as a means to detect unlanded
- * reports.
- */
- oa_report_id_clear(stream, report32);
- oa_timestamp_clear(stream, report32);
+ if (is_power_of_2(report_size)) {
+ /*
+ * Clear out the report id and timestamp as a means
+ * to detect unlanded reports.
+ */
+ oa_report_id_clear(stream, report32);
+ oa_timestamp_clear(stream, report32);
+ } else {
+ /* Zero out the entire report */
+ memset(report32, 0, report_size);
+ }
}
if (start_offset != *offset) {
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index ff003403fbbc..ffd91a5ee299 100644
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -165,7 +165,7 @@ int lima_sched_context_init(struct lima_sched_pipe *pipe,
void lima_sched_context_fini(struct lima_sched_pipe *pipe,
struct lima_sched_context *context)
{
- drm_sched_entity_fini(&context->base);
+ drm_sched_entity_destroy(&context->base);
}
struct dma_fence *lima_sched_context_queue_task(struct lima_sched_task *task)
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 0f2dd26755df..af3ce5a6a636 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -642,6 +642,11 @@ void mgag200_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_
if (funcs->pixpllc_atomic_update)
funcs->pixpllc_atomic_update(crtc, old_state);
+ if (crtc_state->gamma_lut)
+ mgag200_crtc_set_gamma(mdev, format, crtc_state->gamma_lut->data);
+ else
+ mgag200_crtc_set_gamma_linear(mdev, format);
+
mgag200_enable_display(mdev);
if (funcs->enable_vidrst)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index e16b4b3f8535..8914992378f2 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -1526,8 +1526,6 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
if (!pdev)
return -ENODEV;
- mutex_init(&gmu->lock);
-
gmu->dev = &pdev->dev;
of_dma_configure(gmu->dev, node, true);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 9fb214f150dd..52da3795b175 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -1981,6 +1981,8 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
adreno_gpu = &a6xx_gpu->base;
gpu = &adreno_gpu->base;
+ mutex_init(&a6xx_gpu->gmu.lock);
+
adreno_gpu->registers = NULL;
/*
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
index 2b3ae84057df..bdcd554fc8a8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
@@ -98,17 +98,17 @@ static const struct dpu_sspp_cfg msm8998_sspp[] = {
static const struct dpu_lm_cfg msm8998_lm[] = {
LM_BLK("lm_0", LM_0, 0x44000, MIXER_MSM8998_MASK,
- &msm8998_lm_sblk, PINGPONG_0, LM_2, DSPP_0),
+ &msm8998_lm_sblk, PINGPONG_0, LM_1, DSPP_0),
LM_BLK("lm_1", LM_1, 0x45000, MIXER_MSM8998_MASK,
- &msm8998_lm_sblk, PINGPONG_1, LM_5, DSPP_1),
+ &msm8998_lm_sblk, PINGPONG_1, LM_0, DSPP_1),
LM_BLK("lm_2", LM_2, 0x46000, MIXER_MSM8998_MASK,
- &msm8998_lm_sblk, PINGPONG_2, LM_0, 0),
+ &msm8998_lm_sblk, PINGPONG_2, LM_5, 0),
LM_BLK("lm_3", LM_3, 0x47000, MIXER_MSM8998_MASK,
&msm8998_lm_sblk, PINGPONG_MAX, 0, 0),
LM_BLK("lm_4", LM_4, 0x48000, MIXER_MSM8998_MASK,
&msm8998_lm_sblk, PINGPONG_MAX, 0, 0),
LM_BLK("lm_5", LM_5, 0x49000, MIXER_MSM8998_MASK,
- &msm8998_lm_sblk, PINGPONG_3, LM_1, 0),
+ &msm8998_lm_sblk, PINGPONG_3, LM_2, 0),
};
static const struct dpu_pingpong_cfg msm8998_pp[] = {
@@ -134,10 +134,10 @@ static const struct dpu_dspp_cfg msm8998_dspp[] = {
};
static const struct dpu_intf_cfg msm8998_intf[] = {
- INTF_BLK("intf_0", INTF_0, 0x6a000, 0x280, INTF_DP, 0, 25, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 24, 25),
- INTF_BLK("intf_1", INTF_1, 0x6a800, 0x280, INTF_DSI, 0, 25, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 26, 27),
- INTF_BLK("intf_2", INTF_2, 0x6b000, 0x280, INTF_DSI, 1, 25, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 28, 29),
- INTF_BLK("intf_3", INTF_3, 0x6b800, 0x280, INTF_HDMI, 0, 25, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 30, 31),
+ INTF_BLK("intf_0", INTF_0, 0x6a000, 0x280, INTF_DP, 0, 21, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 24, 25),
+ INTF_BLK("intf_1", INTF_1, 0x6a800, 0x280, INTF_DSI, 0, 21, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 26, 27),
+ INTF_BLK("intf_2", INTF_2, 0x6b000, 0x280, INTF_DSI, 1, 21, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 28, 29),
+ INTF_BLK("intf_3", INTF_3, 0x6b800, 0x280, INTF_HDMI, 0, 21, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 30, 31),
};
static const struct dpu_perf_cfg msm8998_perf_data = {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
index 282d410269ff..42b0e58624d0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
@@ -128,10 +128,10 @@ static const struct dpu_dspp_cfg sm8150_dspp[] = {
};
static const struct dpu_pingpong_cfg sm8150_pp[] = {
- PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, MERGE_3D_0, sdm845_pp_sblk_te,
+ PP_BLK("pingpong_0", PINGPONG_0, 0x70000, MERGE_3D_0, sdm845_pp_sblk,
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12)),
- PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, MERGE_3D_0, sdm845_pp_sblk_te,
+ PP_BLK("pingpong_1", PINGPONG_1, 0x70800, MERGE_3D_0, sdm845_pp_sblk,
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13)),
PP_BLK("pingpong_2", PINGPONG_2, 0x71000, MERGE_3D_1, sdm845_pp_sblk,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
index c57400265f28..e3bdfe7b30f1 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
@@ -116,10 +116,10 @@ static const struct dpu_lm_cfg sc8180x_lm[] = {
};
static const struct dpu_pingpong_cfg sc8180x_pp[] = {
- PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, MERGE_3D_0, sdm845_pp_sblk_te,
+ PP_BLK("pingpong_0", PINGPONG_0, 0x70000, MERGE_3D_0, sdm845_pp_sblk,
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12)),
- PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, MERGE_3D_0, sdm845_pp_sblk_te,
+ PP_BLK("pingpong_1", PINGPONG_1, 0x70800, MERGE_3D_0, sdm845_pp_sblk,
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13)),
PP_BLK("pingpong_2", PINGPONG_2, 0x71000, MERGE_3D_1, sdm845_pp_sblk,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
index 2c40229ea515..ed130582873c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
@@ -129,10 +129,10 @@ static const struct dpu_dspp_cfg sm8250_dspp[] = {
};
static const struct dpu_pingpong_cfg sm8250_pp[] = {
- PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, MERGE_3D_0, sdm845_pp_sblk_te,
+ PP_BLK("pingpong_0", PINGPONG_0, 0x70000, MERGE_3D_0, sdm845_pp_sblk,
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12)),
- PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, MERGE_3D_0, sdm845_pp_sblk_te,
+ PP_BLK("pingpong_1", PINGPONG_1, 0x70800, MERGE_3D_0, sdm845_pp_sblk,
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13)),
PP_BLK("pingpong_2", PINGPONG_2, 0x71000, MERGE_3D_1, sdm845_pp_sblk,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
index 8799ed757119..a46b11730a4d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
@@ -80,8 +80,8 @@ static const struct dpu_dspp_cfg sc7180_dspp[] = {
};
static const struct dpu_pingpong_cfg sc7180_pp[] = {
- PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, 0, sdm845_pp_sblk_te, -1, -1),
- PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, 0, sdm845_pp_sblk_te, -1, -1),
+ PP_BLK("pingpong_0", PINGPONG_0, 0x70000, 0, sdm845_pp_sblk, -1, -1),
+ PP_BLK("pingpong_1", PINGPONG_1, 0x70800, 0, sdm845_pp_sblk, -1, -1),
};
static const struct dpu_intf_cfg sc7180_intf[] = {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h
index 6f04d8f85c92..988d820f7ef2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h
@@ -122,7 +122,6 @@ const struct dpu_mdss_cfg dpu_sm6115_cfg = {
.mdss_irqs = BIT(MDP_SSPP_TOP0_INTR) | \
BIT(MDP_SSPP_TOP0_INTR2) | \
BIT(MDP_SSPP_TOP0_HIST_INTR) | \
- BIT(MDP_INTF0_INTR) | \
BIT(MDP_INTF1_INTR),
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h
index 303492d62a5c..c9003dcc1a59 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h
@@ -112,7 +112,6 @@ const struct dpu_mdss_cfg dpu_qcm2290_cfg = {
.mdss_irqs = BIT(MDP_SSPP_TOP0_INTR) | \
BIT(MDP_SSPP_TOP0_INTR2) | \
BIT(MDP_SSPP_TOP0_HIST_INTR) | \
- BIT(MDP_INTF0_INTR) | \
BIT(MDP_INTF1_INTR),
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
index ca107ca8de46..4f6a965bcd90 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
@@ -127,22 +127,22 @@ static const struct dpu_dspp_cfg sm8350_dspp[] = {
};
static const struct dpu_pingpong_cfg sm8350_pp[] = {
- PP_BLK_TE("pingpong_0", PINGPONG_0, 0x69000, MERGE_3D_0, sdm845_pp_sblk_te,
+ PP_BLK_DITHER("pingpong_0", PINGPONG_0, 0x69000, MERGE_3D_0, sc7280_pp_sblk,
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12)),
- PP_BLK_TE("pingpong_1", PINGPONG_1, 0x6a000, MERGE_3D_0, sdm845_pp_sblk_te,
+ PP_BLK_DITHER("pingpong_1", PINGPONG_1, 0x6a000, MERGE_3D_0, sc7280_pp_sblk,
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13)),
- PP_BLK("pingpong_2", PINGPONG_2, 0x6b000, MERGE_3D_1, sdm845_pp_sblk,
+ PP_BLK_DITHER("pingpong_2", PINGPONG_2, 0x6b000, MERGE_3D_1, sc7280_pp_sblk,
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14)),
- PP_BLK("pingpong_3", PINGPONG_3, 0x6c000, MERGE_3D_1, sdm845_pp_sblk,
+ PP_BLK_DITHER("pingpong_3", PINGPONG_3, 0x6c000, MERGE_3D_1, sc7280_pp_sblk,
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 15)),
- PP_BLK("pingpong_4", PINGPONG_4, 0x6d000, MERGE_3D_2, sdm845_pp_sblk,
+ PP_BLK_DITHER("pingpong_4", PINGPONG_4, 0x6d000, MERGE_3D_2, sc7280_pp_sblk,
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
-1),
- PP_BLK("pingpong_5", PINGPONG_5, 0x6e000, MERGE_3D_2, sdm845_pp_sblk,
+ PP_BLK_DITHER("pingpong_5", PINGPONG_5, 0x6e000, MERGE_3D_2, sc7280_pp_sblk,
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
-1),
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h
index 5957de185984..6b2c7eae71d9 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h
@@ -87,10 +87,10 @@ static const struct dpu_dspp_cfg sc7280_dspp[] = {
};
static const struct dpu_pingpong_cfg sc7280_pp[] = {
- PP_BLK("pingpong_0", PINGPONG_0, 0x69000, 0, sc7280_pp_sblk, -1, -1),
- PP_BLK("pingpong_1", PINGPONG_1, 0x6a000, 0, sc7280_pp_sblk, -1, -1),
- PP_BLK("pingpong_2", PINGPONG_2, 0x6b000, 0, sc7280_pp_sblk, -1, -1),
- PP_BLK("pingpong_3", PINGPONG_3, 0x6c000, 0, sc7280_pp_sblk, -1, -1),
+ PP_BLK_DITHER("pingpong_0", PINGPONG_0, 0x69000, 0, sc7280_pp_sblk, -1, -1),
+ PP_BLK_DITHER("pingpong_1", PINGPONG_1, 0x6a000, 0, sc7280_pp_sblk, -1, -1),
+ PP_BLK_DITHER("pingpong_2", PINGPONG_2, 0x6b000, 0, sc7280_pp_sblk, -1, -1),
+ PP_BLK_DITHER("pingpong_3", PINGPONG_3, 0x6c000, 0, sc7280_pp_sblk, -1, -1),
};
static const struct dpu_intf_cfg sc7280_intf[] = {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
index 9aab110b8c44..706d0f13b598 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
@@ -121,18 +121,18 @@ static const struct dpu_dspp_cfg sc8280xp_dspp[] = {
};
static const struct dpu_pingpong_cfg sc8280xp_pp[] = {
- PP_BLK_TE("pingpong_0", PINGPONG_0, 0x69000, MERGE_3D_0, sdm845_pp_sblk_te,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), -1),
- PP_BLK_TE("pingpong_1", PINGPONG_1, 0x6a000, MERGE_3D_0, sdm845_pp_sblk_te,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9), -1),
- PP_BLK_TE("pingpong_2", PINGPONG_2, 0x6b000, MERGE_3D_1, sdm845_pp_sblk_te,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10), -1),
- PP_BLK_TE("pingpong_3", PINGPONG_3, 0x6c000, MERGE_3D_1, sdm845_pp_sblk_te,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11), -1),
- PP_BLK_TE("pingpong_4", PINGPONG_4, 0x6d000, MERGE_3D_2, sdm845_pp_sblk_te,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30), -1),
- PP_BLK_TE("pingpong_5", PINGPONG_5, 0x6e000, MERGE_3D_2, sdm845_pp_sblk_te,
- DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31), -1),
+ PP_BLK_DITHER("pingpong_0", PINGPONG_0, 0x69000, MERGE_3D_0, sc7280_pp_sblk,
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), -1),
+ PP_BLK_DITHER("pingpong_1", PINGPONG_1, 0x6a000, MERGE_3D_0, sc7280_pp_sblk,
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9), -1),
+ PP_BLK_DITHER("pingpong_2", PINGPONG_2, 0x6b000, MERGE_3D_1, sc7280_pp_sblk,
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10), -1),
+ PP_BLK_DITHER("pingpong_3", PINGPONG_3, 0x6c000, MERGE_3D_1, sc7280_pp_sblk,
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11), -1),
+ PP_BLK_DITHER("pingpong_4", PINGPONG_4, 0x6d000, MERGE_3D_2, sc7280_pp_sblk,
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30), -1),
+ PP_BLK_DITHER("pingpong_5", PINGPONG_5, 0x6e000, MERGE_3D_2, sc7280_pp_sblk,
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31), -1),
};
static const struct dpu_merge_3d_cfg sc8280xp_merge_3d[] = {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
index 02a259b6b426..4ecb3df5cbc0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
@@ -128,28 +128,28 @@ static const struct dpu_dspp_cfg sm8450_dspp[] = {
};
/* FIXME: interrupts */
static const struct dpu_pingpong_cfg sm8450_pp[] = {
- PP_BLK_TE("pingpong_0", PINGPONG_0, 0x69000, MERGE_3D_0, sdm845_pp_sblk_te,
+ PP_BLK_DITHER("pingpong_0", PINGPONG_0, 0x69000, MERGE_3D_0, sc7280_pp_sblk,
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12)),
- PP_BLK_TE("pingpong_1", PINGPONG_1, 0x6a000, MERGE_3D_0, sdm845_pp_sblk_te,
+ PP_BLK_DITHER("pingpong_1", PINGPONG_1, 0x6a000, MERGE_3D_0, sc7280_pp_sblk,
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13)),
- PP_BLK("pingpong_2", PINGPONG_2, 0x6b000, MERGE_3D_1, sdm845_pp_sblk,
+ PP_BLK_DITHER("pingpong_2", PINGPONG_2, 0x6b000, MERGE_3D_1, sc7280_pp_sblk,
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14)),
- PP_BLK("pingpong_3", PINGPONG_3, 0x6c000, MERGE_3D_1, sdm845_pp_sblk,
+ PP_BLK_DITHER("pingpong_3", PINGPONG_3, 0x6c000, MERGE_3D_1, sc7280_pp_sblk,
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 15)),
- PP_BLK("pingpong_4", PINGPONG_4, 0x6d000, MERGE_3D_2, sdm845_pp_sblk,
+ PP_BLK_DITHER("pingpong_4", PINGPONG_4, 0x6d000, MERGE_3D_2, sc7280_pp_sblk,
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
-1),
- PP_BLK("pingpong_5", PINGPONG_5, 0x6e000, MERGE_3D_2, sdm845_pp_sblk,
+ PP_BLK_DITHER("pingpong_5", PINGPONG_5, 0x6e000, MERGE_3D_2, sc7280_pp_sblk,
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
-1),
- PP_BLK("pingpong_6", PINGPONG_6, 0x65800, MERGE_3D_3, sdm845_pp_sblk,
+ PP_BLK_DITHER("pingpong_6", PINGPONG_6, 0x65800, MERGE_3D_3, sc7280_pp_sblk,
-1,
-1),
- PP_BLK("pingpong_7", PINGPONG_7, 0x65c00, MERGE_3D_3, sdm845_pp_sblk,
+ PP_BLK_DITHER("pingpong_7", PINGPONG_7, 0x65c00, MERGE_3D_3, sc7280_pp_sblk,
-1,
-1),
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
index 9e403034093f..d0ab351b6a8b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
@@ -132,28 +132,28 @@ static const struct dpu_dspp_cfg sm8550_dspp[] = {
&sm8150_dspp_sblk),
};
static const struct dpu_pingpong_cfg sm8550_pp[] = {
- PP_BLK_DIPHER("pingpong_0", PINGPONG_0, 0x69000, MERGE_3D_0, sc7280_pp_sblk,
+ PP_BLK_DITHER("pingpong_0", PINGPONG_0, 0x69000, MERGE_3D_0, sc7280_pp_sblk,
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
-1),
- PP_BLK_DIPHER("pingpong_1", PINGPONG_1, 0x6a000, MERGE_3D_0, sc7280_pp_sblk,
+ PP_BLK_DITHER("pingpong_1", PINGPONG_1, 0x6a000, MERGE_3D_0, sc7280_pp_sblk,
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
-1),
- PP_BLK_DIPHER("pingpong_2", PINGPONG_2, 0x6b000, MERGE_3D_1, sc7280_pp_sblk,
+ PP_BLK_DITHER("pingpong_2", PINGPONG_2, 0x6b000, MERGE_3D_1, sc7280_pp_sblk,
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
-1),
- PP_BLK_DIPHER("pingpong_3", PINGPONG_3, 0x6c000, MERGE_3D_1, sc7280_pp_sblk,
+ PP_BLK_DITHER("pingpong_3", PINGPONG_3, 0x6c000, MERGE_3D_1, sc7280_pp_sblk,
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
-1),
- PP_BLK_DIPHER("pingpong_4", PINGPONG_4, 0x6d000, MERGE_3D_2, sc7280_pp_sblk,
+ PP_BLK_DITHER("pingpong_4", PINGPONG_4, 0x6d000, MERGE_3D_2, sc7280_pp_sblk,
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
-1),
- PP_BLK_DIPHER("pingpong_5", PINGPONG_5, 0x6e000, MERGE_3D_2, sc7280_pp_sblk,
+ PP_BLK_DITHER("pingpong_5", PINGPONG_5, 0x6e000, MERGE_3D_2, sc7280_pp_sblk,
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
-1),
- PP_BLK_DIPHER("pingpong_6", PINGPONG_6, 0x66000, MERGE_3D_3, sc7280_pp_sblk,
+ PP_BLK_DITHER("pingpong_6", PINGPONG_6, 0x66000, MERGE_3D_3, sc7280_pp_sblk,
-1,
-1),
- PP_BLK_DIPHER("pingpong_7", PINGPONG_7, 0x66400, MERGE_3D_3, sc7280_pp_sblk,
+ PP_BLK_DITHER("pingpong_7", PINGPONG_7, 0x66400, MERGE_3D_3, sc7280_pp_sblk,
-1,
-1),
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index 03f162af1a50..5d994bce696f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -491,7 +491,7 @@ static const struct dpu_pingpong_sub_blks sc7280_pp_sblk = {
.len = 0x20, .version = 0x20000},
};
-#define PP_BLK_DIPHER(_name, _id, _base, _merge_3d, _sblk, _done, _rdptr) \
+#define PP_BLK_DITHER(_name, _id, _base, _merge_3d, _sblk, _done, _rdptr) \
{\
.name = _name, .id = _id, \
.base = _base, .len = 0, \
@@ -587,12 +587,12 @@ static const u32 sdm845_nrt_pri_lvl[] = {3, 3, 3, 3, 3, 3, 3, 3};
static const struct dpu_vbif_dynamic_ot_cfg msm8998_ot_rdwr_cfg[] = {
{
- .pps = 1088 * 1920 * 30,
+ .pps = 1920 * 1080 * 30,
.ot_limit = 2,
},
{
- .pps = 1088 * 1920 * 60,
- .ot_limit = 6,
+ .pps = 1920 * 1080 * 60,
+ .ot_limit = 4,
},
{
.pps = 3840 * 2160 * 30,
@@ -705,10 +705,7 @@ static const struct dpu_qos_lut_entry msm8998_qos_linear[] = {
{.fl = 10, .lut = 0x1555b},
{.fl = 11, .lut = 0x5555b},
{.fl = 12, .lut = 0x15555b},
- {.fl = 13, .lut = 0x55555b},
- {.fl = 14, .lut = 0},
- {.fl = 1, .lut = 0x1b},
- {.fl = 0, .lut = 0}
+ {.fl = 0, .lut = 0x55555b}
};
static const struct dpu_qos_lut_entry sdm845_qos_linear[] = {
@@ -730,9 +727,7 @@ static const struct dpu_qos_lut_entry msm8998_qos_macrotile[] = {
{.fl = 10, .lut = 0x1aaff},
{.fl = 11, .lut = 0x5aaff},
{.fl = 12, .lut = 0x15aaff},
- {.fl = 13, .lut = 0x55aaff},
- {.fl = 1, .lut = 0x1aaff},
- {.fl = 0, .lut = 0},
+ {.fl = 0, .lut = 0x55aaff},
};
static const struct dpu_qos_lut_entry sc7180_qos_linear[] = {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
index 53326f25e40e..17f3e7e4f194 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
@@ -15,7 +15,7 @@
/*
* Register offsets in MDSS register file for the interrupt registers
- * w.r.t. to the MDP base
+ * w.r.t. the MDP base
*/
#define MDP_SSPP_TOP0_OFF 0x0
#define MDP_INTF_0_OFF 0x6A000
@@ -24,20 +24,23 @@
#define MDP_INTF_3_OFF 0x6B800
#define MDP_INTF_4_OFF 0x6C000
#define MDP_INTF_5_OFF 0x6C800
+#define INTF_INTR_EN 0x1c0
+#define INTF_INTR_STATUS 0x1c4
+#define INTF_INTR_CLEAR 0x1c8
#define MDP_AD4_0_OFF 0x7C000
#define MDP_AD4_1_OFF 0x7D000
#define MDP_AD4_INTR_EN_OFF 0x41c
#define MDP_AD4_INTR_CLEAR_OFF 0x424
#define MDP_AD4_INTR_STATUS_OFF 0x420
-#define MDP_INTF_0_OFF_REV_7xxx 0x34000
-#define MDP_INTF_1_OFF_REV_7xxx 0x35000
-#define MDP_INTF_2_OFF_REV_7xxx 0x36000
-#define MDP_INTF_3_OFF_REV_7xxx 0x37000
-#define MDP_INTF_4_OFF_REV_7xxx 0x38000
-#define MDP_INTF_5_OFF_REV_7xxx 0x39000
-#define MDP_INTF_6_OFF_REV_7xxx 0x3a000
-#define MDP_INTF_7_OFF_REV_7xxx 0x3b000
-#define MDP_INTF_8_OFF_REV_7xxx 0x3c000
+#define MDP_INTF_0_OFF_REV_7xxx 0x34000
+#define MDP_INTF_1_OFF_REV_7xxx 0x35000
+#define MDP_INTF_2_OFF_REV_7xxx 0x36000
+#define MDP_INTF_3_OFF_REV_7xxx 0x37000
+#define MDP_INTF_4_OFF_REV_7xxx 0x38000
+#define MDP_INTF_5_OFF_REV_7xxx 0x39000
+#define MDP_INTF_6_OFF_REV_7xxx 0x3a000
+#define MDP_INTF_7_OFF_REV_7xxx 0x3b000
+#define MDP_INTF_8_OFF_REV_7xxx 0x3c000
/**
* struct dpu_intr_reg - array of DPU register sets
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
index 84ee2efa9c66..b9dddf576c02 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
@@ -56,11 +56,6 @@
#define INTF_TPG_RGB_MAPPING 0x11C
#define INTF_PROG_FETCH_START 0x170
#define INTF_PROG_ROT_START 0x174
-
-#define INTF_FRAME_LINE_COUNT_EN 0x0A8
-#define INTF_FRAME_COUNT 0x0AC
-#define INTF_LINE_COUNT 0x0B0
-
#define INTF_MUX 0x25C
#define INTF_STATUS 0x26C
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
index 2d28afdf860e..a3e413d27717 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
@@ -61,6 +61,7 @@ static const struct dpu_wb_cfg *_wb_offset(enum dpu_wb wb,
for (i = 0; i < m->wb_count; i++) {
if (wb == m->wb[i].id) {
b->blk_addr = addr + m->wb[i].base;
+ b->log_mask = DPU_DBG_MASK_WB;
return &m->wb[i];
}
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h
index feb9a729844a..5acd5683d25a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h
@@ -21,9 +21,6 @@
#define HIST_INTR_EN 0x01c
#define HIST_INTR_STATUS 0x020
#define HIST_INTR_CLEAR 0x024
-#define INTF_INTR_EN 0x1C0
-#define INTF_INTR_STATUS 0x1C4
-#define INTF_INTR_CLEAR 0x1C8
#define SPLIT_DISPLAY_EN 0x2F4
#define SPLIT_DISPLAY_UPPER_PIPE_CTRL 0x2F8
#define DSPP_IGC_COLOR0_RAM_LUTN 0x300
diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c
index 6666783e1468..1245c7aa49df 100644
--- a/drivers/gpu/drm/msm/dp/dp_audio.c
+++ b/drivers/gpu/drm/msm/dp/dp_audio.c
@@ -593,6 +593,18 @@ static struct hdmi_codec_pdata codec_data = {
.i2s = 1,
};
+void dp_unregister_audio_driver(struct device *dev, struct dp_audio *dp_audio)
+{
+ struct dp_audio_private *audio_priv;
+
+ audio_priv = container_of(dp_audio, struct dp_audio_private, dp_audio);
+
+ if (audio_priv->audio_pdev) {
+ platform_device_unregister(audio_priv->audio_pdev);
+ audio_priv->audio_pdev = NULL;
+ }
+}
+
int dp_register_audio_driver(struct device *dev,
struct dp_audio *dp_audio)
{
diff --git a/drivers/gpu/drm/msm/dp/dp_audio.h b/drivers/gpu/drm/msm/dp/dp_audio.h
index 84e5f4a5d26b..4ab78880af82 100644
--- a/drivers/gpu/drm/msm/dp/dp_audio.h
+++ b/drivers/gpu/drm/msm/dp/dp_audio.h
@@ -53,6 +53,8 @@ struct dp_audio *dp_audio_get(struct platform_device *pdev,
int dp_register_audio_driver(struct device *dev,
struct dp_audio *dp_audio);
+void dp_unregister_audio_driver(struct device *dev, struct dp_audio *dp_audio);
+
/**
* dp_audio_put()
*
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
index 7a8cf1c8233d..5142aeb705a4 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
@@ -620,7 +620,7 @@ void dp_catalog_hpd_config_intr(struct dp_catalog *dp_catalog,
config & DP_DP_HPD_INT_MASK);
}
-void dp_catalog_ctrl_hpd_config(struct dp_catalog *dp_catalog)
+void dp_catalog_ctrl_hpd_enable(struct dp_catalog *dp_catalog)
{
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
@@ -635,6 +635,19 @@ void dp_catalog_ctrl_hpd_config(struct dp_catalog *dp_catalog)
dp_write_aux(catalog, REG_DP_DP_HPD_CTRL, DP_DP_HPD_CTRL_HPD_EN);
}
+void dp_catalog_ctrl_hpd_disable(struct dp_catalog *dp_catalog)
+{
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ u32 reftimer = dp_read_aux(catalog, REG_DP_DP_HPD_REFTIMER);
+
+ reftimer &= ~DP_DP_HPD_REFTIMER_ENABLE;
+ dp_write_aux(catalog, REG_DP_DP_HPD_REFTIMER, reftimer);
+
+ dp_write_aux(catalog, REG_DP_DP_HPD_CTRL, 0);
+}
+
static void dp_catalog_enable_sdp(struct dp_catalog_private *catalog)
{
/* trigger sdp */
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.h b/drivers/gpu/drm/msm/dp/dp_catalog.h
index 82376a2697ef..38786e855b51 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.h
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.h
@@ -104,7 +104,8 @@ bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog *dp_catalog);
void dp_catalog_ctrl_enable_irq(struct dp_catalog *dp_catalog, bool enable);
void dp_catalog_hpd_config_intr(struct dp_catalog *dp_catalog,
u32 intr_mask, bool en);
-void dp_catalog_ctrl_hpd_config(struct dp_catalog *dp_catalog);
+void dp_catalog_ctrl_hpd_enable(struct dp_catalog *dp_catalog);
+void dp_catalog_ctrl_hpd_disable(struct dp_catalog *dp_catalog);
void dp_catalog_ctrl_config_psr(struct dp_catalog *dp_catalog);
void dp_catalog_ctrl_set_psr(struct dp_catalog *dp_catalog, bool enter);
u32 dp_catalog_link_is_connected(struct dp_catalog *dp_catalog);
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index 3e13acdfa7e5..03b0eda6df54 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -28,6 +28,10 @@
#include "dp_audio.h"
#include "dp_debug.h"
+static bool psr_enabled = false;
+module_param(psr_enabled, bool, 0);
+MODULE_PARM_DESC(psr_enabled, "enable PSR for eDP and DP displays");
+
#define HPD_STRING_SIZE 30
enum {
@@ -326,6 +330,7 @@ static void dp_display_unbind(struct device *dev, struct device *master,
kthread_stop(dp->ev_tsk);
dp_power_client_deinit(dp->power);
+ dp_unregister_audio_driver(dev, dp->audio);
dp_aux_unregister(dp->aux);
dp->drm_dev = NULL;
dp->aux->drm_dev = NULL;
@@ -406,7 +411,7 @@ static int dp_display_process_hpd_high(struct dp_display_private *dp)
edid = dp->panel->edid;
- dp->dp_display.psr_supported = dp->panel->psr_cap.version;
+ dp->dp_display.psr_supported = dp->panel->psr_cap.version && psr_enabled;
dp->audio_supported = drm_detect_monitor_audio(edid);
dp_panel_handle_sink_request(dp->panel);
@@ -615,12 +620,6 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
dp->hpd_state = ST_MAINLINK_READY;
}
- /* enable HDP irq_hpd/replug interrupt */
- if (dp->dp_display.internal_hpd)
- dp_catalog_hpd_config_intr(dp->catalog,
- DP_DP_IRQ_HPD_INT_MASK | DP_DP_HPD_REPLUG_INT_MASK,
- true);
-
drm_dbg_dp(dp->drm_dev, "After, type=%d hpd_state=%d\n",
dp->dp_display.connector_type, state);
mutex_unlock(&dp->event_mutex);
@@ -658,12 +657,6 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
drm_dbg_dp(dp->drm_dev, "Before, type=%d hpd_state=%d\n",
dp->dp_display.connector_type, state);
- /* disable irq_hpd/replug interrupts */
- if (dp->dp_display.internal_hpd)
- dp_catalog_hpd_config_intr(dp->catalog,
- DP_DP_IRQ_HPD_INT_MASK | DP_DP_HPD_REPLUG_INT_MASK,
- false);
-
/* unplugged, no more irq_hpd handle */
dp_del_event(dp, EV_IRQ_HPD_INT);
@@ -687,10 +680,6 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
return 0;
}
- /* disable HPD plug interrupts */
- if (dp->dp_display.internal_hpd)
- dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK, false);
-
/*
* We don't need separate work for disconnect as
* connect/attention interrupts are disabled
@@ -706,10 +695,6 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
/* signal the disconnect event early to ensure proper teardown */
dp_display_handle_plugged_change(&dp->dp_display, false);
- /* enable HDP plug interrupt to prepare for next plugin */
- if (dp->dp_display.internal_hpd)
- dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK, true);
-
drm_dbg_dp(dp->drm_dev, "After, type=%d hpd_state=%d\n",
dp->dp_display.connector_type, state);
@@ -1082,26 +1067,6 @@ void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp)
mutex_unlock(&dp_display->event_mutex);
}
-static void dp_display_config_hpd(struct dp_display_private *dp)
-{
-
- dp_display_host_init(dp);
- dp_catalog_ctrl_hpd_config(dp->catalog);
-
- /* Enable plug and unplug interrupts only if requested */
- if (dp->dp_display.internal_hpd)
- dp_catalog_hpd_config_intr(dp->catalog,
- DP_DP_HPD_PLUG_INT_MASK |
- DP_DP_HPD_UNPLUG_INT_MASK,
- true);
-
- /* Enable interrupt first time
- * we are leaving dp clocks on during disconnect
- * and never disable interrupt
- */
- enable_irq(dp->irq);
-}
-
void dp_display_set_psr(struct msm_dp *dp_display, bool enter)
{
struct dp_display_private *dp;
@@ -1176,7 +1141,7 @@ static int hpd_event_thread(void *data)
switch (todo->event_id) {
case EV_HPD_INIT_SETUP:
- dp_display_config_hpd(dp_priv);
+ dp_display_host_init(dp_priv);
break;
case EV_HPD_PLUG_INT:
dp_hpd_plug_handle(dp_priv, todo->data);
@@ -1282,7 +1247,6 @@ int dp_display_request_irq(struct msm_dp *dp_display)
dp->irq, rc);
return rc;
}
- disable_irq(dp->irq);
return 0;
}
@@ -1394,13 +1358,8 @@ static int dp_pm_resume(struct device *dev)
/* turn on dp ctrl/phy */
dp_display_host_init(dp);
- dp_catalog_ctrl_hpd_config(dp->catalog);
-
- if (dp->dp_display.internal_hpd)
- dp_catalog_hpd_config_intr(dp->catalog,
- DP_DP_HPD_PLUG_INT_MASK |
- DP_DP_HPD_UNPLUG_INT_MASK,
- true);
+ if (dp_display->is_edp)
+ dp_catalog_ctrl_hpd_enable(dp->catalog);
if (dp_catalog_link_is_connected(dp->catalog)) {
/*
@@ -1568,9 +1527,8 @@ static int dp_display_get_next_bridge(struct msm_dp *dp)
if (aux_bus && dp->is_edp) {
dp_display_host_init(dp_priv);
- dp_catalog_ctrl_hpd_config(dp_priv->catalog);
+ dp_catalog_ctrl_hpd_enable(dp_priv->catalog);
dp_display_host_phy_init(dp_priv);
- enable_irq(dp_priv->irq);
/*
* The code below assumes that the panel will finish probing
@@ -1612,7 +1570,6 @@ static int dp_display_get_next_bridge(struct msm_dp *dp)
error:
if (dp->is_edp) {
- disable_irq(dp_priv->irq);
dp_display_host_phy_exit(dp_priv);
dp_display_host_deinit(dp_priv);
}
@@ -1801,16 +1758,31 @@ void dp_bridge_hpd_enable(struct drm_bridge *bridge)
{
struct msm_dp_bridge *dp_bridge = to_dp_bridge(bridge);
struct msm_dp *dp_display = dp_bridge->dp_display;
+ struct dp_display_private *dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+ mutex_lock(&dp->event_mutex);
+ dp_catalog_ctrl_hpd_enable(dp->catalog);
+
+ /* enable HPD interrupts */
+ dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, true);
dp_display->internal_hpd = true;
+ mutex_unlock(&dp->event_mutex);
}
void dp_bridge_hpd_disable(struct drm_bridge *bridge)
{
struct msm_dp_bridge *dp_bridge = to_dp_bridge(bridge);
struct msm_dp *dp_display = dp_bridge->dp_display;
+ struct dp_display_private *dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+ mutex_lock(&dp->event_mutex);
+ /* disable HPD interrupts */
+ dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false);
+ dp_catalog_ctrl_hpd_disable(dp->catalog);
dp_display->internal_hpd = false;
+ mutex_unlock(&dp->event_mutex);
}
void dp_bridge_hpd_notify(struct drm_bridge *bridge,
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index d77fa9793c54..9c45d641b521 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -155,6 +155,8 @@ static bool can_do_async(struct drm_atomic_state *state,
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
if (drm_atomic_crtc_needs_modeset(crtc_state))
return false;
+ if (!crtc_state->active)
+ return false;
if (++num_crtcs > 1)
return false;
*async_crtc = crtc;
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index b4cfa44a8a5c..463ca4164f5f 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -449,6 +449,8 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
if (ret)
goto err_cleanup_mode_config;
+ dma_set_max_seg_size(dev, UINT_MAX);
+
/* Bind all our sub-components: */
ret = component_bind_all(dev, ddev);
if (ret)
@@ -459,8 +461,6 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
if (ret)
goto err_msm_uninit;
- dma_set_max_seg_size(dev, UINT_MAX);
-
msm_gem_shrinker_init(ddev);
if (priv->kms_init) {
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index db6c4e281d75..cd39b9d8abdb 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -219,7 +219,8 @@ static void put_pages(struct drm_gem_object *obj)
}
}
-static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj)
+static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj,
+ unsigned madv)
{
struct msm_drm_private *priv = obj->dev->dev_private;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -227,7 +228,9 @@ static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj)
msm_gem_assert_locked(obj);
- if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
+ if (GEM_WARN_ON(msm_obj->madv > madv)) {
+ DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
+ msm_obj->madv, madv);
return ERR_PTR(-EBUSY);
}
@@ -248,7 +251,7 @@ struct page **msm_gem_pin_pages(struct drm_gem_object *obj)
struct page **p;
msm_gem_lock(obj);
- p = msm_gem_pin_pages_locked(obj);
+ p = msm_gem_pin_pages_locked(obj, MSM_MADV_WILLNEED);
msm_gem_unlock(obj);
return p;
@@ -473,10 +476,7 @@ int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
msm_gem_assert_locked(obj);
- if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
- return -EBUSY;
-
- pages = msm_gem_pin_pages_locked(obj);
+ pages = msm_gem_pin_pages_locked(obj, MSM_MADV_WILLNEED);
if (IS_ERR(pages))
return PTR_ERR(pages);
@@ -699,13 +699,7 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
if (obj->import_attach)
return ERR_PTR(-ENODEV);
- if (GEM_WARN_ON(msm_obj->madv > madv)) {
- DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
- msm_obj->madv, madv);
- return ERR_PTR(-EBUSY);
- }
-
- pages = msm_gem_pin_pages_locked(obj);
+ pages = msm_gem_pin_pages_locked(obj, madv);
if (IS_ERR(pages))
return ERR_CAST(pages);
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index aff18c2f600a..9f5933c75e3d 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -722,7 +722,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct msm_drm_private *priv = dev->dev_private;
struct drm_msm_gem_submit *args = data;
struct msm_file_private *ctx = file->driver_priv;
- struct msm_gem_submit *submit;
+ struct msm_gem_submit *submit = NULL;
struct msm_gpu *gpu = priv->gpu;
struct msm_gpu_submitqueue *queue;
struct msm_ringbuffer *ring;
@@ -769,13 +769,15 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
if (out_fence_fd < 0) {
ret = out_fence_fd;
- return ret;
+ goto out_post_unlock;
}
}
submit = submit_create(dev, gpu, queue, args->nr_bos, args->nr_cmds);
- if (IS_ERR(submit))
- return PTR_ERR(submit);
+ if (IS_ERR(submit)) {
+ ret = PTR_ERR(submit);
+ goto out_post_unlock;
+ }
trace_msm_gpu_submit(pid_nr(submit->pid), ring->id, submit->ident,
args->nr_bos, args->nr_cmds);
@@ -962,11 +964,20 @@ out:
if (has_ww_ticket)
ww_acquire_fini(&submit->ticket);
out_unlock:
- if (ret && (out_fence_fd >= 0))
- put_unused_fd(out_fence_fd);
mutex_unlock(&queue->lock);
out_post_unlock:
- msm_gem_submit_put(submit);
+ if (ret && (out_fence_fd >= 0))
+ put_unused_fd(out_fence_fd);
+
+ if (!IS_ERR_OR_NULL(submit)) {
+ msm_gem_submit_put(submit);
+ } else {
+ /*
+ * If the submit hasn't yet taken ownership of the queue
+ * then we need to drop the reference ourselves:
+ */
+ msm_submitqueue_put(queue);
+ }
if (!IS_ERR_OR_NULL(post_deps)) {
for (i = 0; i < args->nr_out_syncobjs; ++i) {
kfree(post_deps[i].chain);
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 418e1e06cdde..5cc8d358cc97 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -234,7 +234,12 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
/* Get the pagetable configuration from the domain */
if (adreno_smmu->cookie)
ttbr1_cfg = adreno_smmu->get_ttbr1_cfg(adreno_smmu->cookie);
- if (!ttbr1_cfg)
+
+ /*
+ * If you hit this WARN_ONCE(), you are probably missing an entry in
+ * qcom_smmu_impl_of_match[] in arm-smmu-qcom.c
+ */
+ if (WARN_ONCE(!ttbr1_cfg, "No per-process page tables"))
return ERR_PTR(-ENODEV);
pagetable = kzalloc(sizeof(*pagetable), GFP_KERNEL);
@@ -410,7 +415,7 @@ struct msm_mmu *msm_iommu_gpu_new(struct device *dev, struct msm_gpu *gpu, unsig
struct msm_mmu *mmu;
mmu = msm_iommu_new(dev, quirks);
- if (IS_ERR(mmu))
+ if (IS_ERR_OR_NULL(mmu))
return mmu;
iommu = to_msm_iommu(mmu);
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 8cf096f841a9..a2ae8c21e4dc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -220,6 +220,9 @@ static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out
int optimus_funcs;
struct pci_dev *parent_pdev;
+ if (pdev->vendor != PCI_VENDOR_ID_NVIDIA)
+ return;
+
*has_pr3 = false;
parent_pdev = pci_upstream_bridge(pdev);
if (parent_pdev) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 086b66b60d91..f75c6f09dd2a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -730,7 +730,8 @@ out:
#endif
nouveau_connector_set_edid(nv_connector, edid);
- nouveau_connector_set_encoder(connector, nv_encoder);
+ if (nv_encoder)
+ nouveau_connector_set_encoder(connector, nv_encoder);
return status;
}
@@ -966,7 +967,7 @@ nouveau_connector_get_modes(struct drm_connector *connector)
/* Determine display colour depth for everything except LVDS now,
* DP requires this before mode_valid() is called.
*/
- if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
+ if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS && nv_connector->native_mode)
nouveau_connector_detect_depth(connector);
/* Find the native mode if this is a digital panel, if we didn't
@@ -987,7 +988,7 @@ nouveau_connector_get_modes(struct drm_connector *connector)
* "native" mode as some VBIOS tables require us to use the
* pixel clock as part of the lookup...
*/
- if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
+ if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS && nv_connector->native_mode)
nouveau_connector_detect_depth(connector);
if (nv_encoder->dcb->type == DCB_OUTPUT_TV)
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index cc7c5b4a05fd..7aac9384600e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -137,10 +137,16 @@ nouveau_name(struct drm_device *dev)
static inline bool
nouveau_cli_work_ready(struct dma_fence *fence)
{
- if (!dma_fence_is_signaled(fence))
- return false;
- dma_fence_put(fence);
- return true;
+ bool ret = true;
+
+ spin_lock_irq(fence->lock);
+ if (!dma_fence_is_signaled_locked(fence))
+ ret = false;
+ spin_unlock_irq(fence->lock);
+
+ if (ret == true)
+ dma_fence_put(fence);
+ return ret;
}
static void
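The rework above checks the fence's signaled state while holding fence->lock, so the test cannot race a concurrent signaler, and it drops the reference only after deciding. A userspace sketch of the same check-under-lock, act-outside-lock shape (a pthread mutex standing in for the fence spinlock; this is not the dma_fence API):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fence {
	pthread_mutex_t lock;
	bool signaled;
	int refcount;
};

/* Read the flag while holding the lock so a concurrent signaler
 * can't race the check; drop our reference only after deciding. */
static bool fence_ready(struct fence *f)
{
	bool ret;

	pthread_mutex_lock(&f->lock);
	ret = f->signaled;
	pthread_mutex_unlock(&f->lock);

	if (ret)
		f->refcount--;
	return ret;
}

int main(void)
{
	struct fence f = { PTHREAD_MUTEX_INITIALIZER, true, 1 };

	printf("ready=%d refcount=%d\n", fence_ready(&f), f.refcount);
	return 0;
}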
diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c
index 6afdf260a4e2..b9fe926a49e8 100644
--- a/drivers/gpu/drm/pl111/pl111_display.c
+++ b/drivers/gpu/drm/pl111/pl111_display.c
@@ -53,7 +53,7 @@ pl111_mode_valid(struct drm_simple_display_pipe *pipe,
{
struct drm_device *drm = pipe->crtc.dev;
struct pl111_drm_dev_private *priv = drm->dev_private;
- u32 cpp = priv->variant->fb_bpp / 8;
+ u32 cpp = DIV_ROUND_UP(priv->variant->fb_depth, 8);
u64 bw;
/*
diff --git a/drivers/gpu/drm/pl111/pl111_drm.h b/drivers/gpu/drm/pl111/pl111_drm.h
index 2a46b5bd8576..d1fe756444ee 100644
--- a/drivers/gpu/drm/pl111/pl111_drm.h
+++ b/drivers/gpu/drm/pl111/pl111_drm.h
@@ -114,7 +114,7 @@ struct drm_minor;
* extensions to the control register
* @formats: array of supported pixel formats on this variant
* @nformats: the length of the array of supported pixel formats
- * @fb_bpp: desired bits per pixel on the default framebuffer
+ * @fb_depth: desired depth per pixel on the default framebuffer
*/
struct pl111_variant_data {
const char *name;
@@ -126,7 +126,7 @@ struct pl111_variant_data {
bool st_bitmux_control;
const u32 *formats;
unsigned int nformats;
- unsigned int fb_bpp;
+ unsigned int fb_depth;
};
struct pl111_drm_dev_private {
diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c
index 4b2a9e9753f6..43049c8028b2 100644
--- a/drivers/gpu/drm/pl111/pl111_drv.c
+++ b/drivers/gpu/drm/pl111/pl111_drv.c
@@ -308,7 +308,7 @@ static int pl111_amba_probe(struct amba_device *amba_dev,
if (ret < 0)
goto dev_put;
- drm_fbdev_dma_setup(drm, priv->variant->fb_bpp);
+ drm_fbdev_dma_setup(drm, priv->variant->fb_depth);
return 0;
@@ -351,7 +351,7 @@ static const struct pl111_variant_data pl110_variant = {
.is_pl110 = true,
.formats = pl110_pixel_formats,
.nformats = ARRAY_SIZE(pl110_pixel_formats),
- .fb_bpp = 16,
+ .fb_depth = 16,
};
/* RealView, Versatile Express etc use this modern variant */
@@ -376,7 +376,7 @@ static const struct pl111_variant_data pl111_variant = {
.name = "PL111",
.formats = pl111_pixel_formats,
.nformats = ARRAY_SIZE(pl111_pixel_formats),
- .fb_bpp = 32,
+ .fb_depth = 32,
};
static const u32 pl110_nomadik_pixel_formats[] = {
@@ -405,7 +405,7 @@ static const struct pl111_variant_data pl110_nomadik_variant = {
.is_lcdc = true,
.st_bitmux_control = true,
.broken_vblank = true,
- .fb_bpp = 16,
+ .fb_depth = 16,
};
static const struct amba_id pl111_id_table[] = {
diff --git a/drivers/gpu/drm/pl111/pl111_versatile.c b/drivers/gpu/drm/pl111/pl111_versatile.c
index 1b436b75fd39..00c3ebd32359 100644
--- a/drivers/gpu/drm/pl111/pl111_versatile.c
+++ b/drivers/gpu/drm/pl111/pl111_versatile.c
@@ -316,7 +316,7 @@ static const struct pl111_variant_data pl110_integrator = {
.broken_vblank = true,
.formats = pl110_integrator_pixel_formats,
.nformats = ARRAY_SIZE(pl110_integrator_pixel_formats),
- .fb_bpp = 16,
+ .fb_depth = 16,
};
/*
@@ -330,7 +330,7 @@ static const struct pl111_variant_data pl110_impd1 = {
.broken_vblank = true,
.formats = pl110_integrator_pixel_formats,
.nformats = ARRAY_SIZE(pl110_integrator_pixel_formats),
- .fb_bpp = 16,
+ .fb_depth = 15,
};
/*
@@ -343,7 +343,7 @@ static const struct pl111_variant_data pl110_versatile = {
.external_bgr = true,
.formats = pl110_versatile_pixel_formats,
.nformats = ARRAY_SIZE(pl110_versatile_pixel_formats),
- .fb_bpp = 16,
+ .fb_depth = 16,
};
/*
@@ -355,7 +355,7 @@ static const struct pl111_variant_data pl111_realview = {
.name = "PL111 RealView",
.formats = pl111_realview_pixel_formats,
.nformats = ARRAY_SIZE(pl111_realview_pixel_formats),
- .fb_bpp = 16,
+ .fb_depth = 16,
};
/*
@@ -367,7 +367,7 @@ static const struct pl111_variant_data pl111_vexpress = {
.name = "PL111 Versatile Express",
.formats = pl111_realview_pixel_formats,
.nformats = ARRAY_SIZE(pl111_realview_pixel_formats),
- .fb_bpp = 16,
+ .fb_depth = 16,
.broken_clockdivider = true,
};
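Switching the variants from fb_bpp to fb_depth is what lets the IM-PD1 advertise 15 above: depth counts colour bits, while memory is consumed in whole bytes, which is why pl111_mode_valid() now rounds up. A worked example, with DIV_ROUND_UP written out as the kernel defines it:

/* Sketch: why DIV_ROUND_UP is needed once depth (not bpp) is stored.
 * A 15-bit colour depth still occupies 2 bytes per pixel in memory. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int depths[] = { 15, 16, 32 };

	for (unsigned int i = 0; i < 3; i++)
		printf("depth %2u -> %u byte(s) per pixel\n",
		       depths[i], DIV_ROUND_UP(depths[i], 8));
	/* depth 15 -> 2, depth 16 -> 2, depth 32 -> 4; a plain
	 * fb_depth / 8 would have claimed 1 byte per pixel for the
	 * IM-PD1's 15-bit depth and undercounted its bandwidth. */
	return 0;
}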
diff --git a/drivers/gpu/drm/radeon/radeon_fbdev.c b/drivers/gpu/drm/radeon/radeon_fbdev.c
index fe76e29910ef..8f6c3aef0962 100644
--- a/drivers/gpu/drm/radeon/radeon_fbdev.c
+++ b/drivers/gpu/drm/radeon/radeon_fbdev.c
@@ -307,6 +307,7 @@ static void radeon_fbdev_client_unregister(struct drm_client_dev *client)
if (fb_helper->info) {
vga_switcheroo_client_fb_set(rdev->pdev, NULL);
+ drm_helper_force_disable_all(dev);
drm_fb_helper_unregister_info(fb_helper);
} else {
drm_client_release(&fb_helper->client);
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index bdc5af23f005..d3f5ddbc1704 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -459,7 +459,6 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_set_domain *args = data;
struct drm_gem_object *gobj;
- struct radeon_bo *robj;
int r;
/* for now if someone requests domain CPU -
@@ -472,13 +471,12 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
up_read(&rdev->exclusive_lock);
return -ENOENT;
}
- robj = gem_to_radeon_bo(gobj);
r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
drm_gem_object_put(gobj);
up_read(&rdev->exclusive_lock);
- r = radeon_gem_handle_lockup(robj->rdev, r);
+ r = radeon_gem_handle_lockup(rdev, r);
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 3377fbc71f65..c4dda908666c 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -99,6 +99,16 @@ static void radeon_hotplug_work_func(struct work_struct *work)
static void radeon_dp_work_func(struct work_struct *work)
{
+ struct radeon_device *rdev = container_of(work, struct radeon_device,
+ dp_work);
+ struct drm_device *dev = rdev->ddev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_connector *connector;
+
+ mutex_lock(&mode_config->mutex);
+ list_for_each_entry(connector, &mode_config->connector_list, head)
+ radeon_connector_hotplug(connector);
+ mutex_unlock(&mode_config->mutex);
}
/**
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 8c183639603e..aea5a90ff98b 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -1141,9 +1141,6 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
struct drm_sched_rq *rq = &sched->sched_rq[i];
- if (!rq)
- continue;
-
spin_lock(&rq->lock);
list_for_each_entry(s_entity, &rq->entities, list)
/*
diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c
index 7ae5f27df54d..c6bdb9c4ef3e 100644
--- a/drivers/hid/hid-google-hammer.c
+++ b/drivers/hid/hid-google-hammer.c
@@ -587,6 +587,8 @@ static const struct hid_device_id hammer_devices[] = {
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_HAMMER) },
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_JEWEL) },
+ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MAGNEMITE) },
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MASTERBALL) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index d79e946acdcb..5d29abac2300 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -529,6 +529,7 @@
#define USB_DEVICE_ID_GOOGLE_MOONBALL 0x5044
#define USB_DEVICE_ID_GOOGLE_DON 0x5050
#define USB_DEVICE_ID_GOOGLE_EEL 0x5057
+#define USB_DEVICE_ID_GOOGLE_JEWEL 0x5061
#define USB_VENDOR_ID_GOTOP 0x08f2
#define USB_DEVICE_ID_SUPER_Q2 0x007f
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 0fcfd85fea0f..5e1a412fd28f 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -286,7 +286,7 @@ static int hidpp_send_message_sync(struct hidpp_device *hidpp,
struct hidpp_report *message,
struct hidpp_report *response)
{
- int ret;
+ int ret = -1;
int max_retries = 3;
mutex_lock(&hidpp->send_mutex);
@@ -300,13 +300,13 @@ static int hidpp_send_message_sync(struct hidpp_device *hidpp,
*/
*response = *message;
- for (; max_retries != 0; max_retries--) {
+ for (; max_retries != 0 && ret; max_retries--) {
ret = __hidpp_send_report(hidpp->hid_dev, message);
if (ret) {
dbg_hid("__hidpp_send_report returned err: %d\n", ret);
memset(response, 0, sizeof(struct hidpp_report));
- goto exit;
+ break;
}
if (!wait_event_timeout(hidpp->wait, hidpp->answer_available,
@@ -314,13 +314,14 @@ static int hidpp_send_message_sync(struct hidpp_device *hidpp,
dbg_hid("%s:timeout waiting for response\n", __func__);
memset(response, 0, sizeof(struct hidpp_report));
ret = -ETIMEDOUT;
+ break;
}
if (response->report_id == REPORT_ID_HIDPP_SHORT &&
response->rap.sub_id == HIDPP_ERROR) {
ret = response->rap.params[1];
dbg_hid("%s:got hidpp error %02X\n", __func__, ret);
- goto exit;
+ break;
}
if ((response->report_id == REPORT_ID_HIDPP_LONG ||
@@ -329,13 +330,12 @@ static int hidpp_send_message_sync(struct hidpp_device *hidpp,
ret = response->fap.params[1];
if (ret != HIDPP20_ERROR_BUSY) {
dbg_hid("%s:got hidpp 2.0 error %02X\n", __func__, ret);
- goto exit;
+ break;
}
dbg_hid("%s:got busy hidpp 2.0 error %02X, retrying\n", __func__, ret);
}
}
-exit:
mutex_unlock(&hidpp->send_mutex);
return ret;
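The loop condition `max_retries != 0 && ret` is what makes the retry logic work after the gotos were replaced: only the HIDPP20_ERROR_BUSY case leaves ret non-zero without breaking, so only "busy" answers are retried. A compact standalone model of that shape (try_send() is a made-up stand-in that succeeds on the third call):

#include <errno.h>
#include <stdio.h>

static int try_send(int attempt)
{
	return attempt < 2 ? -EBUSY : 0;	/* succeed on the 3rd try */
}

int main(void)
{
	int ret = -1;				/* "no attempt made yet" */
	int attempt = 0;

	for (int retries = 3; retries != 0 && ret; retries--) {
		ret = try_send(attempt++);
		if (ret && ret != -EBUSY)
			break;			/* hard errors stop retrying */
	}
	printf("ret=%d after %d attempt(s)\n", ret, attempt);
	return 0;
}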
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 8214896adada..76e5353aca0c 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -2224,7 +2224,9 @@ static void wacom_update_name(struct wacom *wacom, const char *suffix)
} else if (strstr(product_name, "Wacom") ||
strstr(product_name, "wacom") ||
strstr(product_name, "WACOM")) {
- strscpy(name, product_name, sizeof(name));
+ if (strscpy(name, product_name, sizeof(name)) < 0) {
+ hid_warn(wacom->hdev, "String overflow while assembling device name");
+ }
} else {
snprintf(name, sizeof(name), "Wacom %s", product_name);
}
@@ -2242,7 +2244,9 @@ static void wacom_update_name(struct wacom *wacom, const char *suffix)
if (name[strlen(name)-1] == ' ')
name[strlen(name)-1] = '\0';
} else {
- strscpy(name, features->name, sizeof(name));
+ if (strscpy(name, features->name, sizeof(name)) < 0) {
+ hid_warn(wacom->hdev, "String overflow while assembling device name");
+ }
}
snprintf(wacom_wac->name, sizeof(wacom_wac->name), "%s%s",
@@ -2410,8 +2414,13 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
goto fail_quirks;
}
- if (features->device_type & WACOM_DEVICETYPE_WL_MONITOR)
+ if (features->device_type & WACOM_DEVICETYPE_WL_MONITOR) {
error = hid_hw_open(hdev);
+ if (error) {
+ hid_err(hdev, "hw open failed\n");
+ goto fail_quirks;
+ }
+ }
wacom_set_shared_values(wacom_wac);
devres_close_group(&hdev->dev, wacom);
@@ -2500,8 +2509,10 @@ static void wacom_wireless_work(struct work_struct *work)
goto fail;
}
- strscpy(wacom_wac->name, wacom_wac1->name,
- sizeof(wacom_wac->name));
+ if (strscpy(wacom_wac->name, wacom_wac1->name,
+ sizeof(wacom_wac->name)) < 0) {
+ hid_warn(wacom->hdev, "String overflow while assembling device name");
+ }
}
return;
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index dc0f7d9a992c..2ccf83837134 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -831,7 +831,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
/* Enter report */
if ((data[1] & 0xfc) == 0xc0) {
/* serial number of the tool */
- wacom->serial[idx] = ((data[3] & 0x0f) << 28) +
+ wacom->serial[idx] = ((__u64)(data[3] & 0x0f) << 28) +
(data[4] << 20) + (data[5] << 12) +
(data[6] << 4) + (data[7] >> 4);
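The (__u64) cast is needed because data[3] & 0x0f is promoted to a 32-bit signed int before the shift; for nibble values of 8 and above the result lands in bit 31 (technically signed overflow) and then sign-extends when widened into the 64-bit serial. A standalone illustration using plain stdint types in place of __u64:

/* Sketch: the sign-extension hazard the cast avoids. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned char nibble = 0x8;
	/* int arithmetic: 8 << 28 sets bit 31, exactly the hazard */
	uint64_t bad  = (nibble & 0x0f) << 28;
	uint64_t good = ((uint64_t)(nibble & 0x0f)) << 28;

	printf("bad  = 0x%016llx\n", (unsigned long long)bad);
	printf("good = 0x%016llx\n", (unsigned long long)good);
	/* bad  = 0xffffffff80000000 (sign-extended, wrong)
	 * good = 0x0000000080000000 */
	return 0;
}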
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 007f26d5f1a4..2f4d09ce027a 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -829,11 +829,22 @@ static void vmbus_wait_for_unload(void)
if (completion_done(&vmbus_connection.unload_event))
goto completed;
- for_each_online_cpu(cpu) {
+ for_each_present_cpu(cpu) {
struct hv_per_cpu_context *hv_cpu
= per_cpu_ptr(hv_context.cpu_context, cpu);
+ /*
+ * In a CoCo VM the synic_message_page is not allocated
+ * in hv_synic_alloc(). Instead it is set/cleared in
+ * hv_synic_enable_regs() and hv_synic_disable_regs()
+ * such that it is set only when the CPU is online. If
+ * not all present CPUs are online, the message page
+ * might be NULL, so skip such CPUs.
+ */
page_addr = hv_cpu->synic_message_page;
+ if (!page_addr)
+ continue;
+
msg = (struct hv_message *)page_addr
+ VMBUS_MESSAGE_SINT;
@@ -867,11 +878,14 @@ completed:
* maybe-pending messages on all CPUs to be able to receive new
* messages after we reconnect.
*/
- for_each_online_cpu(cpu) {
+ for_each_present_cpu(cpu) {
struct hv_per_cpu_context *hv_cpu
= per_cpu_ptr(hv_context.cpu_context, cpu);
page_addr = hv_cpu->synic_message_page;
+ if (!page_addr)
+ continue;
+
msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
msg->header.message_type = HVMSG_NONE;
}
diff --git a/drivers/hv/hv_common.c b/drivers/hv/hv_common.c
index 64f9ceca887b..542a1d53b303 100644
--- a/drivers/hv/hv_common.c
+++ b/drivers/hv/hv_common.c
@@ -364,13 +364,20 @@ int hv_common_cpu_init(unsigned int cpu)
flags = irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL;
inputarg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg);
- *inputarg = kmalloc(pgcount * HV_HYP_PAGE_SIZE, flags);
- if (!(*inputarg))
- return -ENOMEM;
- if (hv_root_partition) {
- outputarg = (void **)this_cpu_ptr(hyperv_pcpu_output_arg);
- *outputarg = (char *)(*inputarg) + HV_HYP_PAGE_SIZE;
+ /*
+ * hyperv_pcpu_input_arg and hyperv_pcpu_output_arg memory is already
+ * allocated if this CPU was previously online and then taken offline
+ */
+ if (!*inputarg) {
+ *inputarg = kmalloc(pgcount * HV_HYP_PAGE_SIZE, flags);
+ if (!(*inputarg))
+ return -ENOMEM;
+
+ if (hv_root_partition) {
+ outputarg = (void **)this_cpu_ptr(hyperv_pcpu_output_arg);
+ *outputarg = (char *)(*inputarg) + HV_HYP_PAGE_SIZE;
+ }
}
msr_vp_index = hv_get_register(HV_REGISTER_VP_INDEX);
@@ -385,24 +392,17 @@ int hv_common_cpu_init(unsigned int cpu)
int hv_common_cpu_die(unsigned int cpu)
{
- unsigned long flags;
- void **inputarg, **outputarg;
- void *mem;
-
- local_irq_save(flags);
-
- inputarg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg);
- mem = *inputarg;
- *inputarg = NULL;
-
- if (hv_root_partition) {
- outputarg = (void **)this_cpu_ptr(hyperv_pcpu_output_arg);
- *outputarg = NULL;
- }
-
- local_irq_restore(flags);
-
- kfree(mem);
+ /*
+ * The hyperv_pcpu_input_arg and hyperv_pcpu_output_arg memory
+ * is not freed when the CPU goes offline as the hyperv_pcpu_input_arg
+ * may be used by the Hyper-V vPCI driver in reassigning interrupts
+ * as part of the offlining process. The interrupt reassignment
+ * happens *after* the CPUHP_AP_HYPERV_ONLINE state has run and
+ * called this function.
+ *
+ * If a previously offlined CPU is brought back online again, the
+ * originally allocated memory is reused in hv_common_cpu_init().
+ */
return 0;
}
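The init/die pair above turns the per-CPU hypercall pages into allocate-once storage: init allocates only when the slot is empty, and die deliberately frees nothing. A toy model of the lifecycle (names hypothetical, malloc standing in for kmalloc):

#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4
static void *pcpu_arg[NR_CPUS];

static int cpu_init(unsigned int cpu)
{
	if (!pcpu_arg[cpu]) {			/* first time online only */
		pcpu_arg[cpu] = malloc(4096);
		if (!pcpu_arg[cpu])
			return -1;		/* -ENOMEM equivalent */
		printf("cpu%u: allocated\n", cpu);
	} else {
		printf("cpu%u: reusing earlier allocation\n", cpu);
	}
	return 0;
}

static int cpu_die(unsigned int cpu)
{
	/* Intentionally empty: the buffer may still be used while the
	 * CPU is going down, and is reused if the CPU comes back. */
	(void)cpu;
	return 0;
}

int main(void)
{
	cpu_init(1);
	cpu_die(1);
	cpu_init(1);				/* reuses, no second malloc */
	return 0;
}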
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 1c65a6dfb9fa..67f95a29aeca 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -1372,7 +1372,7 @@ static int vmbus_bus_init(void)
ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hyperv/vmbus:online",
hv_synic_init, hv_synic_cleanup);
if (ret < 0)
- goto err_cpuhp;
+ goto err_alloc;
hyperv_cpuhp_online = ret;
ret = vmbus_connect();
@@ -1392,9 +1392,8 @@ static int vmbus_bus_init(void)
err_connect:
cpuhp_remove_state(hyperv_cpuhp_online);
-err_cpuhp:
- hv_synic_free();
err_alloc:
+ hv_synic_free();
if (vmbus_irq == -1) {
hv_remove_vmbus_handler();
} else {
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index 711f451b6946..89e8ed214ea4 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -402,6 +402,7 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
trace_id = coresight_trace_id_get_cpu_id(cpu);
if (!IS_VALID_CS_TRACE_ID(trace_id)) {
cpumask_clear_cpu(cpu, mask);
+ coresight_release_path(path);
continue;
}
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 918d461fcf4a..eaa296ced167 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -942,7 +942,7 @@ tmc_etr_buf_insert_barrier_packet(struct etr_buf *etr_buf, u64 offset)
len = tmc_etr_buf_get_data(etr_buf, offset,
CORESIGHT_BARRIER_PKT_SIZE, &bufp);
- if (WARN_ON(len < CORESIGHT_BARRIER_PKT_SIZE))
+ if (WARN_ON(len < 0 || len < CORESIGHT_BARRIER_PKT_SIZE))
return -EINVAL;
coresight_insert_barrier_packet(bufp);
return offset + CORESIGHT_BARRIER_PKT_SIZE;
diff --git a/drivers/hwtracing/coresight/coresight-trbe.c b/drivers/hwtracing/coresight/coresight-trbe.c
index 1fc4fd79a1c6..1bab91ce8e95 100644
--- a/drivers/hwtracing/coresight/coresight-trbe.c
+++ b/drivers/hwtracing/coresight/coresight-trbe.c
@@ -218,7 +218,7 @@ static inline void set_trbe_enabled(struct trbe_cpudata *cpudata, u64 trblimitr)
* Enable the TRBE without clearing LIMITPTR which
* might be required for fetching the buffer limits.
*/
- trblimitr |= TRBLIMITR_ENABLE;
+ trblimitr |= TRBLIMITR_EL1_E;
write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1);
/* Synchronize the TRBE enable event */
@@ -236,7 +236,7 @@ static inline void set_trbe_disabled(struct trbe_cpudata *cpudata)
* Disable the TRBE without clearing LIMITPTR which
* might be required for fetching the buffer limits.
*/
- trblimitr &= ~TRBLIMITR_ENABLE;
+ trblimitr &= ~TRBLIMITR_EL1_E;
write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1);
if (trbe_needs_drain_after_disable(cpudata))
@@ -582,12 +582,12 @@ static void clr_trbe_status(void)
u64 trbsr = read_sysreg_s(SYS_TRBSR_EL1);
WARN_ON(is_trbe_enabled());
- trbsr &= ~TRBSR_IRQ;
- trbsr &= ~TRBSR_TRG;
- trbsr &= ~TRBSR_WRAP;
- trbsr &= ~(TRBSR_EC_MASK << TRBSR_EC_SHIFT);
- trbsr &= ~(TRBSR_BSC_MASK << TRBSR_BSC_SHIFT);
- trbsr &= ~TRBSR_STOP;
+ trbsr &= ~TRBSR_EL1_IRQ;
+ trbsr &= ~TRBSR_EL1_TRG;
+ trbsr &= ~TRBSR_EL1_WRAP;
+ trbsr &= ~TRBSR_EL1_EC_MASK;
+ trbsr &= ~TRBSR_EL1_BSC_MASK;
+ trbsr &= ~TRBSR_EL1_S;
write_sysreg_s(trbsr, SYS_TRBSR_EL1);
}
@@ -596,13 +596,13 @@ static void set_trbe_limit_pointer_enabled(struct trbe_buf *buf)
u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);
unsigned long addr = buf->trbe_limit;
- WARN_ON(!IS_ALIGNED(addr, (1UL << TRBLIMITR_LIMIT_SHIFT)));
+ WARN_ON(!IS_ALIGNED(addr, (1UL << TRBLIMITR_EL1_LIMIT_SHIFT)));
WARN_ON(!IS_ALIGNED(addr, PAGE_SIZE));
- trblimitr &= ~TRBLIMITR_NVM;
- trblimitr &= ~(TRBLIMITR_FILL_MODE_MASK << TRBLIMITR_FILL_MODE_SHIFT);
- trblimitr &= ~(TRBLIMITR_TRIG_MODE_MASK << TRBLIMITR_TRIG_MODE_SHIFT);
- trblimitr &= ~(TRBLIMITR_LIMIT_MASK << TRBLIMITR_LIMIT_SHIFT);
+ trblimitr &= ~TRBLIMITR_EL1_nVM;
+ trblimitr &= ~TRBLIMITR_EL1_FM_MASK;
+ trblimitr &= ~TRBLIMITR_EL1_TM_MASK;
+ trblimitr &= ~TRBLIMITR_EL1_LIMIT_MASK;
/*
* Fill trace buffer mode is used here while configuring the
@@ -613,14 +613,15 @@ static void set_trbe_limit_pointer_enabled(struct trbe_buf *buf)
* trace data in the interrupt handler, before reconfiguring
* the TRBE.
*/
- trblimitr |= (TRBE_FILL_MODE_FILL & TRBLIMITR_FILL_MODE_MASK) << TRBLIMITR_FILL_MODE_SHIFT;
+ trblimitr |= (TRBLIMITR_EL1_FM_FILL << TRBLIMITR_EL1_FM_SHIFT) &
+ TRBLIMITR_EL1_FM_MASK;
/*
* Trigger mode is not used here while configuring the TRBE for
* the trace capture. Hence just keep this in the ignore mode.
*/
- trblimitr |= (TRBE_TRIG_MODE_IGNORE & TRBLIMITR_TRIG_MODE_MASK) <<
- TRBLIMITR_TRIG_MODE_SHIFT;
+ trblimitr |= (TRBLIMITR_EL1_TM_IGNR << TRBLIMITR_EL1_TM_SHIFT) &
+ TRBLIMITR_EL1_TM_MASK;
trblimitr |= (addr & PAGE_MASK);
set_trbe_enabled(buf->cpudata, trblimitr);
}
diff --git a/drivers/hwtracing/coresight/coresight-trbe.h b/drivers/hwtracing/coresight/coresight-trbe.h
index 98ff1b17ad07..77cbb5c63878 100644
--- a/drivers/hwtracing/coresight/coresight-trbe.h
+++ b/drivers/hwtracing/coresight/coresight-trbe.h
@@ -30,7 +30,7 @@ static inline bool is_trbe_enabled(void)
{
u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);
- return trblimitr & TRBLIMITR_ENABLE;
+ return trblimitr & TRBLIMITR_EL1_E;
}
#define TRBE_EC_OTHERS 0
@@ -39,7 +39,7 @@ static inline bool is_trbe_enabled(void)
static inline int get_trbe_ec(u64 trbsr)
{
- return (trbsr >> TRBSR_EC_SHIFT) & TRBSR_EC_MASK;
+ return (trbsr & TRBSR_EL1_EC_MASK) >> TRBSR_EL1_EC_SHIFT;
}
#define TRBE_BSC_NOT_STOPPED 0
@@ -48,63 +48,55 @@ static inline int get_trbe_ec(u64 trbsr)
static inline int get_trbe_bsc(u64 trbsr)
{
- return (trbsr >> TRBSR_BSC_SHIFT) & TRBSR_BSC_MASK;
+ return (trbsr & TRBSR_EL1_BSC_MASK) >> TRBSR_EL1_BSC_SHIFT;
}
static inline void clr_trbe_irq(void)
{
u64 trbsr = read_sysreg_s(SYS_TRBSR_EL1);
- trbsr &= ~TRBSR_IRQ;
+ trbsr &= ~TRBSR_EL1_IRQ;
write_sysreg_s(trbsr, SYS_TRBSR_EL1);
}
static inline bool is_trbe_irq(u64 trbsr)
{
- return trbsr & TRBSR_IRQ;
+ return trbsr & TRBSR_EL1_IRQ;
}
static inline bool is_trbe_trg(u64 trbsr)
{
- return trbsr & TRBSR_TRG;
+ return trbsr & TRBSR_EL1_TRG;
}
static inline bool is_trbe_wrap(u64 trbsr)
{
- return trbsr & TRBSR_WRAP;
+ return trbsr & TRBSR_EL1_WRAP;
}
static inline bool is_trbe_abort(u64 trbsr)
{
- return trbsr & TRBSR_ABORT;
+ return trbsr & TRBSR_EL1_EA;
}
static inline bool is_trbe_running(u64 trbsr)
{
- return !(trbsr & TRBSR_STOP);
+ return !(trbsr & TRBSR_EL1_S);
}
-#define TRBE_TRIG_MODE_STOP 0
-#define TRBE_TRIG_MODE_IRQ 1
-#define TRBE_TRIG_MODE_IGNORE 3
-
-#define TRBE_FILL_MODE_FILL 0
-#define TRBE_FILL_MODE_WRAP 1
-#define TRBE_FILL_MODE_CIRCULAR_BUFFER 3
-
static inline bool get_trbe_flag_update(u64 trbidr)
{
- return trbidr & TRBIDR_FLAG;
+ return trbidr & TRBIDR_EL1_F;
}
static inline bool is_trbe_programmable(u64 trbidr)
{
- return !(trbidr & TRBIDR_PROG);
+ return !(trbidr & TRBIDR_EL1_P);
}
static inline int get_trbe_address_align(u64 trbidr)
{
- return (trbidr >> TRBIDR_ALIGN_SHIFT) & TRBIDR_ALIGN_MASK;
+ return (trbidr & TRBIDR_EL1_Align_MASK) >> TRBIDR_EL1_Align_SHIFT;
}
static inline unsigned long get_trbe_write_pointer(void)
@@ -121,7 +113,7 @@ static inline void set_trbe_write_pointer(unsigned long addr)
static inline unsigned long get_trbe_limit_pointer(void)
{
u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);
- unsigned long addr = trblimitr & (TRBLIMITR_LIMIT_MASK << TRBLIMITR_LIMIT_SHIFT);
+ unsigned long addr = trblimitr & TRBLIMITR_EL1_LIMIT_MASK;
WARN_ON(!IS_ALIGNED(addr, PAGE_SIZE));
return addr;
@@ -130,7 +122,7 @@ static inline unsigned long get_trbe_limit_pointer(void)
static inline unsigned long get_trbe_base_pointer(void)
{
u64 trbbaser = read_sysreg_s(SYS_TRBBASER_EL1);
- unsigned long addr = trbbaser & (TRBBASER_BASE_MASK << TRBBASER_BASE_SHIFT);
+ unsigned long addr = trbbaser & TRBBASER_EL1_BASE_MASK;
WARN_ON(!IS_ALIGNED(addr, PAGE_SIZE));
return addr;
@@ -139,7 +131,7 @@ static inline unsigned long get_trbe_base_pointer(void)
static inline void set_trbe_base_pointer(unsigned long addr)
{
WARN_ON(is_trbe_enabled());
- WARN_ON(!IS_ALIGNED(addr, (1UL << TRBBASER_BASE_SHIFT)));
+ WARN_ON(!IS_ALIGNED(addr, (1UL << TRBBASER_EL1_BASE_SHIFT)));
WARN_ON(!IS_ALIGNED(addr, PAGE_SIZE));
write_sysreg_s(addr, SYS_TRBBASER_EL1);
}
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index c5d87aae39c6..bf23bfb51aea 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -40,6 +40,7 @@
#define DW_IC_CON_BUS_CLEAR_CTRL BIT(11)
#define DW_IC_DATA_CMD_DAT GENMASK(7, 0)
+#define DW_IC_DATA_CMD_FIRST_DATA_BYTE BIT(11)
/*
* Registers offset
diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c
index cec25054bb24..2e079cf20bb5 100644
--- a/drivers/i2c/busses/i2c-designware-slave.c
+++ b/drivers/i2c/busses/i2c-designware-slave.c
@@ -176,6 +176,10 @@ static irqreturn_t i2c_dw_isr_slave(int this_irq, void *dev_id)
do {
regmap_read(dev->map, DW_IC_DATA_CMD, &tmp);
+ if (tmp & DW_IC_DATA_CMD_FIRST_DATA_BYTE)
+ i2c_slave_event(dev->slave,
+ I2C_SLAVE_WRITE_REQUESTED,
+ &val);
val = tmp;
i2c_slave_event(dev->slave, I2C_SLAVE_WRITE_RECEIVED,
&val);
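Bit 11 of DW_IC_DATA_CMD marks the first byte of each write transfer, which is what lets the ISR inject the missing I2C_SLAVE_WRITE_REQUESTED event before the first I2C_SLAVE_WRITE_RECEIVED. A simplified model of that event sequencing (hardcoded FIFO words instead of the regmap read; event names only mirror the slave framework):

#include <stdio.h>

enum { SLAVE_WRITE_REQUESTED, SLAVE_WRITE_RECEIVED };

static void slave_event(int ev, unsigned int val)
{
	if (ev == SLAVE_WRITE_REQUESTED)
		printf("write requested\n");
	else
		printf("write received: 0x%02x\n", val);
}

int main(void)
{
	/* bit 11 set on the first byte of each write transfer */
	unsigned int fifo[] = { 0x800 | 0x12, 0x34, 0x800 | 0x56 };

	for (int i = 0; i < 3; i++) {
		if (fifo[i] & 0x800)		/* FIRST_DATA_BYTE */
			slave_event(SLAVE_WRITE_REQUESTED, 0);
		slave_event(SLAVE_WRITE_RECEIVED, fifo[i] & 0xff);
	}
	return 0;
}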
diff --git a/drivers/i2c/busses/i2c-img-scb.c b/drivers/i2c/busses/i2c-img-scb.c
index 8e987945ed45..39c479f96eb5 100644
--- a/drivers/i2c/busses/i2c-img-scb.c
+++ b/drivers/i2c/busses/i2c-img-scb.c
@@ -257,7 +257,7 @@
#define IMG_I2C_TIMEOUT (msecs_to_jiffies(1000))
/*
- * Worst incs are 1 (innacurate) and 16*256 (irregular).
+ * Worst incs are 1 (inaccurate) and 16*256 (irregular).
* So a sensible inc is the logarithmic mean: 64 (2^6), which is
* in the middle of the valid range (0-127).
*/
diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
index 1af0a637d7f1..4d24ceb57ee7 100644
--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
+++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
@@ -201,8 +201,8 @@ static void lpi2c_imx_stop(struct lpi2c_imx_struct *lpi2c_imx)
/* CLKLO = I2C_CLK_RATIO * CLKHI, SETHOLD = CLKHI, DATAVD = CLKHI/2 */
static int lpi2c_imx_config(struct lpi2c_imx_struct *lpi2c_imx)
{
- u8 prescale, filt, sethold, clkhi, clklo, datavd;
- unsigned int clk_rate, clk_cycle;
+ u8 prescale, filt, sethold, datavd;
+ unsigned int clk_rate, clk_cycle, clkhi, clklo;
enum lpi2c_imx_pincfg pincfg;
unsigned int temp;
diff --git a/drivers/i2c/busses/i2c-mchp-pci1xxxx.c b/drivers/i2c/busses/i2c-mchp-pci1xxxx.c
index b21ffd6df927..5ef136c3ecb1 100644
--- a/drivers/i2c/busses/i2c-mchp-pci1xxxx.c
+++ b/drivers/i2c/busses/i2c-mchp-pci1xxxx.c
@@ -1118,8 +1118,10 @@ static int pci1xxxx_i2c_resume(struct device *dev)
static DEFINE_SIMPLE_DEV_PM_OPS(pci1xxxx_i2c_pm_ops, pci1xxxx_i2c_suspend,
pci1xxxx_i2c_resume);
-static void pci1xxxx_i2c_shutdown(struct pci1xxxx_i2c *i2c)
+static void pci1xxxx_i2c_shutdown(void *data)
{
+ struct pci1xxxx_i2c *i2c = data;
+
pci1xxxx_i2c_config_padctrl(i2c, false);
pci1xxxx_i2c_configure_core_reg(i2c, false);
}
@@ -1156,7 +1158,7 @@ static int pci1xxxx_i2c_probe_pci(struct pci_dev *pdev,
init_completion(&i2c->i2c_xfer_done);
pci1xxxx_i2c_init(i2c);
- ret = devm_add_action(dev, (void (*)(void *))pci1xxxx_i2c_shutdown, i2c);
+ ret = devm_add_action(dev, pci1xxxx_i2c_shutdown, i2c);
if (ret)
return ret;
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 047dfef7a657..878c076ebdc6 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -520,6 +520,17 @@ mv64xxx_i2c_intr(int irq, void *dev_id)
while (readl(drv_data->reg_base + drv_data->reg_offsets.control) &
MV64XXX_I2C_REG_CONTROL_IFLG) {
+ /*
+		 * It seems that sometimes the controller updates the status
+		 * register only after it asserts IFLG in the control register.
+ * This may result in weird bugs when in atomic mode. A delay
+ * of 100 ns before reading the status register solves this
+ * issue. This bug does not seem to appear when using
+ * interrupts.
+ */
+ if (drv_data->atomic)
+ ndelay(100);
+
status = readl(drv_data->reg_base + drv_data->reg_offsets.status);
mv64xxx_i2c_fsm(drv_data, status);
mv64xxx_i2c_do_action(drv_data);
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index 2e153f2f71b6..78682388e02e 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -1752,16 +1752,21 @@ nodma:
if (!clk_freq || clk_freq > I2C_MAX_FAST_MODE_PLUS_FREQ) {
dev_err(qup->dev, "clock frequency not supported %d\n",
clk_freq);
- return -EINVAL;
+ ret = -EINVAL;
+ goto fail_dma;
}
qup->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(qup->base))
- return PTR_ERR(qup->base);
+ if (IS_ERR(qup->base)) {
+ ret = PTR_ERR(qup->base);
+ goto fail_dma;
+ }
qup->irq = platform_get_irq(pdev, 0);
- if (qup->irq < 0)
- return qup->irq;
+ if (qup->irq < 0) {
+ ret = qup->irq;
+ goto fail_dma;
+ }
if (has_acpi_companion(qup->dev)) {
ret = device_property_read_u32(qup->dev,
@@ -1775,13 +1780,15 @@ nodma:
qup->clk = devm_clk_get(qup->dev, "core");
if (IS_ERR(qup->clk)) {
dev_err(qup->dev, "Could not get core clock\n");
- return PTR_ERR(qup->clk);
+ ret = PTR_ERR(qup->clk);
+ goto fail_dma;
}
qup->pclk = devm_clk_get(qup->dev, "iface");
if (IS_ERR(qup->pclk)) {
dev_err(qup->dev, "Could not get iface clock\n");
- return PTR_ERR(qup->pclk);
+ ret = PTR_ERR(qup->pclk);
+ goto fail_dma;
}
qup_i2c_enable_clocks(qup);
src_clk_freq = clk_get_rate(qup->clk);
diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c
index 4fe15cd78907..ffc54fbf814d 100644
--- a/drivers/i2c/busses/i2c-sprd.c
+++ b/drivers/i2c/busses/i2c-sprd.c
@@ -576,12 +576,14 @@ static int sprd_i2c_remove(struct platform_device *pdev)
struct sprd_i2c *i2c_dev = platform_get_drvdata(pdev);
int ret;
- ret = pm_runtime_resume_and_get(i2c_dev->dev);
+ ret = pm_runtime_get_sync(i2c_dev->dev);
if (ret < 0)
- return ret;
+ dev_err(&pdev->dev, "Failed to resume device (%pe)\n", ERR_PTR(ret));
i2c_del_adapter(&i2c_dev->adap);
- clk_disable_unprepare(i2c_dev->clk);
+
+ if (ret >= 0)
+ clk_disable_unprepare(i2c_dev->clk);
pm_runtime_put_noidle(i2c_dev->dev);
pm_runtime_disable(i2c_dev->dev);
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index aa2d19db2b1d..34201d7ef33e 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -199,6 +199,43 @@ static __cpuidle int intel_idle_xstate(struct cpuidle_device *dev,
return __intel_idle(dev, drv, index);
}
+static __always_inline int __intel_idle_hlt(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
+{
+ raw_safe_halt();
+ raw_local_irq_disable();
+ return index;
+}
+
+/**
+ * intel_idle_hlt - Ask the processor to enter the given idle state using hlt.
+ * @dev: cpuidle device of the target CPU.
+ * @drv: cpuidle driver (assumed to point to intel_idle_driver).
+ * @index: Target idle state index.
+ *
+ * Use the HLT instruction to notify the processor that the CPU represented by
+ * @dev is idle and it can try to enter the idle state corresponding to @index.
+ *
+ * Must be called under local_irq_disable().
+ */
+static __cpuidle int intel_idle_hlt(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
+{
+ return __intel_idle_hlt(dev, drv, index);
+}
+
+static __cpuidle int intel_idle_hlt_irq_on(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
+{
+ int ret;
+
+ raw_local_irq_enable();
+ ret = __intel_idle_hlt(dev, drv, index);
+ raw_local_irq_disable();
+
+ return ret;
+}
+
/**
* intel_idle_s2idle - Ask the processor to enter the given idle state.
* @dev: cpuidle device of the target CPU.
@@ -1242,6 +1279,25 @@ static struct cpuidle_state snr_cstates[] __initdata = {
.enter = NULL }
};
+static struct cpuidle_state vmguest_cstates[] __initdata = {
+ {
+ .name = "C1",
+ .desc = "HLT",
+ .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_IRQ_ENABLE,
+ .exit_latency = 5,
+ .target_residency = 10,
+ .enter = &intel_idle_hlt, },
+ {
+ .name = "C1L",
+ .desc = "Long HLT",
+ .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 5,
+ .target_residency = 200,
+ .enter = &intel_idle_hlt, },
+ {
+ .enter = NULL }
+};
+
static const struct idle_cpu idle_cpu_nehalem __initconst = {
.state_table = nehalem_cstates,
.auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE,
@@ -1839,6 +1895,66 @@ static bool __init intel_idle_verify_cstate(unsigned int mwait_hint)
return true;
}
+static void state_update_enter_method(struct cpuidle_state *state, int cstate)
+{
+ if (state->enter == intel_idle_hlt) {
+ if (force_irq_on) {
+			pr_info("forced intel_idle_hlt_irq_on for state %d\n", cstate);
+ state->enter = intel_idle_hlt_irq_on;
+ }
+ return;
+ }
+ if (state->enter == intel_idle_hlt_irq_on)
+ return; /* no update scenarios */
+
+ if (state->flags & CPUIDLE_FLAG_INIT_XSTATE) {
+ /*
+		 * Combining XSTATE with the IBRS or IRQ_ENABLE flags
+		 * is not currently supported by this driver.
+ */
+ WARN_ON_ONCE(state->flags & CPUIDLE_FLAG_IBRS);
+ WARN_ON_ONCE(state->flags & CPUIDLE_FLAG_IRQ_ENABLE);
+ state->enter = intel_idle_xstate;
+ return;
+ }
+
+ if (cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS) &&
+ state->flags & CPUIDLE_FLAG_IBRS) {
+ /*
+ * IBRS mitigation requires that C-states are entered
+ * with interrupts disabled.
+ */
+ WARN_ON_ONCE(state->flags & CPUIDLE_FLAG_IRQ_ENABLE);
+ state->enter = intel_idle_ibrs;
+ return;
+ }
+
+ if (state->flags & CPUIDLE_FLAG_IRQ_ENABLE) {
+ state->enter = intel_idle_irq;
+ return;
+ }
+
+ if (force_irq_on) {
+ pr_info("forced intel_idle_irq for state %d\n", cstate);
+ state->enter = intel_idle_irq;
+ }
+}
+
+/*
+ * For mwait-based states, we want to verify the cpuid data to see if the state
+ * is actually supported by this specific CPU.
+ * For non-mwait-based states, this check should be skipped.
+ */
+static bool should_verify_mwait(struct cpuidle_state *state)
+{
+ if (state->enter == intel_idle_hlt)
+ return false;
+ if (state->enter == intel_idle_hlt_irq_on)
+ return false;
+
+ return true;
+}
+
static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
{
int cstate;
@@ -1887,35 +2003,15 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
}
mwait_hint = flg2MWAIT(cpuidle_state_table[cstate].flags);
- if (!intel_idle_verify_cstate(mwait_hint))
+ if (should_verify_mwait(&cpuidle_state_table[cstate]) && !intel_idle_verify_cstate(mwait_hint))
continue;
/* Structure copy. */
drv->states[drv->state_count] = cpuidle_state_table[cstate];
state = &drv->states[drv->state_count];
- if (state->flags & CPUIDLE_FLAG_INIT_XSTATE) {
- /*
- * Combining with XSTATE with IBRS or IRQ_ENABLE flags
- * is not currently supported but this driver.
- */
- WARN_ON_ONCE(state->flags & CPUIDLE_FLAG_IBRS);
- WARN_ON_ONCE(state->flags & CPUIDLE_FLAG_IRQ_ENABLE);
- state->enter = intel_idle_xstate;
- } else if (cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS) &&
- state->flags & CPUIDLE_FLAG_IBRS) {
- /*
- * IBRS mitigation requires that C-states are entered
- * with interrupts disabled.
- */
- WARN_ON_ONCE(state->flags & CPUIDLE_FLAG_IRQ_ENABLE);
- state->enter = intel_idle_ibrs;
- } else if (state->flags & CPUIDLE_FLAG_IRQ_ENABLE) {
- state->enter = intel_idle_irq;
- } else if (force_irq_on) {
- pr_info("forced intel_idle_irq for state %d\n", cstate);
- state->enter = intel_idle_irq;
- }
+ state_update_enter_method(state, cstate);
+
if ((disabled_states_mask & BIT(drv->state_count)) ||
((icpu->use_acpi || force_use_acpi) &&
@@ -2041,6 +2137,93 @@ static void __init intel_idle_cpuidle_devices_uninit(void)
cpuidle_unregister_device(per_cpu_ptr(intel_idle_cpuidle_devices, i));
}
+/*
+ * Match up the latency and break-even point of the bare-metal (CPU-based)
+ * states with the deepest available VM state.
+ *
+ * We only want to do this for the deepest state, the one that has
+ * the TLB_FLUSHED flag set.
+ *
+ * All our short idle states are dominated by vmexit/vmenter latencies,
+ * not the underlying hardware latencies, so we keep our values for these.
+ */
+static void matchup_vm_state_with_baremetal(void)
+{
+ int cstate;
+
+ for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) {
+ int matching_cstate;
+
+ if (intel_idle_max_cstate_reached(cstate))
+ break;
+
+ if (!cpuidle_state_table[cstate].enter)
+ break;
+
+ if (!(cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_TLB_FLUSHED))
+ continue;
+
+ for (matching_cstate = 0; matching_cstate < CPUIDLE_STATE_MAX; ++matching_cstate) {
+ if (!icpu->state_table[matching_cstate].enter)
+ break;
+ if (icpu->state_table[matching_cstate].exit_latency > cpuidle_state_table[cstate].exit_latency) {
+ cpuidle_state_table[cstate].exit_latency = icpu->state_table[matching_cstate].exit_latency;
+ cpuidle_state_table[cstate].target_residency = icpu->state_table[matching_cstate].target_residency;
+ }
+ }
+
+ }
+}
+
+
+static int __init intel_idle_vminit(const struct x86_cpu_id *id)
+{
+ int retval;
+
+ cpuidle_state_table = vmguest_cstates;
+
+ icpu = (const struct idle_cpu *)id->driver_data;
+
+ pr_debug("v" INTEL_IDLE_VERSION " model 0x%X\n",
+ boot_cpu_data.x86_model);
+
+ intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
+ if (!intel_idle_cpuidle_devices)
+ return -ENOMEM;
+
+ /*
+	 * We don't know exactly what the host will do when we go idle, but as a
+	 * worst-case estimate we can assume that the exit latency of the deepest
+	 * host state will be hit for our deep (long-duration) guest idle state.
+	 * The same logic applies to the break-even point for the long-duration
+	 * guest idle state. So let's copy these two properties from the table we
+	 * found for the host CPU type.
+ */
+ matchup_vm_state_with_baremetal();
+
+ intel_idle_cpuidle_driver_init(&intel_idle_driver);
+
+ retval = cpuidle_register_driver(&intel_idle_driver);
+ if (retval) {
+ struct cpuidle_driver *drv = cpuidle_get_driver();
+ printk(KERN_DEBUG pr_fmt("intel_idle yielding to %s\n"),
+ drv ? drv->name : "none");
+ goto init_driver_fail;
+ }
+
+ retval = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "idle/intel:online",
+ intel_idle_cpu_online, NULL);
+ if (retval < 0)
+ goto hp_setup_fail;
+
+ return 0;
+hp_setup_fail:
+ intel_idle_cpuidle_devices_uninit();
+ cpuidle_unregister_driver(&intel_idle_driver);
+init_driver_fail:
+ free_percpu(intel_idle_cpuidle_devices);
+ return retval;
+}
+
static int __init intel_idle_init(void)
{
const struct x86_cpu_id *id;
@@ -2059,6 +2242,8 @@ static int __init intel_idle_init(void)
id = x86_match_cpu(intel_idle_ids);
if (id) {
if (!boot_cpu_has(X86_FEATURE_MWAIT)) {
+ if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
+ return intel_idle_vminit(id);
pr_debug("Please enable MWAIT in BIOS SETUP\n");
return -ENODEV;
}
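The matchup logic above scans the host table and lifts the guest's deep state up to the largest host exit latency it finds, leaving the short HLT state untouched. A simplified, self-contained version with illustrative numbers:

#include <stdio.h>

struct state { int exit_latency, target_residency, tlb_flushed; };

int main(void)
{
	struct state guest[] = {
		{ 5, 10,  0 },			/* short HLT: keep as-is   */
		{ 5, 200, 1 },			/* long HLT: to be matched */
	};
	struct state host[] = { { 2, 2, 0 }, { 70, 100, 0 }, { 890, 2600, 0 } };

	for (int g = 0; g < 2; g++) {
		if (!guest[g].tlb_flushed)
			continue;		/* only the deepest state */
		for (int h = 0; h < 3; h++) {
			if (host[h].exit_latency > guest[g].exit_latency) {
				guest[g].exit_latency = host[h].exit_latency;
				guest[g].target_residency = host[h].target_residency;
			}
		}
		printf("deep state: latency=%d residency=%d\n",
		       guest[g].exit_latency, guest[g].target_residency);
	}
	return 0;	/* prints latency=890 residency=2600 */
}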
diff --git a/drivers/iio/accel/kionix-kx022a.c b/drivers/iio/accel/kionix-kx022a.c
index f98393d74666..b8636fa8eaeb 100644
--- a/drivers/iio/accel/kionix-kx022a.c
+++ b/drivers/iio/accel/kionix-kx022a.c
@@ -1048,7 +1048,7 @@ int kx022a_probe_internal(struct device *dev)
data->ien_reg = KX022A_REG_INC4;
} else {
irq = fwnode_irq_get_byname(fwnode, "INT2");
- if (irq <= 0)
+ if (irq < 0)
return dev_err_probe(dev, irq, "No suitable IRQ\n");
data->inc_reg = KX022A_REG_INC5;
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index 5f7d81b44b1d..282e539157f8 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -1291,12 +1291,12 @@ static int apply_acpi_orientation(struct iio_dev *indio_dev)
adev = ACPI_COMPANION(indio_dev->dev.parent);
if (!adev)
- return 0;
+ return -ENXIO;
/* Read _ONT data, which should be a package of 6 integers. */
status = acpi_evaluate_object(adev->handle, "_ONT", NULL, &buffer);
if (status == AE_NOT_FOUND) {
- return 0;
+ return -ENXIO;
} else if (ACPI_FAILURE(status)) {
dev_warn(&indio_dev->dev, "failed to execute _ONT: %d\n",
status);
diff --git a/drivers/iio/adc/ad4130.c b/drivers/iio/adc/ad4130.c
index 38394341fd6e..5a5dd5e87ffc 100644
--- a/drivers/iio/adc/ad4130.c
+++ b/drivers/iio/adc/ad4130.c
@@ -1817,6 +1817,11 @@ static const struct clk_ops ad4130_int_clk_ops = {
.unprepare = ad4130_int_clk_unprepare,
};
+static void ad4130_clk_del_provider(void *of_node)
+{
+ of_clk_del_provider(of_node);
+}
+
static int ad4130_setup_int_clk(struct ad4130_state *st)
{
struct device *dev = &st->spi->dev;
@@ -1824,6 +1829,7 @@ static int ad4130_setup_int_clk(struct ad4130_state *st)
struct clk_init_data init;
const char *clk_name;
struct clk *clk;
+ int ret;
if (st->int_pin_sel == AD4130_INT_PIN_CLK ||
st->mclk_sel != AD4130_MCLK_76_8KHZ)
@@ -1843,7 +1849,11 @@ static int ad4130_setup_int_clk(struct ad4130_state *st)
if (IS_ERR(clk))
return PTR_ERR(clk);
- return of_clk_add_provider(of_node, of_clk_src_simple_get, clk);
+ ret = of_clk_add_provider(of_node, of_clk_src_simple_get, clk);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, ad4130_clk_del_provider, of_node);
}
static int ad4130_setup(struct iio_dev *indio_dev)
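The fix registers a teardown action right after of_clk_add_provider() succeeds, so the provider is removed again when the device unbinds; devm_add_action_or_reset() additionally runs the action immediately if the registration itself fails. A toy stand-in for that pairing (one global slot instead of the real devm list):

#include <stdio.h>

static void (*teardown)(void *);
static void *teardown_arg;

static int add_action(void (*fn)(void *), void *arg)
{
	teardown = fn;		/* real devm runs this on driver detach */
	teardown_arg = arg;
	return 0;		/* or_reset: a failure here would call fn(arg) */
}

static void del_provider(void *node)
{
	printf("clk provider for %s removed\n", (const char *)node);
}

int main(void)
{
	/* after a successful of_clk_add_provider()-style call: */
	if (add_action(del_provider, (void *)"adc-node"))
		return 1;

	teardown(teardown_arg);	/* simulated device detach */
	return 0;
}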
diff --git a/drivers/iio/adc/ad7192.c b/drivers/iio/adc/ad7192.c
index 55a6ab591016..99bb604b78c8 100644
--- a/drivers/iio/adc/ad7192.c
+++ b/drivers/iio/adc/ad7192.c
@@ -897,10 +897,6 @@ static const struct iio_info ad7195_info = {
__AD719x_CHANNEL(_si, _channel1, -1, _address, NULL, IIO_VOLTAGE, \
BIT(IIO_CHAN_INFO_SCALE), ad7192_calibsys_ext_info)
-#define AD719x_SHORTED_CHANNEL(_si, _channel1, _address) \
- __AD719x_CHANNEL(_si, _channel1, -1, _address, "shorted", IIO_VOLTAGE, \
- BIT(IIO_CHAN_INFO_SCALE), ad7192_calibsys_ext_info)
-
#define AD719x_TEMP_CHANNEL(_si, _address) \
__AD719x_CHANNEL(_si, 0, -1, _address, NULL, IIO_TEMP, 0, NULL)
@@ -908,7 +904,7 @@ static const struct iio_chan_spec ad7192_channels[] = {
AD719x_DIFF_CHANNEL(0, 1, 2, AD7192_CH_AIN1P_AIN2M),
AD719x_DIFF_CHANNEL(1, 3, 4, AD7192_CH_AIN3P_AIN4M),
AD719x_TEMP_CHANNEL(2, AD7192_CH_TEMP),
- AD719x_SHORTED_CHANNEL(3, 2, AD7192_CH_AIN2P_AIN2M),
+ AD719x_DIFF_CHANNEL(3, 2, 2, AD7192_CH_AIN2P_AIN2M),
AD719x_CHANNEL(4, 1, AD7192_CH_AIN1),
AD719x_CHANNEL(5, 2, AD7192_CH_AIN2),
AD719x_CHANNEL(6, 3, AD7192_CH_AIN3),
@@ -922,7 +918,7 @@ static const struct iio_chan_spec ad7193_channels[] = {
AD719x_DIFF_CHANNEL(2, 5, 6, AD7193_CH_AIN5P_AIN6M),
AD719x_DIFF_CHANNEL(3, 7, 8, AD7193_CH_AIN7P_AIN8M),
AD719x_TEMP_CHANNEL(4, AD7193_CH_TEMP),
- AD719x_SHORTED_CHANNEL(5, 2, AD7193_CH_AIN2P_AIN2M),
+ AD719x_DIFF_CHANNEL(5, 2, 2, AD7193_CH_AIN2P_AIN2M),
AD719x_CHANNEL(6, 1, AD7193_CH_AIN1),
AD719x_CHANNEL(7, 2, AD7193_CH_AIN2),
AD719x_CHANNEL(8, 3, AD7193_CH_AIN3),
diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
index d8570f620785..7e2192870743 100644
--- a/drivers/iio/adc/ad_sigma_delta.c
+++ b/drivers/iio/adc/ad_sigma_delta.c
@@ -584,6 +584,10 @@ static int devm_ad_sd_probe_trigger(struct device *dev, struct iio_dev *indio_de
init_completion(&sigma_delta->completion);
sigma_delta->irq_dis = true;
+
+ /* the IRQ core clears IRQ_DISABLE_UNLAZY flag when freeing an IRQ */
+ irq_set_status_flags(sigma_delta->spi->irq, IRQ_DISABLE_UNLAZY);
+
ret = devm_request_irq(dev, sigma_delta->spi->irq,
ad_sd_data_rdy_trig_poll,
sigma_delta->info->irq_flags | IRQF_NO_AUTOEN,
diff --git a/drivers/iio/adc/imx93_adc.c b/drivers/iio/adc/imx93_adc.c
index a775d2e40567..dce9ec91e4a7 100644
--- a/drivers/iio/adc/imx93_adc.c
+++ b/drivers/iio/adc/imx93_adc.c
@@ -236,8 +236,7 @@ static int imx93_adc_read_raw(struct iio_dev *indio_dev,
{
struct imx93_adc *adc = iio_priv(indio_dev);
struct device *dev = adc->dev;
- long ret;
- u32 vref_uv;
+ int ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
@@ -253,10 +252,10 @@ static int imx93_adc_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- ret = vref_uv = regulator_get_voltage(adc->vref);
+ ret = regulator_get_voltage(adc->vref);
if (ret < 0)
return ret;
- *val = vref_uv / 1000;
+ *val = ret / 1000;
*val2 = 12;
return IIO_VAL_FRACTIONAL_LOG2;
diff --git a/drivers/iio/adc/mt6370-adc.c b/drivers/iio/adc/mt6370-adc.c
index bc62e5a9d50d..0bc112135bca 100644
--- a/drivers/iio/adc/mt6370-adc.c
+++ b/drivers/iio/adc/mt6370-adc.c
@@ -19,6 +19,7 @@
#include <dt-bindings/iio/adc/mediatek,mt6370_adc.h>
+#define MT6370_REG_DEV_INFO 0x100
#define MT6370_REG_CHG_CTRL3 0x113
#define MT6370_REG_CHG_CTRL7 0x117
#define MT6370_REG_CHG_ADC 0x121
@@ -27,6 +28,7 @@
#define MT6370_ADC_START_MASK BIT(0)
#define MT6370_ADC_IN_SEL_MASK GENMASK(7, 4)
#define MT6370_AICR_ICHG_MASK GENMASK(7, 2)
+#define MT6370_VENID_MASK GENMASK(7, 4)
#define MT6370_AICR_100_mA 0x0
#define MT6370_AICR_150_mA 0x1
@@ -47,6 +49,10 @@
#define ADC_CONV_TIME_MS 35
#define ADC_CONV_POLLING_TIME_US 1000
+#define MT6370_VID_RT5081 0x8
+#define MT6370_VID_RT5081A 0xA
+#define MT6370_VID_MT6370 0xE
+
struct mt6370_adc_data {
struct device *dev;
struct regmap *regmap;
@@ -55,6 +61,7 @@ struct mt6370_adc_data {
* from being read at the same time.
*/
struct mutex adc_lock;
+ unsigned int vid;
};
static int mt6370_adc_read_channel(struct mt6370_adc_data *priv, int chan,
@@ -98,6 +105,30 @@ adc_unlock:
return ret;
}
+static int mt6370_adc_get_ibus_scale(struct mt6370_adc_data *priv)
+{
+ switch (priv->vid) {
+ case MT6370_VID_RT5081:
+ case MT6370_VID_RT5081A:
+ case MT6370_VID_MT6370:
+ return 3350;
+ default:
+ return 3875;
+ }
+}
+
+static int mt6370_adc_get_ibat_scale(struct mt6370_adc_data *priv)
+{
+ switch (priv->vid) {
+ case MT6370_VID_RT5081:
+ case MT6370_VID_RT5081A:
+ case MT6370_VID_MT6370:
+ return 2680;
+ default:
+ return 3870;
+ }
+}
+
static int mt6370_adc_read_scale(struct mt6370_adc_data *priv,
int chan, int *val1, int *val2)
{
@@ -123,7 +154,7 @@ static int mt6370_adc_read_scale(struct mt6370_adc_data *priv,
case MT6370_AICR_250_mA:
case MT6370_AICR_300_mA:
case MT6370_AICR_350_mA:
- *val1 = 3350;
+ *val1 = mt6370_adc_get_ibus_scale(priv);
break;
default:
*val1 = 5000;
@@ -150,7 +181,7 @@ static int mt6370_adc_read_scale(struct mt6370_adc_data *priv,
case MT6370_ICHG_600_mA:
case MT6370_ICHG_700_mA:
case MT6370_ICHG_800_mA:
- *val1 = 2680;
+ *val1 = mt6370_adc_get_ibat_scale(priv);
break;
default:
*val1 = 5000;
@@ -251,6 +282,20 @@ static const struct iio_chan_spec mt6370_adc_channels[] = {
MT6370_ADC_CHAN(TEMP_JC, IIO_TEMP, 12, BIT(IIO_CHAN_INFO_OFFSET)),
};
+static int mt6370_get_vendor_info(struct mt6370_adc_data *priv)
+{
+ unsigned int dev_info;
+ int ret;
+
+ ret = regmap_read(priv->regmap, MT6370_REG_DEV_INFO, &dev_info);
+ if (ret)
+ return ret;
+
+ priv->vid = FIELD_GET(MT6370_VENID_MASK, dev_info);
+
+ return 0;
+}
+
static int mt6370_adc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -272,6 +317,10 @@ static int mt6370_adc_probe(struct platform_device *pdev)
priv->regmap = regmap;
mutex_init(&priv->adc_lock);
+ ret = mt6370_get_vendor_info(priv);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get vid\n");
+
ret = regmap_write(priv->regmap, MT6370_REG_CHG_ADC, 0);
if (ret)
return dev_err_probe(dev, ret, "Failed to reset ADC\n");
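The scale now depends on the vendor ID read once at probe: the nibble in bits 7:4 of MT6370_REG_DEV_INFO selects between the original 3350/2680 values and the larger defaults. A standalone sketch of the extraction and selection (the register value below is made up; the ID-to-scale mapping is taken from the hunk above):

#include <stdio.h>

#define VENID_MASK 0xF0			/* GENMASK(7, 4) */

static int ibus_scale(unsigned int vid)
{
	switch (vid) {
	case 0x8:			/* RT5081  */
	case 0xA:			/* RT5081A */
	case 0xE:			/* MT6370  */
		return 3350;
	default:			/* other/unknown revisions */
		return 3875;
	}
}

int main(void)
{
	unsigned int dev_info = 0xF3;	/* hypothetical register value */
	/* what FIELD_GET(MT6370_VENID_MASK, dev_info) boils down to: */
	unsigned int vid = (dev_info & VENID_MASK) >> 4;

	printf("vid=0x%X scale=%d\n", vid, ibus_scale(vid));	/* 0xF -> 3875 */
	return 0;
}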
diff --git a/drivers/iio/adc/mxs-lradc-adc.c b/drivers/iio/adc/mxs-lradc-adc.c
index bca79a93cbe4..a50f39143d3e 100644
--- a/drivers/iio/adc/mxs-lradc-adc.c
+++ b/drivers/iio/adc/mxs-lradc-adc.c
@@ -757,13 +757,13 @@ static int mxs_lradc_adc_probe(struct platform_device *pdev)
ret = mxs_lradc_adc_trigger_init(iio);
if (ret)
- goto err_trig;
+ return ret;
ret = iio_triggered_buffer_setup(iio, &iio_pollfunc_store_time,
&mxs_lradc_adc_trigger_handler,
&mxs_lradc_adc_buffer_ops);
if (ret)
- return ret;
+ goto err_trig;
adc->vref_mv = mxs_lradc_adc_vref_mv[lradc->soc];
@@ -801,9 +801,9 @@ static int mxs_lradc_adc_probe(struct platform_device *pdev)
err_dev:
mxs_lradc_adc_hw_stop(adc);
- mxs_lradc_adc_trigger_remove(iio);
-err_trig:
iio_triggered_buffer_cleanup(iio);
+err_trig:
+ mxs_lradc_adc_trigger_remove(iio);
return ret;
}
@@ -814,8 +814,8 @@ static int mxs_lradc_adc_remove(struct platform_device *pdev)
iio_device_unregister(iio);
mxs_lradc_adc_hw_stop(adc);
- mxs_lradc_adc_trigger_remove(iio);
iio_triggered_buffer_cleanup(iio);
+ mxs_lradc_adc_trigger_remove(iio);
return 0;
}
diff --git a/drivers/iio/adc/palmas_gpadc.c b/drivers/iio/adc/palmas_gpadc.c
index c1c439215aeb..7dfc9c927a23 100644
--- a/drivers/iio/adc/palmas_gpadc.c
+++ b/drivers/iio/adc/palmas_gpadc.c
@@ -547,7 +547,7 @@ static int palmas_gpadc_read_raw(struct iio_dev *indio_dev,
int adc_chan = chan->channel;
int ret = 0;
- if (adc_chan > PALMAS_ADC_CH_MAX)
+ if (adc_chan >= PALMAS_ADC_CH_MAX)
return -EINVAL;
mutex_lock(&adc->lock);
@@ -595,7 +595,7 @@ static int palmas_gpadc_read_event_config(struct iio_dev *indio_dev,
int adc_chan = chan->channel;
int ret = 0;
- if (adc_chan > PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH)
+ if (adc_chan >= PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH)
return -EINVAL;
mutex_lock(&adc->lock);
@@ -684,7 +684,7 @@ static int palmas_gpadc_write_event_config(struct iio_dev *indio_dev,
int adc_chan = chan->channel;
int ret;
- if (adc_chan > PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH)
+ if (adc_chan >= PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH)
return -EINVAL;
mutex_lock(&adc->lock);
@@ -710,7 +710,7 @@ static int palmas_gpadc_read_event_value(struct iio_dev *indio_dev,
int adc_chan = chan->channel;
int ret;
- if (adc_chan > PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH)
+ if (adc_chan >= PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH)
return -EINVAL;
mutex_lock(&adc->lock);
@@ -744,7 +744,7 @@ static int palmas_gpadc_write_event_value(struct iio_dev *indio_dev,
int old;
int ret;
- if (adc_chan > PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH)
+ if (adc_chan >= PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH)
return -EINVAL;
mutex_lock(&adc->lock);
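PALMAS_ADC_CH_MAX is a channel count, so the valid channel indexes run 0..PALMAS_ADC_CH_MAX-1 and the reject test has to be ">=": the old ">" let the one-past-the-end index through to an array access. A minimal demonstration (hypothetical bound and error value):

#include <stdio.h>

#define ADC_CH_MAX 16
static int chan_data[ADC_CH_MAX];

static int read_chan(int chan, int *val)
{
	if (chan >= ADC_CH_MAX)		/* ">" would let chan == 16 through */
		return -22;		/* -EINVAL */
	*val = chan_data[chan];
	return 0;
}

int main(void)
{
	int v;

	printf("chan 15: %d\n", read_chan(15, &v));	/* ok   */
	printf("chan 16: %d\n", read_chan(16, &v));	/* -22  */
	return 0;
}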
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index 1aadb2ad2cab..bd7e2408bf28 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -2006,16 +2006,15 @@ static int stm32_adc_get_legacy_chan_count(struct iio_dev *indio_dev, struct stm
* to get the *real* number of channels.
*/
ret = device_property_count_u32(dev, "st,adc-diff-channels");
- if (ret < 0)
- return ret;
-
- ret /= (int)(sizeof(struct stm32_adc_diff_channel) / sizeof(u32));
- if (ret > adc_info->max_channels) {
- dev_err(&indio_dev->dev, "Bad st,adc-diff-channels?\n");
- return -EINVAL;
- } else if (ret > 0) {
- adc->num_diff = ret;
- num_channels += ret;
+ if (ret > 0) {
+ ret /= (int)(sizeof(struct stm32_adc_diff_channel) / sizeof(u32));
+ if (ret > adc_info->max_channels) {
+ dev_err(&indio_dev->dev, "Bad st,adc-diff-channels?\n");
+ return -EINVAL;
+ } else if (ret > 0) {
+ adc->num_diff = ret;
+ num_channels += ret;
+ }
}
/* Optional sample time is provided either for each, or all channels */
@@ -2037,6 +2036,7 @@ static int stm32_adc_legacy_chan_init(struct iio_dev *indio_dev,
struct stm32_adc_diff_channel diff[STM32_ADC_CH_MAX];
struct device *dev = &indio_dev->dev;
u32 num_diff = adc->num_diff;
+ int num_se = nchans - num_diff;
int size = num_diff * sizeof(*diff) / sizeof(u32);
int scan_index = 0, ret, i, c;
u32 smp = 0, smps[STM32_ADC_CH_MAX], chans[STM32_ADC_CH_MAX];
@@ -2063,29 +2063,32 @@ static int stm32_adc_legacy_chan_init(struct iio_dev *indio_dev,
scan_index++;
}
}
-
- ret = device_property_read_u32_array(dev, "st,adc-channels", chans,
- nchans);
- if (ret)
- return ret;
-
- for (c = 0; c < nchans; c++) {
- if (chans[c] >= adc_info->max_channels) {
- dev_err(&indio_dev->dev, "Invalid channel %d\n",
- chans[c]);
- return -EINVAL;
+ if (num_se > 0) {
+ ret = device_property_read_u32_array(dev, "st,adc-channels", chans, num_se);
+ if (ret) {
+ dev_err(&indio_dev->dev, "Failed to get st,adc-channels %d\n", ret);
+ return ret;
}
- /* Channel can't be configured both as single-ended & diff */
- for (i = 0; i < num_diff; i++) {
- if (chans[c] == diff[i].vinp) {
- dev_err(&indio_dev->dev, "channel %d misconfigured\n", chans[c]);
+ for (c = 0; c < num_se; c++) {
+ if (chans[c] >= adc_info->max_channels) {
+ dev_err(&indio_dev->dev, "Invalid channel %d\n",
+ chans[c]);
return -EINVAL;
}
+
+ /* Channel can't be configured both as single-ended & diff */
+ for (i = 0; i < num_diff; i++) {
+ if (chans[c] == diff[i].vinp) {
+ dev_err(&indio_dev->dev, "channel %d misconfigured\n",
+ chans[c]);
+ return -EINVAL;
+ }
+ }
+ stm32_adc_chan_init_one(indio_dev, &channels[scan_index],
+ chans[c], 0, scan_index, false);
+ scan_index++;
}
- stm32_adc_chan_init_one(indio_dev, &channels[scan_index],
- chans[c], 0, scan_index, false);
- scan_index++;
}
if (adc->nsmps > 0) {
@@ -2306,7 +2309,7 @@ static int stm32_adc_chan_fw_init(struct iio_dev *indio_dev, bool timestamping)
if (legacy)
ret = stm32_adc_legacy_chan_init(indio_dev, adc, channels,
- num_channels);
+ timestamping ? num_channels - 1 : num_channels);
else
ret = stm32_adc_generic_chan_init(indio_dev, adc, channels);
if (ret < 0)
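The rework derives the single-ended count from the totals instead of reading nchans entries: the differential pairs were already consumed, and with timestamping the channel array carries one extra slot, so only nchans - num_diff entries may be read from st,adc-channels. A tiny model of that bookkeeping (numbers illustrative):

#include <stdio.h>

int main(void)
{
	int num_channels = 6;	/* legacy channels, timestamp excluded */
	int num_diff = 2;	/* pairs found in st,adc-diff-channels */
	int num_se = num_channels - num_diff;

	if (num_se > 0)
		printf("read %d entries from st,adc-channels\n", num_se);
	else
		printf("no single-ended channels, property not read\n");
	return 0;
}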
diff --git a/drivers/iio/addac/ad74413r.c b/drivers/iio/addac/ad74413r.c
index 07e9f6ae16a8..e3366cf5eb31 100644
--- a/drivers/iio/addac/ad74413r.c
+++ b/drivers/iio/addac/ad74413r.c
@@ -1007,7 +1007,7 @@ static int ad74413r_read_raw(struct iio_dev *indio_dev,
ret = ad74413r_get_single_adc_result(indio_dev, chan->channel,
val);
- if (ret)
+ if (ret < 0)
return ret;
ad74413r_adc_to_resistance_result(*val, val);
diff --git a/drivers/iio/dac/Makefile b/drivers/iio/dac/Makefile
index 6c74fea21736..addd97a78838 100644
--- a/drivers/iio/dac/Makefile
+++ b/drivers/iio/dac/Makefile
@@ -17,7 +17,7 @@ obj-$(CONFIG_AD5592R_BASE) += ad5592r-base.o
obj-$(CONFIG_AD5592R) += ad5592r.o
obj-$(CONFIG_AD5593R) += ad5593r.o
obj-$(CONFIG_AD5755) += ad5755.o
-obj-$(CONFIG_AD5755) += ad5758.o
+obj-$(CONFIG_AD5758) += ad5758.o
obj-$(CONFIG_AD5761) += ad5761.o
obj-$(CONFIG_AD5764) += ad5764.o
obj-$(CONFIG_AD5766) += ad5766.o
diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
index 46bf758760f8..3f5661a3718f 100644
--- a/drivers/iio/dac/mcp4725.c
+++ b/drivers/iio/dac/mcp4725.c
@@ -47,12 +47,18 @@ static int mcp4725_suspend(struct device *dev)
struct mcp4725_data *data = iio_priv(i2c_get_clientdata(
to_i2c_client(dev)));
u8 outbuf[2];
+ int ret;
outbuf[0] = (data->powerdown_mode + 1) << 4;
outbuf[1] = 0;
data->powerdown = true;
- return i2c_master_send(data->client, outbuf, 2);
+ ret = i2c_master_send(data->client, outbuf, 2);
+ if (ret < 0)
+ return ret;
+ else if (ret != 2)
+ return -EIO;
+ return 0;
}
static int mcp4725_resume(struct device *dev)
@@ -60,13 +66,19 @@ static int mcp4725_resume(struct device *dev)
struct mcp4725_data *data = iio_priv(i2c_get_clientdata(
to_i2c_client(dev)));
u8 outbuf[2];
+ int ret;
/* restore previous DAC value */
outbuf[0] = (data->dac_value >> 8) & 0xf;
outbuf[1] = data->dac_value & 0xff;
data->powerdown = false;
- return i2c_master_send(data->client, outbuf, 2);
+ ret = i2c_master_send(data->client, outbuf, 2);
+ if (ret < 0)
+ return ret;
+ else if (ret != 2)
+ return -EIO;
+ return 0;
}
static DEFINE_SIMPLE_DEV_PM_OPS(mcp4725_pm_ops, mcp4725_suspend,
mcp4725_resume);
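i2c_master_send() returns the number of bytes written on success, so returning it straight through made a successful suspend look like a positive "error". The new pattern folds the byte count into the usual 0/negative-errno convention; a standalone version:

#include <stdio.h>

#define EIO 5

static int fake_master_send(int behaviour)
{
	return behaviour;	/* <0 errno, or number of bytes written */
}

static int send_two_bytes(int behaviour)
{
	int ret = fake_master_send(behaviour);

	if (ret < 0)
		return ret;	/* propagate the real error      */
	if (ret != 2)
		return -EIO;	/* short write: report as I/O    */
	return 0;		/* full 2-byte transfer: success */
}

int main(void)
{
	printf("%d %d %d\n", send_two_bytes(2),		/* 0    */
			     send_two_bytes(1),		/* -EIO */
			     send_two_bytes(-5));	/* -5   */
	return 0;
}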
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
index 99576b2c171f..32d7f8364230 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
@@ -275,9 +275,14 @@ static int inv_icm42600_buffer_preenable(struct iio_dev *indio_dev)
{
struct inv_icm42600_state *st = iio_device_get_drvdata(indio_dev);
struct device *dev = regmap_get_device(st->map);
+ struct inv_icm42600_timestamp *ts = iio_priv(indio_dev);
pm_runtime_get_sync(dev);
+ mutex_lock(&st->lock);
+ inv_icm42600_timestamp_reset(ts);
+ mutex_unlock(&st->lock);
+
return 0;
}
@@ -375,7 +380,6 @@ static int inv_icm42600_buffer_postdisable(struct iio_dev *indio_dev)
struct device *dev = regmap_get_device(st->map);
unsigned int sensor;
unsigned int *watermark;
- struct inv_icm42600_timestamp *ts;
struct inv_icm42600_sensor_conf conf = INV_ICM42600_SENSOR_CONF_INIT;
unsigned int sleep_temp = 0;
unsigned int sleep_sensor = 0;
@@ -385,11 +389,9 @@ static int inv_icm42600_buffer_postdisable(struct iio_dev *indio_dev)
if (indio_dev == st->indio_gyro) {
sensor = INV_ICM42600_SENSOR_GYRO;
watermark = &st->fifo.watermark.gyro;
- ts = iio_priv(st->indio_gyro);
} else if (indio_dev == st->indio_accel) {
sensor = INV_ICM42600_SENSOR_ACCEL;
watermark = &st->fifo.watermark.accel;
- ts = iio_priv(st->indio_accel);
} else {
return -EINVAL;
}
@@ -417,8 +419,6 @@ static int inv_icm42600_buffer_postdisable(struct iio_dev *indio_dev)
if (!st->fifo.on)
ret = inv_icm42600_set_temp_conf(st, false, &sleep_temp);
- inv_icm42600_timestamp_reset(ts);
-
out_unlock:
mutex_unlock(&st->lock);
diff --git a/drivers/iio/industrialio-gts-helper.c b/drivers/iio/industrialio-gts-helper.c
index 8bb68975b259..7653261d2dc2 100644
--- a/drivers/iio/industrialio-gts-helper.c
+++ b/drivers/iio/industrialio-gts-helper.c
@@ -337,6 +337,17 @@ free_gains:
return ret;
}
+static void iio_gts_us_to_int_micro(int *time_us, int *int_micro_times,
+ int num_times)
+{
+ int i;
+
+ for (i = 0; i < num_times; i++) {
+ int_micro_times[i * 2] = time_us[i] / 1000000;
+ int_micro_times[i * 2 + 1] = time_us[i] % 1000000;
+ }
+}
+
/**
* iio_gts_build_avail_time_table - build table of available integration times
* @gts: Gain time scale descriptor
@@ -351,7 +362,7 @@ free_gains:
*/
static int iio_gts_build_avail_time_table(struct iio_gts *gts)
{
- int *times, i, j, idx = 0;
+ int *times, i, j, idx = 0, *int_micro_times;
if (!gts->num_itime)
return 0;
@@ -378,13 +389,24 @@ static int iio_gts_build_avail_time_table(struct iio_gts *gts)
}
}
}
- gts->avail_time_tables = times;
- /*
- * This is just to survive a unlikely corner-case where times in the
- * given time table were not unique. Else we could just trust the
- * gts->num_itime.
- */
- gts->num_avail_time_tables = idx;
+
+ /* create a list of times formatted as list of IIO_VAL_INT_PLUS_MICRO */
+ int_micro_times = kcalloc(idx, sizeof(int) * 2, GFP_KERNEL);
+ if (int_micro_times) {
+ /*
+ * This is just to survive an unlikely corner-case where times in
+ * the given time table were not unique. Else we could just
+ * trust the gts->num_itime.
+ */
+ gts->num_avail_time_tables = idx;
+ iio_gts_us_to_int_micro(times, int_micro_times, idx);
+ }
+
+ gts->avail_time_tables = int_micro_times;
+ kfree(times);
+
+ if (!int_micro_times)
+ return -ENOMEM;
return 0;
}
@@ -683,8 +705,8 @@ int iio_gts_avail_times(struct iio_gts *gts, const int **vals, int *type,
return -EINVAL;
*vals = gts->avail_time_tables;
- *type = IIO_VAL_INT;
- *length = gts->num_avail_time_tables;
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ *length = gts->num_avail_time_tables * 2;
return IIO_AVAIL_LIST;
}
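For the gts-helper hunks above: an IIO_VAL_INT_PLUS_MICRO availability list is a flattened array of {integer, microseconds} pairs, which is why the reported *length doubles. A worked example with illustrative values:

	/* 55000 us and 1000000 us flatten to { 0, 55000, 1, 0 }, and
	 * iio_gts_avail_times() reports *length = 2 * 2 = 4.
	 */
	static const int example_times[] = {
		55000 / 1000000, 55000 % 1000000,	/* 0, 55000 */
		1000000 / 1000000, 1000000 % 1000000,	/* 1, 0 */
	};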
diff --git a/drivers/iio/light/rohm-bu27034.c b/drivers/iio/light/rohm-bu27034.c
index e486dcf35eba..f85194fda6b0 100644
--- a/drivers/iio/light/rohm-bu27034.c
+++ b/drivers/iio/light/rohm-bu27034.c
@@ -231,6 +231,9 @@ struct bu27034_result {
static const struct regmap_range bu27034_volatile_ranges[] = {
{
+ .range_min = BU27034_REG_SYSTEM_CONTROL,
+ .range_max = BU27034_REG_SYSTEM_CONTROL,
+ }, {
.range_min = BU27034_REG_MODE_CONTROL4,
.range_max = BU27034_REG_MODE_CONTROL4,
}, {
@@ -1167,11 +1170,12 @@ static int bu27034_read_raw(struct iio_dev *idev,
switch (mask) {
case IIO_CHAN_INFO_INT_TIME:
- *val = bu27034_get_int_time(data);
- if (*val < 0)
- return *val;
+ *val = 0;
+ *val2 = bu27034_get_int_time(data);
+ if (*val2 < 0)
+ return *val2;
- return IIO_VAL_INT;
+ return IIO_VAL_INT_PLUS_MICRO;
case IIO_CHAN_INFO_SCALE:
return bu27034_get_scale(data, chan->channel, val, val2);
@@ -1229,7 +1233,10 @@ static int bu27034_write_raw(struct iio_dev *idev,
ret = bu27034_set_scale(data, chan->channel, val, val2);
break;
case IIO_CHAN_INFO_INT_TIME:
- ret = bu27034_try_set_int_time(data, val);
+ if (!val)
+ ret = bu27034_try_set_int_time(data, val2);
+ else
+ ret = -EINVAL;
break;
default:
ret = -EINVAL;
@@ -1268,12 +1275,19 @@ static int bu27034_chip_init(struct bu27034_data *data)
int ret, sel;
/* Reset */
- ret = regmap_update_bits(data->regmap, BU27034_REG_SYSTEM_CONTROL,
+ ret = regmap_write_bits(data->regmap, BU27034_REG_SYSTEM_CONTROL,
BU27034_MASK_SW_RESET, BU27034_MASK_SW_RESET);
if (ret)
return dev_err_probe(data->dev, ret, "Sensor reset failed\n");
msleep(1);
+
+ ret = regmap_reinit_cache(data->regmap, &bu27034_regmap);
+ if (ret) {
+ dev_err(data->dev, "Failed to reinit reg cache\n");
+ return ret;
+ }
+
/*
* Read integration time here to ensure it is in regmap cache. We do
* this to speed-up the int-time acquisition in the start of the buffer
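A hedged note on the two bu27034 changes above: regmap_update_bits() may consult the register cache and skip the bus write when the value appears unchanged, so a self-clearing SW_RESET bit can be silently dropped; regmap_write_bits() always performs the write. After the reset, regmap_reinit_cache() discards cache contents that no longer match the hardware defaults. The sequence, sketched with names from the hunk:

	/* Force the write even if the cache already shows the bit set. */
	ret = regmap_write_bits(data->regmap, BU27034_REG_SYSTEM_CONTROL,
				BU27034_MASK_SW_RESET, BU27034_MASK_SW_RESET);
	if (ret)
		return ret;
	/* Hardware is back at defaults; drop the now-stale cache. */
	ret = regmap_reinit_cache(data->regmap, &bu27034_regmap);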
diff --git a/drivers/iio/light/vcnl4035.c b/drivers/iio/light/vcnl4035.c
index 14e29330e972..94f5d611e98c 100644
--- a/drivers/iio/light/vcnl4035.c
+++ b/drivers/iio/light/vcnl4035.c
@@ -8,6 +8,7 @@
* TODO: Proximity
*/
#include <linux/bitops.h>
+#include <linux/bitfield.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
@@ -42,6 +43,7 @@
#define VCNL4035_ALS_PERS_MASK GENMASK(3, 2)
#define VCNL4035_INT_ALS_IF_H_MASK BIT(12)
#define VCNL4035_INT_ALS_IF_L_MASK BIT(13)
+#define VCNL4035_DEV_ID_MASK GENMASK(7, 0)
/* Default values */
#define VCNL4035_MODE_ALS_ENABLE BIT(0)
@@ -413,6 +415,7 @@ static int vcnl4035_init(struct vcnl4035_data *data)
return ret;
}
+ id = FIELD_GET(VCNL4035_DEV_ID_MASK, id);
if (id != VCNL4035_DEV_ID_VAL) {
dev_err(&data->client->dev, "Wrong id, got %x, expected %x\n",
id, VCNL4035_DEV_ID_VAL);
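The vcnl4035 hunk above exists because the ID register carries more than the ID byte; FIELD_GET() masks and right-shifts the field in one step. A small illustration (the register value is made up):

	#include <linux/bitfield.h>

	u16 reg = 0x1080;			/* hypothetical readout */
	u8 id = FIELD_GET(GENMASK(7, 0), reg);	/* id == 0x80 */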
diff --git a/drivers/iio/magnetometer/tmag5273.c b/drivers/iio/magnetometer/tmag5273.c
index 28bb7efe8df8..e155a75b3cd2 100644
--- a/drivers/iio/magnetometer/tmag5273.c
+++ b/drivers/iio/magnetometer/tmag5273.c
@@ -296,12 +296,13 @@ static int tmag5273_read_raw(struct iio_dev *indio_dev,
return ret;
ret = tmag5273_get_measure(data, &t, &x, &y, &z, &angle, &magnitude);
- if (ret)
- return ret;
pm_runtime_mark_last_busy(data->dev);
pm_runtime_put_autosuspend(data->dev);
+ if (ret)
+ return ret;
+
switch (chan->address) {
case TEMPERATURE:
*val = t;
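The tmag5273 reordering above guarantees that the runtime-PM usage count taken earlier by pm_runtime_get_sync() is dropped even when the measurement fails. The general shape, with do_measurement() as a hypothetical stand-in:

	pm_runtime_get_sync(dev);
	ret = do_measurement(dev);	/* hypothetical helper */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	if (ret)
		return ret;		/* PM refcount already balanced */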
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 93a1c48d0c32..6b3f4384e46a 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -3295,7 +3295,7 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
route->path_rec->traffic_class = tos;
route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
route->path_rec->rate_selector = IB_SA_EQ;
- route->path_rec->rate = iboe_get_rate(ndev);
+ route->path_rec->rate = IB_RATE_PORT_CURRENT;
dev_put(ndev);
route->path_rec->packet_life_time_selector = IB_SA_EQ;
/* In case ACK timeout is set, use this value to calculate
@@ -4964,7 +4964,7 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
if (!ndev)
return -ENODEV;
- ib.rec.rate = iboe_get_rate(ndev);
+ ib.rec.rate = IB_RATE_PORT_CURRENT;
ib.rec.hop_limit = 1;
ib.rec.mtu = iboe_get_mtu(ndev->mtu);
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 4796f6a8828c..e836c9c477f6 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1850,8 +1850,13 @@ static int modify_qp(struct uverbs_attr_bundle *attrs,
attr->path_mtu = cmd->base.path_mtu;
if (cmd->base.attr_mask & IB_QP_PATH_MIG_STATE)
attr->path_mig_state = cmd->base.path_mig_state;
- if (cmd->base.attr_mask & IB_QP_QKEY)
+ if (cmd->base.attr_mask & IB_QP_QKEY) {
+ if (cmd->base.qkey & IB_QP_SET_QKEY && !capable(CAP_NET_RAW)) {
+ ret = -EPERM;
+ goto release_qp;
+ }
attr->qkey = cmd->base.qkey;
+ }
if (cmd->base.attr_mask & IB_QP_RQ_PSN)
attr->rq_psn = cmd->base.rq_psn;
if (cmd->base.attr_mask & IB_QP_SQ_PSN)
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index fbace69672ca..7c9c79c13941 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -222,8 +222,12 @@ static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue,
spin_lock_irq(&ev_queue->lock);
while (list_empty(&ev_queue->event_list)) {
- spin_unlock_irq(&ev_queue->lock);
+ if (ev_queue->is_closed) {
+ spin_unlock_irq(&ev_queue->lock);
+ return -EIO;
+ }
+ spin_unlock_irq(&ev_queue->lock);
if (filp->f_flags & O_NONBLOCK)
return -EAGAIN;
@@ -233,12 +237,6 @@ static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue,
return -ERESTARTSYS;
spin_lock_irq(&ev_queue->lock);
-
- /* If device was disassociated and no event exists set an error */
- if (list_empty(&ev_queue->event_list) && ev_queue->is_closed) {
- spin_unlock_irq(&ev_queue->lock);
- return -EIO;
- }
}
event = list_entry(ev_queue->event_list.next, struct ib_uverbs_event, list);
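The uverbs reordering above follows the standard wait-loop rule: every condition that can terminate the wait must be tested under the lock before sleeping, otherwise a queue closed by device disassociation leaves the reader parked forever. A condensed sketch (names abbreviated from the function above):

	spin_lock_irq(&q->lock);
	while (list_empty(&q->list)) {
		if (q->is_closed) {		/* tested before sleeping */
			spin_unlock_irq(&q->lock);
			return -EIO;
		}
		spin_unlock_irq(&q->lock);
		if (wait_event_interruptible(q->wq, !list_empty(&q->list)))
			return -ERESTARTSYS;
		spin_lock_irq(&q->lock);	/* re-take and re-test */
	}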
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index 5a2baf49ecaa..2c95e6f3d47a 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -135,8 +135,6 @@ struct bnxt_re_dev {
struct delayed_work worker;
u8 cur_prio_map;
- u16 active_speed;
- u8 active_width;
/* FP Notification Queue (CQ & SRQ) */
struct tasklet_struct nq_task;
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index e86afecfbe46..952811c40c54 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -199,6 +199,7 @@ int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num,
{
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
+ int rc;
memset(port_attr, 0, sizeof(*port_attr));
@@ -228,10 +229,10 @@ int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num,
port_attr->sm_sl = 0;
port_attr->subnet_timeout = 0;
port_attr->init_type_reply = 0;
- port_attr->active_speed = rdev->active_speed;
- port_attr->active_width = rdev->active_width;
+ rc = ib_get_eth_speed(&rdev->ibdev, port_num, &port_attr->active_speed,
+ &port_attr->active_width);
- return 0;
+ return rc;
}
int bnxt_re_get_port_immutable(struct ib_device *ibdev, u32 port_num,
@@ -3341,9 +3342,7 @@ static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *gsi_qp,
udwr.remote_qkey = gsi_sqp->qplib_qp.qkey;
/* post data received in the send queue */
- rc = bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr);
-
- return 0;
+ return bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr);
}
static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index b9e2f89337e8..3073398a2183 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -1077,8 +1077,6 @@ static int bnxt_re_ib_init(struct bnxt_re_dev *rdev)
return rc;
}
dev_info(rdev_to_dev(rdev), "Device registered with IB successfully");
- ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
- &rdev->active_width);
set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags);
event = netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev) ?
@@ -1336,6 +1334,10 @@ static void bnxt_re_setup_cc(struct bnxt_re_dev *rdev, bool enable)
{
struct bnxt_qplib_cc_param cc_param = {};
+ /* Do not enable congestion control on VFs */
+ if (rdev->is_virtfn)
+ return;
+
/* Currently enabling only for GenP5 adapters */
if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))
return;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index f139d4cd1712..8974f6235cfa 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -2056,6 +2056,12 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
u32 pg_sz_lvl;
int rc;
+ if (!cq->dpi) {
+ dev_err(&rcfw->pdev->dev,
+ "FP: CREATE_CQ failed due to NULL DPI\n");
+ return -EINVAL;
+ }
+
hwq_attr.res = res;
hwq_attr.depth = cq->max_wqe;
hwq_attr.stride = sizeof(struct cq_base);
@@ -2069,11 +2075,6 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
CMDQ_BASE_OPCODE_CREATE_CQ,
sizeof(req));
- if (!cq->dpi) {
- dev_err(&rcfw->pdev->dev,
- "FP: CREATE_CQ failed due to NULL DPI\n");
- return -EINVAL;
- }
req.dpi = cpu_to_le32(cq->dpi->dpi);
req.cq_handle = cpu_to_le64(cq->cq_handle);
req.cq_size = cpu_to_le32(cq->hwq.max_elements);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 126d4f26f75a..81b0c5e879f9 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -215,17 +215,9 @@ int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
return -EINVAL;
hwq_attr->sginfo->npages = npages;
} else {
- unsigned long sginfo_num_pages = ib_umem_num_dma_blocks(
- hwq_attr->sginfo->umem, hwq_attr->sginfo->pgsize);
-
+ npages = ib_umem_num_dma_blocks(hwq_attr->sginfo->umem,
+ hwq_attr->sginfo->pgsize);
hwq->is_user = true;
- npages = sginfo_num_pages;
- npages = (npages * PAGE_SIZE) /
- BIT_ULL(hwq_attr->sginfo->pgshft);
- if ((sginfo_num_pages * PAGE_SIZE) %
- BIT_ULL(hwq_attr->sginfo->pgshft))
- if (!npages)
- npages++;
}
if (npages == MAX_PBL_LVL_0_PGS && !hwq_attr->sginfo->nopte) {
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 1714a1e23113..b967a17a44be 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -617,16 +617,15 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
/* Free the hwq if it already exist, must be a rereg */
if (mr->hwq.max_elements)
bnxt_qplib_free_hwq(res, &mr->hwq);
- /* Use system PAGE_SIZE */
hwq_attr.res = res;
hwq_attr.depth = pages;
- hwq_attr.stride = buf_pg_size;
+ hwq_attr.stride = sizeof(dma_addr_t);
hwq_attr.type = HWQ_TYPE_MR;
hwq_attr.sginfo = &sginfo;
hwq_attr.sginfo->umem = umem;
hwq_attr.sginfo->npages = pages;
- hwq_attr.sginfo->pgsize = PAGE_SIZE;
- hwq_attr.sginfo->pgshft = PAGE_SHIFT;
+ hwq_attr.sginfo->pgsize = buf_pg_size;
+ hwq_attr.sginfo->pgshft = ilog2(buf_pg_size);
rc = bnxt_qplib_alloc_init_hwq(&mr->hwq, &hwq_attr);
if (rc) {
dev_err(&res->pdev->dev,
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index 8eca6c14d0cf..2a195c4b0f17 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -1403,7 +1403,7 @@ static int pbl_continuous_initialize(struct efa_dev *dev,
*/
static int pbl_indirect_initialize(struct efa_dev *dev, struct pbl_context *pbl)
{
- u32 size_in_pages = DIV_ROUND_UP(pbl->pbl_buf_size_in_bytes, PAGE_SIZE);
+ u32 size_in_pages = DIV_ROUND_UP(pbl->pbl_buf_size_in_bytes, EFA_CHUNK_PAYLOAD_SIZE);
struct scatterlist *sgl;
int sg_dma_cnt, err;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 84f1167de1d9..d4c6b9bc0a4e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -4583,11 +4583,9 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
mtu = ib_mtu_enum_to_int(ib_mtu);
if (WARN_ON(mtu <= 0))
return -EINVAL;
-#define MAX_LP_MSG_LEN 16384
- /* MTU * (2 ^ LP_PKTN_INI) shouldn't be bigger than 16KB */
- lp_pktn_ini = ilog2(MAX_LP_MSG_LEN / mtu);
- if (WARN_ON(lp_pktn_ini >= 0xF))
- return -EINVAL;
+#define MIN_LP_MSG_LEN 1024
+ /* mtu * (2 ^ lp_pktn_ini) should be in the range of 1024 to mtu */
+ lp_pktn_ini = ilog2(max(mtu, MIN_LP_MSG_LEN) / mtu);
if (attr_mask & IB_QP_PATH_MTU) {
hr_reg_write(context, QPC_MTU, ib_mtu);
@@ -5012,7 +5010,6 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
static bool check_qp_timeout_cfg_range(struct hns_roce_dev *hr_dev, u8 *timeout)
{
#define QP_ACK_TIMEOUT_MAX_HIP08 20
-#define QP_ACK_TIMEOUT_OFFSET 10
#define QP_ACK_TIMEOUT_MAX 31
if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
@@ -5021,7 +5018,7 @@ static bool check_qp_timeout_cfg_range(struct hns_roce_dev *hr_dev, u8 *timeout)
"local ACK timeout shall be 0 to 20.\n");
return false;
}
- *timeout += QP_ACK_TIMEOUT_OFFSET;
+ *timeout += HNS_ROCE_V2_QP_ACK_TIMEOUT_OFS_HIP08;
} else if (hr_dev->pci_dev->revision > PCI_REVISION_ID_HIP08) {
if (*timeout > QP_ACK_TIMEOUT_MAX) {
ibdev_warn(&hr_dev->ib_dev,
@@ -5307,6 +5304,18 @@ out:
return ret;
}
+static u8 get_qp_timeout_attr(struct hns_roce_dev *hr_dev,
+ struct hns_roce_v2_qp_context *context)
+{
+ u8 timeout;
+
+ timeout = (u8)hr_reg_read(context, QPC_AT);
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
+ timeout -= HNS_ROCE_V2_QP_ACK_TIMEOUT_OFS_HIP08;
+
+ return timeout;
+}
+
static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
int qp_attr_mask,
struct ib_qp_init_attr *qp_init_attr)
@@ -5384,7 +5393,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
qp_attr->max_dest_rd_atomic = 1 << hr_reg_read(&context, QPC_RR_MAX);
qp_attr->min_rnr_timer = (u8)hr_reg_read(&context, QPC_MIN_RNR_TIME);
- qp_attr->timeout = (u8)hr_reg_read(&context, QPC_AT);
+ qp_attr->timeout = get_qp_timeout_attr(hr_dev, &context);
qp_attr->retry_cnt = hr_reg_read(&context, QPC_RETRY_NUM_INIT);
qp_attr->rnr_retry = hr_reg_read(&context, QPC_RNR_NUM_INIT);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 1b44d2434ab4..7033eae2407c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -44,6 +44,8 @@
#define HNS_ROCE_V2_MAX_XRCD_NUM 0x1000000
#define HNS_ROCE_V2_RSV_XRCD_NUM 0
+#define HNS_ROCE_V2_QP_ACK_TIMEOUT_OFS_HIP08 10
+
#define HNS_ROCE_V3_SCCC_SZ 64
#define HNS_ROCE_V3_GMV_ENTRY_SZ 32
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 37a5cf62f88b..14376490ac22 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -33,6 +33,7 @@
#include <linux/vmalloc.h>
#include <rdma/ib_umem.h>
+#include <linux/math.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
@@ -909,6 +910,44 @@ static int mtr_init_buf_cfg(struct hns_roce_dev *hr_dev,
return page_cnt;
}
+static u64 cal_pages_per_l1ba(unsigned int ba_per_bt, unsigned int hopnum)
+{
+ return int_pow(ba_per_bt, hopnum - 1);
+}
+
+static unsigned int cal_best_bt_pg_sz(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mtr *mtr,
+ unsigned int pg_shift)
+{
+ unsigned long cap = hr_dev->caps.page_size_cap;
+ struct hns_roce_buf_region *re;
+ unsigned int pgs_per_l1ba;
+ unsigned int ba_per_bt;
+ unsigned int ba_num;
+ int i;
+
+ for_each_set_bit_from(pg_shift, &cap, sizeof(cap) * BITS_PER_BYTE) {
+ if (!(BIT(pg_shift) & cap))
+ continue;
+
+ ba_per_bt = BIT(pg_shift) / BA_BYTE_LEN;
+ ba_num = 0;
+ for (i = 0; i < mtr->hem_cfg.region_count; i++) {
+ re = &mtr->hem_cfg.region[i];
+ if (re->hopnum == 0)
+ continue;
+
+ pgs_per_l1ba = cal_pages_per_l1ba(ba_per_bt, re->hopnum);
+ ba_num += DIV_ROUND_UP(re->count, pgs_per_l1ba);
+ }
+
+ if (ba_num <= ba_per_bt)
+ return pg_shift;
+ }
+
+ return 0;
+}
+
static int mtr_alloc_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
unsigned int ba_page_shift)
{
@@ -917,6 +956,10 @@ static int mtr_alloc_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
hns_roce_hem_list_init(&mtr->hem_list);
if (!cfg->is_direct) {
+ ba_page_shift = cal_best_bt_pg_sz(hr_dev, mtr, ba_page_shift);
+ if (!ba_page_shift)
+ return -ERANGE;
+
ret = hns_roce_hem_list_request(hr_dev, &mtr->hem_list,
cfg->region, cfg->region_count,
ba_page_shift);
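A worked instance of the hns sizing logic above, assuming BA_BYTE_LEN is 8 as elsewhere in this driver:

	/* For pg_shift == 12 (4 KiB BT pages):
	 *   ba_per_bt    = 4096 / 8 = 512 base addresses per BT page
	 *   pgs_per_l1ba = 512^(hopnum - 1) = 512 for a hopnum-2 region
	 * so a 10000-page region needs DIV_ROUND_UP(10000, 512) = 20
	 * level-1 BAs; 20 <= 512, so the 4 KiB page size is accepted.
	 */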
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index ab5cdf782785..eaa12c124598 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -522,11 +522,6 @@ static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
if (!iwqp->user_mode)
cancel_delayed_work_sync(&iwqp->dwork_flush);
- irdma_qp_rem_ref(&iwqp->ibqp);
- wait_for_completion(&iwqp->free_qp);
- irdma_free_lsmm_rsrc(iwqp);
- irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp);
-
if (!iwqp->user_mode) {
if (iwqp->iwscq) {
irdma_clean_cqes(iwqp, iwqp->iwscq);
@@ -534,6 +529,12 @@ static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
irdma_clean_cqes(iwqp, iwqp->iwrcq);
}
}
+
+ irdma_qp_rem_ref(&iwqp->ibqp);
+ wait_for_completion(&iwqp->free_qp);
+ irdma_free_lsmm_rsrc(iwqp);
+ irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp);
+
irdma_remove_push_mmap_entries(iwqp);
irdma_free_qp_rsrc(iwqp);
@@ -3291,6 +3292,7 @@ static int irdma_post_send(struct ib_qp *ibqp,
break;
case IB_WR_LOCAL_INV:
info.op_type = IRDMA_OP_TYPE_INV_STAG;
+ info.local_fence = info.read_fence;
info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey;
err = irdma_uk_stag_local_invalidate(ukqp, &info, true);
break;
diff --git a/drivers/infiniband/hw/mlx5/counters.c b/drivers/infiniband/hw/mlx5/counters.c
index 1c06920505d2..93257fa5aae8 100644
--- a/drivers/infiniband/hw/mlx5/counters.c
+++ b/drivers/infiniband/hw/mlx5/counters.c
@@ -209,7 +209,8 @@ static const struct mlx5_ib_counters *get_counters(struct mlx5_ib_dev *dev,
!vport_qcounters_supported(dev)) || !port_num)
return &dev->port[0].cnts;
- return &dev->port[port_num - 1].cnts;
+ return is_mdev_switchdev_mode(dev->mdev) ?
+ &dev->port[1].cnts : &dev->port[port_num - 1].cnts;
}
/**
@@ -262,7 +263,7 @@ static struct rdma_hw_stats *
mlx5_ib_alloc_hw_port_stats(struct ib_device *ibdev, u32 port_num)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
- const struct mlx5_ib_counters *cnts = &dev->port[port_num - 1].cnts;
+ const struct mlx5_ib_counters *cnts = get_counters(dev, port_num);
return do_alloc_stats(cnts);
}
@@ -329,6 +330,7 @@ static int mlx5_ib_query_q_counters_vport(struct mlx5_ib_dev *dev,
{
u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {};
u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {};
+ struct mlx5_core_dev *mdev;
__be32 val;
int ret, i;
@@ -336,12 +338,16 @@ static int mlx5_ib_query_q_counters_vport(struct mlx5_ib_dev *dev,
dev->port[port_num].rep->vport == MLX5_VPORT_UPLINK)
return 0;
+ mdev = mlx5_eswitch_get_core_dev(dev->port[port_num].rep->esw);
+ if (!mdev)
+ return -EOPNOTSUPP;
+
MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
MLX5_SET(query_q_counter_in, in, other_vport, 1);
MLX5_SET(query_q_counter_in, in, vport_number,
dev->port[port_num].rep->vport);
MLX5_SET(query_q_counter_in, in, aggregate, 1);
- ret = mlx5_cmd_exec_inout(dev->mdev, query_q_counter, in, out);
+ ret = mlx5_cmd_exec_inout(mdev, query_q_counter, in, out);
if (ret)
return ret;
@@ -575,43 +581,53 @@ static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev,
bool is_vport = is_mdev_switchdev_mode(dev->mdev) &&
port_num != MLX5_VPORT_PF;
const struct mlx5_ib_counter *names;
- int j = 0, i;
+ int j = 0, i, size;
names = is_vport ? vport_basic_q_cnts : basic_q_cnts;
- for (i = 0; i < ARRAY_SIZE(basic_q_cnts); i++, j++) {
+ size = is_vport ? ARRAY_SIZE(vport_basic_q_cnts) :
+ ARRAY_SIZE(basic_q_cnts);
+ for (i = 0; i < size; i++, j++) {
descs[j].name = names[i].name;
- offsets[j] = basic_q_cnts[i].offset;
+ offsets[j] = names[i].offset;
}
names = is_vport ? vport_out_of_seq_q_cnts : out_of_seq_q_cnts;
+ size = is_vport ? ARRAY_SIZE(vport_out_of_seq_q_cnts) :
+ ARRAY_SIZE(out_of_seq_q_cnts);
if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt)) {
- for (i = 0; i < ARRAY_SIZE(out_of_seq_q_cnts); i++, j++) {
+ for (i = 0; i < size; i++, j++) {
descs[j].name = names[i].name;
- offsets[j] = out_of_seq_q_cnts[i].offset;
+ offsets[j] = names[i].offset;
}
}
names = is_vport ? vport_retrans_q_cnts : retrans_q_cnts;
+ size = is_vport ? ARRAY_SIZE(vport_retrans_q_cnts) :
+ ARRAY_SIZE(retrans_q_cnts);
if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) {
- for (i = 0; i < ARRAY_SIZE(retrans_q_cnts); i++, j++) {
+ for (i = 0; i < size; i++, j++) {
descs[j].name = names[i].name;
- offsets[j] = retrans_q_cnts[i].offset;
+ offsets[j] = names[i].offset;
}
}
names = is_vport ? vport_extended_err_cnts : extended_err_cnts;
+ size = is_vport ? ARRAY_SIZE(vport_extended_err_cnts) :
+ ARRAY_SIZE(extended_err_cnts);
if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters)) {
- for (i = 0; i < ARRAY_SIZE(extended_err_cnts); i++, j++) {
+ for (i = 0; i < size; i++, j++) {
descs[j].name = names[i].name;
- offsets[j] = extended_err_cnts[i].offset;
+ offsets[j] = names[i].offset;
}
}
names = is_vport ? vport_roce_accl_cnts : roce_accl_cnts;
+ size = is_vport ? ARRAY_SIZE(vport_roce_accl_cnts) :
+ ARRAY_SIZE(roce_accl_cnts);
if (MLX5_CAP_GEN(dev->mdev, roce_accl)) {
- for (i = 0; i < ARRAY_SIZE(roce_accl_cnts); i++, j++) {
+ for (i = 0; i < size; i++, j++) {
descs[j].name = names[i].name;
- offsets[j] = roce_accl_cnts[i].offset;
+ offsets[j] = names[i].offset;
}
}
@@ -661,25 +677,37 @@ static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev,
static int __mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev,
struct mlx5_ib_counters *cnts, u32 port_num)
{
- u32 num_counters, num_op_counters = 0;
+ bool is_vport = is_mdev_switchdev_mode(dev->mdev) &&
+ port_num != MLX5_VPORT_PF;
+ u32 num_counters, num_op_counters = 0, size;
- num_counters = ARRAY_SIZE(basic_q_cnts);
+ size = is_vport ? ARRAY_SIZE(vport_basic_q_cnts) :
+ ARRAY_SIZE(basic_q_cnts);
+ num_counters = size;
+ size = is_vport ? ARRAY_SIZE(vport_out_of_seq_q_cnts) :
+ ARRAY_SIZE(out_of_seq_q_cnts);
if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt))
- num_counters += ARRAY_SIZE(out_of_seq_q_cnts);
+ num_counters += size;
+ size = is_vport ? ARRAY_SIZE(vport_retrans_q_cnts) :
+ ARRAY_SIZE(retrans_q_cnts);
if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters))
- num_counters += ARRAY_SIZE(retrans_q_cnts);
+ num_counters += size;
+ size = is_vport ? ARRAY_SIZE(vport_extended_err_cnts) :
+ ARRAY_SIZE(extended_err_cnts);
if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters))
- num_counters += ARRAY_SIZE(extended_err_cnts);
+ num_counters += size;
+ size = is_vport ? ARRAY_SIZE(vport_roce_accl_cnts) :
+ ARRAY_SIZE(roce_accl_cnts);
if (MLX5_CAP_GEN(dev->mdev, roce_accl))
- num_counters += ARRAY_SIZE(roce_accl_cnts);
+ num_counters += size;
cnts->num_q_counters = num_counters;
- if (is_mdev_switchdev_mode(dev->mdev) && port_num != MLX5_VPORT_PF)
+ if (is_vport)
goto skip_non_qcounters;
if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
@@ -725,11 +753,11 @@ err:
static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev)
{
u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {};
- int num_cnt_ports;
+ int num_cnt_ports = dev->num_ports;
int i, j;
- num_cnt_ports = (!is_mdev_switchdev_mode(dev->mdev) ||
- vport_qcounters_supported(dev)) ? dev->num_ports : 1;
+ if (is_mdev_switchdev_mode(dev->mdev))
+ num_cnt_ports = min(2, num_cnt_ports);
MLX5_SET(dealloc_q_counter_in, in, opcode,
MLX5_CMD_OP_DEALLOC_Q_COUNTER);
@@ -761,15 +789,22 @@ static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev)
{
u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {};
u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {};
- int num_cnt_ports;
+ int num_cnt_ports = dev->num_ports;
int err = 0;
int i;
bool is_shared;
MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
is_shared = MLX5_CAP_GEN(dev->mdev, log_max_uctx) != 0;
- num_cnt_ports = (!is_mdev_switchdev_mode(dev->mdev) ||
- vport_qcounters_supported(dev)) ? dev->num_ports : 1;
+
+ /*
+ * In switchdev mode we need to allocate two ports: one holds the
+ * device's own Q_counters (essentially the real Q_counters of this
+ * device), while the other serves as a helper so the PF can query
+ * all other vports.
+ */
+ if (is_mdev_switchdev_mode(dev->mdev))
+ num_cnt_ports = min(2, num_cnt_ports);
for (i = 0; i < num_cnt_ports; i++) {
err = __mlx5_ib_alloc_counters(dev, &dev->port[i].cnts, i);
diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c
index 3008632a6c20..1e419e080b53 100644
--- a/drivers/infiniband/hw/mlx5/fs.c
+++ b/drivers/infiniband/hw/mlx5/fs.c
@@ -695,8 +695,6 @@ static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_ib_dev *dev,
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_table *ft;
- if (mlx5_ib_shared_ft_allowed(&dev->ib_dev))
- ft_attr.uid = MLX5_SHARED_RESOURCE_UID;
ft_attr.prio = priority;
ft_attr.max_fte = num_entries;
ft_attr.flags = flags;
@@ -2025,6 +2023,237 @@ static int flow_matcher_cleanup(struct ib_uobject *uobject,
return 0;
}
+static int steering_anchor_create_ft(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_flow_prio *ft_prio,
+ enum mlx5_flow_namespace_type ns_type)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_table *ft;
+
+ if (ft_prio->anchor.ft)
+ return 0;
+
+ ns = mlx5_get_flow_namespace(dev->mdev, ns_type);
+ if (!ns)
+ return -EOPNOTSUPP;
+
+ ft_attr.flags = MLX5_FLOW_TABLE_UNMANAGED;
+ ft_attr.uid = MLX5_SHARED_RESOURCE_UID;
+ ft_attr.prio = 0;
+ ft_attr.max_fte = 2;
+ ft_attr.level = 1;
+
+ ft = mlx5_create_flow_table(ns, &ft_attr);
+ if (IS_ERR(ft))
+ return PTR_ERR(ft);
+
+ ft_prio->anchor.ft = ft;
+
+ return 0;
+}
+
+static void steering_anchor_destroy_ft(struct mlx5_ib_flow_prio *ft_prio)
+{
+ if (ft_prio->anchor.ft) {
+ mlx5_destroy_flow_table(ft_prio->anchor.ft);
+ ft_prio->anchor.ft = NULL;
+ }
+}
+
+static int
+steering_anchor_create_fg_drop(struct mlx5_ib_flow_prio *ft_prio)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *fg;
+ void *flow_group_in;
+ int err = 0;
+
+ if (ft_prio->anchor.fg_drop)
+ return 0;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
+
+ fg = mlx5_create_flow_group(ft_prio->anchor.ft, flow_group_in);
+ if (IS_ERR(fg)) {
+ err = PTR_ERR(fg);
+ goto out;
+ }
+
+ ft_prio->anchor.fg_drop = fg;
+
+out:
+ kvfree(flow_group_in);
+
+ return err;
+}
+
+static void
+steering_anchor_destroy_fg_drop(struct mlx5_ib_flow_prio *ft_prio)
+{
+ if (ft_prio->anchor.fg_drop) {
+ mlx5_destroy_flow_group(ft_prio->anchor.fg_drop);
+ ft_prio->anchor.fg_drop = NULL;
+ }
+}
+
+static int
+steering_anchor_create_fg_goto_table(struct mlx5_ib_flow_prio *ft_prio)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *fg;
+ void *flow_group_in;
+ int err = 0;
+
+ if (ft_prio->anchor.fg_goto_table)
+ return 0;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ fg = mlx5_create_flow_group(ft_prio->anchor.ft, flow_group_in);
+ if (IS_ERR(fg)) {
+ err = PTR_ERR(fg);
+ goto out;
+ }
+ ft_prio->anchor.fg_goto_table = fg;
+
+out:
+ kvfree(flow_group_in);
+
+ return err;
+}
+
+static void
+steering_anchor_destroy_fg_goto_table(struct mlx5_ib_flow_prio *ft_prio)
+{
+ if (ft_prio->anchor.fg_goto_table) {
+ mlx5_destroy_flow_group(ft_prio->anchor.fg_goto_table);
+ ft_prio->anchor.fg_goto_table = NULL;
+ }
+}
+
+static int
+steering_anchor_create_rule_drop(struct mlx5_ib_flow_prio *ft_prio)
+{
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *handle;
+
+ if (ft_prio->anchor.rule_drop)
+ return 0;
+
+ flow_act.fg = ft_prio->anchor.fg_drop;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+
+ handle = mlx5_add_flow_rules(ft_prio->anchor.ft, NULL, &flow_act,
+ NULL, 0);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+ ft_prio->anchor.rule_drop = handle;
+
+ return 0;
+}
+
+static void steering_anchor_destroy_rule_drop(struct mlx5_ib_flow_prio *ft_prio)
+{
+ if (ft_prio->anchor.rule_drop) {
+ mlx5_del_flow_rules(ft_prio->anchor.rule_drop);
+ ft_prio->anchor.rule_drop = NULL;
+ }
+}
+
+static int
+steering_anchor_create_rule_goto_table(struct mlx5_ib_flow_prio *ft_prio)
+{
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *handle;
+
+ if (ft_prio->anchor.rule_goto_table)
+ return 0;
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ flow_act.fg = ft_prio->anchor.fg_goto_table;
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = ft_prio->flow_table;
+
+ handle = mlx5_add_flow_rules(ft_prio->anchor.ft, NULL, &flow_act,
+ &dest, 1);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+ ft_prio->anchor.rule_goto_table = handle;
+
+ return 0;
+}
+
+static void
+steering_anchor_destroy_rule_goto_table(struct mlx5_ib_flow_prio *ft_prio)
+{
+ if (ft_prio->anchor.rule_goto_table) {
+ mlx5_del_flow_rules(ft_prio->anchor.rule_goto_table);
+ ft_prio->anchor.rule_goto_table = NULL;
+ }
+}
+
+static int steering_anchor_create_res(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_flow_prio *ft_prio,
+ enum mlx5_flow_namespace_type ns_type)
+{
+ int err;
+
+ err = steering_anchor_create_ft(dev, ft_prio, ns_type);
+ if (err)
+ return err;
+
+ err = steering_anchor_create_fg_drop(ft_prio);
+ if (err)
+ goto destroy_ft;
+
+ err = steering_anchor_create_fg_goto_table(ft_prio);
+ if (err)
+ goto destroy_fg_drop;
+
+ err = steering_anchor_create_rule_drop(ft_prio);
+ if (err)
+ goto destroy_fg_goto_table;
+
+ err = steering_anchor_create_rule_goto_table(ft_prio);
+ if (err)
+ goto destroy_rule_drop;
+
+ return 0;
+
+destroy_rule_drop:
+ steering_anchor_destroy_rule_drop(ft_prio);
+destroy_fg_goto_table:
+ steering_anchor_destroy_fg_goto_table(ft_prio);
+destroy_fg_drop:
+ steering_anchor_destroy_fg_drop(ft_prio);
+destroy_ft:
+ steering_anchor_destroy_ft(ft_prio);
+
+ return err;
+}
+
+static void mlx5_steering_anchor_destroy_res(struct mlx5_ib_flow_prio *ft_prio)
+{
+ steering_anchor_destroy_rule_goto_table(ft_prio);
+ steering_anchor_destroy_rule_drop(ft_prio);
+ steering_anchor_destroy_fg_goto_table(ft_prio);
+ steering_anchor_destroy_fg_drop(ft_prio);
+ steering_anchor_destroy_ft(ft_prio);
+}
+
static int steering_anchor_cleanup(struct ib_uobject *uobject,
enum rdma_remove_reason why,
struct uverbs_attr_bundle *attrs)
@@ -2035,6 +2264,9 @@ static int steering_anchor_cleanup(struct ib_uobject *uobject,
return -EBUSY;
mutex_lock(&obj->dev->flow_db->lock);
+ if (!--obj->ft_prio->anchor.rule_goto_table_ref)
+ steering_anchor_destroy_rule_goto_table(obj->ft_prio);
+
put_flow_table(obj->dev, obj->ft_prio, true);
mutex_unlock(&obj->dev->flow_db->lock);
@@ -2042,6 +2274,24 @@ static int steering_anchor_cleanup(struct ib_uobject *uobject,
return 0;
}
+static void fs_cleanup_anchor(struct mlx5_ib_flow_prio *prio,
+ int count)
+{
+ while (count--)
+ mlx5_steering_anchor_destroy_res(&prio[count]);
+}
+
+void mlx5_ib_fs_cleanup_anchor(struct mlx5_ib_dev *dev)
+{
+ fs_cleanup_anchor(dev->flow_db->prios, MLX5_IB_NUM_FLOW_FT);
+ fs_cleanup_anchor(dev->flow_db->egress_prios, MLX5_IB_NUM_FLOW_FT);
+ fs_cleanup_anchor(dev->flow_db->sniffer, MLX5_IB_NUM_SNIFFER_FTS);
+ fs_cleanup_anchor(dev->flow_db->egress, MLX5_IB_NUM_EGRESS_FTS);
+ fs_cleanup_anchor(dev->flow_db->fdb, MLX5_IB_NUM_FDB_FTS);
+ fs_cleanup_anchor(dev->flow_db->rdma_rx, MLX5_IB_NUM_FLOW_FT);
+ fs_cleanup_anchor(dev->flow_db->rdma_tx, MLX5_IB_NUM_FLOW_FT);
+}
+
static int mlx5_ib_matcher_ns(struct uverbs_attr_bundle *attrs,
struct mlx5_ib_flow_matcher *obj)
{
@@ -2182,21 +2432,31 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_STEERING_ANCHOR_CREATE)(
return -ENOMEM;
mutex_lock(&dev->flow_db->lock);
+
ft_prio = _get_flow_table(dev, priority, ns_type, 0);
if (IS_ERR(ft_prio)) {
- mutex_unlock(&dev->flow_db->lock);
err = PTR_ERR(ft_prio);
goto free_obj;
}
ft_prio->refcount++;
- ft_id = mlx5_flow_table_id(ft_prio->flow_table);
- mutex_unlock(&dev->flow_db->lock);
+
+ if (!ft_prio->anchor.rule_goto_table_ref) {
+ err = steering_anchor_create_res(dev, ft_prio, ns_type);
+ if (err)
+ goto put_flow_table;
+ }
+
+ ft_prio->anchor.rule_goto_table_ref++;
+
+ ft_id = mlx5_flow_table_id(ft_prio->anchor.ft);
err = uverbs_copy_to(attrs, MLX5_IB_ATTR_STEERING_ANCHOR_FT_ID,
&ft_id, sizeof(ft_id));
if (err)
- goto put_flow_table;
+ goto destroy_res;
+
+ mutex_unlock(&dev->flow_db->lock);
uobj->object = obj;
obj->dev = dev;
@@ -2205,8 +2465,10 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_STEERING_ANCHOR_CREATE)(
return 0;
+destroy_res:
+ --ft_prio->anchor.rule_goto_table_ref;
+ mlx5_steering_anchor_destroy_res(ft_prio);
put_flow_table:
- mutex_lock(&dev->flow_db->lock);
put_flow_table(dev, ft_prio, true);
mutex_unlock(&dev->flow_db->lock);
free_obj:
diff --git a/drivers/infiniband/hw/mlx5/fs.h b/drivers/infiniband/hw/mlx5/fs.h
index ad320adaf321..b9734904f5f0 100644
--- a/drivers/infiniband/hw/mlx5/fs.h
+++ b/drivers/infiniband/hw/mlx5/fs.h
@@ -10,6 +10,7 @@
#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
int mlx5_ib_fs_init(struct mlx5_ib_dev *dev);
+void mlx5_ib_fs_cleanup_anchor(struct mlx5_ib_dev *dev);
#else
static inline int mlx5_ib_fs_init(struct mlx5_ib_dev *dev)
{
@@ -21,9 +22,24 @@ static inline int mlx5_ib_fs_init(struct mlx5_ib_dev *dev)
mutex_init(&dev->flow_db->lock);
return 0;
}
+
+static inline void mlx5_ib_fs_cleanup_anchor(struct mlx5_ib_dev *dev) {}
#endif
+
static inline void mlx5_ib_fs_cleanup(struct mlx5_ib_dev *dev)
{
+ /* When a steering anchor is created, a special flow table is also
+ * created for the user to reference. Since the user can reference it,
+ * the kernel cannot trust that when the user destroys the steering
+ * anchor, they no longer reference the flow table.
+ *
+ * To address this issue, when a user destroys a steering anchor, only
+ * the flow steering rule in the table is destroyed, but the table
+ * itself is kept to deal with the above scenario. The remaining
+ * resources are only removed when the RDMA device is destroyed, at
+ * which point it is safe to assume that all references are gone.
+ */
+ mlx5_ib_fs_cleanup_anchor(dev);
kfree(dev->flow_db);
}
#endif /* _MLX5_IB_FS_H */
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 5d45de223c43..f0b394ed7452 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -4275,6 +4275,9 @@ const struct mlx5_ib_profile raw_eth_profile = {
STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
mlx5_ib_stage_post_ib_reg_umr_init,
NULL),
+ STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
+ mlx5_ib_stage_delay_drop_init,
+ mlx5_ib_stage_delay_drop_cleanup),
STAGE_CREATE(MLX5_IB_STAGE_RESTRACK,
mlx5_ib_restrack_init,
NULL),
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index efa4dc6e7dee..2dfa6f49a6f4 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -237,8 +237,19 @@ enum {
#define MLX5_IB_NUM_SNIFFER_FTS 2
#define MLX5_IB_NUM_EGRESS_FTS 1
#define MLX5_IB_NUM_FDB_FTS MLX5_BY_PASS_NUM_REGULAR_PRIOS
+
+struct mlx5_ib_anchor {
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *fg_goto_table;
+ struct mlx5_flow_group *fg_drop;
+ struct mlx5_flow_handle *rule_goto_table;
+ struct mlx5_flow_handle *rule_drop;
+ unsigned int rule_goto_table_ref;
+};
+
struct mlx5_ib_flow_prio {
struct mlx5_flow_table *flow_table;
+ struct mlx5_ib_anchor anchor;
unsigned int refcount;
};
@@ -1587,6 +1598,9 @@ static inline bool mlx5_ib_lag_should_assign_affinity(struct mlx5_ib_dev *dev)
MLX5_CAP_PORT_SELECTION(dev->mdev, port_select_flow_table_bypass))
return 0;
+ if (mlx5_lag_is_lacp_owner(dev->mdev) && !dev->lag_active)
+ return 0;
+
return dev->lag_active ||
(MLX5_CAP_GEN(dev->mdev, num_lag_ports) > 1 &&
MLX5_CAP_GEN(dev->mdev, lag_tx_port_affinity));
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 70ca8ffa9256..78b96bfb4e6a 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1237,6 +1237,9 @@ static int create_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
MLX5_SET(create_tis_in, in, uid, to_mpd(pd)->uid);
MLX5_SET(tisc, tisc, transport_domain, tdn);
+ if (!mlx5_ib_lag_should_assign_affinity(dev) &&
+ mlx5_lag_is_lacp_owner(dev->mdev))
+ MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);
if (qp->flags & IB_QP_CREATE_SOURCE_QPN)
MLX5_SET(tisc, tisc, underlay_qpn, qp->underlay_qpn);
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index db18ace74d2b..f46c5a5fd0ae 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -115,15 +115,16 @@ static enum ib_wc_opcode wr_to_wc_opcode(enum ib_wr_opcode opcode)
void retransmit_timer(struct timer_list *t)
{
struct rxe_qp *qp = from_timer(qp, t, retrans_timer);
+ unsigned long flags;
rxe_dbg_qp(qp, "retransmit timer fired\n");
- spin_lock_bh(&qp->state_lock);
+ spin_lock_irqsave(&qp->state_lock, flags);
if (qp->valid) {
qp->comp.timeout = 1;
rxe_sched_task(&qp->comp.task);
}
- spin_unlock_bh(&qp->state_lock);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
}
void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
@@ -481,11 +482,13 @@ static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
static void comp_check_sq_drain_done(struct rxe_qp *qp)
{
- spin_lock_bh(&qp->state_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->state_lock, flags);
if (unlikely(qp_state(qp) == IB_QPS_SQD)) {
if (qp->attr.sq_draining && qp->comp.psn == qp->req.psn) {
qp->attr.sq_draining = 0;
- spin_unlock_bh(&qp->state_lock);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
if (qp->ibqp.event_handler) {
struct ib_event ev;
@@ -499,7 +502,7 @@ static void comp_check_sq_drain_done(struct rxe_qp *qp)
return;
}
}
- spin_unlock_bh(&qp->state_lock);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
}
static inline enum comp_state complete_ack(struct rxe_qp *qp,
@@ -625,13 +628,15 @@ static void free_pkt(struct rxe_pkt_info *pkt)
*/
static void reset_retry_timer(struct rxe_qp *qp)
{
+ unsigned long flags;
+
if (qp_type(qp) == IB_QPT_RC && qp->qp_timeout_jiffies) {
- spin_lock_bh(&qp->state_lock);
+ spin_lock_irqsave(&qp->state_lock, flags);
if (qp_state(qp) >= IB_QPS_RTS &&
psn_compare(qp->req.psn, qp->comp.psn) > 0)
mod_timer(&qp->retrans_timer,
jiffies + qp->qp_timeout_jiffies);
- spin_unlock_bh(&qp->state_lock);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
}
}
@@ -643,18 +648,19 @@ int rxe_completer(struct rxe_qp *qp)
struct rxe_pkt_info *pkt = NULL;
enum comp_state state;
int ret;
+ unsigned long flags;
- spin_lock_bh(&qp->state_lock);
+ spin_lock_irqsave(&qp->state_lock, flags);
if (!qp->valid || qp_state(qp) == IB_QPS_ERR ||
qp_state(qp) == IB_QPS_RESET) {
bool notify = qp->valid && (qp_state(qp) == IB_QPS_ERR);
drain_resp_pkts(qp);
flush_send_queue(qp, notify);
- spin_unlock_bh(&qp->state_lock);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
goto exit;
}
- spin_unlock_bh(&qp->state_lock);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
if (qp->comp.timeout) {
qp->comp.timeout_retry = 1;
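The rxe hunks above and below all convert spin_lock_bh() to spin_lock_irqsave(). A hedged summary of why: _bh only disables softirqs, so if the same lock can also be taken with hard interrupts disabled (for example by a caller that enters the verbs with IRQs off), lockdep reports a possible self-deadlock. The irqsave form disables local interrupts and restores the previous state:

	unsigned long flags;

	spin_lock_irqsave(&qp->state_lock, flags);
	/* critical section is now safe against hard-IRQ reentry */
	spin_unlock_irqrestore(&qp->state_lock, flags);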
diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c
index 20ff0c0c4605..6ca2a05b6a2a 100644
--- a/drivers/infiniband/sw/rxe/rxe_cq.c
+++ b/drivers/infiniband/sw/rxe/rxe_cq.c
@@ -113,8 +113,6 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
queue_advance_producer(cq->queue, QUEUE_TYPE_TO_CLIENT);
- spin_unlock_irqrestore(&cq->cq_lock, flags);
-
if ((cq->notify == IB_CQ_NEXT_COMP) ||
(cq->notify == IB_CQ_SOLICITED && solicited)) {
cq->notify = 0;
@@ -122,6 +120,8 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
+
return 0;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index 2bc7361152ea..cd59666158b1 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -159,6 +159,9 @@ static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
pkt->mask = RXE_GRH_MASK;
pkt->paylen = be16_to_cpu(udph->len) - sizeof(*udph);
+ /* remove udp header */
+ skb_pull(skb, sizeof(struct udphdr));
+
rxe_rcv(skb);
return 0;
@@ -401,6 +404,9 @@ static int rxe_loopback(struct sk_buff *skb, struct rxe_pkt_info *pkt)
return -EIO;
}
+ /* remove udp header */
+ skb_pull(skb, sizeof(struct udphdr));
+
rxe_rcv(skb);
return 0;
@@ -412,15 +418,16 @@ int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
int err;
int is_request = pkt->mask & RXE_REQ_MASK;
struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ unsigned long flags;
- spin_lock_bh(&qp->state_lock);
+ spin_lock_irqsave(&qp->state_lock, flags);
if ((is_request && (qp_state(qp) < IB_QPS_RTS)) ||
(!is_request && (qp_state(qp) < IB_QPS_RTR))) {
- spin_unlock_bh(&qp->state_lock);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
rxe_dbg_qp(qp, "Packet dropped. QP is not in ready state\n");
goto drop;
}
- spin_unlock_bh(&qp->state_lock);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
rxe_icrc_generate(skb, pkt);
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index c5451a4488ca..a0f206431cf8 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -176,6 +176,9 @@ static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
spin_lock_init(&qp->rq.producer_lock);
spin_lock_init(&qp->rq.consumer_lock);
+ skb_queue_head_init(&qp->req_pkts);
+ skb_queue_head_init(&qp->resp_pkts);
+
atomic_set(&qp->ssn, 0);
atomic_set(&qp->skb_out, 0);
}
@@ -234,8 +237,6 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
qp->req.opcode = -1;
qp->comp.opcode = -1;
- skb_queue_head_init(&qp->req_pkts);
-
rxe_init_task(&qp->req.task, qp, rxe_requester);
rxe_init_task(&qp->comp.task, qp, rxe_completer);
@@ -279,8 +280,6 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
}
}
- skb_queue_head_init(&qp->resp_pkts);
-
rxe_init_task(&qp->resp.task, qp, rxe_responder);
qp->resp.opcode = OPCODE_NONE;
@@ -300,6 +299,7 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
struct rxe_cq *rcq = to_rcq(init->recv_cq);
struct rxe_cq *scq = to_rcq(init->send_cq);
struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;
+ unsigned long flags;
rxe_get(pd);
rxe_get(rcq);
@@ -325,10 +325,10 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
if (err)
goto err2;
- spin_lock_bh(&qp->state_lock);
+ spin_lock_irqsave(&qp->state_lock, flags);
qp->attr.qp_state = IB_QPS_RESET;
qp->valid = 1;
- spin_unlock_bh(&qp->state_lock);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
return 0;
@@ -492,24 +492,28 @@ static void rxe_qp_reset(struct rxe_qp *qp)
/* move the qp to the error state */
void rxe_qp_error(struct rxe_qp *qp)
{
- spin_lock_bh(&qp->state_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->state_lock, flags);
qp->attr.qp_state = IB_QPS_ERR;
/* drain work and packet queues */
rxe_sched_task(&qp->resp.task);
rxe_sched_task(&qp->comp.task);
rxe_sched_task(&qp->req.task);
- spin_unlock_bh(&qp->state_lock);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
}
static void rxe_qp_sqd(struct rxe_qp *qp, struct ib_qp_attr *attr,
int mask)
{
- spin_lock_bh(&qp->state_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->state_lock, flags);
qp->attr.sq_draining = 1;
rxe_sched_task(&qp->comp.task);
rxe_sched_task(&qp->req.task);
- spin_unlock_bh(&qp->state_lock);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
}
/* caller should hold qp->state_lock */
@@ -555,14 +559,16 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
qp->attr.cur_qp_state = attr->qp_state;
if (mask & IB_QP_STATE) {
- spin_lock_bh(&qp->state_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->state_lock, flags);
err = __qp_chk_state(qp, attr, mask);
if (!err) {
qp->attr.qp_state = attr->qp_state;
rxe_dbg_qp(qp, "state -> %s\n",
qps2str[attr->qp_state]);
}
- spin_unlock_bh(&qp->state_lock);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
if (err)
return err;
@@ -688,6 +694,8 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
/* called by the query qp verb */
int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
{
+ unsigned long flags;
+
*attr = qp->attr;
attr->rq_psn = qp->resp.psn;
@@ -708,12 +716,13 @@ int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
/* Applications that get this state typically spin on it.
* Yield the processor
*/
- spin_lock_bh(&qp->state_lock);
+ spin_lock_irqsave(&qp->state_lock, flags);
if (qp->attr.sq_draining) {
- spin_unlock_bh(&qp->state_lock);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
cond_resched();
+ } else {
+ spin_unlock_irqrestore(&qp->state_lock, flags);
}
- spin_unlock_bh(&qp->state_lock);
return 0;
}
@@ -736,10 +745,11 @@ int rxe_qp_chk_destroy(struct rxe_qp *qp)
static void rxe_qp_do_cleanup(struct work_struct *work)
{
struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);
+ unsigned long flags;
- spin_lock_bh(&qp->state_lock);
+ spin_lock_irqsave(&qp->state_lock, flags);
qp->valid = 0;
- spin_unlock_bh(&qp->state_lock);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
qp->qp_timeout_jiffies = 0;
if (qp_type(qp) == IB_QPT_RC) {
diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
index 2f953cc74256..5861e4244049 100644
--- a/drivers/infiniband/sw/rxe/rxe_recv.c
+++ b/drivers/infiniband/sw/rxe/rxe_recv.c
@@ -14,6 +14,7 @@ static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
struct rxe_qp *qp)
{
unsigned int pkt_type;
+ unsigned long flags;
if (unlikely(!qp->valid))
return -EINVAL;
@@ -38,19 +39,19 @@ static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
return -EINVAL;
}
- spin_lock_bh(&qp->state_lock);
+ spin_lock_irqsave(&qp->state_lock, flags);
if (pkt->mask & RXE_REQ_MASK) {
if (unlikely(qp_state(qp) < IB_QPS_RTR)) {
- spin_unlock_bh(&qp->state_lock);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
return -EINVAL;
}
} else {
if (unlikely(qp_state(qp) < IB_QPS_RTS)) {
- spin_unlock_bh(&qp->state_lock);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
return -EINVAL;
}
}
- spin_unlock_bh(&qp->state_lock);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
return 0;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 65134a9aefe7..5fe7cbae3031 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -99,17 +99,18 @@ static void req_retry(struct rxe_qp *qp)
void rnr_nak_timer(struct timer_list *t)
{
struct rxe_qp *qp = from_timer(qp, t, rnr_nak_timer);
+ unsigned long flags;
rxe_dbg_qp(qp, "nak timer fired\n");
- spin_lock_bh(&qp->state_lock);
+ spin_lock_irqsave(&qp->state_lock, flags);
if (qp->valid) {
/* request a send queue retry */
qp->req.need_retry = 1;
qp->req.wait_for_rnr_timer = 0;
rxe_sched_task(&qp->req.task);
}
- spin_unlock_bh(&qp->state_lock);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
}
static void req_check_sq_drain_done(struct rxe_qp *qp)
@@ -118,8 +119,9 @@ static void req_check_sq_drain_done(struct rxe_qp *qp)
unsigned int index;
unsigned int cons;
struct rxe_send_wqe *wqe;
+ unsigned long flags;
- spin_lock_bh(&qp->state_lock);
+ spin_lock_irqsave(&qp->state_lock, flags);
if (qp_state(qp) == IB_QPS_SQD) {
q = qp->sq.queue;
index = qp->req.wqe_index;
@@ -140,7 +142,7 @@ static void req_check_sq_drain_done(struct rxe_qp *qp)
break;
qp->attr.sq_draining = 0;
- spin_unlock_bh(&qp->state_lock);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
if (qp->ibqp.event_handler) {
struct ib_event ev;
@@ -154,7 +156,7 @@ static void req_check_sq_drain_done(struct rxe_qp *qp)
return;
} while (0);
}
- spin_unlock_bh(&qp->state_lock);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
}
static struct rxe_send_wqe *__req_next_wqe(struct rxe_qp *qp)
@@ -173,6 +175,7 @@ static struct rxe_send_wqe *__req_next_wqe(struct rxe_qp *qp)
static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
{
struct rxe_send_wqe *wqe;
+ unsigned long flags;
req_check_sq_drain_done(qp);
@@ -180,13 +183,13 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
if (wqe == NULL)
return NULL;
- spin_lock_bh(&qp->state_lock);
+ spin_lock_irqsave(&qp->state_lock, flags);
if (unlikely((qp_state(qp) == IB_QPS_SQD) &&
(wqe->state != wqe_state_processing))) {
- spin_unlock_bh(&qp->state_lock);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
return NULL;
}
- spin_unlock_bh(&qp->state_lock);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp);
return wqe;
@@ -676,16 +679,17 @@ int rxe_requester(struct rxe_qp *qp)
struct rxe_queue *q = qp->sq.queue;
struct rxe_ah *ah;
struct rxe_av *av;
+ unsigned long flags;
- spin_lock_bh(&qp->state_lock);
+ spin_lock_irqsave(&qp->state_lock, flags);
if (unlikely(!qp->valid)) {
- spin_unlock_bh(&qp->state_lock);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
goto exit;
}
if (unlikely(qp_state(qp) == IB_QPS_ERR)) {
wqe = __req_next_wqe(qp);
- spin_unlock_bh(&qp->state_lock);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
if (wqe)
goto err;
else
@@ -700,10 +704,10 @@ int rxe_requester(struct rxe_qp *qp)
qp->req.wait_psn = 0;
qp->req.need_retry = 0;
qp->req.wait_for_rnr_timer = 0;
- spin_unlock_bh(&qp->state_lock);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
goto exit;
}
- spin_unlock_bh(&qp->state_lock);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
/* we come here if the retransmit timer has fired
* or if the rnr timer has fired. If the retransmit
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 68f6cd188d8e..ee68306555b9 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -489,8 +489,9 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
if (mw->access & IB_ZERO_BASED)
qp->resp.offset = mw->addr;
- rxe_put(mw);
rxe_get(mr);
+ rxe_put(mw);
+ mw = NULL;
} else {
mr = lookup_mr(qp->pd, access, rkey, RXE_LOOKUP_REMOTE);
if (!mr) {
@@ -1047,6 +1048,7 @@ static enum resp_states do_complete(struct rxe_qp *qp,
struct ib_uverbs_wc *uwc = &cqe.uibwc;
struct rxe_recv_wqe *wqe = qp->resp.wqe;
struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ unsigned long flags;
if (!wqe)
goto finish;
@@ -1137,12 +1139,12 @@ static enum resp_states do_complete(struct rxe_qp *qp,
return RESPST_ERR_CQ_OVERFLOW;
finish:
- spin_lock_bh(&qp->state_lock);
+ spin_lock_irqsave(&qp->state_lock, flags);
if (unlikely(qp_state(qp) == IB_QPS_ERR)) {
- spin_unlock_bh(&qp->state_lock);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
return RESPST_CHK_RESOURCE;
}
- spin_unlock_bh(&qp->state_lock);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
if (unlikely(!pkt))
return RESPST_DONE;
@@ -1468,18 +1470,19 @@ int rxe_responder(struct rxe_qp *qp)
enum resp_states state;
struct rxe_pkt_info *pkt = NULL;
int ret;
+ unsigned long flags;
- spin_lock_bh(&qp->state_lock);
+ spin_lock_irqsave(&qp->state_lock, flags);
if (!qp->valid || qp_state(qp) == IB_QPS_ERR ||
qp_state(qp) == IB_QPS_RESET) {
bool notify = qp->valid && (qp_state(qp) == IB_QPS_ERR);
drain_req_pkts(qp);
flush_recv_queue(qp, notify);
- spin_unlock_bh(&qp->state_lock);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
goto exit;
}
- spin_unlock_bh(&qp->state_lock);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED;
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index dea605b7f683..83093e16b6c6 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -904,10 +904,10 @@ static int rxe_post_send_kernel(struct rxe_qp *qp,
if (!err)
rxe_sched_task(&qp->req.task);
- spin_lock_bh(&qp->state_lock);
+ spin_lock_irqsave(&qp->state_lock, flags);
if (qp_state(qp) == IB_QPS_ERR)
rxe_sched_task(&qp->comp.task);
- spin_unlock_bh(&qp->state_lock);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
return err;
}
@@ -917,22 +917,23 @@ static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
{
struct rxe_qp *qp = to_rqp(ibqp);
int err;
+ unsigned long flags;
- spin_lock_bh(&qp->state_lock);
+ spin_lock_irqsave(&qp->state_lock, flags);
/* caller has already called destroy_qp */
if (WARN_ON_ONCE(!qp->valid)) {
- spin_unlock_bh(&qp->state_lock);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
rxe_err_qp(qp, "qp has been destroyed");
return -EINVAL;
}
if (unlikely(qp_state(qp) < IB_QPS_RTS)) {
- spin_unlock_bh(&qp->state_lock);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
*bad_wr = wr;
rxe_err_qp(qp, "qp not ready to send");
return -EINVAL;
}
- spin_unlock_bh(&qp->state_lock);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
if (qp->is_user) {
/* Utilize process context to do protocol processing */
@@ -1008,22 +1009,22 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
struct rxe_rq *rq = &qp->rq;
unsigned long flags;
- spin_lock_bh(&qp->state_lock);
+ spin_lock_irqsave(&qp->state_lock, flags);
/* caller has already called destroy_qp */
if (WARN_ON_ONCE(!qp->valid)) {
- spin_unlock_bh(&qp->state_lock);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
rxe_err_qp(qp, "qp has been destroyed");
return -EINVAL;
}
/* see C10-97.2.1 */
if (unlikely((qp_state(qp) < IB_QPS_INIT))) {
- spin_unlock_bh(&qp->state_lock);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
*bad_wr = wr;
rxe_dbg_qp(qp, "qp not ready to post recv");
return -EINVAL;
}
- spin_unlock_bh(&qp->state_lock);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
if (unlikely(qp->srq)) {
*bad_wr = wr;
@@ -1044,10 +1045,10 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
spin_unlock_irqrestore(&rq->producer_lock, flags);
- spin_lock_bh(&qp->state_lock);
+ spin_lock_irqsave(&qp->state_lock, flags);
if (qp_state(qp) == IB_QPS_ERR)
rxe_sched_task(&qp->resp.task);
- spin_unlock_bh(&qp->state_lock);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
return err;
}
@@ -1356,7 +1357,7 @@ static int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
if (cleanup_err)
rxe_err_mr(mr, "cleanup failed, err = %d", cleanup_err);
- kfree_rcu(mr);
+ kfree_rcu_mightsleep(mr);
return 0;
err_out:
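Hedged background for the rename above: the single-pointer kfree_rcu(ptr) form may block (it can fall back to synchronize_rcu() when it cannot allocate), so it was renamed kfree_rcu_mightsleep() to make that explicit; the two-argument form remains non-sleeping. Illustration (the rcu_head member name is hypothetical):

	kfree_rcu(mr, rcu);		/* non-sleeping, needs an rcu_head member */
	kfree_rcu_mightsleep(mr);	/* single pointer, may sleep */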
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index f290cd49698e..92e1e7587af8 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -657,9 +657,13 @@ static int
isert_connect_error(struct rdma_cm_id *cma_id)
{
struct isert_conn *isert_conn = cma_id->qp->qp_context;
+ struct isert_np *isert_np = cma_id->context;
ib_drain_qp(isert_conn->qp);
+
+ mutex_lock(&isert_np->mutex);
list_del_init(&isert_conn->node);
+ mutex_unlock(&isert_np->mutex);
isert_conn->cm_id = NULL;
isert_put_conn(isert_conn);
@@ -2431,6 +2435,7 @@ isert_free_np(struct iscsi_np *np)
{
struct isert_np *isert_np = np->np_context;
struct isert_conn *isert_conn, *n;
+ LIST_HEAD(drop_conn_list);
if (isert_np->cm_id)
rdma_destroy_id(isert_np->cm_id);
@@ -2450,7 +2455,7 @@ isert_free_np(struct iscsi_np *np)
node) {
isert_info("cleaning isert_conn %p state (%d)\n",
isert_conn, isert_conn->state);
- isert_connect_release(isert_conn);
+ list_move_tail(&isert_conn->node, &drop_conn_list);
}
}
@@ -2461,11 +2466,16 @@ isert_free_np(struct iscsi_np *np)
node) {
isert_info("cleaning isert_conn %p state (%d)\n",
isert_conn, isert_conn->state);
- isert_connect_release(isert_conn);
+ list_move_tail(&isert_conn->node, &drop_conn_list);
}
}
mutex_unlock(&isert_np->mutex);
+ list_for_each_entry_safe(isert_conn, n, &drop_conn_list, node) {
+ list_del_init(&isert_conn->node);
+ isert_connect_release(isert_conn);
+ }
+
np->np_context = NULL;
kfree(isert_np);
}
@@ -2560,8 +2570,6 @@ static void isert_wait_conn(struct iscsit_conn *conn)
isert_put_unsol_pending_cmds(conn);
isert_wait4cmds(conn);
isert_wait4logout(isert_conn);
-
- queue_work(isert_release_wq, &isert_conn->release_work);
}
static void isert_free_conn(struct iscsit_conn *conn)
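
The isert hunks above stop calling isert_connect_release() with isert_np->mutex held: connections are first moved onto a local drop_conn_list under the lock and only released after it is dropped. A hedged sketch of that detach-then-release pattern (the types and the kfree() stand-in are hypothetical):

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct conn {
	struct list_head node;
};

static void release_all_conns(struct mutex *lock, struct list_head *active)
{
	struct conn *c, *n;
	LIST_HEAD(drop_list);

	/* Detach every entry while the list is protected... */
	mutex_lock(lock);
	list_for_each_entry_safe(c, n, active, node)
		list_move_tail(&c->node, &drop_list);
	mutex_unlock(lock);

	/* ...then run the heavyweight teardown without the lock held. */
	list_for_each_entry_safe(c, n, &drop_list, node) {
		list_del_init(&c->node);
		kfree(c);	/* stands in for isert_connect_release() */
	}
}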
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
index edb2e3a25880..cfb50bfe53c3 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -2040,6 +2040,7 @@ static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id,
return 0;
}
+/* The caller should do the cleanup in case of error */
static int create_cm(struct rtrs_clt_con *con)
{
struct rtrs_path *s = con->c.path;
@@ -2062,14 +2063,14 @@ static int create_cm(struct rtrs_clt_con *con)
err = rdma_set_reuseaddr(cm_id, 1);
if (err != 0) {
rtrs_err(s, "Set address reuse failed, err: %d\n", err);
- goto destroy_cm;
+ return err;
}
err = rdma_resolve_addr(cm_id, (struct sockaddr *)&clt_path->s.src_addr,
(struct sockaddr *)&clt_path->s.dst_addr,
RTRS_CONNECT_TIMEOUT_MS);
if (err) {
rtrs_err(s, "Failed to resolve address, err: %d\n", err);
- goto destroy_cm;
+ return err;
}
/*
* Combine connection status and session events. This is needed
@@ -2084,29 +2085,15 @@ static int create_cm(struct rtrs_clt_con *con)
if (err == 0)
err = -ETIMEDOUT;
/* Timed out or interrupted */
- goto errr;
- }
- if (con->cm_err < 0) {
- err = con->cm_err;
- goto errr;
+ return err;
}
- if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTING) {
+ if (con->cm_err < 0)
+ return con->cm_err;
+ if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTING)
/* Device removal */
- err = -ECONNABORTED;
- goto errr;
- }
+ return -ECONNABORTED;
return 0;
-
-errr:
- stop_cm(con);
- mutex_lock(&con->con_mutex);
- destroy_con_cq_qp(con);
- mutex_unlock(&con->con_mutex);
-destroy_cm:
- destroy_cm(con);
-
- return err;
}
static void rtrs_clt_path_up(struct rtrs_clt_path *clt_path)
@@ -2334,7 +2321,7 @@ static void rtrs_clt_close_work(struct work_struct *work)
static int init_conns(struct rtrs_clt_path *clt_path)
{
unsigned int cid;
- int err;
+ int err, i;
/*
* On every new session connections increase reconnect counter
@@ -2350,10 +2337,8 @@ static int init_conns(struct rtrs_clt_path *clt_path)
goto destroy;
err = create_cm(to_clt_con(clt_path->s.con[cid]));
- if (err) {
- destroy_con(to_clt_con(clt_path->s.con[cid]));
+ if (err)
goto destroy;
- }
}
err = alloc_path_reqs(clt_path);
if (err)
@@ -2364,15 +2349,21 @@ static int init_conns(struct rtrs_clt_path *clt_path)
return 0;
destroy:
- while (cid--) {
- struct rtrs_clt_con *con = to_clt_con(clt_path->s.con[cid]);
+ /* Make sure we do the cleanup in the order the connections were created */
+ for (i = 0; i <= cid; i++) {
+ struct rtrs_clt_con *con;
- stop_cm(con);
+ if (!clt_path->s.con[i])
+ break;
- mutex_lock(&con->con_mutex);
- destroy_con_cq_qp(con);
- mutex_unlock(&con->con_mutex);
- destroy_cm(con);
+ con = to_clt_con(clt_path->s.con[i]);
+ if (con->c.cm_id) {
+ stop_cm(con);
+ mutex_lock(&con->con_mutex);
+ destroy_con_cq_qp(con);
+ mutex_unlock(&con->con_mutex);
+ destroy_cm(con);
+ }
destroy_con(con);
}
/*
diff --git a/drivers/infiniband/ulp/rtrs/rtrs.c b/drivers/infiniband/ulp/rtrs/rtrs.c
index 4bf9d868cc52..3696f367ff51 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs.c
@@ -37,8 +37,10 @@ struct rtrs_iu *rtrs_iu_alloc(u32 iu_num, size_t size, gfp_t gfp_mask,
goto err;
iu->dma_addr = ib_dma_map_single(dma_dev, iu->buf, size, dir);
- if (ib_dma_mapping_error(dma_dev, iu->dma_addr))
+ if (ib_dma_mapping_error(dma_dev, iu->dma_addr)) {
+ kfree(iu->buf);
goto err;
+ }
iu->cqe.done = done;
iu->size = size;
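
The rtrs_iu_alloc() hunk plugs a leak: when the DMA mapping fails, the just-allocated buffer must be freed before taking the common error exit. The same unwind idiom with the generic streaming DMA API (a sketch; the helper name is hypothetical):

#include <linux/dma-mapping.h>
#include <linux/slab.h>

static void *alloc_mapped_buf(struct device *dev, size_t size,
			      dma_addr_t *dma_addr)
{
	void *buf = kmalloc(size, GFP_KERNEL);

	if (!buf)
		return NULL;

	*dma_addr = dma_map_single(dev, buf, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *dma_addr)) {
		kfree(buf);	/* undo the allocation on mapping failure */
		return NULL;
	}
	return buf;
}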
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 37e876d45eb9..641eb86f276e 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -703,7 +703,7 @@ void input_close_device(struct input_handle *handle)
__input_release_device(handle);
- if (!dev->inhibited && !--dev->users) {
+ if (!--dev->users && !dev->inhibited) {
if (dev->poller)
input_dev_poller_stop(dev->poller);
if (dev->close)
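
The input_close_device() change is an evaluation-order fix: in "!dev->inhibited && !--dev->users", && short-circuits, so the decrement was skipped entirely whenever the device was inhibited and the user count leaked. Decrementing first makes the count unconditional. A tiny self-contained illustration (variable names are hypothetical):

static int users = 1;
static int inhibited = 1;

static int close_buggy(void)
{
	return !inhibited && !--users;	/* decrement skipped: users stays 1 */
}

static int close_fixed(void)
{
	return !--users && !inhibited;	/* users always drops to 0 */
}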
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 28be88e0e96a..f33622fe946f 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -281,7 +281,6 @@ static const struct xpad_device {
{ 0x1430, 0xf801, "RedOctane Controller", 0, XTYPE_XBOX360 },
{ 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 },
{ 0x146b, 0x0604, "Bigben Interactive DAIJA Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
- { 0x1532, 0x0037, "Razer Sabertooth", 0, XTYPE_XBOX360 },
{ 0x1532, 0x0a00, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
{ 0x1532, 0x0a03, "Razer Wildcat", 0, XTYPE_XBOXONE },
{ 0x15e4, 0x3f00, "Power A Mini Pro Elite", 0, XTYPE_XBOX360 },
diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c
index 09489380afda..e79f5497948b 100644
--- a/drivers/input/misc/soc_button_array.c
+++ b/drivers/input/misc/soc_button_array.c
@@ -109,6 +109,27 @@ static const struct dmi_system_id dmi_use_low_level_irq[] = {
};
/*
+ * Some devices have a bogus entry that points to a GPIO which is
+ * required by another driver, so this driver must not claim it.
+ */
+static const struct dmi_system_id dmi_invalid_acpi_index[] = {
+ {
+ /*
+ * Lenovo Yoga Book X90F / X90L, the PNP0C40 home button entry
+ * points to a GPIO which is not a home button and which is
+ * required by the lenovo-yogabook driver.
+ */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"),
+ },
+ .driver_data = (void *)1l,
+ },
+ {} /* Terminating entry */
+};
+
+/*
* Get the Nth GPIO number from the ACPI object.
*/
static int soc_button_lookup_gpio(struct device *dev, int acpi_index,
@@ -137,6 +158,8 @@ soc_button_device_create(struct platform_device *pdev,
struct platform_device *pd;
struct gpio_keys_button *gpio_keys;
struct gpio_keys_platform_data *gpio_keys_pdata;
+ const struct dmi_system_id *dmi_id;
+ int invalid_acpi_index = -1;
int error, gpio, irq;
int n_buttons = 0;
@@ -154,10 +177,17 @@ soc_button_device_create(struct platform_device *pdev,
gpio_keys = (void *)(gpio_keys_pdata + 1);
n_buttons = 0;
+ dmi_id = dmi_first_match(dmi_invalid_acpi_index);
+ if (dmi_id)
+ invalid_acpi_index = (long)dmi_id->driver_data;
+
for (info = button_info; info->name; info++) {
if (info->autorepeat != autorepeat)
continue;
+ if (info->acpi_index == invalid_acpi_index)
+ continue;
+
error = soc_button_lookup_gpio(&pdev->dev, info->acpi_index, &gpio, &irq);
if (error || irq < 0) {
/*
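
The soc_button_array hunks above consult a DMI quirk table once per device-create and stash the quirk payload in driver_data. A hedged sketch of the dmi_first_match() lookup (the table contents are hypothetical):

#include <linux/dmi.h>

static const struct dmi_system_id invalid_index_quirks[] = {
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Example Board"),
		},
		.driver_data = (void *)1l,	/* ACPI index to skip */
	},
	{}	/* Terminating entry */
};

static int lookup_invalid_index(void)
{
	const struct dmi_system_id *id = dmi_first_match(invalid_index_quirks);

	return id ? (long)id->driver_data : -1;	/* -1 == no quirk */
}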
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index ece97f8c6a3e..2118b2075f43 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -674,10 +674,11 @@ static void process_packet_head_v4(struct psmouse *psmouse)
struct input_dev *dev = psmouse->dev;
struct elantech_data *etd = psmouse->private;
unsigned char *packet = psmouse->packet;
- int id = ((packet[3] & 0xe0) >> 5) - 1;
+ int id;
int pres, traces;
- if (id < 0)
+ id = ((packet[3] & 0xe0) >> 5) - 1;
+ if (id < 0 || id >= ETP_MAX_FINGERS)
return;
etd->mt[id].x = ((packet[1] & 0x0f) << 8) | packet[2];
@@ -707,7 +708,7 @@ static void process_packet_motion_v4(struct psmouse *psmouse)
int id, sid;
id = ((packet[0] & 0xe0) >> 5) - 1;
- if (id < 0)
+ if (id < 0 || id >= ETP_MAX_FINGERS)
return;
sid = ((packet[3] & 0xe0) >> 5) - 1;
@@ -728,7 +729,7 @@ static void process_packet_motion_v4(struct psmouse *psmouse)
input_report_abs(dev, ABS_MT_POSITION_X, etd->mt[id].x);
input_report_abs(dev, ABS_MT_POSITION_Y, etd->mt[id].y);
- if (sid >= 0) {
+ if (sid >= 0 && sid < ETP_MAX_FINGERS) {
etd->mt[sid].x += delta_x2 * weight;
etd->mt[sid].y -= delta_y2 * weight;
input_mt_slot(dev, sid);
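
The elantech hunks above range-check every slot id decoded from raw packet bytes before it indexes the fixed-size etd->mt[] array, since a malformed packet can encode an id past the array bound. A minimal sketch of the pattern (ETP_MAX_FINGERS and the state array mirror the driver but are redeclared here for the example):

#define ETP_MAX_FINGERS	5

static struct { int x, y; } mt[ETP_MAX_FINGERS];

static void update_slot(const unsigned char *packet, int x, int y)
{
	/* The top three bits encode slot 1..N; 0 means "no contact". */
	int id = ((packet[3] & 0xe0) >> 5) - 1;

	/* Reject "no contact" and out-of-range ids from bad packets. */
	if (id < 0 || id >= ETP_MAX_FINGERS)
		return;

	mt[id].x = x;
	mt[id].y = y;
}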
diff --git a/drivers/input/touchscreen/cyttsp5.c b/drivers/input/touchscreen/cyttsp5.c
index 30102cb80fac..3c9d07218f48 100644
--- a/drivers/input/touchscreen/cyttsp5.c
+++ b/drivers/input/touchscreen/cyttsp5.c
@@ -560,7 +560,7 @@ static int cyttsp5_hid_output_get_sysinfo(struct cyttsp5 *ts)
static int cyttsp5_hid_output_bl_launch_app(struct cyttsp5 *ts)
{
int rc;
- u8 cmd[HID_OUTPUT_BL_LAUNCH_APP];
+ u8 cmd[HID_OUTPUT_BL_LAUNCH_APP_SIZE];
u16 crc;
put_unaligned_le16(HID_OUTPUT_BL_LAUNCH_APP_SIZE, cmd);
diff --git a/drivers/input/touchscreen/sun4i-ts.c b/drivers/input/touchscreen/sun4i-ts.c
index 577c75c83e25..bb3c6072fc82 100644
--- a/drivers/input/touchscreen/sun4i-ts.c
+++ b/drivers/input/touchscreen/sun4i-ts.c
@@ -22,7 +22,7 @@
* in the kernel). So this driver offers straightforward, reliable single
* touch functionality only.
*
- * s.a. A20 User Manual "1.15 TP" (Documentation/arm/sunxi.rst)
+ * s.a. A20 User Manual "1.15 TP" (Documentation/arch/arm/sunxi.rst)
* (looks like the description in the A20 User Manual v1.3 is better
* than the one in the A10 User Manual v1.5)
*/
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index db98c3f86e8c..4d800601e8ec 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -282,6 +282,7 @@ config EXYNOS_IOMMU_DEBUG
config IPMMU_VMSA
bool "Renesas VMSA-compatible IPMMU"
depends on ARCH_RENESAS || COMPILE_TEST
+ depends on ARM || ARM64 || COMPILE_TEST
depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE
select IOMMU_API
select IOMMU_IO_PGTABLE_LPAE
@@ -417,22 +418,6 @@ config S390_IOMMU
help
Support for the IOMMU API for s390 PCI devices.
-config S390_CCW_IOMMU
- bool "S390 CCW IOMMU Support"
- depends on S390 && CCW || COMPILE_TEST
- select IOMMU_API
- help
- Enables bits of IOMMU API required by VFIO. The iommu_ops
- is not implemented as it is not necessary for VFIO.
-
-config S390_AP_IOMMU
- bool "S390 AP IOMMU Support"
- depends on S390 && ZCRYPT || COMPILE_TEST
- select IOMMU_API
- help
- Enables bits of IOMMU API required by VFIO. The iommu_ops
- is not implemented as it is not necessary for VFIO.
-
config MTK_IOMMU
tristate "MediaTek IOMMU Support"
depends on ARCH_MEDIATEK || COMPILE_TEST
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index e98f20a9bdd8..9beeceb9d825 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -15,9 +15,7 @@ extern irqreturn_t amd_iommu_int_thread(int irq, void *data);
extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
extern void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid);
extern void amd_iommu_restart_event_logging(struct amd_iommu *iommu);
-extern int amd_iommu_init_devices(void);
-extern void amd_iommu_uninit_devices(void);
-extern void amd_iommu_init_notifier(void);
+extern void amd_iommu_restart_ga_log(struct amd_iommu *iommu);
extern void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid);
#ifdef CONFIG_AMD_IOMMU_DEBUGFS
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 329a406cc37d..c2d80a4e5fb0 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -759,6 +759,30 @@ void amd_iommu_restart_event_logging(struct amd_iommu *iommu)
}
/*
+ * This function restarts GA logging in case the IOMMU experienced
+ * a GA log overflow.
+ */
+void amd_iommu_restart_ga_log(struct amd_iommu *iommu)
+{
+ u32 status;
+
+ status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
+ if (status & MMIO_STATUS_GALOG_RUN_MASK)
+ return;
+
+ pr_info_ratelimited("IOMMU GA Log restarting\n");
+
+ iommu_feature_disable(iommu, CONTROL_GALOG_EN);
+ iommu_feature_disable(iommu, CONTROL_GAINT_EN);
+
+ writel(MMIO_STATUS_GALOG_OVERFLOW_MASK,
+ iommu->mmio_base + MMIO_STATUS_OFFSET);
+
+ iommu_feature_enable(iommu, CONTROL_GAINT_EN);
+ iommu_feature_enable(iommu, CONTROL_GALOG_EN);
+}
+
+/*
* This function resets the command buffer if the IOMMU stopped fetching
* commands from it.
*/
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 1e9f85eec521..9ea40960978b 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -845,6 +845,7 @@ amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { }
(MMIO_STATUS_EVT_OVERFLOW_INT_MASK | \
MMIO_STATUS_EVT_INT_MASK | \
MMIO_STATUS_PPR_INT_MASK | \
+ MMIO_STATUS_GALOG_OVERFLOW_MASK | \
MMIO_STATUS_GALOG_INT_MASK)
irqreturn_t amd_iommu_int_thread(int irq, void *data)
@@ -868,10 +869,16 @@ irqreturn_t amd_iommu_int_thread(int irq, void *data)
}
#ifdef CONFIG_IRQ_REMAP
- if (status & MMIO_STATUS_GALOG_INT_MASK) {
+ if (status & (MMIO_STATUS_GALOG_INT_MASK |
+ MMIO_STATUS_GALOG_OVERFLOW_MASK)) {
pr_devel("Processing IOMMU GA Log\n");
iommu_poll_ga_log(iommu);
}
+
+ if (status & MMIO_STATUS_GALOG_OVERFLOW_MASK) {
+ pr_info_ratelimited("IOMMU GA Log overflow\n");
+ amd_iommu_restart_ga_log(iommu);
+ }
#endif
if (status & MMIO_STATUS_EVT_OVERFLOW_INT_MASK) {
@@ -2067,14 +2074,10 @@ static struct protection_domain *protection_domain_alloc(unsigned int type)
{
struct io_pgtable_ops *pgtbl_ops;
struct protection_domain *domain;
- int pgtable = amd_iommu_pgtable;
+ int pgtable;
int mode = DEFAULT_PGTABLE_LEVEL;
int ret;
- domain = kzalloc(sizeof(*domain), GFP_KERNEL);
- if (!domain)
- return NULL;
-
/*
* Force IOMMU v1 page table when iommu=pt and
* when allocating domain for pass-through devices.
@@ -2084,8 +2087,16 @@ static struct protection_domain *protection_domain_alloc(unsigned int type)
mode = PAGE_MODE_NONE;
} else if (type == IOMMU_DOMAIN_UNMANAGED) {
pgtable = AMD_IOMMU_V1;
+ } else if (type == IOMMU_DOMAIN_DMA || type == IOMMU_DOMAIN_DMA_FQ) {
+ pgtable = amd_iommu_pgtable;
+ } else {
+ return NULL;
}
+ domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+ if (!domain)
+ return NULL;
+
switch (pgtable) {
case AMD_IOMMU_V1:
ret = protection_domain_init_v1(domain, mode);
@@ -2118,6 +2129,15 @@ out_err:
return NULL;
}
+static inline u64 dma_max_address(void)
+{
+ if (amd_iommu_pgtable == AMD_IOMMU_V1)
+ return ~0ULL;
+
+ /* V2 with 4/5 level page table */
+ return ((1ULL << PM_LEVEL_SHIFT(amd_iommu_gpt_level)) - 1);
+}
+
static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
{
struct protection_domain *domain;
@@ -2134,7 +2154,7 @@ static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
return NULL;
domain->domain.geometry.aperture_start = 0;
- domain->domain.geometry.aperture_end = ~0ULL;
+ domain->domain.geometry.aperture_end = dma_max_address();
domain->domain.geometry.force_aperture = true;
return &domain->domain;
@@ -2387,7 +2407,7 @@ static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
unsigned long flags;
spin_lock_irqsave(&dom->lock, flags);
- domain_flush_pages(dom, gather->start, gather->end - gather->start, 1);
+ domain_flush_pages(dom, gather->start, gather->end - gather->start + 1, 1);
amd_iommu_domain_flush_complete(dom);
spin_unlock_irqrestore(&dom->lock, flags);
}
@@ -3491,8 +3511,7 @@ int amd_iommu_activate_guest_mode(void *data)
struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
u64 valid;
- if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
- !entry || entry->lo.fields_vapic.guest_mode)
+ if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) || !entry)
return 0;
valid = entry->lo.fields_vapic.valid;
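
One hunk above fixes an off-by-one in amd_iommu_iotlb_sync(): iommu_iotlb_gather stores an inclusive end address, so the flushed length is end - start + 1, and the old expression under-flushed the final byte (and hence potentially the final page). As a one-line reminder of the convention:

/* gather->end is the address of the last byte, so the span is inclusive. */
static inline unsigned long gather_len(unsigned long start, unsigned long end)
{
	return end - start + 1;	/* e.g. 0x1000..0x1fff -> 0x1000 bytes */
}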
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index ae09c627bc84..c71afda79d64 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -517,6 +517,7 @@ static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = {
{ .compatible = "qcom,qcm2290-smmu-500", .data = &qcom_smmu_500_impl0_data },
{ .compatible = "qcom,qdu1000-smmu-500", .data = &qcom_smmu_500_impl0_data },
{ .compatible = "qcom,sc7180-smmu-500", .data = &qcom_smmu_500_impl0_data },
+ { .compatible = "qcom,sc7180-smmu-v2", .data = &qcom_smmu_v2_data },
{ .compatible = "qcom,sc7280-smmu-500", .data = &qcom_smmu_500_impl0_data },
{ .compatible = "qcom,sc8180x-smmu-500", .data = &qcom_smmu_500_impl0_data },
{ .compatible = "qcom,sc8280xp-smmu-500", .data = &qcom_smmu_500_impl0_data },
@@ -561,5 +562,14 @@ struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu)
if (match)
return qcom_smmu_create(smmu, match->data);
+ /*
+ * If you hit this WARN() you are missing an entry in the
+ * qcom_smmu_impl_of_match[] table, and GPU per-process page-
+ * tables will be broken.
+ */
+ WARN(of_device_is_compatible(np, "qcom,adreno-smmu"),
+ "Missing qcom_smmu_impl_of_match entry for: %s",
+ dev_name(smmu->dev));
+
return smmu;
}
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index aecc7d154f28..e93906d6e112 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -781,7 +781,8 @@ static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
- mtk_iommu_tlb_flush_all(dom->bank->parent_data);
+ if (dom->bank)
+ mtk_iommu_tlb_flush_all(dom->bank->parent_data);
}
static void mtk_iommu_iotlb_sync(struct iommu_domain *domain,
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index ea5a3088bb7e..4054030c3237 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -1335,20 +1335,22 @@ static int rk_iommu_probe(struct platform_device *pdev)
for (i = 0; i < iommu->num_irq; i++) {
int irq = platform_get_irq(pdev, i);
- if (irq < 0)
- return irq;
+ if (irq < 0) {
+ err = irq;
+ goto err_pm_disable;
+ }
err = devm_request_irq(iommu->dev, irq, rk_iommu_irq,
IRQF_SHARED, dev_name(dev), iommu);
- if (err) {
- pm_runtime_disable(dev);
- goto err_remove_sysfs;
- }
+ if (err)
+ goto err_pm_disable;
}
dma_set_mask_and_coherent(dev, rk_ops->dma_bit_mask);
return 0;
+err_pm_disable:
+ pm_runtime_disable(dev);
err_remove_sysfs:
iommu_device_sysfs_remove(&iommu->iommu);
err_put_group:
diff --git a/drivers/irqchip/irq-clps711x.c b/drivers/irqchip/irq-clps711x.c
index 77ebe7e47e0e..e731e0784f7e 100644
--- a/drivers/irqchip/irq-clps711x.c
+++ b/drivers/irqchip/irq-clps711x.c
@@ -212,12 +212,6 @@ out_kfree:
return err;
}
-void __init clps711x_intc_init(phys_addr_t base, resource_size_t size)
-{
- BUG_ON(_clps711x_intc_init(NULL, base, size));
-}
-
-#ifdef CONFIG_IRQCHIP
static int __init clps711x_intc_init_dt(struct device_node *np,
struct device_node *parent)
{
@@ -231,4 +225,3 @@ static int __init clps711x_intc_init_dt(struct device_node *np,
return _clps711x_intc_init(np, res.start, resource_size(&res));
}
IRQCHIP_DECLARE(clps711x, "cirrus,ep7209-intc", clps711x_intc_init_dt);
-#endif
diff --git a/drivers/irqchip/irq-ftintc010.c b/drivers/irqchip/irq-ftintc010.c
index 46a3aa60e50e..359efc1d1be7 100644
--- a/drivers/irqchip/irq-ftintc010.c
+++ b/drivers/irqchip/irq-ftintc010.c
@@ -125,7 +125,7 @@ static struct irq_chip ft010_irq_chip = {
/* Local static for the IRQ entry call */
static struct ft010_irq_data firq;
-asmlinkage void __exception_irq_entry ft010_irqchip_handle_irq(struct pt_regs *regs)
+static asmlinkage void __exception_irq_entry ft010_irqchip_handle_irq(struct pt_regs *regs)
{
struct ft010_irq_data *f = &firq;
int irq;
@@ -162,7 +162,7 @@ static const struct irq_domain_ops ft010_irqdomain_ops = {
.xlate = irq_domain_xlate_onetwocell,
};
-int __init ft010_of_init_irq(struct device_node *node,
+static int __init ft010_of_init_irq(struct device_node *node,
struct device_node *parent)
{
struct ft010_irq_data *f = &firq;
diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c
index a610821c8ff2..afd6a1841715 100644
--- a/drivers/irqchip/irq-gic-common.c
+++ b/drivers/irqchip/irq-gic-common.c
@@ -16,7 +16,13 @@ void gic_enable_of_quirks(const struct device_node *np,
const struct gic_quirk *quirks, void *data)
{
for (; quirks->desc; quirks++) {
- if (!of_device_is_compatible(np, quirks->compatible))
+ if (!quirks->compatible && !quirks->property)
+ continue;
+ if (quirks->compatible &&
+ !of_device_is_compatible(np, quirks->compatible))
+ continue;
+ if (quirks->property &&
+ !of_property_read_bool(np, quirks->property))
continue;
if (quirks->init(data))
pr_info("GIC: enabling workaround for %s\n",
@@ -28,7 +34,7 @@ void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks,
void *data)
{
for (; quirks->desc; quirks++) {
- if (quirks->compatible)
+ if (quirks->compatible || quirks->property)
continue;
if (quirks->iidr != (quirks->mask & iidr))
continue;
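
The gic-common hunks extend the quirk tables so an entry can match either on a DT compatible string or on the mere presence of a DT property, while IIDR-only entries are skipped by both OF paths. A sketch that mirrors the new matching logic outside the GIC code (struct and function names are hypothetical):

#include <linux/of.h>
#include <linux/printk.h>

struct quirk {
	const char *desc;
	const char *compatible;	/* match on a DT compatible string, or... */
	const char *property;	/* ...on the presence of a DT property */
	bool (*init)(void *data);
};

static void enable_of_quirks(const struct device_node *np,
			     const struct quirk *quirks, void *data)
{
	for (; quirks->desc; quirks++) {
		/* IIDR-only entries carry neither OF key: not ours. */
		if (!quirks->compatible && !quirks->property)
			continue;
		if (quirks->compatible &&
		    !of_device_is_compatible(np, quirks->compatible))
			continue;
		if (quirks->property &&
		    !of_property_read_bool(np, quirks->property))
			continue;
		if (quirks->init(data))
			pr_info("enabling workaround for %s\n", quirks->desc);
	}
}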
diff --git a/drivers/irqchip/irq-gic-common.h b/drivers/irqchip/irq-gic-common.h
index 27e3d4ed4f32..3db4592cda1c 100644
--- a/drivers/irqchip/irq-gic-common.h
+++ b/drivers/irqchip/irq-gic-common.h
@@ -13,6 +13,7 @@
struct gic_quirk {
const char *desc;
const char *compatible;
+ const char *property;
bool (*init)(void *data);
u32 iidr;
u32 mask;
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 0ec2b1e1df75..1994541eaef8 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -3585,6 +3585,7 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
irqd = irq_get_irq_data(virq + i);
irqd_set_single_target(irqd);
irqd_set_affinity_on_activate(irqd);
+ irqd_set_resend_when_in_progress(irqd);
pr_debug("ID:%d pID:%d vID:%d\n",
(int)(hwirq + i - its_dev->event_map.lpi_base),
(int)(hwirq + i), virq + i);
@@ -4523,6 +4524,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
irq_domain_set_hwirq_and_chip(domain, virq + i, i,
irqchip, vm->vpes[i]);
set_bit(i, bitmap);
+ irqd_set_resend_when_in_progress(irq_get_irq_data(virq + i));
}
if (err) {
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 6fcee221f201..0c6c1af9a5b7 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -39,6 +39,8 @@
#define FLAGS_WORKAROUND_GICR_WAKER_MSM8996 (1ULL << 0)
#define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539 (1ULL << 1)
+#define FLAGS_WORKAROUND_MTK_GICR_SAVE (1ULL << 2)
+#define FLAGS_WORKAROUND_ASR_ERRATUM_8601001 (1ULL << 3)
#define GIC_IRQ_TYPE_PARTITION (GIC_IRQ_TYPE_LPI + 1)
@@ -655,10 +657,16 @@ static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
return 0;
}
-static u64 gic_mpidr_to_affinity(unsigned long mpidr)
+static u64 gic_cpu_to_affinity(int cpu)
{
+ u64 mpidr = cpu_logical_map(cpu);
u64 aff;
+ /* ASR8601 needs to have its affinities shifted down... */
+ if (unlikely(gic_data.flags & FLAGS_WORKAROUND_ASR_ERRATUM_8601001))
+ mpidr = (MPIDR_AFFINITY_LEVEL(mpidr, 1) |
+ (MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8));
+
aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
@@ -913,7 +921,7 @@ static void __init gic_dist_init(void)
* Set all global interrupts to the boot CPU only. ARE must be
* enabled.
*/
- affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
+ affinity = gic_cpu_to_affinity(smp_processor_id());
for (i = 32; i < GIC_LINE_NR; i++)
gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);
@@ -962,7 +970,7 @@ static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *))
static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr)
{
- unsigned long mpidr = cpu_logical_map(smp_processor_id());
+ unsigned long mpidr;
u64 typer;
u32 aff;
@@ -970,6 +978,8 @@ static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr)
* Convert affinity to a 32bit value that can be matched to
* GICR_TYPER bits [63:32].
*/
+ mpidr = gic_cpu_to_affinity(smp_processor_id());
+
aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
@@ -1083,7 +1093,7 @@ static inline bool gic_dist_security_disabled(void)
static void gic_cpu_sys_reg_init(void)
{
int i, cpu = smp_processor_id();
- u64 mpidr = cpu_logical_map(cpu);
+ u64 mpidr = gic_cpu_to_affinity(cpu);
u64 need_rss = MPIDR_RS(mpidr);
bool group0;
u32 pribits;
@@ -1182,11 +1192,11 @@ static void gic_cpu_sys_reg_init(void)
for_each_online_cpu(i) {
bool have_rss = per_cpu(has_rss, i) && per_cpu(has_rss, cpu);
- need_rss |= MPIDR_RS(cpu_logical_map(i));
+ need_rss |= MPIDR_RS(gic_cpu_to_affinity(i));
if (need_rss && (!have_rss))
pr_crit("CPU%d (%lx) can't SGI CPU%d (%lx), no RSS\n",
cpu, (unsigned long)mpidr,
- i, (unsigned long)cpu_logical_map(i));
+ i, (unsigned long)gic_cpu_to_affinity(i));
}
/**
@@ -1262,9 +1272,11 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
unsigned long cluster_id)
{
int next_cpu, cpu = *base_cpu;
- unsigned long mpidr = cpu_logical_map(cpu);
+ unsigned long mpidr;
u16 tlist = 0;
+ mpidr = gic_cpu_to_affinity(cpu);
+
while (cpu < nr_cpu_ids) {
tlist |= 1 << (mpidr & 0xf);
@@ -1273,7 +1285,7 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
goto out;
cpu = next_cpu;
- mpidr = cpu_logical_map(cpu);
+ mpidr = gic_cpu_to_affinity(cpu);
if (cluster_id != MPIDR_TO_SGI_CLUSTER_ID(mpidr)) {
cpu--;
@@ -1318,7 +1330,7 @@ static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
dsb(ishst);
for_each_cpu(cpu, mask) {
- u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu));
+ u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(gic_cpu_to_affinity(cpu));
u16 tlist;
tlist = gic_compute_target_list(&cpu, mask, cluster_id);
@@ -1376,7 +1388,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
offset = convert_offset_index(d, GICD_IROUTER, &index);
reg = gic_dist_base(d) + offset + (index * 8);
- val = gic_mpidr_to_affinity(cpu_logical_map(cpu));
+ val = gic_cpu_to_affinity(cpu);
gic_write_irouter(val, reg);
@@ -1720,6 +1732,15 @@ static bool gic_enable_quirk_msm8996(void *data)
return true;
}
+static bool gic_enable_quirk_mtk_gicr(void *data)
+{
+ struct gic_chip_data *d = data;
+
+ d->flags |= FLAGS_WORKAROUND_MTK_GICR_SAVE;
+
+ return true;
+}
+
static bool gic_enable_quirk_cavium_38539(void *data)
{
struct gic_chip_data *d = data;
@@ -1786,6 +1807,15 @@ static bool gic_enable_quirk_nvidia_t241(void *data)
return true;
}
+static bool gic_enable_quirk_asr8601(void *data)
+{
+ struct gic_chip_data *d = data;
+
+ d->flags |= FLAGS_WORKAROUND_ASR_ERRATUM_8601001;
+
+ return true;
+}
+
static const struct gic_quirk gic_quirks[] = {
{
.desc = "GICv3: Qualcomm MSM8996 broken firmware",
@@ -1793,6 +1823,16 @@ static const struct gic_quirk gic_quirks[] = {
.init = gic_enable_quirk_msm8996,
},
{
+ .desc = "GICv3: ASR erratum 8601001",
+ .compatible = "asr,asr8601-gic-v3",
+ .init = gic_enable_quirk_asr8601,
+ },
+ {
+ .desc = "GICv3: Mediatek Chromebook GICR save problem",
+ .property = "mediatek,broken-save-restore-fw",
+ .init = gic_enable_quirk_mtk_gicr,
+ },
+ {
.desc = "GICv3: HIP06 erratum 161010803",
.iidr = 0x0204043b,
.mask = 0xffffffff,
@@ -1834,6 +1874,11 @@ static void gic_enable_nmi_support(void)
if (!gic_prio_masking_enabled())
return;
+ if (gic_data.flags & FLAGS_WORKAROUND_MTK_GICR_SAVE) {
+ pr_warn("Skipping NMI enable due to firmware issues\n");
+ return;
+ }
+
ppi_nmi_refs = kcalloc(gic_data.ppi_nr, sizeof(*ppi_nmi_refs), GFP_KERNEL);
if (!ppi_nmi_refs)
return;
diff --git a/drivers/irqchip/irq-jcore-aic.c b/drivers/irqchip/irq-jcore-aic.c
index 5f47d8ee4ae3..b9dcc8e78c75 100644
--- a/drivers/irqchip/irq-jcore-aic.c
+++ b/drivers/irqchip/irq-jcore-aic.c
@@ -68,6 +68,7 @@ static int __init aic_irq_of_init(struct device_node *node,
unsigned min_irq = JCORE_AIC2_MIN_HWIRQ;
unsigned dom_sz = JCORE_AIC_MAX_HWIRQ+1;
struct irq_domain *domain;
+ int ret;
pr_info("Initializing J-Core AIC\n");
@@ -100,6 +101,12 @@ static int __init aic_irq_of_init(struct device_node *node,
jcore_aic.irq_unmask = noop;
jcore_aic.name = "AIC";
+ ret = irq_alloc_descs(-1, min_irq, dom_sz - min_irq,
+ of_node_to_nid(node));
+
+ if (ret < 0)
+ return ret;
+
domain = irq_domain_add_legacy(node, dom_sz - min_irq, min_irq, min_irq,
&jcore_aic_irqdomain_ops,
&jcore_aic);
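
The jcore-aic fix pre-allocates the IRQ descriptors before creating the legacy domain: with CONFIG_SPARSE_IRQ, irq_domain_add_legacy() only maps an existing descriptor range, it does not create one. A hedged sketch of the combined sequence (parameter names follow the driver; the wrapper is hypothetical):

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

static struct irq_domain *add_legacy_domain(struct device_node *node,
					    unsigned int min_irq,
					    unsigned int dom_sz,
					    const struct irq_domain_ops *ops,
					    void *host_data)
{
	/* Reserve the linear descriptor range up front... */
	int ret = irq_alloc_descs(-1, min_irq, dom_sz - min_irq,
				  of_node_to_nid(node));

	if (ret < 0)
		return NULL;

	/* ...then hand the pre-existing range to the legacy domain. */
	return irq_domain_add_legacy(node, dom_sz - min_irq, min_irq, min_irq,
				     ops, host_data);
}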
diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c
index 71ef19f77a5a..92d8aa28bdf5 100644
--- a/drivers/irqchip/irq-loongson-eiointc.c
+++ b/drivers/irqchip/irq-loongson-eiointc.c
@@ -36,6 +36,7 @@ static int nr_pics;
struct eiointc_priv {
u32 node;
+ u32 vec_count;
nodemask_t node_map;
cpumask_t cpuspan_map;
struct fwnode_handle *domain_handle;
@@ -153,18 +154,18 @@ static int eiointc_router_init(unsigned int cpu)
if ((cpu_logical_map(cpu) % CORES_PER_EIO_NODE) == 0) {
eiointc_enable();
- for (i = 0; i < VEC_COUNT / 32; i++) {
+ for (i = 0; i < eiointc_priv[0]->vec_count / 32; i++) {
data = (((1 << (i * 2 + 1)) << 16) | (1 << (i * 2)));
iocsr_write32(data, EIOINTC_REG_NODEMAP + i * 4);
}
- for (i = 0; i < VEC_COUNT / 32 / 4; i++) {
+ for (i = 0; i < eiointc_priv[0]->vec_count / 32 / 4; i++) {
bit = BIT(1 + index); /* Route to IP[1 + index] */
data = bit | (bit << 8) | (bit << 16) | (bit << 24);
iocsr_write32(data, EIOINTC_REG_IPMAP + i * 4);
}
- for (i = 0; i < VEC_COUNT / 4; i++) {
+ for (i = 0; i < eiointc_priv[0]->vec_count / 4; i++) {
/* Route to Node-0 Core-0 */
if (index == 0)
bit = BIT(cpu_logical_map(0));
@@ -175,7 +176,7 @@ static int eiointc_router_init(unsigned int cpu)
iocsr_write32(data, EIOINTC_REG_ROUTE + i * 4);
}
- for (i = 0; i < VEC_COUNT / 32; i++) {
+ for (i = 0; i < eiointc_priv[0]->vec_count / 32; i++) {
data = 0xffffffff;
iocsr_write32(data, EIOINTC_REG_ENABLE + i * 4);
iocsr_write32(data, EIOINTC_REG_BOUNCE + i * 4);
@@ -195,7 +196,7 @@ static void eiointc_irq_dispatch(struct irq_desc *desc)
chained_irq_enter(chip, desc);
- for (i = 0; i < VEC_REG_COUNT; i++) {
+ for (i = 0; i < eiointc_priv[0]->vec_count / VEC_COUNT_PER_REG; i++) {
pending = iocsr_read64(EIOINTC_REG_ISR + (i << 3));
iocsr_write64(pending, EIOINTC_REG_ISR + (i << 3));
while (pending) {
@@ -310,11 +311,11 @@ static void eiointc_resume(void)
eiointc_router_init(0);
for (i = 0; i < nr_pics; i++) {
- for (j = 0; j < VEC_COUNT; j++) {
+ for (j = 0; j < eiointc_priv[0]->vec_count; j++) {
desc = irq_resolve_mapping(eiointc_priv[i]->eiointc_domain, j);
if (desc && desc->handle_irq && desc->handle_irq != handle_bad_irq) {
raw_spin_lock(&desc->lock);
- irq_data = &desc->irq_data;
+ irq_data = irq_domain_get_irq_data(eiointc_priv[i]->eiointc_domain, irq_desc_get_irq(desc));
eiointc_set_irq_affinity(irq_data, irq_data->common->affinity, 0);
raw_spin_unlock(&desc->lock);
}
@@ -375,11 +376,47 @@ static int __init acpi_cascade_irqdomain_init(void)
return 0;
}
+static int __init eiointc_init(struct eiointc_priv *priv, int parent_irq,
+ u64 node_map)
+{
+ int i;
+
+ node_map = node_map ? node_map : -1ULL;
+ for_each_possible_cpu(i) {
+ if (node_map & (1ULL << (cpu_to_eio_node(i)))) {
+ node_set(cpu_to_eio_node(i), priv->node_map);
+ cpumask_or(&priv->cpuspan_map, &priv->cpuspan_map,
+ cpumask_of(i));
+ }
+ }
+
+ priv->eiointc_domain = irq_domain_create_linear(priv->domain_handle,
+ priv->vec_count,
+ &eiointc_domain_ops,
+ priv);
+ if (!priv->eiointc_domain) {
+ pr_err("loongson-extioi: cannot add IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ eiointc_priv[nr_pics++] = priv;
+ eiointc_router_init(0);
+ irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch, priv);
+
+ if (nr_pics == 1) {
+ register_syscore_ops(&eiointc_syscore_ops);
+ cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_LOONGARCH_STARTING,
+ "irqchip/loongarch/intc:starting",
+ eiointc_router_init, NULL);
+ }
+
+ return 0;
+}
+
int __init eiointc_acpi_init(struct irq_domain *parent,
struct acpi_madt_eio_pic *acpi_eiointc)
{
- int i, ret, parent_irq;
- unsigned long node_map;
+ int parent_irq, ret;
struct eiointc_priv *priv;
int node;
@@ -394,37 +431,14 @@ int __init eiointc_acpi_init(struct irq_domain *parent,
goto out_free_priv;
}
+ priv->vec_count = VEC_COUNT;
priv->node = acpi_eiointc->node;
- node_map = acpi_eiointc->node_map ? : -1ULL;
-
- for_each_possible_cpu(i) {
- if (node_map & (1ULL << cpu_to_eio_node(i))) {
- node_set(cpu_to_eio_node(i), priv->node_map);
- cpumask_or(&priv->cpuspan_map, &priv->cpuspan_map, cpumask_of(i));
- }
- }
-
- /* Setup IRQ domain */
- priv->eiointc_domain = irq_domain_create_linear(priv->domain_handle, VEC_COUNT,
- &eiointc_domain_ops, priv);
- if (!priv->eiointc_domain) {
- pr_err("loongson-eiointc: cannot add IRQ domain\n");
- goto out_free_handle;
- }
-
- eiointc_priv[nr_pics++] = priv;
-
- eiointc_router_init(0);
parent_irq = irq_create_mapping(parent, acpi_eiointc->cascade);
- irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch, priv);
- if (nr_pics == 1) {
- register_syscore_ops(&eiointc_syscore_ops);
- cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_LOONGARCH_STARTING,
- "irqchip/loongarch/intc:starting",
- eiointc_router_init, NULL);
- }
+ ret = eiointc_init(priv, parent_irq, acpi_eiointc->node_map);
+ if (ret < 0)
+ goto out_free_handle;
if (cpu_has_flatmode)
node = cpu_to_node(acpi_eiointc->node * CORES_PER_EIO_NODE);
@@ -432,7 +446,10 @@ int __init eiointc_acpi_init(struct irq_domain *parent,
node = acpi_eiointc->node;
acpi_set_vec_parent(node, priv->eiointc_domain, pch_group);
acpi_set_vec_parent(node, priv->eiointc_domain, msi_group);
+
ret = acpi_cascade_irqdomain_init();
+ if (ret < 0)
+ goto out_free_handle;
return ret;
@@ -444,3 +461,49 @@ out_free_priv:
return -ENOMEM;
}
+
+static int __init eiointc_of_init(struct device_node *of_node,
+ struct device_node *parent)
+{
+ int parent_irq, ret;
+ struct eiointc_priv *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ parent_irq = irq_of_parse_and_map(of_node, 0);
+ if (parent_irq <= 0) {
+ ret = -ENODEV;
+ goto out_free_priv;
+ }
+
+ ret = irq_set_handler_data(parent_irq, priv);
+ if (ret < 0)
+ goto out_free_priv;
+
+ /*
+ * The LS2K0500 extended I/O interrupt controller only supports
+ * 128 interrupt vectors.
+ */
+ if (of_device_is_compatible(of_node, "loongson,ls2k0500-eiointc"))
+ priv->vec_count = 128;
+ else
+ priv->vec_count = VEC_COUNT;
+
+ priv->node = 0;
+ priv->domain_handle = of_node_to_fwnode(of_node);
+
+ ret = eiointc_init(priv, parent_irq, 0);
+ if (ret < 0)
+ goto out_free_priv;
+
+ return 0;
+
+out_free_priv:
+ kfree(priv);
+ return ret;
+}
+
+IRQCHIP_DECLARE(loongson_ls2k0500_eiointc, "loongson,ls2k0500-eiointc", eiointc_of_init);
+IRQCHIP_DECLARE(loongson_ls2k2000_eiointc, "loongson,ls2k2000-eiointc", eiointc_of_init);
diff --git a/drivers/irqchip/irq-loongson-liointc.c b/drivers/irqchip/irq-loongson-liointc.c
index 8d00a9ad5b00..e4b33aed1c97 100644
--- a/drivers/irqchip/irq-loongson-liointc.c
+++ b/drivers/irqchip/irq-loongson-liointc.c
@@ -32,6 +32,10 @@
#define LIOINTC_REG_INTC_EN_STATUS (LIOINTC_INTC_CHIP_START + 0x04)
#define LIOINTC_REG_INTC_ENABLE (LIOINTC_INTC_CHIP_START + 0x08)
#define LIOINTC_REG_INTC_DISABLE (LIOINTC_INTC_CHIP_START + 0x0c)
+/*
+ * The LIOINTC_REG_INTC_POL register is only valid on the Loongson-2K
+ * series; on the Loongson-3 series writes to it are no-ops.
+ */
#define LIOINTC_REG_INTC_POL (LIOINTC_INTC_CHIP_START + 0x10)
#define LIOINTC_REG_INTC_EDGE (LIOINTC_INTC_CHIP_START + 0x14)
@@ -116,19 +120,19 @@ static int liointc_set_type(struct irq_data *data, unsigned int type)
switch (type) {
case IRQ_TYPE_LEVEL_HIGH:
liointc_set_bit(gc, LIOINTC_REG_INTC_EDGE, mask, false);
- liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, true);
+ liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, false);
break;
case IRQ_TYPE_LEVEL_LOW:
liointc_set_bit(gc, LIOINTC_REG_INTC_EDGE, mask, false);
- liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, false);
+ liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, true);
break;
case IRQ_TYPE_EDGE_RISING:
liointc_set_bit(gc, LIOINTC_REG_INTC_EDGE, mask, true);
- liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, true);
+ liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, false);
break;
case IRQ_TYPE_EDGE_FALLING:
liointc_set_bit(gc, LIOINTC_REG_INTC_EDGE, mask, true);
- liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, false);
+ liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, true);
break;
default:
irq_gc_unlock_irqrestore(gc, flags);
@@ -291,6 +295,7 @@ static int liointc_init(phys_addr_t addr, unsigned long size, int revision,
ct->chip.irq_mask = irq_gc_mask_disable_reg;
ct->chip.irq_mask_ack = irq_gc_mask_disable_reg;
ct->chip.irq_set_type = liointc_set_type;
+ ct->chip.flags = IRQCHIP_SKIP_SET_WAKE;
gc->mask_cache = 0;
priv->gc = gc;
diff --git a/drivers/irqchip/irq-loongson-pch-pic.c b/drivers/irqchip/irq-loongson-pch-pic.c
index e5fe4d50be05..93a71f66efeb 100644
--- a/drivers/irqchip/irq-loongson-pch-pic.c
+++ b/drivers/irqchip/irq-loongson-pch-pic.c
@@ -164,7 +164,7 @@ static int pch_pic_domain_translate(struct irq_domain *d,
if (fwspec->param_count < 2)
return -EINVAL;
- *hwirq = fwspec->param[0] + priv->ht_vec_base;
+ *hwirq = fwspec->param[0];
*type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
} else {
if (fwspec->param_count < 1)
@@ -196,7 +196,7 @@ static int pch_pic_alloc(struct irq_domain *domain, unsigned int virq,
parent_fwspec.fwnode = domain->parent->fwnode;
parent_fwspec.param_count = 1;
- parent_fwspec.param[0] = hwirq;
+ parent_fwspec.param[0] = hwirq + priv->ht_vec_base;
err = irq_domain_alloc_irqs_parent(domain, virq, 1, &parent_fwspec);
if (err)
@@ -401,14 +401,12 @@ static int __init acpi_cascade_irqdomain_init(void)
int __init pch_pic_acpi_init(struct irq_domain *parent,
struct acpi_madt_bio_pic *acpi_pchpic)
{
- int ret, vec_base;
+ int ret;
struct fwnode_handle *domain_handle;
if (find_pch_pic(acpi_pchpic->gsi_base) >= 0)
return 0;
- vec_base = acpi_pchpic->gsi_base - GSI_MIN_PCH_IRQ;
-
domain_handle = irq_domain_alloc_fwnode(&acpi_pchpic->address);
if (!domain_handle) {
pr_err("Unable to allocate domain handle\n");
@@ -416,7 +414,7 @@ int __init pch_pic_acpi_init(struct irq_domain *parent,
}
ret = pch_pic_init(acpi_pchpic->address, acpi_pchpic->size,
- vec_base, parent, domain_handle, acpi_pchpic->gsi_base);
+ 0, parent, domain_handle, acpi_pchpic->gsi_base);
if (ret < 0) {
irq_domain_free_fwnode(domain_handle);
diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
index eada5e0e3eb9..5101a3fb11df 100644
--- a/drivers/irqchip/irq-mbigen.c
+++ b/drivers/irqchip/irq-mbigen.c
@@ -240,26 +240,27 @@ static int mbigen_of_create_domain(struct platform_device *pdev,
struct irq_domain *domain;
struct device_node *np;
u32 num_pins;
+ int ret = 0;
+
+ parent = bus_get_dev_root(&platform_bus_type);
+ if (!parent)
+ return -ENODEV;
for_each_child_of_node(pdev->dev.of_node, np) {
if (!of_property_read_bool(np, "interrupt-controller"))
continue;
- parent = bus_get_dev_root(&platform_bus_type);
- if (parent) {
- child = of_platform_device_create(np, NULL, parent);
- put_device(parent);
- if (!child) {
- of_node_put(np);
- return -ENOMEM;
- }
+ child = of_platform_device_create(np, NULL, parent);
+ if (!child) {
+ ret = -ENOMEM;
+ break;
}
if (of_property_read_u32(child->dev.of_node, "num-pins",
&num_pins) < 0) {
dev_err(&pdev->dev, "No num-pins property\n");
- of_node_put(np);
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
domain = platform_msi_create_device_domain(&child->dev, num_pins,
@@ -267,12 +268,16 @@ static int mbigen_of_create_domain(struct platform_device *pdev,
&mbigen_domain_ops,
mgn_chip);
if (!domain) {
- of_node_put(np);
- return -ENOMEM;
+ ret = -ENOMEM;
+ break;
}
}
- return 0;
+ put_device(parent);
+ if (ret)
+ of_node_put(np);
+
+ return ret;
}
#ifdef CONFIG_ACPI
diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c
index 2aaa9aad3e87..7da18ef95211 100644
--- a/drivers/irqchip/irq-meson-gpio.c
+++ b/drivers/irqchip/irq-meson-gpio.c
@@ -150,7 +150,7 @@ static const struct meson_gpio_irq_params s4_params = {
INIT_MESON_S4_COMMON_DATA(82)
};
-static const struct of_device_id meson_irq_gpio_matches[] = {
+static const struct of_device_id meson_irq_gpio_matches[] __maybe_unused = {
{ .compatible = "amlogic,meson8-gpio-intc", .data = &meson8_params },
{ .compatible = "amlogic,meson8b-gpio-intc", .data = &meson8b_params },
{ .compatible = "amlogic,meson-gxbb-gpio-intc", .data = &gxbb_params },
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 046c355e120b..6d5ecc10a870 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -50,7 +50,7 @@ void __iomem *mips_gic_base;
static DEFINE_PER_CPU_READ_MOSTLY(unsigned long[GIC_MAX_LONGS], pcpu_masks);
-static DEFINE_SPINLOCK(gic_lock);
+static DEFINE_RAW_SPINLOCK(gic_lock);
static struct irq_domain *gic_irq_domain;
static int gic_shared_intrs;
static unsigned int gic_cpu_pin;
@@ -210,7 +210,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
- spin_lock_irqsave(&gic_lock, flags);
+ raw_spin_lock_irqsave(&gic_lock, flags);
switch (type & IRQ_TYPE_SENSE_MASK) {
case IRQ_TYPE_EDGE_FALLING:
pol = GIC_POL_FALLING_EDGE;
@@ -250,7 +250,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
else
irq_set_chip_handler_name_locked(d, &gic_level_irq_controller,
handle_level_irq, NULL);
- spin_unlock_irqrestore(&gic_lock, flags);
+ raw_spin_unlock_irqrestore(&gic_lock, flags);
return 0;
}
@@ -268,7 +268,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
return -EINVAL;
/* Assumption : cpumask refers to a single CPU */
- spin_lock_irqsave(&gic_lock, flags);
+ raw_spin_lock_irqsave(&gic_lock, flags);
/* Re-route this IRQ */
write_gic_map_vp(irq, BIT(mips_cm_vp_id(cpu)));
@@ -279,7 +279,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
set_bit(irq, per_cpu_ptr(pcpu_masks, cpu));
irq_data_update_effective_affinity(d, cpumask_of(cpu));
- spin_unlock_irqrestore(&gic_lock, flags);
+ raw_spin_unlock_irqrestore(&gic_lock, flags);
return IRQ_SET_MASK_OK;
}
@@ -357,12 +357,12 @@ static void gic_mask_local_irq_all_vpes(struct irq_data *d)
cd = irq_data_get_irq_chip_data(d);
cd->mask = false;
- spin_lock_irqsave(&gic_lock, flags);
+ raw_spin_lock_irqsave(&gic_lock, flags);
for_each_online_cpu(cpu) {
write_gic_vl_other(mips_cm_vp_id(cpu));
write_gic_vo_rmask(BIT(intr));
}
- spin_unlock_irqrestore(&gic_lock, flags);
+ raw_spin_unlock_irqrestore(&gic_lock, flags);
}
static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
@@ -375,12 +375,12 @@ static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
cd = irq_data_get_irq_chip_data(d);
cd->mask = true;
- spin_lock_irqsave(&gic_lock, flags);
+ raw_spin_lock_irqsave(&gic_lock, flags);
for_each_online_cpu(cpu) {
write_gic_vl_other(mips_cm_vp_id(cpu));
write_gic_vo_smask(BIT(intr));
}
- spin_unlock_irqrestore(&gic_lock, flags);
+ raw_spin_unlock_irqrestore(&gic_lock, flags);
}
static void gic_all_vpes_irq_cpu_online(void)
@@ -393,19 +393,21 @@ static void gic_all_vpes_irq_cpu_online(void)
unsigned long flags;
int i;
- spin_lock_irqsave(&gic_lock, flags);
+ raw_spin_lock_irqsave(&gic_lock, flags);
for (i = 0; i < ARRAY_SIZE(local_intrs); i++) {
unsigned int intr = local_intrs[i];
struct gic_all_vpes_chip_data *cd;
+ if (!gic_local_irq_is_routable(intr))
+ continue;
cd = &gic_all_vpes_chip_data[intr];
write_gic_vl_map(mips_gic_vx_map_reg(intr), cd->map);
if (cd->mask)
write_gic_vl_smask(BIT(intr));
}
- spin_unlock_irqrestore(&gic_lock, flags);
+ raw_spin_unlock_irqrestore(&gic_lock, flags);
}
static struct irq_chip gic_all_vpes_local_irq_controller = {
@@ -435,11 +437,11 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
data = irq_get_irq_data(virq);
- spin_lock_irqsave(&gic_lock, flags);
+ raw_spin_lock_irqsave(&gic_lock, flags);
write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu)));
irq_data_update_effective_affinity(data, cpumask_of(cpu));
- spin_unlock_irqrestore(&gic_lock, flags);
+ raw_spin_unlock_irqrestore(&gic_lock, flags);
return 0;
}
@@ -531,12 +533,12 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
if (!gic_local_irq_is_routable(intr))
return -EPERM;
- spin_lock_irqsave(&gic_lock, flags);
+ raw_spin_lock_irqsave(&gic_lock, flags);
for_each_online_cpu(cpu) {
write_gic_vl_other(mips_cm_vp_id(cpu));
write_gic_vo_map(mips_gic_vx_map_reg(intr), map);
}
- spin_unlock_irqrestore(&gic_lock, flags);
+ raw_spin_unlock_irqrestore(&gic_lock, flags);
return 0;
}
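
The mips-gic hunks switch gic_lock to a raw spinlock: under PREEMPT_RT an ordinary spinlock_t becomes a sleeping lock, which must not be taken from the hard-IRQ paths these functions run in. The two flavors side by side (a sketch; the lock and body are hypothetical):

#include <linux/spinlock.h>

/* On PREEMPT_RT, spinlock_t sleeps; raw_spinlock_t always truly spins. */
static DEFINE_RAW_SPINLOCK(hw_lock);

static void poke_hw(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&hw_lock, flags);
	/* ... short, non-sleeping register sequence ... */
	raw_spin_unlock_irqrestore(&hw_lock, flags);
}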
diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c
index 83455ca72439..25cf4f80e767 100644
--- a/drivers/irqchip/irq-mmp.c
+++ b/drivers/irqchip/irq-mmp.c
@@ -244,132 +244,6 @@ static void __exception_irq_entry mmp2_handle_irq(struct pt_regs *regs)
generic_handle_domain_irq(icu_data[0].domain, hwirq);
}
-/* MMP (ARMv5) */
-void __init icu_init_irq(void)
-{
- int irq;
-
- max_icu_nr = 1;
- mmp_icu_base = ioremap(0xd4282000, 0x1000);
- icu_data[0].conf_enable = mmp_conf.conf_enable;
- icu_data[0].conf_disable = mmp_conf.conf_disable;
- icu_data[0].conf_mask = mmp_conf.conf_mask;
- icu_data[0].nr_irqs = 64;
- icu_data[0].virq_base = 0;
- icu_data[0].domain = irq_domain_add_legacy(NULL, 64, 0, 0,
- &irq_domain_simple_ops,
- &icu_data[0]);
- for (irq = 0; irq < 64; irq++) {
- icu_mask_irq(irq_get_irq_data(irq));
- irq_set_chip_and_handler(irq, &icu_irq_chip, handle_level_irq);
- }
- irq_set_default_host(icu_data[0].domain);
- set_handle_irq(mmp_handle_irq);
-}
-
-/* MMP2 (ARMv7) */
-void __init mmp2_init_icu(void)
-{
- int irq, end;
-
- max_icu_nr = 8;
- mmp_icu_base = ioremap(0xd4282000, 0x1000);
- icu_data[0].conf_enable = mmp2_conf.conf_enable;
- icu_data[0].conf_disable = mmp2_conf.conf_disable;
- icu_data[0].conf_mask = mmp2_conf.conf_mask;
- icu_data[0].nr_irqs = 64;
- icu_data[0].virq_base = 0;
- icu_data[0].domain = irq_domain_add_legacy(NULL, 64, 0, 0,
- &irq_domain_simple_ops,
- &icu_data[0]);
- icu_data[1].reg_status = mmp_icu_base + 0x150;
- icu_data[1].reg_mask = mmp_icu_base + 0x168;
- icu_data[1].clr_mfp_irq_base = icu_data[0].virq_base +
- icu_data[0].nr_irqs;
- icu_data[1].clr_mfp_hwirq = 1; /* offset to IRQ_MMP2_PMIC_BASE */
- icu_data[1].nr_irqs = 2;
- icu_data[1].cascade_irq = 4;
- icu_data[1].virq_base = icu_data[0].virq_base + icu_data[0].nr_irqs;
- icu_data[1].domain = irq_domain_add_legacy(NULL, icu_data[1].nr_irqs,
- icu_data[1].virq_base, 0,
- &irq_domain_simple_ops,
- &icu_data[1]);
- icu_data[2].reg_status = mmp_icu_base + 0x154;
- icu_data[2].reg_mask = mmp_icu_base + 0x16c;
- icu_data[2].nr_irqs = 2;
- icu_data[2].cascade_irq = 5;
- icu_data[2].virq_base = icu_data[1].virq_base + icu_data[1].nr_irqs;
- icu_data[2].domain = irq_domain_add_legacy(NULL, icu_data[2].nr_irqs,
- icu_data[2].virq_base, 0,
- &irq_domain_simple_ops,
- &icu_data[2]);
- icu_data[3].reg_status = mmp_icu_base + 0x180;
- icu_data[3].reg_mask = mmp_icu_base + 0x17c;
- icu_data[3].nr_irqs = 3;
- icu_data[3].cascade_irq = 9;
- icu_data[3].virq_base = icu_data[2].virq_base + icu_data[2].nr_irqs;
- icu_data[3].domain = irq_domain_add_legacy(NULL, icu_data[3].nr_irqs,
- icu_data[3].virq_base, 0,
- &irq_domain_simple_ops,
- &icu_data[3]);
- icu_data[4].reg_status = mmp_icu_base + 0x158;
- icu_data[4].reg_mask = mmp_icu_base + 0x170;
- icu_data[4].nr_irqs = 5;
- icu_data[4].cascade_irq = 17;
- icu_data[4].virq_base = icu_data[3].virq_base + icu_data[3].nr_irqs;
- icu_data[4].domain = irq_domain_add_legacy(NULL, icu_data[4].nr_irqs,
- icu_data[4].virq_base, 0,
- &irq_domain_simple_ops,
- &icu_data[4]);
- icu_data[5].reg_status = mmp_icu_base + 0x15c;
- icu_data[5].reg_mask = mmp_icu_base + 0x174;
- icu_data[5].nr_irqs = 15;
- icu_data[5].cascade_irq = 35;
- icu_data[5].virq_base = icu_data[4].virq_base + icu_data[4].nr_irqs;
- icu_data[5].domain = irq_domain_add_legacy(NULL, icu_data[5].nr_irqs,
- icu_data[5].virq_base, 0,
- &irq_domain_simple_ops,
- &icu_data[5]);
- icu_data[6].reg_status = mmp_icu_base + 0x160;
- icu_data[6].reg_mask = mmp_icu_base + 0x178;
- icu_data[6].nr_irqs = 2;
- icu_data[6].cascade_irq = 51;
- icu_data[6].virq_base = icu_data[5].virq_base + icu_data[5].nr_irqs;
- icu_data[6].domain = irq_domain_add_legacy(NULL, icu_data[6].nr_irqs,
- icu_data[6].virq_base, 0,
- &irq_domain_simple_ops,
- &icu_data[6]);
- icu_data[7].reg_status = mmp_icu_base + 0x188;
- icu_data[7].reg_mask = mmp_icu_base + 0x184;
- icu_data[7].nr_irqs = 2;
- icu_data[7].cascade_irq = 55;
- icu_data[7].virq_base = icu_data[6].virq_base + icu_data[6].nr_irqs;
- icu_data[7].domain = irq_domain_add_legacy(NULL, icu_data[7].nr_irqs,
- icu_data[7].virq_base, 0,
- &irq_domain_simple_ops,
- &icu_data[7]);
- end = icu_data[7].virq_base + icu_data[7].nr_irqs;
- for (irq = 0; irq < end; irq++) {
- icu_mask_irq(irq_get_irq_data(irq));
- if (irq == icu_data[1].cascade_irq ||
- irq == icu_data[2].cascade_irq ||
- irq == icu_data[3].cascade_irq ||
- irq == icu_data[4].cascade_irq ||
- irq == icu_data[5].cascade_irq ||
- irq == icu_data[6].cascade_irq ||
- irq == icu_data[7].cascade_irq) {
- irq_set_chip(irq, &icu_irq_chip);
- irq_set_chained_handler(irq, icu_mux_irq_demux);
- } else {
- irq_set_chip_and_handler(irq, &icu_irq_chip,
- handle_level_irq);
- }
- }
- irq_set_default_host(icu_data[0].domain);
- set_handle_irq(mmp2_handle_irq);
-}
-
-#ifdef CONFIG_OF
static int __init mmp_init_bases(struct device_node *node)
{
int ret, nr_irqs, irq, i = 0;
@@ -548,4 +422,3 @@ err:
return -EINVAL;
}
IRQCHIP_DECLARE(mmp2_mux_intc, "mrvl,mmp2-mux-intc", mmp2_mux_of_init);
-#endif
diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
index 55cb6b5a686e..be9680645545 100644
--- a/drivers/irqchip/irq-mxs.c
+++ b/drivers/irqchip/irq-mxs.c
@@ -201,6 +201,7 @@ static int __init icoll_of_init(struct device_node *np,
stmp_reset_block(icoll_priv.ctrl);
icoll_add_domain(np, ICOLL_NUM_IRQS);
+ set_handle_irq(icoll_handle_irq);
return 0;
}
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index 6a3f7498ea8e..b5fa76ce5046 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -173,6 +173,16 @@ static struct irq_chip stm32_exti_h_chip_direct;
#define EXTI_INVALID_IRQ U8_MAX
#define STM32MP1_DESC_IRQ_SIZE (ARRAY_SIZE(stm32mp1_exti_banks) * IRQS_PER_BANK)
+/*
+ * Use some intentionally tricky logic here to initialize the whole array to
+ * EXTI_INVALID_IRQ, but then override certain fields, requiring us to indicate
+ * that we "know" that there are overrides in this structure, and we'll need to
+ * disable that warning from W=1 builds.
+ */
+__diag_push();
+__diag_ignore_all("-Woverride-init",
+ "logic to initialize all and then override some is OK");
+
static const u8 stm32mp1_desc_irq[] = {
/* default value */
[0 ... (STM32MP1_DESC_IRQ_SIZE - 1)] = EXTI_INVALID_IRQ,
@@ -208,6 +218,7 @@ static const u8 stm32mp1_desc_irq[] = {
[31] = 53,
[32] = 82,
[33] = 83,
+ [46] = 151,
[47] = 93,
[48] = 138,
[50] = 139,
@@ -266,6 +277,8 @@ static const u8 stm32mp13_desc_irq[] = {
[70] = 98,
};
+__diag_pop();
+
static const struct stm32_exti_drv_data stm32mp1_drv_data = {
.exti_banks = stm32mp1_exti_banks,
.bank_nr = ARRAY_SIZE(stm32mp1_exti_banks),
diff --git a/drivers/leds/rgb/leds-qcom-lpg.c b/drivers/leds/rgb/leds-qcom-lpg.c
index 55a037234df1..1c849814a491 100644
--- a/drivers/leds/rgb/leds-qcom-lpg.c
+++ b/drivers/leds/rgb/leds-qcom-lpg.c
@@ -312,14 +312,14 @@ static int lpg_calc_freq(struct lpg_channel *chan, uint64_t period)
max_res = LPG_RESOLUTION_9BIT;
}
- min_period = (u64)NSEC_PER_SEC *
- div64_u64((1 << pwm_resolution_arr[0]), clk_rate_arr[clk_len - 1]);
+ min_period = div64_u64((u64)NSEC_PER_SEC * (1 << pwm_resolution_arr[0]),
+ clk_rate_arr[clk_len - 1]);
if (period <= min_period)
return -EINVAL;
/* Limit period to largest possible value, to avoid overflows */
- max_period = (u64)NSEC_PER_SEC * max_res * LPG_MAX_PREDIV *
- div64_u64((1 << LPG_MAX_M), 1024);
+ max_period = div64_u64((u64)NSEC_PER_SEC * max_res * LPG_MAX_PREDIV * (1 << LPG_MAX_M),
+ 1024);
if (period > max_period)
period = max_period;
diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c
index c4a705c30331..fc6a12a51b40 100644
--- a/drivers/mailbox/mailbox-test.c
+++ b/drivers/mailbox/mailbox-test.c
@@ -98,6 +98,7 @@ static ssize_t mbox_test_message_write(struct file *filp,
size_t count, loff_t *ppos)
{
struct mbox_test_device *tdev = filp->private_data;
+ char *message;
void *data;
int ret;
@@ -113,12 +114,13 @@ static ssize_t mbox_test_message_write(struct file *filp,
return -EINVAL;
}
- mutex_lock(&tdev->mutex);
-
- tdev->message = kzalloc(MBOX_MAX_MSG_LEN, GFP_KERNEL);
- if (!tdev->message)
+ message = kzalloc(MBOX_MAX_MSG_LEN, GFP_KERNEL);
+ if (!message)
return -ENOMEM;
+ mutex_lock(&tdev->mutex);
+
+ tdev->message = message;
ret = copy_from_user(tdev->message, userbuf, count);
if (ret) {
ret = -EFAULT;
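
The mailbox-test hunk performs the GFP_KERNEL allocation before taking tdev->mutex, so the failure path returns without having to unlock anything and the critical section shrinks to the pointer hand-over. A sketch of the allocate-then-lock shape (the globals are hypothetical):

#include <linux/mutex.h>
#include <linux/slab.h>

static DEFINE_MUTEX(buf_lock);
static char *shared_buf;

static int set_buf(size_t len)
{
	char *buf = kzalloc(len, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;	/* nothing locked yet: plain return */

	mutex_lock(&buf_lock);
	kfree(shared_buf);	/* drop any previous buffer */
	shared_buf = buf;
	mutex_unlock(&buf_lock);
	return 0;
}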
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index aebb7ef10e63..5a79bb3c272f 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -275,7 +275,7 @@ struct bcache_device {
int (*cache_miss)(struct btree *b, struct search *s,
struct bio *bio, unsigned int sectors);
- int (*ioctl)(struct bcache_device *d, fmode_t mode,
+ int (*ioctl)(struct bcache_device *d, blk_mode_t mode,
unsigned int cmd, unsigned long arg);
};
@@ -1004,11 +1004,11 @@ extern struct workqueue_struct *bch_flush_wq;
extern struct mutex bch_register_lock;
extern struct list_head bch_cache_sets;
-extern struct kobj_type bch_cached_dev_ktype;
-extern struct kobj_type bch_flash_dev_ktype;
-extern struct kobj_type bch_cache_set_ktype;
-extern struct kobj_type bch_cache_set_internal_ktype;
-extern struct kobj_type bch_cache_ktype;
+extern const struct kobj_type bch_cached_dev_ktype;
+extern const struct kobj_type bch_flash_dev_ktype;
+extern const struct kobj_type bch_cache_set_ktype;
+extern const struct kobj_type bch_cache_set_internal_ktype;
+extern const struct kobj_type bch_cache_ktype;
void bch_cached_dev_release(struct kobject *kobj);
void bch_flash_dev_release(struct kobject *kobj);
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 569f48958bde..fd121a61f17c 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -906,7 +906,7 @@ static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
* cannibalize_bucket() will take. This means every time we unlock the root of
* the btree, we need to release this lock if we have it held.
*/
-static void bch_cannibalize_unlock(struct cache_set *c)
+void bch_cannibalize_unlock(struct cache_set *c)
{
spin_lock(&c->btree_cannibalize_lock);
if (c->btree_cache_alloc_lock == current) {
@@ -1111,10 +1111,12 @@ struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
struct btree *parent)
{
BKEY_PADDED(key) k;
- struct btree *b = ERR_PTR(-EAGAIN);
+ struct btree *b;
mutex_lock(&c->bucket_lock);
retry:
+ /* return ERR_PTR(-EAGAIN) when it fails */
+ b = ERR_PTR(-EAGAIN);
if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, wait))
goto err;
@@ -1159,7 +1161,7 @@ static struct btree *btree_node_alloc_replacement(struct btree *b,
{
struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent);
- if (!IS_ERR_OR_NULL(n)) {
+ if (!IS_ERR(n)) {
mutex_lock(&n->write_lock);
bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
bkey_copy_key(&n->key, &b->key);
@@ -1361,7 +1363,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
memset(new_nodes, 0, sizeof(new_nodes));
closure_init_stack(&cl);
- while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
+ while (nodes < GC_MERGE_NODES && !IS_ERR(r[nodes].b))
keys += r[nodes++].keys;
blocks = btree_default_blocks(b->c) * 2 / 3;
@@ -1373,7 +1375,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
for (i = 0; i < nodes; i++) {
new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL);
- if (IS_ERR_OR_NULL(new_nodes[i]))
+ if (IS_ERR(new_nodes[i]))
goto out_nocoalesce;
}
@@ -1508,7 +1510,7 @@ out_nocoalesce:
bch_keylist_free(&keylist);
for (i = 0; i < nodes; i++)
- if (!IS_ERR_OR_NULL(new_nodes[i])) {
+ if (!IS_ERR(new_nodes[i])) {
btree_node_free(new_nodes[i]);
rw_unlock(true, new_nodes[i]);
}
@@ -1690,7 +1692,7 @@ static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
if (should_rewrite) {
n = btree_node_alloc_replacement(b, NULL);
- if (!IS_ERR_OR_NULL(n)) {
+ if (!IS_ERR(n)) {
bch_btree_node_write_sync(n);
bch_btree_set_root(n);
@@ -1989,6 +1991,15 @@ static int bch_btree_check_thread(void *arg)
c->gc_stats.nodes++;
bch_btree_op_init(&op, 0);
ret = bcache_btree(check_recurse, p, c->root, &op);
+	/*
+	 * The op may have been added to cache_set's btree_cache_wait
+	 * in mca_cannibalize(), so we must ensure it is removed from
+	 * the list and that btree_cache_alloc_lock is released before
+	 * the op memory is freed.
+	 * Otherwise the btree_cache_wait list would be corrupted.
+	 */
+ bch_cannibalize_unlock(c);
+	finish_wait(&c->btree_cache_wait, &op.wait);
if (ret)
goto out;
}
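The new finish_wait() pairs with the prepare_to_wait() that mca_cannibalize() may have performed on the on-stack op's wait entry. A minimal sketch of the bug class this avoids, with an illustrative struct my_op and wait queue:

	#include <linux/sched.h>
	#include <linux/wait.h>

	struct my_op {
		wait_queue_entry_t wait;	/* lives on the caller's stack */
	};

	static void my_op_run(struct wait_queue_head *wq)
	{
		struct my_op op;

		init_wait(&op.wait);
		prepare_to_wait(wq, &op.wait, TASK_UNINTERRUPTIBLE);
		/* ... work that may leave op.wait linked on wq ... */
		finish_wait(wq, &op.wait);	/* unlink before 'op' goes out of scope */
	}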
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index 17b1d201cec0..45d64b54115a 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -282,6 +282,7 @@ void bch_initial_gc_finish(struct cache_set *c);
void bch_moving_gc(struct cache_set *c);
int bch_btree_check(struct cache_set *c);
void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k);
+void bch_cannibalize_unlock(struct cache_set *c);
static inline void wake_up_gc(struct cache_set *c)
{
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 67a2e29e0b40..a9b1f3896249 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -1228,7 +1228,7 @@ void cached_dev_submit_bio(struct bio *bio)
detached_dev_do_request(d, bio, orig_bdev, start_time);
}
-static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
+static int cached_dev_ioctl(struct bcache_device *d, blk_mode_t mode,
unsigned int cmd, unsigned long arg)
{
struct cached_dev *dc = container_of(d, struct cached_dev, disk);
@@ -1318,7 +1318,7 @@ void flash_dev_submit_bio(struct bio *bio)
continue_at(cl, search_free, NULL);
}
-static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
+static int flash_dev_ioctl(struct bcache_device *d, blk_mode_t mode,
unsigned int cmd, unsigned long arg)
{
return -ENOTTY;
diff --git a/drivers/md/bcache/stats.h b/drivers/md/bcache/stats.h
index bd3afc856d53..21b445f8af15 100644
--- a/drivers/md/bcache/stats.h
+++ b/drivers/md/bcache/stats.h
@@ -18,7 +18,6 @@ struct cache_stats {
unsigned long cache_misses;
unsigned long cache_bypass_hits;
unsigned long cache_bypass_misses;
- unsigned long cache_readaheads;
unsigned long cache_miss_collisions;
unsigned long sectors_bypassed;
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 7e9d19fd21dd..e2a803683105 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -732,9 +732,9 @@ out:
/* Bcache device */
-static int open_dev(struct block_device *b, fmode_t mode)
+static int open_dev(struct gendisk *disk, blk_mode_t mode)
{
- struct bcache_device *d = b->bd_disk->private_data;
+ struct bcache_device *d = disk->private_data;
if (test_bit(BCACHE_DEV_CLOSING, &d->flags))
return -ENXIO;
@@ -743,14 +743,14 @@ static int open_dev(struct block_device *b, fmode_t mode)
return 0;
}
-static void release_dev(struct gendisk *b, fmode_t mode)
+static void release_dev(struct gendisk *b)
{
struct bcache_device *d = b->private_data;
closure_put(&d->cl);
}
-static int ioctl_dev(struct block_device *b, fmode_t mode,
+static int ioctl_dev(struct block_device *b, blk_mode_t mode,
unsigned int cmd, unsigned long arg)
{
struct bcache_device *d = b->bd_disk->private_data;
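For reference, a minimal block_device_operations using the new prototypes converted above — open() now receives the gendisk plus a blk_mode_t, and release() takes only the gendisk (demo_* names are illustrative):

	#include <linux/blkdev.h>
	#include <linux/module.h>

	static int demo_open(struct gendisk *disk, blk_mode_t mode)
	{
		/* private data is reached via the gendisk, not a block_device */
		return disk->private_data ? 0 : -ENXIO;
	}

	static void demo_release(struct gendisk *disk)
	{
	}

	static const struct block_device_operations demo_fops = {
		.owner   = THIS_MODULE,
		.open    = demo_open,
		.release = demo_release,
	};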
@@ -1369,7 +1369,7 @@ static void cached_dev_free(struct closure *cl)
put_page(virt_to_page(dc->sb_disk));
if (!IS_ERR_OR_NULL(dc->bdev))
- blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
+ blkdev_put(dc->bdev, bcache_kobj);
wake_up(&unregister_wait);
@@ -1723,7 +1723,7 @@ static void cache_set_flush(struct closure *cl)
if (!IS_ERR_OR_NULL(c->gc_thread))
kthread_stop(c->gc_thread);
- if (!IS_ERR_OR_NULL(c->root))
+ if (!IS_ERR(c->root))
list_add(&c->root->list, &c->btree_cache);
/*
@@ -2087,7 +2087,7 @@ static int run_cache_set(struct cache_set *c)
err = "cannot allocate new btree root";
c->root = __bch_btree_node_alloc(c, NULL, 0, true, NULL);
- if (IS_ERR_OR_NULL(c->root))
+ if (IS_ERR(c->root))
goto err;
mutex_lock(&c->root->write_lock);
@@ -2218,7 +2218,7 @@ void bch_cache_release(struct kobject *kobj)
put_page(virt_to_page(ca->sb_disk));
if (!IS_ERR_OR_NULL(ca->bdev))
- blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
+ blkdev_put(ca->bdev, bcache_kobj);
kfree(ca);
module_put(THIS_MODULE);
@@ -2359,7 +2359,7 @@ static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
* call blkdev_put() to bdev in bch_cache_release(). So we
* explicitly call blkdev_put() here.
*/
- blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
+ blkdev_put(bdev, bcache_kobj);
if (ret == -ENOMEM)
err = "cache_alloc(): -ENOMEM";
else if (ret == -EPERM)
@@ -2461,7 +2461,7 @@ static void register_bdev_worker(struct work_struct *work)
if (!dc) {
fail = true;
put_page(virt_to_page(args->sb_disk));
- blkdev_put(args->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
+ blkdev_put(args->bdev, bcache_kobj);
goto out;
}
@@ -2491,7 +2491,7 @@ static void register_cache_worker(struct work_struct *work)
if (!ca) {
fail = true;
put_page(virt_to_page(args->sb_disk));
- blkdev_put(args->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
+ blkdev_put(args->bdev, bcache_kobj);
goto out;
}
@@ -2558,9 +2558,8 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
ret = -EINVAL;
err = "failed to open device";
- bdev = blkdev_get_by_path(strim(path),
- FMODE_READ|FMODE_WRITE|FMODE_EXCL,
- sb);
+ bdev = blkdev_get_by_path(strim(path), BLK_OPEN_READ | BLK_OPEN_WRITE,
+ bcache_kobj, NULL);
if (IS_ERR(bdev)) {
if (bdev == ERR_PTR(-EBUSY)) {
dev_t dev;
@@ -2648,7 +2647,7 @@ async_done:
out_put_sb_page:
put_page(virt_to_page(sb_disk));
out_blkdev_put:
- blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
+	blkdev_put(bdev, bcache_kobj);
out_free_sb:
kfree(sb);
out_free_path:
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index c6f677059214..0e2c1880f60b 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -1111,26 +1111,25 @@ SHOW(__bch_cache)
vfree(p);
- ret = scnprintf(buf, PAGE_SIZE,
- "Unused: %zu%%\n"
- "Clean: %zu%%\n"
- "Dirty: %zu%%\n"
- "Metadata: %zu%%\n"
- "Average: %llu\n"
- "Sectors per Q: %zu\n"
- "Quantiles: [",
- unused * 100 / (size_t) ca->sb.nbuckets,
- available * 100 / (size_t) ca->sb.nbuckets,
- dirty * 100 / (size_t) ca->sb.nbuckets,
- meta * 100 / (size_t) ca->sb.nbuckets, sum,
- n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));
+ ret = sysfs_emit(buf,
+ "Unused: %zu%%\n"
+ "Clean: %zu%%\n"
+ "Dirty: %zu%%\n"
+ "Metadata: %zu%%\n"
+ "Average: %llu\n"
+ "Sectors per Q: %zu\n"
+ "Quantiles: [",
+ unused * 100 / (size_t) ca->sb.nbuckets,
+ available * 100 / (size_t) ca->sb.nbuckets,
+ dirty * 100 / (size_t) ca->sb.nbuckets,
+ meta * 100 / (size_t) ca->sb.nbuckets, sum,
+ n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));
for (i = 0; i < ARRAY_SIZE(q); i++)
- ret += scnprintf(buf + ret, PAGE_SIZE - ret,
- "%u ", q[i]);
+ ret += sysfs_emit_at(buf, ret, "%u ", q[i]);
ret--;
- ret += scnprintf(buf + ret, PAGE_SIZE - ret, "]\n");
+ ret += sysfs_emit_at(buf, ret, "]\n");
return ret;
}
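sysfs_emit() and sysfs_emit_at() cap output at PAGE_SIZE internally and sanity-check the offset, which is why the explicit PAGE_SIZE arithmetic above can be dropped. The same shape in a minimal show() method (the attribute name is illustrative):

	#include <linux/sysfs.h>

	static ssize_t quantiles_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
	{
		int i;
		int ret = sysfs_emit(buf, "Quantiles: [");

		for (i = 0; i < 4; i++)
			ret += sysfs_emit_at(buf, ret, "%u ", i);
		ret += sysfs_emit_at(buf, ret, "]\n");

		return ret;
	}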
diff --git a/drivers/md/bcache/sysfs.h b/drivers/md/bcache/sysfs.h
index a2ff6447b699..65b8bd975ab1 100644
--- a/drivers/md/bcache/sysfs.h
+++ b/drivers/md/bcache/sysfs.h
@@ -3,7 +3,7 @@
#define _BCACHE_SYSFS_H_
#define KTYPE(type) \
-struct kobj_type type ## _ktype = { \
+const struct kobj_type type ## _ktype = { \
.release = type ## _release, \
.sysfs_ops = &((const struct sysfs_ops) { \
.show = type ## _show, \
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index d4a5fc0650bb..24c049067f61 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -890,6 +890,16 @@ static int bch_root_node_dirty_init(struct cache_set *c,
if (ret < 0)
pr_warn("sectors dirty init failed, ret=%d!\n", ret);
+	/*
+	 * The op may have been added to cache_set's btree_cache_wait
+	 * in mca_cannibalize(), so we must ensure it is removed from
+	 * the list and that btree_cache_alloc_lock is released before
+	 * the op memory is freed.
+	 * Otherwise the btree_cache_wait list would be corrupted.
+	 */
+ bch_cannibalize_unlock(c);
+	finish_wait(&c->btree_cache_wait, &op.op.wait);
+
return ret;
}
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 9e0c69958587..acffed750e3e 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -1828,7 +1828,7 @@ int dm_cache_metadata_abort(struct dm_cache_metadata *cmd)
* Replacement block manager (new_bm) is created and old_bm destroyed outside of
* cmd root_lock to avoid ABBA deadlock that would result (due to life-cycle of
* shrinker associated with the block manager's bufio client vs cmd root_lock).
- * - must take shrinker_mutex without holding cmd->root_lock
+ * - must take shrinker_rwsem without holding cmd->root_lock
*/
new_bm = dm_block_manager_create(cmd->bdev, DM_CACHE_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
CACHE_MAX_CONCURRENT_LOCKS);
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 872896218550..911f73f7ebba 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -2051,8 +2051,8 @@ static int parse_metadata_dev(struct cache_args *ca, struct dm_arg_set *as,
if (!at_least_one_arg(as, error))
return -EINVAL;
- r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
- &ca->metadata_dev);
+ r = dm_get_device(ca->ti, dm_shift_arg(as),
+ BLK_OPEN_READ | BLK_OPEN_WRITE, &ca->metadata_dev);
if (r) {
*error = "Error opening metadata device";
return r;
@@ -2074,8 +2074,8 @@ static int parse_cache_dev(struct cache_args *ca, struct dm_arg_set *as,
if (!at_least_one_arg(as, error))
return -EINVAL;
- r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
- &ca->cache_dev);
+ r = dm_get_device(ca->ti, dm_shift_arg(as),
+ BLK_OPEN_READ | BLK_OPEN_WRITE, &ca->cache_dev);
if (r) {
*error = "Error opening cache device";
return r;
@@ -2093,8 +2093,8 @@ static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as,
if (!at_least_one_arg(as, error))
return -EINVAL;
- r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
- &ca->origin_dev);
+ r = dm_get_device(ca->ti, dm_shift_arg(as),
+ BLK_OPEN_READ | BLK_OPEN_WRITE, &ca->origin_dev);
if (r) {
*error = "Error opening origin device";
return r;
diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c
index f467cdb5a022..94b2fc33f64b 100644
--- a/drivers/md/dm-clone-target.c
+++ b/drivers/md/dm-clone-target.c
@@ -1683,8 +1683,8 @@ static int parse_metadata_dev(struct clone *clone, struct dm_arg_set *as, char *
int r;
sector_t metadata_dev_size;
- r = dm_get_device(clone->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
- &clone->metadata_dev);
+ r = dm_get_device(clone->ti, dm_shift_arg(as),
+ BLK_OPEN_READ | BLK_OPEN_WRITE, &clone->metadata_dev);
if (r) {
*error = "Error opening metadata device";
return r;
@@ -1703,8 +1703,8 @@ static int parse_dest_dev(struct clone *clone, struct dm_arg_set *as, char **err
int r;
sector_t dest_dev_size;
- r = dm_get_device(clone->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
- &clone->dest_dev);
+ r = dm_get_device(clone->ti, dm_shift_arg(as),
+ BLK_OPEN_READ | BLK_OPEN_WRITE, &clone->dest_dev);
if (r) {
*error = "Error opening destination device";
return r;
@@ -1725,7 +1725,7 @@ static int parse_source_dev(struct clone *clone, struct dm_arg_set *as, char **e
int r;
sector_t source_dev_size;
- r = dm_get_device(clone->ti, dm_shift_arg(as), FMODE_READ,
+ r = dm_get_device(clone->ti, dm_shift_arg(as), BLK_OPEN_READ,
&clone->source_dev);
if (r) {
*error = "Error opening source device";
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index aecab0c0720f..ce913ad91a52 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -207,11 +207,10 @@ struct dm_table {
unsigned integrity_added:1;
/*
- * Indicates the rw permissions for the new logical
- * device. This should be a combination of FMODE_READ
- * and FMODE_WRITE.
+ * Indicates the rw permissions for the new logical device. This
+ * should be a combination of BLK_OPEN_READ and BLK_OPEN_WRITE.
*/
- fmode_t mode;
+ blk_mode_t mode;
/* a list of devices used by this table */
struct list_head devices;
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 8b47b913ee83..09e37ebf7cc8 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1693,8 +1693,7 @@ retry:
len = (remaining_size > PAGE_SIZE) ? PAGE_SIZE : remaining_size;
- bio_add_page(clone, page, len, 0);
-
+ __bio_add_page(clone, page, len, 0);
remaining_size -= len;
}
diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
index 0d70914217ee..6acfa5bf97a4 100644
--- a/drivers/md/dm-era-target.c
+++ b/drivers/md/dm-era-target.c
@@ -1482,14 +1482,16 @@ static int era_ctr(struct dm_target *ti, unsigned int argc, char **argv)
era->ti = ti;
- r = dm_get_device(ti, argv[0], FMODE_READ | FMODE_WRITE, &era->metadata_dev);
+ r = dm_get_device(ti, argv[0], BLK_OPEN_READ | BLK_OPEN_WRITE,
+ &era->metadata_dev);
if (r) {
ti->error = "Error opening metadata device";
era_destroy(era);
return -EINVAL;
}
- r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &era->origin_dev);
+ r = dm_get_device(ti, argv[1], BLK_OPEN_READ | BLK_OPEN_WRITE,
+ &era->origin_dev);
if (r) {
ti->error = "Error opening data device";
era_destroy(era);
diff --git a/drivers/md/dm-init.c b/drivers/md/dm-init.c
index d369457dbed0..2a71bcdba92d 100644
--- a/drivers/md/dm-init.c
+++ b/drivers/md/dm-init.c
@@ -293,8 +293,10 @@ static int __init dm_init_init(void)
for (i = 0; i < ARRAY_SIZE(waitfor); i++) {
if (waitfor[i]) {
+ dev_t dev;
+
DMINFO("waiting for device %s ...", waitfor[i]);
- while (!dm_get_dev_t(waitfor[i]))
+ while (early_lookup_bdev(waitfor[i], &dev))
fsleep(5000);
}
}
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index cc77cf3d4109..6d301019e5e3 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -861,7 +861,7 @@ static void __dev_status(struct mapped_device *md, struct dm_ioctl *param)
table = dm_get_inactive_table(md, &srcu_idx);
if (table) {
- if (!(dm_table_get_mode(table) & FMODE_WRITE))
+ if (!(dm_table_get_mode(table) & BLK_OPEN_WRITE))
param->flags |= DM_READONLY_FLAG;
param->target_count = table->num_targets;
}
@@ -1168,13 +1168,10 @@ static int do_resume(struct dm_ioctl *param)
/* Do we need to load a new map ? */
if (new_map) {
sector_t old_size, new_size;
- int srcu_idx;
/* Suspend if it isn't already suspended */
- old_map = dm_get_live_table(md, &srcu_idx);
- if ((param->flags & DM_SKIP_LOCKFS_FLAG) || !old_map)
+ if (param->flags & DM_SKIP_LOCKFS_FLAG)
suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG;
- dm_put_live_table(md, srcu_idx);
if (param->flags & DM_NOFLUSH_FLAG)
suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG;
if (!dm_suspended_md(md))
@@ -1192,7 +1189,7 @@ static int do_resume(struct dm_ioctl *param)
if (old_size && new_size && old_size != new_size)
need_resize_uevent = true;
- if (dm_table_get_mode(new_map) & FMODE_WRITE)
+ if (dm_table_get_mode(new_map) & BLK_OPEN_WRITE)
set_disk_ro(dm_disk(md), 0);
else
set_disk_ro(dm_disk(md), 1);
@@ -1381,12 +1378,12 @@ static int dev_arm_poll(struct file *filp, struct dm_ioctl *param, size_t param_
return 0;
}
-static inline fmode_t get_mode(struct dm_ioctl *param)
+static inline blk_mode_t get_mode(struct dm_ioctl *param)
{
- fmode_t mode = FMODE_READ | FMODE_WRITE;
+ blk_mode_t mode = BLK_OPEN_READ | BLK_OPEN_WRITE;
if (param->flags & DM_READONLY_FLAG)
- mode = FMODE_READ;
+ mode = BLK_OPEN_READ;
return mode;
}
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index c8821fcb8299..8846bf510a35 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3750,11 +3750,11 @@ static int raid_message(struct dm_target *ti, unsigned int argc, char **argv,
* canceling read-auto mode
*/
mddev->ro = 0;
- if (!mddev->suspended && mddev->sync_thread)
+ if (!mddev->suspended)
md_wakeup_thread(mddev->sync_thread);
}
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- if (!mddev->suspended && mddev->thread)
+ if (!mddev->suspended)
md_wakeup_thread(mddev->thread);
return 0;
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 9c49f53760d0..bf7a574499a3 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1241,9 +1241,8 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
int i;
int r = -EINVAL;
char *origin_path, *cow_path;
- dev_t origin_dev, cow_dev;
unsigned int args_used, num_flush_bios = 1;
- fmode_t origin_mode = FMODE_READ;
+ blk_mode_t origin_mode = BLK_OPEN_READ;
if (argc < 4) {
ti->error = "requires 4 or more arguments";
@@ -1253,7 +1252,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (dm_target_is_snapshot_merge(ti)) {
num_flush_bios = 2;
- origin_mode = FMODE_WRITE;
+ origin_mode = BLK_OPEN_WRITE;
}
s = kzalloc(sizeof(*s), GFP_KERNEL);
@@ -1279,24 +1278,21 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->error = "Cannot get origin device";
goto bad_origin;
}
- origin_dev = s->origin->bdev->bd_dev;
cow_path = argv[0];
argv++;
argc--;
- cow_dev = dm_get_dev_t(cow_path);
- if (cow_dev && cow_dev == origin_dev) {
- ti->error = "COW device cannot be the same as origin device";
- r = -EINVAL;
- goto bad_cow;
- }
-
r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow);
if (r) {
ti->error = "Cannot get COW device";
goto bad_cow;
}
+ if (s->cow->bdev && s->cow->bdev == s->origin->bdev) {
+ ti->error = "COW device cannot be the same as origin device";
+ r = -EINVAL;
+ goto bad_store;
+ }
r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store);
if (r) {
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 1398f1d6e83e..7d208b2b1a19 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -126,7 +126,7 @@ static int alloc_targets(struct dm_table *t, unsigned int num)
return 0;
}
-int dm_table_create(struct dm_table **result, fmode_t mode,
+int dm_table_create(struct dm_table **result, blk_mode_t mode,
unsigned int num_targets, struct mapped_device *md)
{
struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);
@@ -304,7 +304,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
* device and not to touch the existing bdev field in case
* it is accessed concurrently.
*/
-static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
+static int upgrade_mode(struct dm_dev_internal *dd, blk_mode_t new_mode,
struct mapped_device *md)
{
int r;
@@ -324,23 +324,13 @@ static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
}
/*
- * Convert the path to a device
- */
-dev_t dm_get_dev_t(const char *path)
-{
- dev_t dev;
-
- if (lookup_bdev(path, &dev))
- dev = name_to_dev_t(path);
- return dev;
-}
-EXPORT_SYMBOL_GPL(dm_get_dev_t);
-
-/*
* Add a device to the list, or just increment the usage count if
* it's already present.
+ *
+ * Note: the __ref annotation is needed because this function can call the
+ * __init-marked early_lookup_bdev() during early boot, via dm-init.c.
*/
-int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
+int __ref dm_get_device(struct dm_target *ti, const char *path, blk_mode_t mode,
struct dm_dev **result)
{
int r;
@@ -358,9 +348,13 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
if (MAJOR(dev) != major || MINOR(dev) != minor)
return -EOVERFLOW;
} else {
- dev = dm_get_dev_t(path);
- if (!dev)
- return -ENODEV;
+ r = lookup_bdev(path, &dev);
+#ifndef MODULE
+ if (r && system_state < SYSTEM_RUNNING)
+ r = early_lookup_bdev(path, &dev);
+#endif
+ if (r)
+ return r;
}
if (dev == disk_devt(t->md->disk))
return -EINVAL;
@@ -668,7 +662,8 @@ int dm_table_add_target(struct dm_table *t, const char *type,
t->singleton = true;
}
- if (dm_target_always_writeable(ti->type) && !(t->mode & FMODE_WRITE)) {
+ if (dm_target_always_writeable(ti->type) &&
+ !(t->mode & BLK_OPEN_WRITE)) {
ti->error = "target type may not be included in a read-only table";
goto bad;
}
@@ -2039,7 +2034,7 @@ struct list_head *dm_table_get_devices(struct dm_table *t)
return &t->devices;
}
-fmode_t dm_table_get_mode(struct dm_table *t)
+blk_mode_t dm_table_get_mode(struct dm_table *t)
{
return t->mode;
}
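Seen from a target, the conversion only changes the mode flags passed in. A sketch of a hypothetical constructor using the new values (example_ctr is illustrative):

	#include <linux/device-mapper.h>

	static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
	{
		struct dm_dev *dev;
		int r;

		/* BLK_OPEN_READ / BLK_OPEN_WRITE replace FMODE_READ / FMODE_WRITE */
		r = dm_get_device(ti, argv[0], BLK_OPEN_READ | BLK_OPEN_WRITE, &dev);
		if (r) {
			ti->error = "Device lookup failed";
			return r;
		}

		/* the reference is dropped with dm_put_device() in the destructor */
		return 0;
	}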
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 9f5cb52c5763..9dd0409848ab 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1756,13 +1756,15 @@ int dm_thin_remove_range(struct dm_thin_device *td,
int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
{
- int r;
+ int r = -EINVAL;
uint32_t ref_count;
down_read(&pmd->root_lock);
- r = dm_sm_get_count(pmd->data_sm, b, &ref_count);
- if (!r)
- *result = (ref_count > 1);
+ if (!pmd->fail_io) {
+ r = dm_sm_get_count(pmd->data_sm, b, &ref_count);
+ if (!r)
+ *result = (ref_count > 1);
+ }
up_read(&pmd->root_lock);
return r;
@@ -1770,10 +1772,11 @@ int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *re
int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e)
{
- int r = 0;
+ int r = -EINVAL;
pmd_write_lock(pmd);
- r = dm_sm_inc_blocks(pmd->data_sm, b, e);
+ if (!pmd->fail_io)
+ r = dm_sm_inc_blocks(pmd->data_sm, b, e);
pmd_write_unlock(pmd);
return r;
@@ -1781,10 +1784,11 @@ int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_
int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e)
{
- int r = 0;
+ int r = -EINVAL;
pmd_write_lock(pmd);
- r = dm_sm_dec_blocks(pmd->data_sm, b, e);
+ if (!pmd->fail_io)
+ r = dm_sm_dec_blocks(pmd->data_sm, b, e);
pmd_write_unlock(pmd);
return r;
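Each accessor above now follows the same guard shape: take the metadata lock, leave r at -EINVAL when pmd->fail_io is set, and only touch the space maps otherwise. The shape in isolation, in the context of dm-thin-metadata.c (do_metadata_op is a hypothetical stand-in):

	static int guarded_op(struct dm_pool_metadata *pmd)
	{
		int r = -EINVAL;

		pmd_write_lock(pmd);
		if (!pmd->fail_io)
			r = do_metadata_op(pmd);	/* hypothetical helper */
		pmd_write_unlock(pmd);

		return r;
	}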
@@ -1887,7 +1891,7 @@ int dm_pool_abort_metadata(struct dm_pool_metadata *pmd)
* Replacement block manager (new_bm) is created and old_bm destroyed outside of
* pmd root_lock to avoid ABBA deadlock that would result (due to life-cycle of
* shrinker associated with the block manager's bufio client vs pmd root_lock).
- * - must take shrinker_mutex without holding pmd->root_lock
+ * - must take shrinker_rwsem without holding pmd->root_lock
*/
new_bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
THIN_MAX_CONCURRENT_LOCKS);
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 2b13c949bd72..f1d0dcb9db22 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -401,8 +401,7 @@ static int issue_discard(struct discard_op *op, dm_block_t data_b, dm_block_t da
sector_t s = block_to_sectors(tc->pool, data_b);
sector_t len = block_to_sectors(tc->pool, data_e - data_b);
- return __blkdev_issue_discard(tc->pool_dev->bdev, s, len, GFP_NOWAIT,
- &op->bio);
+ return __blkdev_issue_discard(tc->pool_dev->bdev, s, len, GFP_NOIO, &op->bio);
}
static void end_discard(struct discard_op *op, int r)
@@ -3301,7 +3300,7 @@ static int pool_ctr(struct dm_target *ti, unsigned int argc, char **argv)
unsigned long block_size;
dm_block_t low_water_blocks;
struct dm_dev *metadata_dev;
- fmode_t metadata_mode;
+ blk_mode_t metadata_mode;
/*
* FIXME Remove validation from scope of lock.
@@ -3334,7 +3333,8 @@ static int pool_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (r)
goto out_unlock;
- metadata_mode = FMODE_READ | ((pf.mode == PM_READ_ONLY) ? 0 : FMODE_WRITE);
+ metadata_mode = BLK_OPEN_READ |
+ ((pf.mode == PM_READ_ONLY) ? 0 : BLK_OPEN_WRITE);
r = dm_get_device(ti, argv[0], metadata_mode, &metadata_dev);
if (r) {
ti->error = "Error opening metadata block device";
@@ -3342,7 +3342,7 @@ static int pool_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
warn_if_metadata_device_too_big(metadata_dev->bdev);
- r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);
+ r = dm_get_device(ti, argv[1], BLK_OPEN_READ | BLK_OPEN_WRITE, &data_dev);
if (r) {
ti->error = "Error getting data device";
goto out_metadata;
@@ -4223,7 +4223,7 @@ static int thin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad_origin_dev;
}
- r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev);
+ r = dm_get_device(ti, argv[2], BLK_OPEN_READ, &origin_dev);
if (r) {
ti->error = "Error opening origin device";
goto bad_origin_dev;
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index a9ee2faa75a2..3ef9f018da60 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -607,7 +607,7 @@ int verity_fec_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
(*argc)--;
if (!strcasecmp(arg_name, DM_VERITY_OPT_FEC_DEV)) {
- r = dm_get_device(ti, arg_value, FMODE_READ, &v->fec->dev);
+ r = dm_get_device(ti, arg_value, BLK_OPEN_READ, &v->fec->dev);
if (r) {
ti->error = "FEC device lookup failed";
return r;
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index e35c16e06d06..26adcfea0302 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -1196,7 +1196,7 @@ static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (r)
goto bad;
- if ((dm_table_get_mode(ti->table) & ~FMODE_READ)) {
+ if ((dm_table_get_mode(ti->table) & ~BLK_OPEN_READ)) {
ti->error = "Device must be readonly";
r = -EINVAL;
goto bad;
@@ -1225,13 +1225,13 @@ static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
v->version = num;
- r = dm_get_device(ti, argv[1], FMODE_READ, &v->data_dev);
+ r = dm_get_device(ti, argv[1], BLK_OPEN_READ, &v->data_dev);
if (r) {
ti->error = "Data device lookup failed";
goto bad;
}
- r = dm_get_device(ti, argv[2], FMODE_READ, &v->hash_dev);
+ r = dm_get_device(ti, argv[2], BLK_OPEN_READ, &v->hash_dev);
if (r) {
ti->error = "Hash device lookup failed";
goto bad;
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 8f0896a6990b..9d3cca8e3dc9 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -577,7 +577,7 @@ static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
bio->bi_iter.bi_sector = dmz_blk2sect(block);
bio->bi_private = mblk;
bio->bi_end_io = dmz_mblock_bio_end_io;
- bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
+ __bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
submit_bio(bio);
return mblk;
@@ -728,7 +728,7 @@ static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
bio->bi_iter.bi_sector = dmz_blk2sect(block);
bio->bi_private = mblk;
bio->bi_end_io = dmz_mblock_bio_end_io;
- bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
+ __bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
submit_bio(bio);
return 0;
@@ -752,7 +752,7 @@ static int dmz_rdwr_block(struct dmz_dev *dev, enum req_op op,
bio = bio_alloc(dev->bdev, 1, op | REQ_SYNC | REQ_META | REQ_PRIO,
GFP_NOIO);
bio->bi_iter.bi_sector = dmz_blk2sect(block);
- bio_add_page(bio, page, DMZ_BLOCK_SIZE, 0);
+ __bio_add_page(bio, page, DMZ_BLOCK_SIZE, 0);
ret = submit_bio_wait(bio);
bio_put(bio);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 3b694ba3a106..ae0a88defb41 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -310,13 +310,13 @@ int dm_deleting_md(struct mapped_device *md)
return test_bit(DMF_DELETING, &md->flags);
}
-static int dm_blk_open(struct block_device *bdev, fmode_t mode)
+static int dm_blk_open(struct gendisk *disk, blk_mode_t mode)
{
struct mapped_device *md;
spin_lock(&_minor_lock);
- md = bdev->bd_disk->private_data;
+ md = disk->private_data;
if (!md)
goto out;
@@ -334,7 +334,7 @@ out:
return md ? 0 : -ENXIO;
}
-static void dm_blk_close(struct gendisk *disk, fmode_t mode)
+static void dm_blk_close(struct gendisk *disk)
{
struct mapped_device *md;
@@ -448,7 +448,7 @@ static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
dm_put_live_table(md, srcu_idx);
}
-static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
+static int dm_blk_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long arg)
{
struct mapped_device *md = bdev->bd_disk->private_data;
@@ -734,7 +734,7 @@ static char *_dm_claim_ptr = "I belong to device-mapper";
* Open a table device so we can use it as a map destination.
*/
static struct table_device *open_table_device(struct mapped_device *md,
- dev_t dev, fmode_t mode)
+ dev_t dev, blk_mode_t mode)
{
struct table_device *td;
struct block_device *bdev;
@@ -746,7 +746,7 @@ static struct table_device *open_table_device(struct mapped_device *md,
return ERR_PTR(-ENOMEM);
refcount_set(&td->count, 1);
- bdev = blkdev_get_by_dev(dev, mode | FMODE_EXCL, _dm_claim_ptr);
+ bdev = blkdev_get_by_dev(dev, mode, _dm_claim_ptr, NULL);
if (IS_ERR(bdev)) {
r = PTR_ERR(bdev);
goto out_free_td;
@@ -771,7 +771,7 @@ static struct table_device *open_table_device(struct mapped_device *md,
return td;
out_blkdev_put:
- blkdev_put(bdev, mode | FMODE_EXCL);
+ blkdev_put(bdev, _dm_claim_ptr);
out_free_td:
kfree(td);
return ERR_PTR(r);
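Under the new API, exclusive ownership is keyed purely by the holder pointer, so blkdev_put() must be passed the same holder the device was opened with — which is what the _dm_claim_ptr changes above enforce. A minimal sketch of the pairing (the helper names are illustrative):

	#include <linux/blkdev.h>

	static struct block_device *open_exclusive(dev_t dev, void *holder)
	{
		/* a non-NULL holder makes the open exclusive; the trailing NULL
		 * is the optional blk_holder_ops */
		return blkdev_get_by_dev(dev, BLK_OPEN_READ | BLK_OPEN_WRITE,
					 holder, NULL);
	}

	static void close_exclusive(struct block_device *bdev, void *holder)
	{
		blkdev_put(bdev, holder);	/* must match the holder used to open */
	}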
@@ -784,14 +784,14 @@ static void close_table_device(struct table_device *td, struct mapped_device *md
{
if (md->disk->slave_dir)
bd_unlink_disk_holder(td->dm_dev.bdev, md->disk);
- blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
+ blkdev_put(td->dm_dev.bdev, _dm_claim_ptr);
put_dax(td->dm_dev.dax_dev);
list_del(&td->list);
kfree(td);
}
static struct table_device *find_table_device(struct list_head *l, dev_t dev,
- fmode_t mode)
+ blk_mode_t mode)
{
struct table_device *td;
@@ -802,7 +802,7 @@ static struct table_device *find_table_device(struct list_head *l, dev_t dev,
return NULL;
}
-int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
+int dm_get_table_device(struct mapped_device *md, dev_t dev, blk_mode_t mode,
struct dm_dev **result)
{
struct table_device *td;
@@ -1172,7 +1172,8 @@ static inline sector_t max_io_len_target_boundary(struct dm_target *ti,
}
static sector_t __max_io_len(struct dm_target *ti, sector_t sector,
- unsigned int max_granularity)
+ unsigned int max_granularity,
+ unsigned int max_sectors)
{
sector_t target_offset = dm_target_offset(ti, sector);
sector_t len = max_io_len_target_boundary(ti, target_offset);
@@ -1186,13 +1187,13 @@ static sector_t __max_io_len(struct dm_target *ti, sector_t sector,
if (!max_granularity)
return len;
return min_t(sector_t, len,
- min(queue_max_sectors(ti->table->md->queue),
+ min(max_sectors ? : queue_max_sectors(ti->table->md->queue),
blk_chunk_sectors_left(target_offset, max_granularity)));
}
static inline sector_t max_io_len(struct dm_target *ti, sector_t sector)
{
- return __max_io_len(ti, sector, ti->max_io_len);
+ return __max_io_len(ti, sector, ti->max_io_len, 0);
}
int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
@@ -1581,12 +1582,13 @@ static void __send_empty_flush(struct clone_info *ci)
static void __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
unsigned int num_bios,
- unsigned int max_granularity)
+ unsigned int max_granularity,
+ unsigned int max_sectors)
{
unsigned int len, bios;
len = min_t(sector_t, ci->sector_count,
- __max_io_len(ti, ci->sector, max_granularity));
+ __max_io_len(ti, ci->sector, max_granularity, max_sectors));
atomic_add(num_bios, &ci->io->io_count);
bios = __send_duplicate_bios(ci, ti, num_bios, &len);
@@ -1623,23 +1625,27 @@ static blk_status_t __process_abnormal_io(struct clone_info *ci,
{
unsigned int num_bios = 0;
unsigned int max_granularity = 0;
+ unsigned int max_sectors = 0;
struct queue_limits *limits = dm_get_queue_limits(ti->table->md);
switch (bio_op(ci->bio)) {
case REQ_OP_DISCARD:
num_bios = ti->num_discard_bios;
+ max_sectors = limits->max_discard_sectors;
if (ti->max_discard_granularity)
- max_granularity = limits->max_discard_sectors;
+ max_granularity = max_sectors;
break;
case REQ_OP_SECURE_ERASE:
num_bios = ti->num_secure_erase_bios;
+ max_sectors = limits->max_secure_erase_sectors;
if (ti->max_secure_erase_granularity)
- max_granularity = limits->max_secure_erase_sectors;
+ max_granularity = max_sectors;
break;
case REQ_OP_WRITE_ZEROES:
num_bios = ti->num_write_zeroes_bios;
+ max_sectors = limits->max_write_zeroes_sectors;
if (ti->max_write_zeroes_granularity)
- max_granularity = limits->max_write_zeroes_sectors;
+ max_granularity = max_sectors;
break;
default:
break;
@@ -1654,7 +1660,8 @@ static blk_status_t __process_abnormal_io(struct clone_info *ci,
if (unlikely(!num_bios))
return BLK_STS_NOTSUPP;
- __send_changing_extent_only(ci, ti, num_bios, max_granularity);
+ __send_changing_extent_only(ci, ti, num_bios,
+ max_granularity, max_sectors);
return BLK_STS_OK;
}
@@ -2808,6 +2815,10 @@ retry:
}
map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
+ if (!map) {
+ /* avoid deadlock with fs/namespace.c:do_mount() */
+ suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG;
+ }
r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
if (r)
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index a856e0aee73b..63d9010d8e61 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -203,7 +203,7 @@ int dm_open_count(struct mapped_device *md);
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred);
int dm_cancel_deferred_remove(struct mapped_device *md);
int dm_request_based(struct mapped_device *md);
-int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
+int dm_get_table_device(struct mapped_device *md, dev_t dev, blk_mode_t mode,
struct dm_dev **result);
void dm_put_table_device(struct mapped_device *md, struct dm_dev *d);
diff --git a/drivers/md/md-autodetect.c b/drivers/md/md-autodetect.c
index 91836e6de326..6eaa0eab40f9 100644
--- a/drivers/md/md-autodetect.c
+++ b/drivers/md/md-autodetect.c
@@ -147,7 +147,8 @@ static void __init md_setup_drive(struct md_setup_args *args)
if (p)
*p++ = 0;
- dev = name_to_dev_t(devname);
+ if (early_lookup_bdev(devname, &dev))
+ dev = 0;
if (strncmp(devname, "/dev/", 5) == 0)
devname += 5;
snprintf(comp_name, 63, "/dev/%s", devname);
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index bc8d7565171d..1ff712889a3b 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -54,14 +54,7 @@ __acquires(bitmap->lock)
{
unsigned char *mappage;
- if (page >= bitmap->pages) {
- /* This can happen if bitmap_start_sync goes beyond
- * End-of-device while looking for a whole page.
- * It is harmless.
- */
- return -EINVAL;
- }
-
+ WARN_ON_ONCE(page >= bitmap->pages);
if (bitmap->bp[page].hijacked) /* it's hijacked, don't try to alloc */
return 0;
@@ -1023,7 +1016,6 @@ static int md_bitmap_file_test_bit(struct bitmap *bitmap, sector_t block)
return set;
}
-
/* this gets called when the md device is ready to unplug its underlying
* (slave) device queues -- before we let any writes go down, we need to
* sync the dirty pages of the bitmap file to disk */
@@ -1033,8 +1025,7 @@ void md_bitmap_unplug(struct bitmap *bitmap)
int dirty, need_write;
int writing = 0;
- if (!bitmap || !bitmap->storage.filemap ||
- test_bit(BITMAP_STALE, &bitmap->flags))
+ if (!md_bitmap_enabled(bitmap))
return;
/* look at each page to see if there are any set bits that need to be
@@ -1063,6 +1054,35 @@ void md_bitmap_unplug(struct bitmap *bitmap)
}
EXPORT_SYMBOL(md_bitmap_unplug);
+struct bitmap_unplug_work {
+ struct work_struct work;
+ struct bitmap *bitmap;
+ struct completion *done;
+};
+
+static void md_bitmap_unplug_fn(struct work_struct *work)
+{
+ struct bitmap_unplug_work *unplug_work =
+ container_of(work, struct bitmap_unplug_work, work);
+
+ md_bitmap_unplug(unplug_work->bitmap);
+ complete(unplug_work->done);
+}
+
+void md_bitmap_unplug_async(struct bitmap *bitmap)
+{
+ DECLARE_COMPLETION_ONSTACK(done);
+ struct bitmap_unplug_work unplug_work;
+
+ INIT_WORK_ONSTACK(&unplug_work.work, md_bitmap_unplug_fn);
+ unplug_work.bitmap = bitmap;
+ unplug_work.done = &done;
+
+ queue_work(md_bitmap_wq, &unplug_work.work);
+ wait_for_completion(&done);
+}
+EXPORT_SYMBOL(md_bitmap_unplug_async);
+
static void md_bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed);
/*
 * bitmap_init_from_disk -- called at bitmap_create time to initialize
* the in-memory bitmap from the on-disk bitmap -- also, sets up the
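md_bitmap_unplug_async() above is an instance of the generic on-stack work plus completion idiom: queue the work, sleep on the completion, and tear the work item down once it has run. The idiom in a self-contained form (sync_work and run_on_wq_and_wait are illustrative names):

	#include <linux/completion.h>
	#include <linux/workqueue.h>

	struct sync_work {
		struct work_struct work;
		struct completion done;
	};

	static void sync_work_fn(struct work_struct *work)
	{
		struct sync_work *sw = container_of(work, struct sync_work, work);

		/* ... do the deferred work here ... */
		complete(&sw->done);
	}

	static void run_on_wq_and_wait(struct workqueue_struct *wq)
	{
		struct sync_work sw;

		INIT_WORK_ONSTACK(&sw.work, sync_work_fn);
		init_completion(&sw.done);
		queue_work(wq, &sw.work);
		wait_for_completion(&sw.done);
		destroy_work_on_stack(&sw.work);
	}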
@@ -1241,11 +1261,28 @@ static bitmap_counter_t *md_bitmap_get_counter(struct bitmap_counts *bitmap,
sector_t offset, sector_t *blocks,
int create);
+static void mddev_set_timeout(struct mddev *mddev, unsigned long timeout,
+ bool force)
+{
+ struct md_thread *thread;
+
+ rcu_read_lock();
+ thread = rcu_dereference(mddev->thread);
+
+ if (!thread)
+ goto out;
+
+ if (force || thread->timeout < MAX_SCHEDULE_TIMEOUT)
+ thread->timeout = timeout;
+
+out:
+ rcu_read_unlock();
+}
+
/*
* bitmap daemon -- periodically wakes up to clean bits and flush pages
* out to disk
*/
-
void md_bitmap_daemon_work(struct mddev *mddev)
{
struct bitmap *bitmap;
@@ -1269,7 +1306,7 @@ void md_bitmap_daemon_work(struct mddev *mddev)
bitmap->daemon_lastrun = jiffies;
if (bitmap->allclean) {
- mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
+ mddev_set_timeout(mddev, MAX_SCHEDULE_TIMEOUT, true);
goto done;
}
bitmap->allclean = 1;
@@ -1366,8 +1403,7 @@ void md_bitmap_daemon_work(struct mddev *mddev)
done:
if (bitmap->allclean == 0)
- mddev->thread->timeout =
- mddev->bitmap_info.daemon_sleep;
+ mddev_set_timeout(mddev, mddev->bitmap_info.daemon_sleep, true);
mutex_unlock(&mddev->bitmap_info.mutex);
}
@@ -1387,6 +1423,14 @@ __acquires(bitmap->lock)
sector_t csize;
int err;
+ if (page >= bitmap->pages) {
+ /*
+ * This can happen if bitmap_start_sync goes beyond
+		 * end-of-device while looking for a whole page, or if the
+		 * user wrote a huge number to sysfs bitmap_set_bits.
+ */
+ return NULL;
+ }
err = md_bitmap_checkpage(bitmap, page, create, 0);
if (bitmap->bp[page].hijacked ||
@@ -1820,8 +1864,7 @@ void md_bitmap_destroy(struct mddev *mddev)
mddev->bitmap = NULL; /* disconnect from the md device */
spin_unlock(&mddev->lock);
mutex_unlock(&mddev->bitmap_info.mutex);
- if (mddev->thread)
- mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
+ mddev_set_timeout(mddev, MAX_SCHEDULE_TIMEOUT, true);
md_bitmap_free(bitmap);
}
@@ -1964,7 +2007,7 @@ int md_bitmap_load(struct mddev *mddev)
/* Kick recovery in case any bits were set */
set_bit(MD_RECOVERY_NEEDED, &bitmap->mddev->recovery);
- mddev->thread->timeout = mddev->bitmap_info.daemon_sleep;
+ mddev_set_timeout(mddev, mddev->bitmap_info.daemon_sleep, true);
md_wakeup_thread(mddev->thread);
md_bitmap_update_sb(bitmap);
@@ -2469,17 +2512,11 @@ timeout_store(struct mddev *mddev, const char *buf, size_t len)
timeout = MAX_SCHEDULE_TIMEOUT-1;
if (timeout < 1)
timeout = 1;
+
mddev->bitmap_info.daemon_sleep = timeout;
- if (mddev->thread) {
- /* if thread->timeout is MAX_SCHEDULE_TIMEOUT, then
- * the bitmap is all clean and we don't need to
- * adjust the timeout right now
- */
- if (mddev->thread->timeout < MAX_SCHEDULE_TIMEOUT) {
- mddev->thread->timeout = timeout;
- md_wakeup_thread(mddev->thread);
- }
- }
+ mddev_set_timeout(mddev, timeout, false);
+ md_wakeup_thread(mddev->thread);
+
return len;
}
diff --git a/drivers/md/md-bitmap.h b/drivers/md/md-bitmap.h
index cfd7395de8fd..8a3788c9bfef 100644
--- a/drivers/md/md-bitmap.h
+++ b/drivers/md/md-bitmap.h
@@ -264,6 +264,7 @@ void md_bitmap_sync_with_cluster(struct mddev *mddev,
sector_t new_lo, sector_t new_hi);
void md_bitmap_unplug(struct bitmap *bitmap);
+void md_bitmap_unplug_async(struct bitmap *bitmap);
void md_bitmap_daemon_work(struct mddev *mddev);
int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
@@ -273,6 +274,13 @@ int md_bitmap_copy_from_slot(struct mddev *mddev, int slot,
sector_t *lo, sector_t *hi, bool clear_bits);
void md_bitmap_free(struct bitmap *bitmap);
void md_bitmap_wait_behind_writes(struct mddev *mddev);
+
+static inline bool md_bitmap_enabled(struct bitmap *bitmap)
+{
+ return bitmap && bitmap->storage.filemap &&
+ !test_bit(BITMAP_STALE, &bitmap->flags);
+}
+
#endif
#endif
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 10e0c5381d01..3d9fd74233df 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -75,14 +75,14 @@ struct md_cluster_info {
sector_t suspend_hi;
int suspend_from; /* the slot which broadcast suspend_lo/hi */
- struct md_thread *recovery_thread;
+ struct md_thread __rcu *recovery_thread;
unsigned long recovery_map;
/* communication loc resources */
struct dlm_lock_resource *ack_lockres;
struct dlm_lock_resource *message_lockres;
struct dlm_lock_resource *token_lockres;
struct dlm_lock_resource *no_new_dev_lockres;
- struct md_thread *recv_thread;
+ struct md_thread __rcu *recv_thread;
struct completion newdisk_completion;
wait_queue_head_t wait;
unsigned long state;
@@ -362,8 +362,8 @@ static void __recover_slot(struct mddev *mddev, int slot)
set_bit(slot, &cinfo->recovery_map);
if (!cinfo->recovery_thread) {
- cinfo->recovery_thread = md_register_thread(recover_bitmaps,
- mddev, "recover");
+ rcu_assign_pointer(cinfo->recovery_thread,
+ md_register_thread(recover_bitmaps, mddev, "recover"));
if (!cinfo->recovery_thread) {
pr_warn("md-cluster: Could not create recovery thread\n");
return;
@@ -526,11 +526,15 @@ static void process_add_new_disk(struct mddev *mddev, struct cluster_msg *cmsg)
static void process_metadata_update(struct mddev *mddev, struct cluster_msg *msg)
{
int got_lock = 0;
+ struct md_thread *thread;
struct md_cluster_info *cinfo = mddev->cluster_info;
mddev->good_device_nr = le32_to_cpu(msg->raid_slot);
dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_CR);
- wait_event(mddev->thread->wqueue,
+
+	/* daemon thread must exist */
+ thread = rcu_dereference_protected(mddev->thread, true);
+ wait_event(thread->wqueue,
(got_lock = mddev_trylock(mddev)) ||
test_bit(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state));
md_reload_sb(mddev, mddev->good_device_nr);
@@ -889,7 +893,8 @@ static int join(struct mddev *mddev, int nodes)
}
/* Initiate the communication resources */
ret = -ENOMEM;
- cinfo->recv_thread = md_register_thread(recv_daemon, mddev, "cluster_recv");
+ rcu_assign_pointer(cinfo->recv_thread,
+ md_register_thread(recv_daemon, mddev, "cluster_recv"));
if (!cinfo->recv_thread) {
pr_err("md-cluster: cannot allocate memory for recv_thread!\n");
goto err;
diff --git a/drivers/md/md-multipath.c b/drivers/md/md-multipath.c
index 66edf5e72bd6..92c45be203d7 100644
--- a/drivers/md/md-multipath.c
+++ b/drivers/md/md-multipath.c
@@ -400,8 +400,8 @@ static int multipath_run (struct mddev *mddev)
if (ret)
goto out_free_conf;
- mddev->thread = md_register_thread(multipathd, mddev,
- "multipath");
+ rcu_assign_pointer(mddev->thread,
+ md_register_thread(multipathd, mddev, "multipath"));
if (!mddev->thread)
goto out_free_conf;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 8e344b4b3444..cf3733c90c47 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -70,11 +70,7 @@
#include "md-bitmap.h"
#include "md-cluster.h"
-/* pers_list is a list of registered personalities protected
- * by pers_lock.
- * pers_lock does extra service to protect accesses to
- * mddev->thread when the mutex cannot be held.
- */
+/* pers_list is a list of registered personalities protected by pers_lock. */
static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);
@@ -87,23 +83,13 @@ static struct module *md_cluster_mod;
static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static struct workqueue_struct *md_wq;
static struct workqueue_struct *md_misc_wq;
-static struct workqueue_struct *md_rdev_misc_wq;
+struct workqueue_struct *md_bitmap_wq;
static int remove_and_add_spares(struct mddev *mddev,
struct md_rdev *this);
static void mddev_detach(struct mddev *mddev);
-
-enum md_ro_state {
- MD_RDWR,
- MD_RDONLY,
- MD_AUTO_READ,
- MD_MAX_STATE
-};
-
-static bool md_is_rdwr(struct mddev *mddev)
-{
- return (mddev->ro == MD_RDWR);
-}
+static void export_rdev(struct md_rdev *rdev, struct mddev *mddev);
+static void md_wakeup_thread_directly(struct md_thread __rcu *thread);
/*
* Default number of read corrections we'll attempt on an rdev
@@ -360,10 +346,6 @@ EXPORT_SYMBOL_GPL(md_new_event);
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);
-static bool is_md_suspended(struct mddev *mddev)
-{
- return percpu_ref_is_dying(&mddev->active_io);
-}
/* Rather than calling directly into the personality make_request function,
* IO requests come here first so that we can check if the device is
* being suspended pending a reconfiguration.
@@ -457,13 +439,19 @@ static void md_submit_bio(struct bio *bio)
*/
void mddev_suspend(struct mddev *mddev)
{
- WARN_ON_ONCE(mddev->thread && current == mddev->thread->tsk);
- lockdep_assert_held(&mddev->reconfig_mutex);
+ struct md_thread *thread = rcu_dereference_protected(mddev->thread,
+ lockdep_is_held(&mddev->reconfig_mutex));
+
+ WARN_ON_ONCE(thread && current == thread->tsk);
if (mddev->suspended++)
return;
wake_up(&mddev->sb_wait);
set_bit(MD_ALLOW_SB_UPDATE, &mddev->flags);
percpu_ref_kill(&mddev->active_io);
+
+ if (mddev->pers->prepare_suspend)
+ mddev->pers->prepare_suspend(mddev);
+
wait_event(mddev->sb_wait, percpu_ref_is_zero(&mddev->active_io));
mddev->pers->quiesce(mddev, 1);
clear_bit_unlock(MD_ALLOW_SB_UPDATE, &mddev->flags);
@@ -655,9 +643,11 @@ void mddev_init(struct mddev *mddev)
{
mutex_init(&mddev->open_mutex);
mutex_init(&mddev->reconfig_mutex);
+ mutex_init(&mddev->delete_mutex);
mutex_init(&mddev->bitmap_info.mutex);
INIT_LIST_HEAD(&mddev->disks);
INIT_LIST_HEAD(&mddev->all_mddevs);
+ INIT_LIST_HEAD(&mddev->deleting);
timer_setup(&mddev->safemode_timer, md_safemode_timeout, 0);
atomic_set(&mddev->active, 1);
atomic_set(&mddev->openers, 0);
@@ -759,6 +749,24 @@ static void mddev_free(struct mddev *mddev)
static const struct attribute_group md_redundancy_group;
+static void md_free_rdev(struct mddev *mddev)
+{
+ struct md_rdev *rdev;
+ struct md_rdev *tmp;
+
+ mutex_lock(&mddev->delete_mutex);
+ if (list_empty(&mddev->deleting))
+ goto out;
+
+ list_for_each_entry_safe(rdev, tmp, &mddev->deleting, same_set) {
+ list_del_init(&rdev->same_set);
+ kobject_del(&rdev->kobj);
+ export_rdev(rdev, mddev);
+ }
+out:
+ mutex_unlock(&mddev->delete_mutex);
+}
+
void mddev_unlock(struct mddev *mddev)
{
if (mddev->to_remove) {
@@ -800,13 +808,10 @@ void mddev_unlock(struct mddev *mddev)
} else
mutex_unlock(&mddev->reconfig_mutex);
- /* As we've dropped the mutex we need a spinlock to
- * make sure the thread doesn't disappear
- */
- spin_lock(&pers_lock);
+ md_free_rdev(mddev);
+
md_wakeup_thread(mddev->thread);
wake_up(&mddev->sb_wait);
- spin_unlock(&pers_lock);
}
EXPORT_SYMBOL_GPL(mddev_unlock);
@@ -938,7 +943,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
atomic_inc(&rdev->nr_pending);
bio->bi_iter.bi_sector = sector;
- bio_add_page(bio, page, size, 0);
+ __bio_add_page(bio, page, size, 0);
bio->bi_private = rdev;
bio->bi_end_io = super_written;
@@ -979,7 +984,7 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
bio.bi_iter.bi_sector = sector + rdev->new_data_offset;
else
bio.bi_iter.bi_sector = sector + rdev->data_offset;
- bio_add_page(&bio, page, size, 0);
+ __bio_add_page(&bio, page, size, 0);
submit_bio_wait(&bio);
@@ -2440,16 +2445,12 @@ static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
return err;
}
-static void rdev_delayed_delete(struct work_struct *ws)
-{
- struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work);
- kobject_del(&rdev->kobj);
- kobject_put(&rdev->kobj);
-}
-
void md_autodetect_dev(dev_t dev);
-static void export_rdev(struct md_rdev *rdev)
+/* just for claiming the bdev */
+static struct md_rdev claim_rdev;
+
+static void export_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
pr_debug("md: export_rdev(%pg)\n", rdev->bdev);
md_rdev_clear(rdev);
@@ -2457,13 +2458,15 @@ static void export_rdev(struct md_rdev *rdev)
if (test_bit(AutoDetected, &rdev->flags))
md_autodetect_dev(rdev->bdev->bd_dev);
#endif
- blkdev_put(rdev->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
+ blkdev_put(rdev->bdev, mddev->major_version == -2 ? &claim_rdev : rdev);
rdev->bdev = NULL;
kobject_put(&rdev->kobj);
}
static void md_kick_rdev_from_array(struct md_rdev *rdev)
{
+ struct mddev *mddev = rdev->mddev;
+
bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
list_del_rcu(&rdev->same_set);
pr_debug("md: unbind<%pg>\n", rdev->bdev);
@@ -2477,15 +2480,17 @@ static void md_kick_rdev_from_array(struct md_rdev *rdev)
rdev->sysfs_unack_badblocks = NULL;
rdev->sysfs_badblocks = NULL;
rdev->badblocks.count = 0;
- /* We need to delay this, otherwise we can deadlock when
- * writing to 'remove' to "dev/state". We also need
- * to delay it due to rcu usage.
- */
+
synchronize_rcu();
- INIT_WORK(&rdev->del_work, rdev_delayed_delete);
- kobject_get(&rdev->kobj);
- queue_work(md_rdev_misc_wq, &rdev->del_work);
- export_rdev(rdev);
+
+ /*
+	 * kobject_del() will wait for all in-progress writers to be done,
+	 * and those writers hold reconfig_mutex; hence it can't be called
+	 * under reconfig_mutex and is instead deferred to mddev_unlock().
+ */
+ mutex_lock(&mddev->delete_mutex);
+ list_add(&rdev->same_set, &mddev->deleting);
+ mutex_unlock(&mddev->delete_mutex);
}
static void export_array(struct mddev *mddev)
@@ -3553,6 +3558,7 @@ rdev_attr_store(struct kobject *kobj, struct attribute *attr,
{
struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
+ struct kernfs_node *kn = NULL;
ssize_t rv;
struct mddev *mddev = rdev->mddev;
@@ -3560,6 +3566,10 @@ rdev_attr_store(struct kobject *kobj, struct attribute *attr,
return -EIO;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
+
+ if (entry->store == state_store && cmd_match(page, "remove"))
+ kn = sysfs_break_active_protection(kobj, attr);
+
rv = mddev ? mddev_lock(mddev) : -ENODEV;
if (!rv) {
if (rdev->mddev == NULL)
@@ -3568,6 +3578,10 @@ rdev_attr_store(struct kobject *kobj, struct attribute *attr,
rv = entry->store(rdev, page, length);
mddev_unlock(mddev);
}
+
+ if (kn)
+ sysfs_unbreak_active_protection(kn);
+
return rv;
}
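Breaking sysfs active protection is what lets a store method delete its own kobject (here: writing "remove" to an rdev's state attribute) without deadlocking on the attribute it is running from. The pairing in isolation (do_remove is a hypothetical stand-in):

	#include <linux/sysfs.h>

	static ssize_t remove_store(struct kobject *kobj, struct attribute *attr,
				    const char *page, size_t len)
	{
		struct kernfs_node *kn = sysfs_break_active_protection(kobj, attr);
		ssize_t rv = do_remove(kobj, page, len);	/* may delete kobj's files */

		if (kn)
			sysfs_unbreak_active_protection(kn);

		return rv;
	}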
@@ -3612,6 +3626,7 @@ int md_rdev_init(struct md_rdev *rdev)
return badblocks_init(&rdev->badblocks, 0);
}
EXPORT_SYMBOL_GPL(md_rdev_init);
+
/*
* Import a device. If 'super_format' >= 0, then sanity check the superblock
*
@@ -3624,7 +3639,6 @@ EXPORT_SYMBOL_GPL(md_rdev_init);
*/
static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
{
- static struct md_rdev claim_rdev; /* just for claiming the bdev */
struct md_rdev *rdev;
sector_t size;
int err;
@@ -3640,9 +3654,8 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
if (err)
goto out_clear_rdev;
- rdev->bdev = blkdev_get_by_dev(newdev,
- FMODE_READ | FMODE_WRITE | FMODE_EXCL,
- super_format == -2 ? &claim_rdev : rdev);
+ rdev->bdev = blkdev_get_by_dev(newdev, BLK_OPEN_READ | BLK_OPEN_WRITE,
+ super_format == -2 ? &claim_rdev : rdev, NULL);
if (IS_ERR(rdev->bdev)) {
pr_warn("md: could not open device unknown-block(%u,%u).\n",
MAJOR(newdev), MINOR(newdev));
@@ -3679,7 +3692,7 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
return rdev;
out_blkdev_put:
- blkdev_put(rdev->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
+ blkdev_put(rdev->bdev, super_format == -2 ? &claim_rdev : rdev);
out_clear_rdev:
md_rdev_clear(rdev);
out_free_rdev:
@@ -3794,8 +3807,9 @@ int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
static ssize_t
safe_delay_show(struct mddev *mddev, char *page)
{
- int msec = (mddev->safemode_delay*1000)/HZ;
- return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
+ unsigned int msec = ((unsigned long)mddev->safemode_delay*1000)/HZ;
+
+ return sprintf(page, "%u.%03u\n", msec/1000, msec%1000);
}
static ssize_t
safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
@@ -3807,7 +3821,7 @@ safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
return -EINVAL;
}
- if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
+ if (strict_strtoul_scaled(cbuf, &msec, 3) < 0 || msec > UINT_MAX / HZ)
return -EINVAL;
if (msec == 0)
mddev->safemode_delay = 0;
@@ -4477,6 +4491,8 @@ max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len
rv = kstrtouint(buf, 10, &n);
if (rv < 0)
return rv;
+ if (n > INT_MAX)
+ return -EINVAL;
atomic_set(&mddev->max_corr_read_errors, n);
return len;
}
@@ -4491,20 +4507,6 @@ null_show(struct mddev *mddev, char *page)
return -EINVAL;
}
-/* need to ensure rdev_delayed_delete() has completed */
-static void flush_rdev_wq(struct mddev *mddev)
-{
- struct md_rdev *rdev;
-
- rcu_read_lock();
- rdev_for_each_rcu(rdev, mddev)
- if (work_pending(&rdev->del_work)) {
- flush_workqueue(md_rdev_misc_wq);
- break;
- }
- rcu_read_unlock();
-}
-
static ssize_t
new_dev_store(struct mddev *mddev, const char *buf, size_t len)
{
@@ -4532,7 +4534,6 @@ new_dev_store(struct mddev *mddev, const char *buf, size_t len)
minor != MINOR(dev))
return -EOVERFLOW;
- flush_rdev_wq(mddev);
err = mddev_lock(mddev);
if (err)
return err;
@@ -4560,7 +4561,7 @@ new_dev_store(struct mddev *mddev, const char *buf, size_t len)
err = bind_rdev_to_array(rdev, mddev);
out:
if (err)
- export_rdev(rdev);
+ export_rdev(rdev, mddev);
mddev_unlock(mddev);
if (!err)
md_new_event();
@@ -4804,11 +4805,21 @@ action_store(struct mddev *mddev, const char *page, size_t len)
return -EINVAL;
err = mddev_lock(mddev);
if (!err) {
- if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
+ if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
err = -EBUSY;
- else {
+ } else if (mddev->reshape_position == MaxSector ||
+ mddev->pers->check_reshape == NULL ||
+ mddev->pers->check_reshape(mddev)) {
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
err = mddev->pers->start_reshape(mddev);
+ } else {
+ /*
+			 * If a reshape is still in progress, and
+			 * md_check_recovery() can continue it, don't
+			 * restart the reshape: doing so can corrupt
+			 * data for raid456.
+ */
+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
}
mddev_unlock(mddev);
}
@@ -5592,7 +5603,6 @@ struct mddev *md_alloc(dev_t dev, char *name)
* removed (mddev_delayed_delete).
*/
flush_workqueue(md_misc_wq);
- flush_workqueue(md_rdev_misc_wq);
mutex_lock(&disks_mutex);
mddev = mddev_alloc(dev);
@@ -6269,10 +6279,12 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
}
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- if (mddev->sync_thread)
- /* Thread might be blocked waiting for metadata update
- * which will now never happen */
- wake_up_process(mddev->sync_thread->tsk);
+
+ /*
+	 * The thread might be blocked waiting for a metadata update, which
+	 * will now never happen.
+ */
+ md_wakeup_thread_directly(mddev->sync_thread);
if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
return -EBUSY;
@@ -6333,10 +6345,12 @@ static int do_md_stop(struct mddev *mddev, int mode,
}
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- if (mddev->sync_thread)
- /* Thread might be blocked waiting for metadata update
- * which will now never happen */
- wake_up_process(mddev->sync_thread->tsk);
+
+ /*
+	 * The thread might be blocked waiting for a metadata update, which
+	 * will now never happen.
+ */
+ md_wakeup_thread_directly(mddev->sync_thread);
mddev_unlock(mddev);
wait_event(resync_wait, (mddev->sync_thread == NULL &&
@@ -6498,7 +6512,7 @@ static void autorun_devices(int part)
rdev_for_each_list(rdev, tmp, &candidates) {
list_del_init(&rdev->same_set);
if (bind_rdev_to_array(rdev, mddev))
- export_rdev(rdev);
+ export_rdev(rdev, mddev);
}
autorun_array(mddev);
mddev_unlock(mddev);
@@ -6508,7 +6522,7 @@ static void autorun_devices(int part)
*/
rdev_for_each_list(rdev, tmp, &candidates) {
list_del_init(&rdev->same_set);
- export_rdev(rdev);
+ export_rdev(rdev, mddev);
}
mddev_put(mddev);
}
@@ -6696,13 +6710,13 @@ int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info)
pr_warn("md: %pg has different UUID to %pg\n",
rdev->bdev,
rdev0->bdev);
- export_rdev(rdev);
+ export_rdev(rdev, mddev);
return -EINVAL;
}
}
err = bind_rdev_to_array(rdev, mddev);
if (err)
- export_rdev(rdev);
+ export_rdev(rdev, mddev);
return err;
}
@@ -6733,7 +6747,6 @@ int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info)
if (info->state & (1<<MD_DISK_SYNC) &&
info->raid_disk < mddev->raid_disks) {
rdev->raid_disk = info->raid_disk;
- set_bit(In_sync, &rdev->flags);
clear_bit(Bitmap_sync, &rdev->flags);
} else
rdev->raid_disk = -1;
@@ -6746,7 +6759,7 @@ int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info)
/* This was a hot-add request, but events doesn't
* match, so reject it.
*/
- export_rdev(rdev);
+ export_rdev(rdev, mddev);
return -EINVAL;
}
@@ -6772,7 +6785,7 @@ int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info)
}
}
if (has_journal || mddev->bitmap) {
- export_rdev(rdev);
+ export_rdev(rdev, mddev);
return -EBUSY;
}
set_bit(Journal, &rdev->flags);
@@ -6787,7 +6800,7 @@ int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info)
/* --add initiated by this node */
err = md_cluster_ops->add_new_disk(mddev, rdev);
if (err) {
- export_rdev(rdev);
+ export_rdev(rdev, mddev);
return err;
}
}
@@ -6797,7 +6810,7 @@ int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info)
err = bind_rdev_to_array(rdev, mddev);
if (err)
- export_rdev(rdev);
+ export_rdev(rdev, mddev);
if (mddev_is_clustered(mddev)) {
if (info->state & (1 << MD_DISK_CANDIDATE)) {
@@ -6860,7 +6873,7 @@ int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info)
err = bind_rdev_to_array(rdev, mddev);
if (err) {
- export_rdev(rdev);
+ export_rdev(rdev, mddev);
return err;
}
}
@@ -6985,7 +6998,7 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
return 0;
abort_export:
- export_rdev(rdev);
+ export_rdev(rdev, mddev);
return err;
}
@@ -7486,7 +7499,7 @@ static int __md_set_array_info(struct mddev *mddev, void __user *argp)
return err;
}
-static int md_ioctl(struct block_device *bdev, fmode_t mode,
+static int md_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long arg)
{
int err = 0;
@@ -7555,9 +7568,6 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
}
- if (cmd == ADD_NEW_DISK || cmd == HOT_ADD_DISK)
- flush_rdev_wq(mddev);
-
if (cmd == HOT_REMOVE_DISK)
/* need to ensure recovery thread has run */
wait_event_interruptible_timeout(mddev->sb_wait,
@@ -7718,7 +7728,7 @@ out:
return err;
}
#ifdef CONFIG_COMPAT
-static int md_compat_ioctl(struct block_device *bdev, fmode_t mode,
+static int md_compat_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long arg)
{
switch (cmd) {
@@ -7767,13 +7777,13 @@ out_unlock:
return err;
}
-static int md_open(struct block_device *bdev, fmode_t mode)
+static int md_open(struct gendisk *disk, blk_mode_t mode)
{
struct mddev *mddev;
int err;
spin_lock(&all_mddevs_lock);
- mddev = mddev_get(bdev->bd_disk->private_data);
+ mddev = mddev_get(disk->private_data);
spin_unlock(&all_mddevs_lock);
if (!mddev)
return -ENODEV;
@@ -7789,7 +7799,7 @@ static int md_open(struct block_device *bdev, fmode_t mode)
atomic_inc(&mddev->openers);
mutex_unlock(&mddev->open_mutex);
- bdev_check_media_change(bdev);
+ disk_check_media_change(disk);
return 0;
out_unlock:
@@ -7799,7 +7809,7 @@ out:
return err;
}
-static void md_release(struct gendisk *disk, fmode_t mode)
+static void md_release(struct gendisk *disk)
{
struct mddev *mddev = disk->private_data;
@@ -7886,13 +7896,29 @@ static int md_thread(void *arg)
return 0;
}
-void md_wakeup_thread(struct md_thread *thread)
+static void md_wakeup_thread_directly(struct md_thread __rcu *thread)
{
- if (thread) {
- pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm);
- set_bit(THREAD_WAKEUP, &thread->flags);
- wake_up(&thread->wqueue);
+ struct md_thread *t;
+
+ rcu_read_lock();
+ t = rcu_dereference(thread);
+ if (t)
+ wake_up_process(t->tsk);
+ rcu_read_unlock();
+}
+
+void md_wakeup_thread(struct md_thread __rcu *thread)
+{
+ struct md_thread *t;
+
+ rcu_read_lock();
+ t = rcu_dereference(thread);
+ if (t) {
+ pr_debug("md: waking up MD thread %s.\n", t->tsk->comm);
+ set_bit(THREAD_WAKEUP, &t->flags);
+ wake_up(&t->wqueue);
}
+ rcu_read_unlock();
}
EXPORT_SYMBOL(md_wakeup_thread);
@@ -7922,22 +7948,15 @@ struct md_thread *md_register_thread(void (*run) (struct md_thread *),
}
EXPORT_SYMBOL(md_register_thread);
-void md_unregister_thread(struct md_thread **threadp)
+void md_unregister_thread(struct md_thread __rcu **threadp)
{
- struct md_thread *thread;
+ struct md_thread *thread = rcu_dereference_protected(*threadp, true);
- /*
- * Locking ensures that mddev_unlock does not wake_up a
- * non-existent thread
- */
- spin_lock(&pers_lock);
- thread = *threadp;
- if (!thread) {
- spin_unlock(&pers_lock);
+ if (!thread)
return;
- }
- *threadp = NULL;
- spin_unlock(&pers_lock);
+
+ rcu_assign_pointer(*threadp, NULL);
+ synchronize_rcu();
pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
kthread_stop(thread->tsk);
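The pattern above is the standard RCU way to retire a shared pointer: readers only touch the thread inside rcu_read_lock(), and the unregister path publishes NULL with rcu_assign_pointer() and then calls synchronize_rcu() so no reader can still hold the old pointer by the time the kthread is stopped. A minimal self-contained sketch of the same lifecycle, using a hypothetical my_worker type rather than the md structures:

    #include <linux/kthread.h>
    #include <linux/rcupdate.h>
    #include <linux/sched.h>
    #include <linux/slab.h>

    struct my_worker {
            struct task_struct *tsk;
    };

    static struct my_worker __rcu *worker;

    /* Reader: safe against a concurrent unregister */
    static void my_wake_worker(void)
    {
            struct my_worker *w;

            rcu_read_lock();
            w = rcu_dereference(worker);
            if (w)
                    wake_up_process(w->tsk); /* w stays valid for the whole section */
            rcu_read_unlock();
    }

    /* Writer: unpublish, wait out the readers, then tear down */
    static void my_unregister_worker(void)
    {
            struct my_worker *w = rcu_dereference_protected(worker, true);

            if (!w)
                    return;
            rcu_assign_pointer(worker, NULL);
            synchronize_rcu(); /* after this, no reader can still see w */
            kthread_stop(w->tsk);
            kfree(w);
    }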
@@ -9100,6 +9119,7 @@ void md_do_sync(struct md_thread *thread)
spin_unlock(&mddev->lock);
wake_up(&resync_wait);
+ wake_up(&mddev->sb_wait);
md_wakeup_thread(mddev->thread);
return;
}
@@ -9202,9 +9222,8 @@ static void md_start_sync(struct work_struct *ws)
{
struct mddev *mddev = container_of(ws, struct mddev, del_work);
- mddev->sync_thread = md_register_thread(md_do_sync,
- mddev,
- "resync");
+ rcu_assign_pointer(mddev->sync_thread,
+ md_register_thread(md_do_sync, mddev, "resync"));
if (!mddev->sync_thread) {
pr_warn("%s: could not start resync thread...\n",
mdname(mddev));
@@ -9619,9 +9638,10 @@ static int __init md_init(void)
if (!md_misc_wq)
goto err_misc_wq;
- md_rdev_misc_wq = alloc_workqueue("md_rdev_misc", 0, 0);
- if (!md_rdev_misc_wq)
- goto err_rdev_misc_wq;
+ md_bitmap_wq = alloc_workqueue("md_bitmap", WQ_MEM_RECLAIM | WQ_UNBOUND,
+ 0);
+ if (!md_bitmap_wq)
+ goto err_bitmap_wq;
ret = __register_blkdev(MD_MAJOR, "md", md_probe);
if (ret < 0)
@@ -9641,8 +9661,8 @@ static int __init md_init(void)
err_mdp:
unregister_blkdev(MD_MAJOR, "md");
err_md:
- destroy_workqueue(md_rdev_misc_wq);
-err_rdev_misc_wq:
+ destroy_workqueue(md_bitmap_wq);
+err_bitmap_wq:
destroy_workqueue(md_misc_wq);
err_misc_wq:
destroy_workqueue(md_wq);
@@ -9938,8 +9958,8 @@ static __exit void md_exit(void)
}
spin_unlock(&all_mddevs_lock);
- destroy_workqueue(md_rdev_misc_wq);
destroy_workqueue(md_misc_wq);
+ destroy_workqueue(md_bitmap_wq);
destroy_workqueue(md_wq);
}
diff --git a/drivers/md/md.h b/drivers/md/md.h
index fd8f260ed5f8..bfd2306bc750 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -122,8 +122,6 @@ struct md_rdev {
struct serial_in_rdev *serial; /* used for raid1 io serialization */
- struct work_struct del_work; /* used for delayed sysfs removal */
-
struct kernfs_node *sysfs_state; /* handle for 'state'
* sysfs entry */
/* handle for 'unacknowledged_bad_blocks' sysfs dentry */
@@ -367,8 +365,8 @@ struct mddev {
int new_chunk_sectors;
int reshape_backwards;
- struct md_thread *thread; /* management thread */
- struct md_thread *sync_thread; /* doing resync or reconstruct */
+ struct md_thread __rcu *thread; /* management thread */
+ struct md_thread __rcu *sync_thread; /* doing resync or reconstruct */
/* 'last_sync_action' is initialized to "none". It is set when a
* sync operation (i.e "data-check", "requested-resync", "resync",
@@ -531,6 +529,14 @@ struct mddev {
unsigned int good_device_nr; /* good device num within cluster raid */
unsigned int noio_flag; /* for memalloc scope API */
+ /*
+ * Temporarily store rdevs that will finally be removed once
+ * reconfig_mutex is unlocked.
+ */
+ struct list_head deleting;
+ /* Protect the deleting list */
+ struct mutex delete_mutex;
+
bool has_superblocks:1;
bool fail_last_dev:1;
bool serialize_policy:1;
@@ -555,6 +561,23 @@ enum recovery_flags {
MD_RESYNCING_REMOTE, /* remote node is running resync thread */
};
+enum md_ro_state {
+ MD_RDWR,
+ MD_RDONLY,
+ MD_AUTO_READ,
+ MD_MAX_STATE
+};
+
+static inline bool md_is_rdwr(struct mddev *mddev)
+{
+ return (mddev->ro == MD_RDWR);
+}
+
+static inline bool is_md_suspended(struct mddev *mddev)
+{
+ return percpu_ref_is_dying(&mddev->active_io);
+}
+
static inline int __must_check mddev_lock(struct mddev *mddev)
{
return mutex_lock_interruptible(&mddev->reconfig_mutex);
@@ -614,6 +637,7 @@ struct md_personality
int (*start_reshape) (struct mddev *mddev);
void (*finish_reshape) (struct mddev *mddev);
void (*update_reshape_pos) (struct mddev *mddev);
+ void (*prepare_suspend) (struct mddev *mddev);
/* quiesce suspends or resumes internal processing.
* 1 - stop new actions and wait for action io to complete
* 0 - return to normal behaviour
@@ -734,8 +758,8 @@ extern struct md_thread *md_register_thread(
void (*run)(struct md_thread *thread),
struct mddev *mddev,
const char *name);
-extern void md_unregister_thread(struct md_thread **threadp);
-extern void md_wakeup_thread(struct md_thread *thread);
+extern void md_unregister_thread(struct md_thread __rcu **threadp);
+extern void md_wakeup_thread(struct md_thread __rcu *thread);
extern void md_check_recovery(struct mddev *mddev);
extern void md_reap_sync_thread(struct mddev *mddev);
extern int mddev_init_writes_pending(struct mddev *mddev);
@@ -828,6 +852,7 @@ struct mdu_array_info_s;
struct mdu_disk_info_s;
extern int mdp_major;
+extern struct workqueue_struct *md_bitmap_wq;
void md_autostart_arrays(int part);
int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info);
int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info);
diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c
index e61f6cad4e08..169ebe296f2d 100644
--- a/drivers/md/raid1-10.c
+++ b/drivers/md/raid1-10.c
@@ -21,6 +21,7 @@
#define IO_MADE_GOOD ((struct bio *)2)
#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
+#define MAX_PLUG_BIO 32
/* for managing resync I/O pages */
struct resync_pages {
@@ -31,6 +32,7 @@ struct resync_pages {
struct raid1_plug_cb {
struct blk_plug_cb cb;
struct bio_list pending;
+ unsigned int count;
};
static void rbio_pool_free(void *rbio, void *data)
@@ -101,11 +103,73 @@ static void md_bio_reset_resync_pages(struct bio *bio, struct resync_pages *rp,
struct page *page = resync_fetch_page(rp, idx);
int len = min_t(int, size, PAGE_SIZE);
- /*
- * won't fail because the vec table is big
- * enough to hold all these pages
- */
- bio_add_page(bio, page, len, 0);
+ if (WARN_ON(!bio_add_page(bio, page, len, 0))) {
+ bio->bi_status = BLK_STS_RESOURCE;
+ bio_endio(bio);
+ return;
+ }
+
size -= len;
} while (idx++ < RESYNC_PAGES && size > 0);
}
+
+static inline void raid1_submit_write(struct bio *bio)
+{
+ struct md_rdev *rdev = (struct md_rdev *)bio->bi_bdev;
+
+ bio->bi_next = NULL;
+ bio_set_dev(bio, rdev->bdev);
+ if (test_bit(Faulty, &rdev->flags))
+ bio_io_error(bio);
+ else if (unlikely(bio_op(bio) == REQ_OP_DISCARD &&
+ !bdev_max_discard_sectors(bio->bi_bdev)))
+ /* Just ignore it */
+ bio_endio(bio);
+ else
+ submit_bio_noacct(bio);
+}
+
+static inline bool raid1_add_bio_to_plug(struct mddev *mddev, struct bio *bio,
+ blk_plug_cb_fn unplug, int copies)
+{
+ struct raid1_plug_cb *plug = NULL;
+ struct blk_plug_cb *cb;
+
+ /*
+ * If the bitmap is not enabled, it's safe to submit the io directly,
+ * which gives optimal performance.
+ */
+ if (!md_bitmap_enabled(mddev->bitmap)) {
+ raid1_submit_write(bio);
+ return true;
+ }
+
+ cb = blk_check_plugged(unplug, mddev, sizeof(*plug));
+ if (!cb)
+ return false;
+
+ plug = container_of(cb, struct raid1_plug_cb, cb);
+ bio_list_add(&plug->pending, bio);
+ if (++plug->count / MAX_PLUG_BIO >= copies) {
+ list_del(&cb->list);
+ cb->callback(cb, false);
+ }
+
+ return true;
+}
+
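raid1_add_bio_to_plug() batches writes on the per-task plug and flushes early once plug->count reaches copies * MAX_PLUG_BIO, keeping the batch bounded. Callers treat it as a best-effort fast path and fall back to the per-array pending list, as the raid1 and raid10 hunks further down do; roughly:

    /* Submission path: try the plug first, else queue for the daemon thread */
    if (!raid1_add_bio_to_plug(mddev, mbio, raid1_unplug, disks)) {
            spin_lock_irqsave(&conf->device_lock, flags);
            bio_list_add(&conf->pending_bio_list, mbio);
            spin_unlock_irqrestore(&conf->device_lock, flags);
            md_wakeup_thread(mddev->thread);
    }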
+/*
+ * current->bio_list is set while inside submit_bio() context. In that case
+ * bitmap io would be added to the list and wait for the current io submission
+ * to finish, while the current io submission must wait for the bitmap io to
+ * be done. To avoid such a deadlock, submit bitmap io asynchronously.
+ */
+static inline void raid1_prepare_flush_writes(struct bitmap *bitmap)
+{
+ if (current->bio_list)
+ md_bitmap_unplug_async(bitmap);
+ else
+ md_bitmap_unplug(bitmap);
+}
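md_bitmap_unplug_async() is added elsewhere in this series; its shape is a plain workqueue bounce, so the bitmap writes are issued from a worker whose current->bio_list is empty. A rough sketch of that deferral, assuming the md_bitmap_wq workqueue introduced by this diff (the helper and field names here are illustrative, not the exact md implementation):

    #include <linux/completion.h>
    #include <linux/workqueue.h>

    struct unplug_work {
            struct work_struct work;
            struct bitmap *bitmap;
            struct completion done;
    };

    static void unplug_fn(struct work_struct *work)
    {
            struct unplug_work *u = container_of(work, struct unplug_work, work);

            md_bitmap_unplug(u->bitmap); /* runs outside the submit_bio() context */
            complete(&u->done);
    }

    static void bitmap_unplug_async_sketch(struct bitmap *bitmap)
    {
            struct unplug_work u;

            INIT_WORK_ONSTACK(&u.work, unplug_fn);
            u.bitmap = bitmap;
            init_completion(&u.done);
            queue_work(md_bitmap_wq, &u.work); /* md_bitmap_wq is added by this diff */
            wait_for_completion(&u.done);
            destroy_work_on_stack(&u.work);
    }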
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 68a9e2d9985b..dd25832eb045 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -794,22 +794,13 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
static void flush_bio_list(struct r1conf *conf, struct bio *bio)
{
/* flush any pending bitmap writes to disk before proceeding w/ I/O */
- md_bitmap_unplug(conf->mddev->bitmap);
+ raid1_prepare_flush_writes(conf->mddev->bitmap);
wake_up(&conf->wait_barrier);
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
- struct md_rdev *rdev = (void *)bio->bi_bdev;
- bio->bi_next = NULL;
- bio_set_dev(bio, rdev->bdev);
- if (test_bit(Faulty, &rdev->flags)) {
- bio_io_error(bio);
- } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
- !bdev_max_discard_sectors(bio->bi_bdev)))
- /* Just ignore it */
- bio_endio(bio);
- else
- submit_bio_noacct(bio);
+
+ raid1_submit_write(bio);
bio = next;
cond_resched();
}
@@ -1147,7 +1138,10 @@ static void alloc_behind_master_bio(struct r1bio *r1_bio,
if (unlikely(!page))
goto free_pages;
- bio_add_page(behind_bio, page, len, 0);
+ if (!bio_add_page(behind_bio, page, len, 0)) {
+ put_page(page);
+ goto free_pages;
+ }
size -= len;
i++;
@@ -1175,7 +1169,7 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
struct r1conf *conf = mddev->private;
struct bio *bio;
- if (from_schedule || current->bio_list) {
+ if (from_schedule) {
spin_lock_irq(&conf->device_lock);
bio_list_merge(&conf->pending_bio_list, &plug->pending);
spin_unlock_irq(&conf->device_lock);
@@ -1343,8 +1337,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
struct bitmap *bitmap = mddev->bitmap;
unsigned long flags;
struct md_rdev *blocked_rdev;
- struct blk_plug_cb *cb;
- struct raid1_plug_cb *plug = NULL;
int first_clone;
int max_sectors;
bool write_behind = false;
@@ -1573,15 +1565,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
r1_bio->sector);
/* flush_pending_writes() needs access to the rdev so...*/
mbio->bi_bdev = (void *)rdev;
-
- cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
- if (cb)
- plug = container_of(cb, struct raid1_plug_cb, cb);
- else
- plug = NULL;
- if (plug) {
- bio_list_add(&plug->pending, mbio);
- } else {
+ if (!raid1_add_bio_to_plug(mddev, mbio, raid1_unplug, disks)) {
spin_lock_irqsave(&conf->device_lock, flags);
bio_list_add(&conf->pending_bio_list, mbio);
spin_unlock_irqrestore(&conf->device_lock, flags);
@@ -2914,7 +2898,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
* won't fail because the vec table is big
* enough to hold all these pages
*/
- bio_add_page(bio, page, len, 0);
+ __bio_add_page(bio, page, len, 0);
}
}
nr_sectors += len>>9;
@@ -3084,7 +3068,8 @@ static struct r1conf *setup_conf(struct mddev *mddev)
}
err = -ENOMEM;
- conf->thread = md_register_thread(raid1d, mddev, "raid1");
+ rcu_assign_pointer(conf->thread,
+ md_register_thread(raid1d, mddev, "raid1"));
if (!conf->thread)
goto abort;
@@ -3177,8 +3162,8 @@ static int raid1_run(struct mddev *mddev)
/*
* Ok, everything is just fine now
*/
- mddev->thread = conf->thread;
- conf->thread = NULL;
+ rcu_assign_pointer(mddev->thread, conf->thread);
+ rcu_assign_pointer(conf->thread, NULL);
mddev->private = conf;
set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index ebb6788820e7..468f189da7a0 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -130,7 +130,7 @@ struct r1conf {
/* When taking over an array from a different personality, we store
* the new thread here until we fully activate the array.
*/
- struct md_thread *thread;
+ struct md_thread __rcu *thread;
/* Keep track of cluster resync window to send to other
* nodes.
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 4fcfcb350d2b..d0de8c9fb3cf 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -779,8 +779,16 @@ static struct md_rdev *read_balance(struct r10conf *conf,
disk = r10_bio->devs[slot].devnum;
rdev = rcu_dereference(conf->mirrors[disk].replacement);
if (rdev == NULL || test_bit(Faulty, &rdev->flags) ||
- r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
+ r10_bio->devs[slot].addr + sectors >
+ rdev->recovery_offset) {
+ /*
+ * Read replacement first to prevent reading both rdev
+ * and replacement as NULL while the replacement is
+ * replacing the rdev.
+ */
+ smp_mb();
rdev = rcu_dereference(conf->mirrors[disk].rdev);
+ }
if (rdev == NULL ||
test_bit(Faulty, &rdev->flags))
continue;
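The barrier pairs with the writer that promotes a replacement: the promotion path stores the new rdev, issues smp_mb(), and only then clears replacement, so a reader that loads replacement first and issues its own smp_mb() before loading rdev may see both pointers identical but never both NULL. Schematically (simplified from raid10's remove/replace path, not a literal excerpt; usable() stands in for the Faulty/recovery_offset checks):

    /* Writer: promote the replacement without a both-NULL window */
    p->rdev = p->replacement;
    smp_mb();       /* pairs with the reader's smp_mb() below */
    p->replacement = NULL;

    /* Reader (inside rcu_read_lock(), as in read_balance()) */
    rdev = rcu_dereference(p->replacement);
    if (!rdev || !usable(rdev)) {
            smp_mb();       /* order the two loads */
            rdev = rcu_dereference(p->rdev);
    }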
@@ -902,25 +910,15 @@ static void flush_pending_writes(struct r10conf *conf)
__set_current_state(TASK_RUNNING);
blk_start_plug(&plug);
- /* flush any pending bitmap writes to disk
- * before proceeding w/ I/O */
- md_bitmap_unplug(conf->mddev->bitmap);
+ raid1_prepare_flush_writes(conf->mddev->bitmap);
wake_up(&conf->wait_barrier);
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
- struct md_rdev *rdev = (void*)bio->bi_bdev;
- bio->bi_next = NULL;
- bio_set_dev(bio, rdev->bdev);
- if (test_bit(Faulty, &rdev->flags)) {
- bio_io_error(bio);
- } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
- !bdev_max_discard_sectors(bio->bi_bdev)))
- /* Just ignore it */
- bio_endio(bio);
- else
- submit_bio_noacct(bio);
+
+ raid1_submit_write(bio);
bio = next;
+ cond_resched();
}
blk_finish_plug(&plug);
} else
@@ -982,6 +980,7 @@ static void lower_barrier(struct r10conf *conf)
static bool stop_waiting_barrier(struct r10conf *conf)
{
struct bio_list *bio_list = current->bio_list;
+ struct md_thread *thread;
/* barrier is dropped */
if (!conf->barrier)
@@ -997,12 +996,14 @@ static bool stop_waiting_barrier(struct r10conf *conf)
(!bio_list_empty(&bio_list[0]) || !bio_list_empty(&bio_list[1])))
return true;
+ /* daemon thread must exist while handling io */
+ thread = rcu_dereference_protected(conf->mddev->thread, true);
/*
* move on if io is issued from raid10d(), nr_pending is not released
* from original io(see handle_read_error()). All raise barrier is
* blocked until this io is done.
*/
- if (conf->mddev->thread->tsk == current) {
+ if (thread->tsk == current) {
WARN_ON_ONCE(atomic_read(&conf->nr_pending) == 0);
return true;
}
@@ -1113,7 +1114,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
struct r10conf *conf = mddev->private;
struct bio *bio;
- if (from_schedule || current->bio_list) {
+ if (from_schedule) {
spin_lock_irq(&conf->device_lock);
bio_list_merge(&conf->pending_bio_list, &plug->pending);
spin_unlock_irq(&conf->device_lock);
@@ -1125,23 +1126,15 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
/* we aren't scheduling, so we can do the write-out directly. */
bio = bio_list_get(&plug->pending);
- md_bitmap_unplug(mddev->bitmap);
+ raid1_prepare_flush_writes(mddev->bitmap);
wake_up(&conf->wait_barrier);
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
- struct md_rdev *rdev = (void*)bio->bi_bdev;
- bio->bi_next = NULL;
- bio_set_dev(bio, rdev->bdev);
- if (test_bit(Faulty, &rdev->flags)) {
- bio_io_error(bio);
- } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
- !bdev_max_discard_sectors(bio->bi_bdev)))
- /* Just ignore it */
- bio_endio(bio);
- else
- submit_bio_noacct(bio);
+
+ raid1_submit_write(bio);
bio = next;
+ cond_resched();
}
kfree(plug);
}
@@ -1282,8 +1275,6 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
const blk_opf_t do_fua = bio->bi_opf & REQ_FUA;
unsigned long flags;
- struct blk_plug_cb *cb;
- struct raid1_plug_cb *plug = NULL;
struct r10conf *conf = mddev->private;
struct md_rdev *rdev;
int devnum = r10_bio->devs[n_copy].devnum;
@@ -1323,14 +1314,7 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
atomic_inc(&r10_bio->remaining);
- cb = blk_check_plugged(raid10_unplug, mddev, sizeof(*plug));
- if (cb)
- plug = container_of(cb, struct raid1_plug_cb, cb);
- else
- plug = NULL;
- if (plug) {
- bio_list_add(&plug->pending, mbio);
- } else {
+ if (!raid1_add_bio_to_plug(mddev, mbio, raid10_unplug, conf->copies)) {
spin_lock_irqsave(&conf->device_lock, flags);
bio_list_add(&conf->pending_bio_list, mbio);
spin_unlock_irqrestore(&conf->device_lock, flags);
@@ -1479,9 +1463,15 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
for (i = 0; i < conf->copies; i++) {
int d = r10_bio->devs[i].devnum;
- struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
- struct md_rdev *rrdev = rcu_dereference(
- conf->mirrors[d].replacement);
+ struct md_rdev *rdev, *rrdev;
+
+ rrdev = rcu_dereference(conf->mirrors[d].replacement);
+ /*
+ * Read replacement first to prevent reading both rdev and
+ * replacement as NULL while the replacement is replacing the rdev.
+ */
+ smp_mb();
+ rdev = rcu_dereference(conf->mirrors[d].rdev);
if (rdev == rrdev)
rrdev = NULL;
if (rdev && (test_bit(Faulty, &rdev->flags)))
@@ -2148,9 +2138,10 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
{
struct r10conf *conf = mddev->private;
int err = -EEXIST;
- int mirror;
+ int mirror, repl_slot = -1;
int first = 0;
int last = conf->geo.raid_disks - 1;
+ struct raid10_info *p;
if (mddev->recovery_cp < MaxSector)
/* only hot-add to in-sync arrays, as recovery is
@@ -2173,23 +2164,14 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
else
mirror = first;
for ( ; mirror <= last ; mirror++) {
- struct raid10_info *p = &conf->mirrors[mirror];
+ p = &conf->mirrors[mirror];
if (p->recovery_disabled == mddev->recovery_disabled)
continue;
if (p->rdev) {
- if (!test_bit(WantReplacement, &p->rdev->flags) ||
- p->replacement != NULL)
- continue;
- clear_bit(In_sync, &rdev->flags);
- set_bit(Replacement, &rdev->flags);
- rdev->raid_disk = mirror;
- err = 0;
- if (mddev->gendisk)
- disk_stack_limits(mddev->gendisk, rdev->bdev,
- rdev->data_offset << 9);
- conf->fullsync = 1;
- rcu_assign_pointer(p->replacement, rdev);
- break;
+ if (test_bit(WantReplacement, &p->rdev->flags) &&
+ p->replacement == NULL && repl_slot < 0)
+ repl_slot = mirror;
+ continue;
}
if (mddev->gendisk)
@@ -2206,6 +2188,19 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
break;
}
+ if (err && repl_slot >= 0) {
+ p = &conf->mirrors[repl_slot];
+ clear_bit(In_sync, &rdev->flags);
+ set_bit(Replacement, &rdev->flags);
+ rdev->raid_disk = repl_slot;
+ err = 0;
+ if (mddev->gendisk)
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
+ conf->fullsync = 1;
+ rcu_assign_pointer(p->replacement, rdev);
+ }
+
print_conf(conf);
return err;
}
@@ -3303,6 +3298,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
int chunks_skipped = 0;
sector_t chunk_mask = conf->geo.chunk_mask;
int page_idx = 0;
+ int error_disk = -1;
/*
* Allow skipping a full rebuild for incremental assembly
@@ -3386,8 +3382,21 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
return reshape_request(mddev, sector_nr, skipped);
if (chunks_skipped >= conf->geo.raid_disks) {
- /* if there has been nothing to do on any drive,
- * then there is nothing to do at all..
+ pr_err("md/raid10:%s: %s fails\n", mdname(mddev),
+ test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ? "resync" : "recovery");
+ if (error_disk >= 0 &&
+ !test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
+ /*
+ * recovery failed: set mirrors.recovery_disabled so that
+ * no new device will be added to this mirror.
+ */
+ conf->mirrors[error_disk].recovery_disabled =
+ mddev->recovery_disabled;
+ return 0;
+ }
+ /*
+ * if there has been nothing to do on any drive,
+ * then there is nothing to do at all.
*/
*skipped = 1;
return (max_sector - sector_nr) + sectors_skipped;
@@ -3437,8 +3446,6 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
sector_t sect;
int must_sync;
int any_working;
- int need_recover = 0;
- int need_replace = 0;
struct raid10_info *mirror = &conf->mirrors[i];
struct md_rdev *mrdev, *mreplace;
@@ -3446,15 +3453,13 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
mrdev = rcu_dereference(mirror->rdev);
mreplace = rcu_dereference(mirror->replacement);
- if (mrdev != NULL &&
- !test_bit(Faulty, &mrdev->flags) &&
- !test_bit(In_sync, &mrdev->flags))
- need_recover = 1;
- if (mreplace != NULL &&
- !test_bit(Faulty, &mreplace->flags))
- need_replace = 1;
+ if (mrdev && (test_bit(Faulty, &mrdev->flags) ||
+ test_bit(In_sync, &mrdev->flags)))
+ mrdev = NULL;
+ if (mreplace && test_bit(Faulty, &mreplace->flags))
+ mreplace = NULL;
- if (!need_recover && !need_replace) {
+ if (!mrdev && !mreplace) {
rcu_read_unlock();
continue;
}
@@ -3470,8 +3475,6 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
rcu_read_unlock();
continue;
}
- if (mreplace && test_bit(Faulty, &mreplace->flags))
- mreplace = NULL;
/* Unless we are doing a full sync, or a replacement
* we only need to recover the block if it is set in
* the bitmap
@@ -3490,7 +3493,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
rcu_read_unlock();
continue;
}
- atomic_inc(&mrdev->nr_pending);
+ if (mrdev)
+ atomic_inc(&mrdev->nr_pending);
if (mreplace)
atomic_inc(&mreplace->nr_pending);
rcu_read_unlock();
@@ -3577,7 +3581,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
r10_bio->devs[1].devnum = i;
r10_bio->devs[1].addr = to_addr;
- if (need_recover) {
+ if (mrdev) {
bio = r10_bio->devs[1].bio;
bio->bi_next = biolist;
biolist = bio;
@@ -3594,11 +3598,11 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
bio = r10_bio->devs[1].repl_bio;
if (bio)
bio->bi_end_io = NULL;
- /* Note: if need_replace, then bio
+ /* Note: if mreplace is not NULL, then bio
* cannot be NULL as r10buf_pool_alloc will
* have allocated it.
*/
- if (!need_replace)
+ if (!mreplace)
break;
bio->bi_next = biolist;
biolist = bio;
@@ -3622,7 +3626,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
for (k = 0; k < conf->copies; k++)
if (r10_bio->devs[k].devnum == i)
break;
- if (!test_bit(In_sync,
+ if (mrdev && !test_bit(In_sync,
&mrdev->flags)
&& !rdev_set_badblocks(
mrdev,
@@ -3643,17 +3647,21 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
mdname(mddev));
mirror->recovery_disabled
= mddev->recovery_disabled;
+ } else {
+ error_disk = i;
}
put_buf(r10_bio);
if (rb2)
atomic_dec(&rb2->remaining);
r10_bio = rb2;
- rdev_dec_pending(mrdev, mddev);
+ if (mrdev)
+ rdev_dec_pending(mrdev, mddev);
if (mreplace)
rdev_dec_pending(mreplace, mddev);
break;
}
- rdev_dec_pending(mrdev, mddev);
+ if (mrdev)
+ rdev_dec_pending(mrdev, mddev);
if (mreplace)
rdev_dec_pending(mreplace, mddev);
if (r10_bio->devs[0].bio->bi_opf & MD_FAILFAST) {
@@ -3819,11 +3827,11 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
for (bio= biolist ; bio ; bio=bio->bi_next) {
struct resync_pages *rp = get_resync_pages(bio);
page = resync_fetch_page(rp, page_idx);
- /*
- * won't fail because the vec table is big enough
- * to hold all these pages
- */
- bio_add_page(bio, page, len, 0);
+ if (WARN_ON(!bio_add_page(bio, page, len, 0))) {
+ bio->bi_status = BLK_STS_RESOURCE;
+ bio_endio(bio);
+ goto giveup;
+ }
}
nr_sectors += len>>9;
sector_nr += len>>9;
@@ -4107,7 +4115,8 @@ static struct r10conf *setup_conf(struct mddev *mddev)
atomic_set(&conf->nr_pending, 0);
err = -ENOMEM;
- conf->thread = md_register_thread(raid10d, mddev, "raid10");
+ rcu_assign_pointer(conf->thread,
+ md_register_thread(raid10d, mddev, "raid10"));
if (!conf->thread)
goto out;
@@ -4152,8 +4161,8 @@ static int raid10_run(struct mddev *mddev)
if (!conf)
goto out;
- mddev->thread = conf->thread;
- conf->thread = NULL;
+ rcu_assign_pointer(mddev->thread, conf->thread);
+ rcu_assign_pointer(conf->thread, NULL);
if (mddev_is_clustered(conf->mddev)) {
int fc, fo;
@@ -4296,8 +4305,8 @@ static int raid10_run(struct mddev *mddev)
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
- mddev->sync_thread = md_register_thread(md_do_sync, mddev,
- "reshape");
+ rcu_assign_pointer(mddev->sync_thread,
+ md_register_thread(md_do_sync, mddev, "reshape"));
if (!mddev->sync_thread)
goto out_free_conf;
}
@@ -4698,8 +4707,8 @@ out:
set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
- mddev->sync_thread = md_register_thread(md_do_sync, mddev,
- "reshape");
+ rcu_assign_pointer(mddev->sync_thread,
+ md_register_thread(md_do_sync, mddev, "reshape"));
if (!mddev->sync_thread) {
ret = -EAGAIN;
goto abort;
@@ -4997,11 +5006,11 @@ read_more:
if (len > PAGE_SIZE)
len = PAGE_SIZE;
for (bio = blist; bio ; bio = bio->bi_next) {
- /*
- * won't fail because the vec table is big enough
- * to hold all these pages
- */
- bio_add_page(bio, page, len, 0);
+ if (WARN_ON(!bio_add_page(bio, page, len, 0))) {
+ bio->bi_status = BLK_STS_RESOURCE;
+ bio_endio(bio);
+ return sectors_done;
+ }
}
sector_nr += len >> 9;
nr_sectors += len >> 9;
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index 8c072ce0bc54..63e48b11b552 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -100,7 +100,7 @@ struct r10conf {
/* When taking over an array from a different personality, we store
* the new thread here until we fully activate the array.
*/
- struct md_thread *thread;
+ struct md_thread __rcu *thread;
/*
* Keep track of cluster resync window to send to other nodes.
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 46182b955aef..47ba7d9e81e1 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -120,7 +120,7 @@ struct r5l_log {
struct bio_set bs;
mempool_t meta_pool;
- struct md_thread *reclaim_thread;
+ struct md_thread __rcu *reclaim_thread;
unsigned long reclaim_target; /* number of space that need to be
* reclaimed. if it's 0, reclaim spaces
* used by io_units which are in
@@ -792,7 +792,7 @@ static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log)
io->current_bio = r5l_bio_alloc(log);
io->current_bio->bi_end_io = r5l_log_endio;
io->current_bio->bi_private = io;
- bio_add_page(io->current_bio, io->meta_page, PAGE_SIZE, 0);
+ __bio_add_page(io->current_bio, io->meta_page, PAGE_SIZE, 0);
r5_reserve_log_entry(log, io);
@@ -1576,17 +1576,18 @@ void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
void r5l_quiesce(struct r5l_log *log, int quiesce)
{
- struct mddev *mddev;
+ struct mddev *mddev = log->rdev->mddev;
+ struct md_thread *thread = rcu_dereference_protected(
+ log->reclaim_thread, lockdep_is_held(&mddev->reconfig_mutex));
if (quiesce) {
/* make sure r5l_write_super_and_discard_space exits */
- mddev = log->rdev->mddev;
wake_up(&mddev->sb_wait);
- kthread_park(log->reclaim_thread->tsk);
+ kthread_park(thread->tsk);
r5l_wake_reclaim(log, MaxSector);
r5l_do_reclaim(log);
} else
- kthread_unpark(log->reclaim_thread->tsk);
+ kthread_unpark(thread->tsk);
}
bool r5l_log_disk_error(struct r5conf *conf)
@@ -3063,6 +3064,7 @@ void r5c_update_on_rdev_error(struct mddev *mddev, struct md_rdev *rdev)
int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
{
struct r5l_log *log;
+ struct md_thread *thread;
int ret;
pr_debug("md/raid:%s: using device %pg as journal\n",
@@ -3121,11 +3123,13 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
spin_lock_init(&log->tree_lock);
INIT_RADIX_TREE(&log->big_stripe_tree, GFP_NOWAIT | __GFP_NOWARN);
- log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
- log->rdev->mddev, "reclaim");
- if (!log->reclaim_thread)
+ thread = md_register_thread(r5l_reclaim_thread, log->rdev->mddev,
+ "reclaim");
+ if (!thread)
goto reclaim_thread;
- log->reclaim_thread->timeout = R5C_RECLAIM_WAKEUP_INTERVAL;
+
+ thread->timeout = R5C_RECLAIM_WAKEUP_INTERVAL;
+ rcu_assign_pointer(log->reclaim_thread, thread);
init_waitqueue_head(&log->iounit_wait);
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index e495939bb3e0..eaea57aee602 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -465,7 +465,7 @@ static void ppl_submit_iounit(struct ppl_io_unit *io)
bio->bi_end_io = ppl_log_endio;
bio->bi_iter.bi_sector = log->next_io_sector;
- bio_add_page(bio, io->header_page, PAGE_SIZE, 0);
+ __bio_add_page(bio, io->header_page, PAGE_SIZE, 0);
pr_debug("%s: log->current_io_sector: %llu\n", __func__,
(unsigned long long)log->next_io_sector);
@@ -496,7 +496,7 @@ static void ppl_submit_iounit(struct ppl_io_unit *io)
prev->bi_opf, GFP_NOIO,
&ppl_conf->bs);
bio->bi_iter.bi_sector = bio_end_sector(prev);
- bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0);
+ __bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0);
bio_chain(bio, prev);
ppl_submit_iounit_bio(io, prev);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 4739ed891e75..6615abf54d3f 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5516,7 +5516,7 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
sector = raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector, 0,
&dd_idx, NULL);
- end_sector = bio_end_sector(raid_bio);
+ end_sector = sector + bio_sectors(raid_bio);
rcu_read_lock();
if (r5c_big_stripe_cached(conf, sector))
@@ -5966,6 +5966,19 @@ out:
return ret;
}
+static bool reshape_inprogress(struct mddev *mddev)
+{
+ return test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
+ test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
+ !test_bit(MD_RECOVERY_DONE, &mddev->recovery) &&
+ !test_bit(MD_RECOVERY_INTR, &mddev->recovery);
+}
+
+static bool reshape_disabled(struct mddev *mddev)
+{
+ return is_md_suspended(mddev) || !md_is_rdwr(mddev);
+}
+
static enum stripe_result make_stripe_request(struct mddev *mddev,
struct r5conf *conf, struct stripe_request_ctx *ctx,
sector_t logical_sector, struct bio *bi)
@@ -5997,7 +6010,8 @@ static enum stripe_result make_stripe_request(struct mddev *mddev,
if (ahead_of_reshape(mddev, logical_sector,
conf->reshape_safe)) {
spin_unlock_irq(&conf->device_lock);
- return STRIPE_SCHEDULE_AND_RETRY;
+ ret = STRIPE_SCHEDULE_AND_RETRY;
+ goto out;
}
}
spin_unlock_irq(&conf->device_lock);
@@ -6076,6 +6090,15 @@ static enum stripe_result make_stripe_request(struct mddev *mddev,
out_release:
raid5_release_stripe(sh);
+out:
+ if (ret == STRIPE_SCHEDULE_AND_RETRY && !reshape_inprogress(mddev) &&
+ reshape_disabled(mddev)) {
+ bi->bi_status = BLK_STS_IOERR;
+ ret = STRIPE_FAIL;
+ pr_err("md/raid456:%s: io failed across reshape position while reshape can't make progress.\n",
+ mdname(mddev));
+ }
+
return ret;
}
@@ -7708,7 +7731,8 @@ static struct r5conf *setup_conf(struct mddev *mddev)
}
sprintf(pers_name, "raid%d", mddev->new_level);
- conf->thread = md_register_thread(raid5d, mddev, pers_name);
+ rcu_assign_pointer(conf->thread,
+ md_register_thread(raid5d, mddev, pers_name));
if (!conf->thread) {
pr_warn("md/raid:%s: couldn't allocate thread.\n",
mdname(mddev));
@@ -7931,8 +7955,8 @@ static int raid5_run(struct mddev *mddev)
}
conf->min_offset_diff = min_offset_diff;
- mddev->thread = conf->thread;
- conf->thread = NULL;
+ rcu_assign_pointer(mddev->thread, conf->thread);
+ rcu_assign_pointer(conf->thread, NULL);
mddev->private = conf;
for (i = 0; i < conf->raid_disks && conf->previous_raid_disks;
@@ -8029,8 +8053,8 @@ static int raid5_run(struct mddev *mddev)
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
- mddev->sync_thread = md_register_thread(md_do_sync, mddev,
- "reshape");
+ rcu_assign_pointer(mddev->sync_thread,
+ md_register_thread(md_do_sync, mddev, "reshape"));
if (!mddev->sync_thread)
goto abort;
}
@@ -8377,6 +8401,7 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
p = conf->disks + disk;
tmp = rdev_mdlock_deref(mddev, p->rdev);
if (test_bit(WantReplacement, &tmp->flags) &&
+ mddev->reshape_position == MaxSector &&
p->replacement == NULL) {
clear_bit(In_sync, &rdev->flags);
set_bit(Replacement, &rdev->flags);
@@ -8500,6 +8525,7 @@ static int raid5_start_reshape(struct mddev *mddev)
struct r5conf *conf = mddev->private;
struct md_rdev *rdev;
int spares = 0;
+ int i;
unsigned long flags;
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
@@ -8511,6 +8537,13 @@ static int raid5_start_reshape(struct mddev *mddev)
if (has_failed(conf))
return -EINVAL;
+ /* raid5 can't handle concurrent reshape and recovery */
+ if (mddev->recovery_cp < MaxSector)
+ return -EBUSY;
+ for (i = 0; i < conf->raid_disks; i++)
+ if (rdev_mdlock_deref(mddev, conf->disks[i].replacement))
+ return -EBUSY;
+
rdev_for_each(rdev, mddev) {
if (!test_bit(In_sync, &rdev->flags)
&& !test_bit(Faulty, &rdev->flags))
@@ -8607,8 +8640,8 @@ static int raid5_start_reshape(struct mddev *mddev)
clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
- mddev->sync_thread = md_register_thread(md_do_sync, mddev,
- "reshape");
+ rcu_assign_pointer(mddev->sync_thread,
+ md_register_thread(md_do_sync, mddev, "reshape"));
if (!mddev->sync_thread) {
mddev->recovery = 0;
spin_lock_irq(&conf->device_lock);
@@ -9043,6 +9076,22 @@ static int raid5_start(struct mddev *mddev)
return r5l_start(conf->log);
}
+static void raid5_prepare_suspend(struct mddev *mddev)
+{
+ struct r5conf *conf = mddev->private;
+
+ wait_event(mddev->sb_wait, !reshape_inprogress(mddev) ||
+ percpu_ref_is_zero(&mddev->active_io));
+ if (percpu_ref_is_zero(&mddev->active_io))
+ return;
+
+ /*
+ * Reshape is not in progress and the array is suspended; io
+ * that is waiting for the reshape can never complete.
+ */
+ wake_up(&conf->wait_for_overlap);
+}
+
static struct md_personality raid6_personality =
{
.name = "raid6",
@@ -9063,6 +9112,7 @@ static struct md_personality raid6_personality =
.check_reshape = raid6_check_reshape,
.start_reshape = raid5_start_reshape,
.finish_reshape = raid5_finish_reshape,
+ .prepare_suspend = raid5_prepare_suspend,
.quiesce = raid5_quiesce,
.takeover = raid6_takeover,
.change_consistency_policy = raid5_change_consistency_policy,
@@ -9087,6 +9137,7 @@ static struct md_personality raid5_personality =
.check_reshape = raid5_check_reshape,
.start_reshape = raid5_start_reshape,
.finish_reshape = raid5_finish_reshape,
+ .prepare_suspend = raid5_prepare_suspend,
.quiesce = raid5_quiesce,
.takeover = raid5_takeover,
.change_consistency_policy = raid5_change_consistency_policy,
@@ -9112,6 +9163,7 @@ static struct md_personality raid4_personality =
.check_reshape = raid5_check_reshape,
.start_reshape = raid5_start_reshape,
.finish_reshape = raid5_finish_reshape,
+ .prepare_suspend = raid5_prepare_suspend,
.quiesce = raid5_quiesce,
.takeover = raid4_takeover,
.change_consistency_policy = raid5_change_consistency_policy,
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index e873938a6125..f19707189a7b 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -679,7 +679,7 @@ struct r5conf {
/* When taking over an array from a different personality, we store
* the new thread here until we fully activate the array.
*/
- struct md_thread *thread;
+ struct md_thread __rcu *thread;
struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS];
struct r5worker_group *worker_groups;
int group_cnt;
diff --git a/drivers/media/cec/core/cec-adap.c b/drivers/media/cec/core/cec-adap.c
index 769ea6b2e1d0..241b1621b197 100644
--- a/drivers/media/cec/core/cec-adap.c
+++ b/drivers/media/cec/core/cec-adap.c
@@ -1091,7 +1091,8 @@ void cec_received_msg_ts(struct cec_adapter *adap,
mutex_lock(&adap->lock);
dprintk(2, "%s: %*ph\n", __func__, msg->len, msg->msg);
- adap->last_initiator = 0xff;
+ if (!adap->transmit_in_progress)
+ adap->last_initiator = 0xff;
/* Check if this message was for us (directed or broadcast). */
if (!cec_msg_is_broadcast(msg)) {
@@ -1585,7 +1586,7 @@ static void cec_claim_log_addrs(struct cec_adapter *adap, bool block)
*
* This function is called with adap->lock held.
*/
-static int cec_adap_enable(struct cec_adapter *adap)
+int cec_adap_enable(struct cec_adapter *adap)
{
bool enable;
int ret = 0;
@@ -1595,6 +1596,9 @@ static int cec_adap_enable(struct cec_adapter *adap)
if (adap->needs_hpd)
enable = enable && adap->phys_addr != CEC_PHYS_ADDR_INVALID;
+ if (adap->devnode.unregistered)
+ enable = false;
+
if (enable == adap->is_enabled)
return 0;
diff --git a/drivers/media/cec/core/cec-core.c b/drivers/media/cec/core/cec-core.c
index af358e901b5f..7e153c5cad04 100644
--- a/drivers/media/cec/core/cec-core.c
+++ b/drivers/media/cec/core/cec-core.c
@@ -191,6 +191,8 @@ static void cec_devnode_unregister(struct cec_adapter *adap)
mutex_lock(&adap->lock);
__cec_s_phys_addr(adap, CEC_PHYS_ADDR_INVALID, false);
__cec_s_log_addrs(adap, NULL, false);
+ // Disable the adapter (since adap->devnode.unregistered is true)
+ cec_adap_enable(adap);
mutex_unlock(&adap->lock);
cdev_device_del(&devnode->cdev, &devnode->dev);
diff --git a/drivers/media/cec/core/cec-priv.h b/drivers/media/cec/core/cec-priv.h
index b78df931aa74..ed1f8c67626b 100644
--- a/drivers/media/cec/core/cec-priv.h
+++ b/drivers/media/cec/core/cec-priv.h
@@ -47,6 +47,7 @@ int cec_monitor_pin_cnt_inc(struct cec_adapter *adap);
void cec_monitor_pin_cnt_dec(struct cec_adapter *adap);
int cec_adap_status(struct seq_file *file, void *priv);
int cec_thread_func(void *_adap);
+int cec_adap_enable(struct cec_adapter *adap);
void __cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block);
int __cec_s_log_addrs(struct cec_adapter *adap,
struct cec_log_addrs *log_addrs, bool block);
diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c
index c2d2792227f8..baf64540dc00 100644
--- a/drivers/media/dvb-core/dvb_ca_en50221.c
+++ b/drivers/media/dvb-core/dvb_ca_en50221.c
@@ -151,6 +151,12 @@ struct dvb_ca_private {
/* mutex serializing ioctls */
struct mutex ioctl_mutex;
+
+ /* A mutex used when a device is disconnected */
+ struct mutex remove_mutex;
+
+ /* Whether the device is disconnected */
+ int exit;
};
static void dvb_ca_private_free(struct dvb_ca_private *ca)
@@ -187,7 +193,7 @@ static void dvb_ca_en50221_thread_wakeup(struct dvb_ca_private *ca);
static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot,
u8 *ebuf, int ecount);
static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot,
- u8 *ebuf, int ecount);
+ u8 *ebuf, int ecount, int size_write_flag);
/**
* findstr - Safely find needle in haystack.
@@ -370,7 +376,7 @@ static int dvb_ca_en50221_link_init(struct dvb_ca_private *ca, int slot)
ret = dvb_ca_en50221_wait_if_status(ca, slot, STATUSREG_FR, HZ / 10);
if (ret)
return ret;
- ret = dvb_ca_en50221_write_data(ca, slot, buf, 2);
+ ret = dvb_ca_en50221_write_data(ca, slot, buf, 2, CMDREG_SW);
if (ret != 2)
return -EIO;
ret = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND, IRQEN);
@@ -778,11 +784,13 @@ exit:
* @buf: The data in this buffer is treated as a complete link-level packet to
* be written.
* @bytes_write: Size of ebuf.
+ * @size_write_flag: A flag for the Command Register which says whether the
+ * link size information will be written or not.
*
* return: Number of bytes written, or < 0 on error.
*/
static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot,
- u8 *buf, int bytes_write)
+ u8 *buf, int bytes_write, int size_write_flag)
{
struct dvb_ca_slot *sl = &ca->slot_info[slot];
int status;
@@ -817,7 +825,7 @@ static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot,
/* OK, set HC bit */
status = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND,
- IRQEN | CMDREG_HC);
+ IRQEN | CMDREG_HC | size_write_flag);
if (status)
goto exit;
@@ -1508,7 +1516,7 @@ static ssize_t dvb_ca_en50221_io_write(struct file *file,
mutex_lock(&sl->slot_lock);
status = dvb_ca_en50221_write_data(ca, slot, fragbuf,
- fraglen + 2);
+ fraglen + 2, 0);
mutex_unlock(&sl->slot_lock);
if (status == (fraglen + 2)) {
written = 1;
@@ -1709,12 +1717,22 @@ static int dvb_ca_en50221_io_open(struct inode *inode, struct file *file)
dprintk("%s\n", __func__);
- if (!try_module_get(ca->pub->owner))
+ mutex_lock(&ca->remove_mutex);
+
+ if (ca->exit) {
+ mutex_unlock(&ca->remove_mutex);
+ return -ENODEV;
+ }
+
+ if (!try_module_get(ca->pub->owner)) {
+ mutex_unlock(&ca->remove_mutex);
return -EIO;
+ }
err = dvb_generic_open(inode, file);
if (err < 0) {
module_put(ca->pub->owner);
+ mutex_unlock(&ca->remove_mutex);
return err;
}
@@ -1739,6 +1757,7 @@ static int dvb_ca_en50221_io_open(struct inode *inode, struct file *file)
dvb_ca_private_get(ca);
+ mutex_unlock(&ca->remove_mutex);
return 0;
}
@@ -1758,6 +1777,8 @@ static int dvb_ca_en50221_io_release(struct inode *inode, struct file *file)
dprintk("%s\n", __func__);
+ mutex_lock(&ca->remove_mutex);
+
/* mark the CA device as closed */
ca->open = 0;
dvb_ca_en50221_thread_update_delay(ca);
@@ -1768,6 +1789,13 @@ static int dvb_ca_en50221_io_release(struct inode *inode, struct file *file)
dvb_ca_private_put(ca);
+ if (dvbdev->users == 1 && ca->exit == 1) {
+ mutex_unlock(&ca->remove_mutex);
+ wake_up(&dvbdev->wait_queue);
+ } else {
+ mutex_unlock(&ca->remove_mutex);
+ }
+
return err;
}
@@ -1891,6 +1919,7 @@ int dvb_ca_en50221_init(struct dvb_adapter *dvb_adapter,
}
mutex_init(&ca->ioctl_mutex);
+ mutex_init(&ca->remove_mutex);
if (signal_pending(current)) {
ret = -EINTR;
@@ -1933,6 +1962,14 @@ void dvb_ca_en50221_release(struct dvb_ca_en50221 *pubca)
dprintk("%s\n", __func__);
+ mutex_lock(&ca->remove_mutex);
+ ca->exit = 1;
+ mutex_unlock(&ca->remove_mutex);
+
+ if (ca->dvbdev->users < 1)
+ wait_event(ca->dvbdev->wait_queue,
+ ca->dvbdev->users == 1);
+
/* shutdown the thread if there was one */
kthread_stop(ca->thread);
diff --git a/drivers/media/dvb-core/dvb_demux.c b/drivers/media/dvb-core/dvb_demux.c
index 398c86279b5b..7c4d86bfdd6c 100644
--- a/drivers/media/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb-core/dvb_demux.c
@@ -115,12 +115,12 @@ static inline int dvb_dmx_swfilter_payload(struct dvb_demux_feed *feed,
cc = buf[3] & 0x0f;
ccok = ((feed->cc + 1) & 0x0f) == cc;
- feed->cc = cc;
if (!ccok) {
set_buf_flags(feed, DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED);
dprintk_sect_loss("missed packet: %d instead of %d!\n",
cc, (feed->cc + 1) & 0x0f);
}
+ feed->cc = cc;
if (buf[1] & 0x40) // PUSI ?
feed->peslen = 0xfffa;
@@ -300,7 +300,6 @@ static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed,
cc = buf[3] & 0x0f;
ccok = ((feed->cc + 1) & 0x0f) == cc;
- feed->cc = cc;
if (buf[3] & 0x20) {
/* adaption field present, check for discontinuity_indicator */
@@ -336,6 +335,7 @@ static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed,
feed->pusi_seen = false;
dvb_dmx_swfilter_section_new(feed);
}
+ feed->cc = cc;
if (buf[1] & 0x40) {
/* PUSI=1 (is set), section boundary is here */
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index cc0a789f09ae..9293b058ab99 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -293,14 +293,22 @@ static int dvb_frontend_get_event(struct dvb_frontend *fe,
}
if (events->eventw == events->eventr) {
- int ret;
+ struct wait_queue_entry wait;
+ int ret = 0;
if (flags & O_NONBLOCK)
return -EWOULDBLOCK;
- ret = wait_event_interruptible(events->wait_queue,
- dvb_frontend_test_event(fepriv, events));
-
+ init_waitqueue_entry(&wait, current);
+ add_wait_queue(&events->wait_queue, &wait);
+ while (!dvb_frontend_test_event(fepriv, events)) {
+ wait_woken(&wait, TASK_INTERRUPTIBLE, 0);
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+ }
+ remove_wait_queue(&events->wait_queue, &wait);
if (ret < 0)
return ret;
}
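The open-coded loop replaces wait_event_interruptible() with an explicit wait_queue_entry plus wait_woken(), whose WQ_FLAG_WOKEN handshake closes the window between the condition check and the sleep. In generic form (helper name hypothetical; the hunk above passes a 0 timeout where this sketch uses the more common MAX_SCHEDULE_TIMEOUT):

    #include <linux/sched/signal.h>
    #include <linux/wait.h>

    static int wait_for_cond_interruptible(wait_queue_head_t *wq,
                                           bool (*cond)(void *), void *arg)
    {
            struct wait_queue_entry wait;
            int ret = 0;

            init_waitqueue_entry(&wait, current);
            add_wait_queue(wq, &wait);
            while (!cond(arg)) {
                    /* sleeps until woken or signalled; a wakeup that races with
                     * the condition check sets WQ_FLAG_WOKEN and is not lost */
                    wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
                    if (signal_pending(current)) {
                            ret = -ERESTARTSYS;
                            break;
                    }
            }
            remove_wait_queue(wq, &wait);
            return ret;
    }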
diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c
index 8a2febf33ce2..8bb8dd34c223 100644
--- a/drivers/media/dvb-core/dvb_net.c
+++ b/drivers/media/dvb-core/dvb_net.c
@@ -1564,15 +1564,43 @@ static long dvb_net_ioctl(struct file *file,
return dvb_usercopy(file, cmd, arg, dvb_net_do_ioctl);
}
+static int locked_dvb_net_open(struct inode *inode, struct file *file)
+{
+ struct dvb_device *dvbdev = file->private_data;
+ struct dvb_net *dvbnet = dvbdev->priv;
+ int ret;
+
+ if (mutex_lock_interruptible(&dvbnet->remove_mutex))
+ return -ERESTARTSYS;
+
+ if (dvbnet->exit) {
+ mutex_unlock(&dvbnet->remove_mutex);
+ return -ENODEV;
+ }
+
+ ret = dvb_generic_open(inode, file);
+
+ mutex_unlock(&dvbnet->remove_mutex);
+
+ return ret;
+}
+
static int dvb_net_close(struct inode *inode, struct file *file)
{
struct dvb_device *dvbdev = file->private_data;
struct dvb_net *dvbnet = dvbdev->priv;
+ mutex_lock(&dvbnet->remove_mutex);
+
dvb_generic_release(inode, file);
- if(dvbdev->users == 1 && dvbnet->exit == 1)
+ if (dvbdev->users == 1 && dvbnet->exit == 1) {
+ mutex_unlock(&dvbnet->remove_mutex);
wake_up(&dvbdev->wait_queue);
+ } else {
+ mutex_unlock(&dvbnet->remove_mutex);
+ }
+
return 0;
}
@@ -1580,7 +1608,7 @@ static int dvb_net_close(struct inode *inode, struct file *file)
static const struct file_operations dvb_net_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = dvb_net_ioctl,
- .open = dvb_generic_open,
+ .open = locked_dvb_net_open,
.release = dvb_net_close,
.llseek = noop_llseek,
};
@@ -1599,10 +1627,13 @@ void dvb_net_release (struct dvb_net *dvbnet)
{
int i;
+ mutex_lock(&dvbnet->remove_mutex);
dvbnet->exit = 1;
+ mutex_unlock(&dvbnet->remove_mutex);
+
if (dvbnet->dvbdev->users < 1)
wait_event(dvbnet->dvbdev->wait_queue,
- dvbnet->dvbdev->users==1);
+ dvbnet->dvbdev->users == 1);
dvb_unregister_device(dvbnet->dvbdev);
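dvb_net and dvb_ca (above) now follow the same teardown discipline: open() and release() serialize against disconnect on remove_mutex, the exit flag rejects new opens, and the unplug path blocks until the last user is gone before unregistering. Condensed into one place, with a hypothetical my_dev wrapper standing in for the fields both drivers use:

    /* Hypothetical device with the same teardown fields as dvb_net/dvb_ca */
    struct my_dev {
            struct dvb_device *dvbdev;
            struct mutex remove_mutex;      /* serializes open/release vs. unplug */
            int exit;                       /* set once the hardware is gone */
    };

    static int my_open(struct my_dev *dev, struct inode *inode, struct file *file)
    {
            int ret;

            if (mutex_lock_interruptible(&dev->remove_mutex))
                    return -ERESTARTSYS;
            if (dev->exit) {                /* refuse new users during teardown */
                    mutex_unlock(&dev->remove_mutex);
                    return -ENODEV;
            }
            ret = dvb_generic_open(inode, file);
            mutex_unlock(&dev->remove_mutex);
            return ret;
    }

    static int my_release(struct my_dev *dev, struct inode *inode, struct file *file)
    {
            mutex_lock(&dev->remove_mutex);
            dvb_generic_release(inode, file);
            if (dev->dvbdev->users == 1 && dev->exit == 1) {
                    mutex_unlock(&dev->remove_mutex);
                    wake_up(&dev->dvbdev->wait_queue); /* last user: unblock unplug */
            } else {
                    mutex_unlock(&dev->remove_mutex);
            }
            return 0;
    }

    static void my_unplug(struct my_dev *dev)
    {
            mutex_lock(&dev->remove_mutex);
            dev->exit = 1;                  /* no opens can sneak in after this */
            mutex_unlock(&dev->remove_mutex);
            if (dev->dvbdev->users < 1)
                    wait_event(dev->dvbdev->wait_queue, dev->dvbdev->users == 1);
            dvb_unregister_device(dev->dvbdev);
    }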
@@ -1621,6 +1652,7 @@ int dvb_net_init (struct dvb_adapter *adap, struct dvb_net *dvbnet,
int i;
mutex_init(&dvbnet->ioctl_mutex);
+ mutex_init(&dvbnet->remove_mutex);
dvbnet->demux = dmx;
for (i=0; i<DVB_NET_DEVICES_MAX; i++)
diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
index e9b3ce09e534..a4b05e366ccc 100644
--- a/drivers/media/dvb-core/dvbdev.c
+++ b/drivers/media/dvb-core/dvbdev.c
@@ -27,6 +27,7 @@
#include <media/tuner.h>
static DEFINE_MUTEX(dvbdev_mutex);
+static LIST_HEAD(dvbdevfops_list);
static int dvbdev_debug;
module_param(dvbdev_debug, int, 0644);
@@ -453,14 +454,15 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
enum dvb_device_type type, int demux_sink_pads)
{
struct dvb_device *dvbdev;
- struct file_operations *dvbdevfops;
+ struct file_operations *dvbdevfops = NULL;
+ struct dvbdevfops_node *node = NULL, *new_node = NULL;
struct device *clsdev;
int minor;
int id, ret;
mutex_lock(&dvbdev_register_lock);
- if ((id = dvbdev_get_free_id (adap, type)) < 0){
+ if ((id = dvbdev_get_free_id (adap, type)) < 0) {
mutex_unlock(&dvbdev_register_lock);
*pdvbdev = NULL;
pr_err("%s: couldn't find free device id\n", __func__);
@@ -468,18 +470,45 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
}
*pdvbdev = dvbdev = kzalloc(sizeof(*dvbdev), GFP_KERNEL);
-
if (!dvbdev){
mutex_unlock(&dvbdev_register_lock);
return -ENOMEM;
}
- dvbdevfops = kmemdup(template->fops, sizeof(*dvbdevfops), GFP_KERNEL);
+ /*
+ * When a device of the same type is probe()d more than once,
+ * the first allocated fops are used. This prevents memory leaks
+ * that can occur when the same device is probe()d repeatedly.
+ */
+ list_for_each_entry(node, &dvbdevfops_list, list_head) {
+ if (node->fops->owner == adap->module &&
+ node->type == type &&
+ node->template == template) {
+ dvbdevfops = node->fops;
+ break;
+ }
+ }
- if (!dvbdevfops){
- kfree (dvbdev);
- mutex_unlock(&dvbdev_register_lock);
- return -ENOMEM;
+ if (dvbdevfops == NULL) {
+ dvbdevfops = kmemdup(template->fops, sizeof(*dvbdevfops), GFP_KERNEL);
+ if (!dvbdevfops) {
+ kfree(dvbdev);
+ mutex_unlock(&dvbdev_register_lock);
+ return -ENOMEM;
+ }
+
+ new_node = kzalloc(sizeof(struct dvbdevfops_node), GFP_KERNEL);
+ if (!new_node) {
+ kfree(dvbdevfops);
+ kfree(dvbdev);
+ mutex_unlock(&dvbdev_register_lock);
+ return -ENOMEM;
+ }
+
+ new_node->fops = dvbdevfops;
+ new_node->type = type;
+ new_node->template = template;
+ list_add_tail (&new_node->list_head, &dvbdevfops_list);
}
memcpy(dvbdev, template, sizeof(struct dvb_device));
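The lookup-or-allocate step keys cached fops by (owner, type, template) and hands ownership to the global dvbdevfops_list, which exit_dvbdev() drains, so repeated probe() calls for the same device type reuse one allocation instead of leaking one per probe. Folded into a hypothetical helper (not how the patch structures it, but the same logic; caller holds dvbdev_register_lock):

    static struct file_operations *get_cached_fops(struct dvb_adapter *adap,
                                                   enum dvb_device_type type,
                                                   const struct dvb_device *template)
    {
            struct dvbdevfops_node *node;
            struct file_operations *fops;

            list_for_each_entry(node, &dvbdevfops_list, list_head)
                    if (node->fops->owner == adap->module &&
                        node->type == type && node->template == template)
                            return node->fops;      /* reuse: no second allocation */

            fops = kmemdup(template->fops, sizeof(*fops), GFP_KERNEL);
            if (!fops)
                    return NULL;
            node = kzalloc(sizeof(*node), GFP_KERNEL);
            if (!node) {
                    kfree(fops);
                    return NULL;
            }
            node->fops = fops;
            node->type = type;
            node->template = template;
            list_add_tail(&node->list_head, &dvbdevfops_list);
            return fops;
    }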
@@ -490,20 +519,20 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
dvbdev->priv = priv;
dvbdev->fops = dvbdevfops;
init_waitqueue_head (&dvbdev->wait_queue);
-
dvbdevfops->owner = adap->module;
-
list_add_tail (&dvbdev->list_head, &adap->device_list);
-
down_write(&minor_rwsem);
#ifdef CONFIG_DVB_DYNAMIC_MINORS
for (minor = 0; minor < MAX_DVB_MINORS; minor++)
if (dvb_minors[minor] == NULL)
break;
-
if (minor == MAX_DVB_MINORS) {
+ if (new_node) {
+ list_del (&new_node->list_head);
+ kfree(dvbdevfops);
+ kfree(new_node);
+ }
list_del (&dvbdev->list_head);
- kfree(dvbdevfops);
kfree(dvbdev);
up_write(&minor_rwsem);
mutex_unlock(&dvbdev_register_lock);
@@ -512,41 +541,47 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
#else
minor = nums2minor(adap->num, type, id);
#endif
-
dvbdev->minor = minor;
dvb_minors[minor] = dvb_device_get(dvbdev);
up_write(&minor_rwsem);
-
ret = dvb_register_media_device(dvbdev, type, minor, demux_sink_pads);
if (ret) {
pr_err("%s: dvb_register_media_device failed to create the mediagraph\n",
__func__);
-
+ if (new_node) {
+ list_del (&new_node->list_head);
+ kfree(dvbdevfops);
+ kfree(new_node);
+ }
dvb_media_device_free(dvbdev);
list_del (&dvbdev->list_head);
- kfree(dvbdevfops);
kfree(dvbdev);
mutex_unlock(&dvbdev_register_lock);
return ret;
}
- mutex_unlock(&dvbdev_register_lock);
-
clsdev = device_create(dvb_class, adap->device,
MKDEV(DVB_MAJOR, minor),
dvbdev, "dvb%d.%s%d", adap->num, dnames[type], id);
if (IS_ERR(clsdev)) {
pr_err("%s: failed to create device dvb%d.%s%d (%ld)\n",
__func__, adap->num, dnames[type], id, PTR_ERR(clsdev));
+ if (new_node) {
+ list_del (&new_node->list_head);
+ kfree(dvbdevfops);
+ kfree(new_node);
+ }
dvb_media_device_free(dvbdev);
list_del (&dvbdev->list_head);
- kfree(dvbdevfops);
kfree(dvbdev);
+ mutex_unlock(&dvbdev_register_lock);
return PTR_ERR(clsdev);
}
+
dprintk("DVB: register adapter%d/%s%d @ minor: %i (0x%02x)\n",
adap->num, dnames[type], id, minor, minor);
+ mutex_unlock(&dvbdev_register_lock);
return 0;
}
EXPORT_SYMBOL(dvb_register_device);
@@ -575,7 +610,6 @@ static void dvb_free_device(struct kref *ref)
{
struct dvb_device *dvbdev = container_of(ref, struct dvb_device, ref);
- kfree (dvbdev->fops);
kfree (dvbdev);
}
@@ -1081,9 +1115,17 @@ error:
static void __exit exit_dvbdev(void)
{
+ struct dvbdevfops_node *node, *next;
+
class_destroy(dvb_class);
cdev_del(&dvb_device_cdev);
unregister_chrdev_region(MKDEV(DVB_MAJOR, 0), MAX_DVB_MINORS);
+
+ list_for_each_entry_safe(node, next, &dvbdevfops_list, list_head) {
+ list_del (&node->list_head);
+ kfree(node->fops);
+ kfree(node);
+ }
}
subsys_initcall(init_dvbdev);
diff --git a/drivers/media/dvb-frontends/mn88443x.c b/drivers/media/dvb-frontends/mn88443x.c
index 1f1753f2ab1a..0782f8377eb2 100644
--- a/drivers/media/dvb-frontends/mn88443x.c
+++ b/drivers/media/dvb-frontends/mn88443x.c
@@ -798,7 +798,7 @@ MODULE_DEVICE_TABLE(i2c, mn88443x_i2c_id);
static struct i2c_driver mn88443x_driver = {
.driver = {
.name = "mn88443x",
- .of_match_table = of_match_ptr(mn88443x_of_match),
+ .of_match_table = mn88443x_of_match,
},
.probe_new = mn88443x_probe,
.remove = mn88443x_remove,
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
index 8287851b5ffd..d85bfbb77a25 100644
--- a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
@@ -697,7 +697,7 @@ static void netup_unidvb_dma_fini(struct netup_unidvb_dev *ndev, int num)
netup_unidvb_dma_enable(dma, 0);
msleep(50);
cancel_work_sync(&dma->work);
- del_timer(&dma->timeout);
+ del_timer_sync(&dma->timeout);
}
static int netup_unidvb_dma_setup(struct netup_unidvb_dev *ndev)
@@ -887,12 +887,7 @@ static int netup_unidvb_initdev(struct pci_dev *pci_dev,
ndev->lmmio0, (u32)pci_resource_len(pci_dev, 0),
ndev->lmmio1, (u32)pci_resource_len(pci_dev, 1),
pci_dev->irq);
- if (request_irq(pci_dev->irq, netup_unidvb_isr, IRQF_SHARED,
- "netup_unidvb", pci_dev) < 0) {
- dev_err(&pci_dev->dev,
- "%s(): can't get IRQ %d\n", __func__, pci_dev->irq);
- goto irq_request_err;
- }
+
ndev->dma_size = 2 * 188 *
NETUP_DMA_BLOCKS_COUNT * NETUP_DMA_PACKETS_COUNT;
ndev->dma_virt = dma_alloc_coherent(&pci_dev->dev,
@@ -933,6 +928,14 @@ static int netup_unidvb_initdev(struct pci_dev *pci_dev,
dev_err(&pci_dev->dev, "netup_unidvb: DMA setup failed\n");
goto dma_setup_err;
}
+
+ if (request_irq(pci_dev->irq, netup_unidvb_isr, IRQF_SHARED,
+ "netup_unidvb", pci_dev) < 0) {
+ dev_err(&pci_dev->dev,
+ "%s(): can't get IRQ %d\n", __func__, pci_dev->irq);
+ goto dma_setup_err;
+ }
+
dev_info(&pci_dev->dev,
"netup_unidvb: device has been initialized\n");
return 0;
@@ -951,8 +954,6 @@ spi_setup_err:
dma_free_coherent(&pci_dev->dev, ndev->dma_size,
ndev->dma_virt, ndev->dma_phys);
dma_alloc_err:
- free_irq(pci_dev->irq, pci_dev);
-irq_request_err:
iounmap(ndev->lmmio1);
pci_bar1_error:
iounmap(ndev->lmmio0);
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateful.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateful.c
index 29991551cf61..0fbd030026c7 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateful.c
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateful.c
@@ -584,6 +584,9 @@ static void mtk_init_vdec_params(struct mtk_vcodec_ctx *ctx)
if (!(ctx->dev->dec_capability & VCODEC_CAPABILITY_4K_DISABLED)) {
for (i = 0; i < num_supported_formats; i++) {
+ if (mtk_video_formats[i].type != MTK_FMT_DEC)
+ continue;
+
mtk_video_formats[i].frmsize.max_width =
VCODEC_DEC_4K_CODED_WIDTH;
mtk_video_formats[i].frmsize.max_height =
diff --git a/drivers/media/platform/qcom/camss/camss-video.c b/drivers/media/platform/qcom/camss/camss-video.c
index 898f32177b12..8640db306026 100644
--- a/drivers/media/platform/qcom/camss/camss-video.c
+++ b/drivers/media/platform/qcom/camss/camss-video.c
@@ -353,7 +353,6 @@ static int video_get_subdev_format(struct camss_video *video,
if (subdev == NULL)
return -EPIPE;
- memset(&fmt, 0, sizeof(fmt));
fmt.pad = pad;
ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
diff --git a/drivers/media/platform/verisilicon/hantro_v4l2.c b/drivers/media/platform/verisilicon/hantro_v4l2.c
index 835518534e3b..61cfaaf4e927 100644
--- a/drivers/media/platform/verisilicon/hantro_v4l2.c
+++ b/drivers/media/platform/verisilicon/hantro_v4l2.c
@@ -397,10 +397,12 @@ hantro_reset_raw_fmt(struct hantro_ctx *ctx, int bit_depth)
if (!raw_vpu_fmt)
return -EINVAL;
- if (ctx->is_encoder)
+ if (ctx->is_encoder) {
encoded_fmt = &ctx->dst_fmt;
- else
+ ctx->vpu_src_fmt = raw_vpu_fmt;
+ } else {
encoded_fmt = &ctx->src_fmt;
+ }
hantro_reset_fmt(&raw_fmt, raw_vpu_fmt);
raw_fmt.width = encoded_fmt->width;
diff --git a/drivers/media/usb/dvb-usb-v2/ce6230.c b/drivers/media/usb/dvb-usb-v2/ce6230.c
index 44540de1a206..d3b5cb4a24da 100644
--- a/drivers/media/usb/dvb-usb-v2/ce6230.c
+++ b/drivers/media/usb/dvb-usb-v2/ce6230.c
@@ -101,6 +101,10 @@ static int ce6230_i2c_master_xfer(struct i2c_adapter *adap,
if (num > i + 1 && (msg[i+1].flags & I2C_M_RD)) {
if (msg[i].addr ==
ce6230_zl10353_config.demod_address) {
+ if (msg[i].len < 1) {
+ i = -EOPNOTSUPP;
+ break;
+ }
req.cmd = DEMOD_READ;
req.value = msg[i].addr >> 1;
req.index = msg[i].buf[0];
@@ -117,6 +121,10 @@ static int ce6230_i2c_master_xfer(struct i2c_adapter *adap,
} else {
if (msg[i].addr ==
ce6230_zl10353_config.demod_address) {
+ if (msg[i].len < 1) {
+ i = -EOPNOTSUPP;
+ break;
+ }
req.cmd = DEMOD_WRITE;
req.value = msg[i].addr >> 1;
req.index = msg[i].buf[0];
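This guard recurs in the ec168, rtl28xxu, az6027 and digitv hunks below: a zero-length i2c_msg must be rejected before the driver reads msg[i].buf[0]. A sketch of the pattern in isolation, with an illustrative callback name:

#include <linux/i2c.h>
#include <linux/errno.h>

/* Illustrative master_xfer body: validate each message length before
 * touching its buffer; userspace can submit zero-length messages via
 * I2C_RDWR.
 */
static int demo_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num)
{
	int i;
	u8 reg;

	for (i = 0; i < num; i++) {
		if (msg[i].len < 1)
			return -EOPNOTSUPP;	/* nothing to dereference */
		reg = msg[i].buf[0];		/* now provably in bounds */
		(void)reg;
	}
	return num;		/* i2c convention: messages completed */
}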
diff --git a/drivers/media/usb/dvb-usb-v2/ec168.c b/drivers/media/usb/dvb-usb-v2/ec168.c
index 7ed0ab9e429b..0e4773fc025c 100644
--- a/drivers/media/usb/dvb-usb-v2/ec168.c
+++ b/drivers/media/usb/dvb-usb-v2/ec168.c
@@ -115,6 +115,10 @@ static int ec168_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
while (i < num) {
if (num > i + 1 && (msg[i+1].flags & I2C_M_RD)) {
if (msg[i].addr == ec168_ec100_config.demod_address) {
+ if (msg[i].len < 1) {
+ i = -EOPNOTSUPP;
+ break;
+ }
req.cmd = READ_DEMOD;
req.value = 0;
req.index = 0xff00 + msg[i].buf[0]; /* reg */
@@ -131,6 +135,10 @@ static int ec168_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
}
} else {
if (msg[i].addr == ec168_ec100_config.demod_address) {
+ if (msg[i].len < 1) {
+ i = -EOPNOTSUPP;
+ break;
+ }
req.cmd = WRITE_DEMOD;
req.value = msg[i].buf[1]; /* val */
req.index = 0xff00 + msg[i].buf[0]; /* reg */
@@ -139,6 +147,10 @@ static int ec168_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
ret = ec168_ctrl_msg(d, &req);
i += 1;
} else {
+ if (msg[i].len < 1) {
+ i = -EOPNOTSUPP;
+ break;
+ }
req.cmd = WRITE_I2C;
req.value = msg[i].buf[0]; /* val */
req.index = 0x0100 + msg[i].addr; /* I2C addr */
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index 795a012d4020..f7884bb56fcc 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -176,6 +176,10 @@ static int rtl28xxu_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
ret = -EOPNOTSUPP;
goto err_mutex_unlock;
} else if (msg[0].addr == 0x10) {
+ if (msg[0].len < 1 || msg[1].len < 1) {
+ ret = -EOPNOTSUPP;
+ goto err_mutex_unlock;
+ }
/* method 1 - integrated demod */
if (msg[0].buf[0] == 0x00) {
/* return demod page from driver cache */
@@ -189,6 +193,10 @@ static int rtl28xxu_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
ret = rtl28xxu_ctrl_msg(d, &req);
}
} else if (msg[0].len < 2) {
+ if (msg[0].len < 1) {
+ ret = -EOPNOTSUPP;
+ goto err_mutex_unlock;
+ }
/* method 2 - old I2C */
req.value = (msg[0].buf[0] << 8) | (msg[0].addr << 1);
req.index = CMD_I2C_RD;
@@ -217,8 +225,16 @@ static int rtl28xxu_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
ret = -EOPNOTSUPP;
goto err_mutex_unlock;
} else if (msg[0].addr == 0x10) {
+ if (msg[0].len < 1) {
+ ret = -EOPNOTSUPP;
+ goto err_mutex_unlock;
+ }
/* method 1 - integrated demod */
if (msg[0].buf[0] == 0x00) {
+ if (msg[0].len < 2) {
+ ret = -EOPNOTSUPP;
+ goto err_mutex_unlock;
+ }
/* save demod page for later demod access */
dev->page = msg[0].buf[1];
ret = 0;
@@ -231,6 +247,10 @@ static int rtl28xxu_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
ret = rtl28xxu_ctrl_msg(d, &req);
}
} else if ((msg[0].len < 23) && (!dev->new_i2c_write)) {
+ if (msg[0].len < 1) {
+ ret = -EOPNOTSUPP;
+ goto err_mutex_unlock;
+ }
/* method 2 - old I2C */
req.value = (msg[0].buf[0] << 8) | (msg[0].addr << 1);
req.index = CMD_I2C_WR;
diff --git a/drivers/media/usb/dvb-usb/az6027.c b/drivers/media/usb/dvb-usb/az6027.c
index 7d78ee09be5e..a31c6f82f4e9 100644
--- a/drivers/media/usb/dvb-usb/az6027.c
+++ b/drivers/media/usb/dvb-usb/az6027.c
@@ -988,6 +988,10 @@ static int az6027_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int n
/* write/read request */
if (i + 1 < num && (msg[i + 1].flags & I2C_M_RD)) {
req = 0xB9;
+ if (msg[i].len < 1) {
+ i = -EOPNOTSUPP;
+ break;
+ }
index = (((msg[i].buf[0] << 8) & 0xff00) | (msg[i].buf[1] & 0x00ff));
value = msg[i].addr + (msg[i].len << 8);
length = msg[i + 1].len + 6;
@@ -1001,6 +1005,10 @@ static int az6027_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int n
/* demod 16bit addr */
req = 0xBD;
+ if (msg[i].len < 1) {
+ i = -EOPNOTSUPP;
+ break;
+ }
index = (((msg[i].buf[0] << 8) & 0xff00) | (msg[i].buf[1] & 0x00ff));
value = msg[i].addr + (2 << 8);
length = msg[i].len - 2;
@@ -1026,6 +1034,10 @@ static int az6027_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int n
} else {
req = 0xBD;
+ if (msg[i].len < 1) {
+ i = -EOPNOTSUPP;
+ break;
+ }
index = msg[i].buf[0] & 0x00FF;
value = msg[i].addr + (1 << 8);
length = msg[i].len - 1;
diff --git a/drivers/media/usb/dvb-usb/digitv.c b/drivers/media/usb/dvb-usb/digitv.c
index 2756815a780b..32134be16914 100644
--- a/drivers/media/usb/dvb-usb/digitv.c
+++ b/drivers/media/usb/dvb-usb/digitv.c
@@ -63,6 +63,10 @@ static int digitv_i2c_xfer(struct i2c_adapter *adap,struct i2c_msg msg[],int num
warn("more than 2 i2c messages at a time is not handled yet. TODO.");
for (i = 0; i < num; i++) {
+ if (msg[i].len < 1) {
+ i = -EOPNOTSUPP;
+ break;
+ }
/* write/read request */
if (i+1 < num && (msg[i+1].flags & I2C_M_RD)) {
if (digitv_ctrl_msg(d, USB_READ_COFDM, msg[i].buf[0], NULL, 0,
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
index 0ca764282c76..8747960e6146 100644
--- a/drivers/media/usb/dvb-usb/dw2102.c
+++ b/drivers/media/usb/dvb-usb/dw2102.c
@@ -946,7 +946,7 @@ static int su3000_read_mac_address(struct dvb_usb_device *d, u8 mac[6])
for (i = 0; i < 6; i++) {
obuf[1] = 0xf0 + i;
if (i2c_transfer(&d->i2c_adap, msg, 2) != 2)
- break;
+ return -1;
else
mac[i] = ibuf[0];
}
diff --git a/drivers/media/usb/pvrusb2/Kconfig b/drivers/media/usb/pvrusb2/Kconfig
index 9501b10b31aa..0df10270dbdf 100644
--- a/drivers/media/usb/pvrusb2/Kconfig
+++ b/drivers/media/usb/pvrusb2/Kconfig
@@ -37,6 +37,7 @@ config VIDEO_PVRUSB2_DVB
bool "pvrusb2 ATSC/DVB support"
default y
depends on VIDEO_PVRUSB2 && DVB_CORE
+ depends on VIDEO_PVRUSB2=m || DVB_CORE=y
select DVB_LGDT330X if MEDIA_SUBDRV_AUTOSELECT
select DVB_S5H1409 if MEDIA_SUBDRV_AUTOSELECT
select DVB_S5H1411 if MEDIA_SUBDRV_AUTOSELECT
diff --git a/drivers/media/usb/ttusb-dec/ttusb_dec.c b/drivers/media/usb/ttusb-dec/ttusb_dec.c
index 38822cedd93a..c4474d4c44e2 100644
--- a/drivers/media/usb/ttusb-dec/ttusb_dec.c
+++ b/drivers/media/usb/ttusb-dec/ttusb_dec.c
@@ -1544,8 +1544,7 @@ static void ttusb_dec_exit_dvb(struct ttusb_dec *dec)
dvb_dmx_release(&dec->demux);
if (dec->fe) {
dvb_unregister_frontend(dec->fe);
- if (dec->fe->ops.release)
- dec->fe->ops.release(dec->fe);
+ dvb_frontend_detach(dec->fe);
}
dvb_unregister_adapter(&dec->adapter);
}
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 7aefa76a42b3..d631ce4f9f7b 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -251,14 +251,17 @@ static int uvc_parse_format(struct uvc_device *dev,
/* Find the format descriptor from its GUID. */
fmtdesc = uvc_format_by_guid(&buffer[5]);
- if (fmtdesc != NULL) {
- format->fcc = fmtdesc->fcc;
- } else {
+ if (!fmtdesc) {
+ /*
+ * Unknown video formats are not fatal errors; the
+ * caller will skip this descriptor.
+ */
dev_info(&streaming->intf->dev,
"Unknown video format %pUl\n", &buffer[5]);
- format->fcc = 0;
+ return 0;
}
+ format->fcc = fmtdesc->fcc;
format->bpp = buffer[21];
/*
@@ -675,7 +678,7 @@ static int uvc_parse_streaming(struct uvc_device *dev,
interval = (u32 *)&frame[nframes];
streaming->format = format;
- streaming->nformats = nformats;
+ streaming->nformats = 0;
/* Parse the format descriptors. */
while (buflen > 2 && buffer[1] == USB_DT_CS_INTERFACE) {
@@ -689,7 +692,10 @@ static int uvc_parse_streaming(struct uvc_device *dev,
&interval, buffer, buflen);
if (ret < 0)
goto error;
+ if (!ret)
+ break;
+ streaming->nformats++;
frame += format->nframes;
format++;
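The uvc change makes nformats track only descriptors that uvc_parse_format() actually accepted, and an unknown GUID now returns 0 so the caller stops instead of registering a format with fcc == 0. The counting idiom, sketched with illustrative names and an assumed helper contract:

/* Assumed contract: <0 fatal, 0 skip/stop, >0 bytes consumed. The
 * caller's count then always matches what was actually stored.
 */
static int demo_parse_one(const unsigned char *buf, int len)
{
	return 0;	/* stub: 0 means "unknown entry, stop" */
}

static int demo_parse_all(const unsigned char *buf, int len, int *nformats)
{
	int ret;

	*nformats = 0;
	while (len > 2) {
		ret = demo_parse_one(buf, len);
		if (ret < 0)
			return ret;
		if (!ret)
			break;		/* unknown entry: stop cleanly */
		(*nformats)++;		/* count only after success */
		buf += ret;
		len -= ret;
	}
	return 0;
}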
diff --git a/drivers/media/v4l2-core/v4l2-mc.c b/drivers/media/v4l2-core/v4l2-mc.c
index bf0c18100664..22fe08fce0a9 100644
--- a/drivers/media/v4l2-core/v4l2-mc.c
+++ b/drivers/media/v4l2-core/v4l2-mc.c
@@ -314,8 +314,7 @@ int v4l2_create_fwnode_links_to_pad(struct v4l2_subdev *src_sd,
{
struct fwnode_handle *endpoint;
- if (!(sink->flags & MEDIA_PAD_FL_SINK) ||
- !is_media_entity_v4l2_subdev(sink->entity))
+ if (!(sink->flags & MEDIA_PAD_FL_SINK))
return -EINVAL;
fwnode_graph_for_each_endpoint(dev_fwnode(src_sd->dev), endpoint) {
diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
index f0a7531f354c..2d240bfa819f 100644
--- a/drivers/misc/eeprom/Kconfig
+++ b/drivers/misc/eeprom/Kconfig
@@ -6,6 +6,7 @@ config EEPROM_AT24
depends on I2C && SYSFS
select NVMEM
select NVMEM_SYSFS
+ select REGMAP
select REGMAP_I2C
help
Enable this driver to get read/write support to most I2C EEPROMs
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index f48466960f1b..30d4d0476248 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -316,12 +316,14 @@ static void fastrpc_free_map(struct kref *ref)
if (map->table) {
if (map->attr & FASTRPC_ATTR_SECUREMAP) {
struct qcom_scm_vmperm perm;
+ int vmid = map->fl->cctx->vmperms[0].vmid;
+ u64 src_perms = BIT(QCOM_SCM_VMID_HLOS) | BIT(vmid);
int err = 0;
perm.vmid = QCOM_SCM_VMID_HLOS;
perm.perm = QCOM_SCM_PERM_RWX;
err = qcom_scm_assign_mem(map->phys, map->size,
- &map->fl->cctx->perms, &perm, 1);
+ &src_perms, &perm, 1);
if (err) {
dev_err(map->fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d",
map->phys, map->size, err);
@@ -787,8 +789,12 @@ static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
goto map_err;
}
- map->phys = sg_dma_address(map->table->sgl);
- map->phys += ((u64)fl->sctx->sid << 32);
+ if (attr & FASTRPC_ATTR_SECUREMAP) {
+ map->phys = sg_phys(map->table->sgl);
+ } else {
+ map->phys = sg_dma_address(map->table->sgl);
+ map->phys += ((u64)fl->sctx->sid << 32);
+ }
map->size = len;
map->va = sg_virt(map->table->sgl);
map->len = len;
@@ -798,9 +804,15 @@ static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
* If subsystem VMIDs are defined in DTSI, then do
* hyp_assign from HLOS to those VM(s)
*/
+ u64 src_perms = BIT(QCOM_SCM_VMID_HLOS);
+ struct qcom_scm_vmperm dst_perms[2] = {0};
+
+ dst_perms[0].vmid = QCOM_SCM_VMID_HLOS;
+ dst_perms[0].perm = QCOM_SCM_PERM_RW;
+ dst_perms[1].vmid = fl->cctx->vmperms[0].vmid;
+ dst_perms[1].perm = QCOM_SCM_PERM_RWX;
map->attr = attr;
- err = qcom_scm_assign_mem(map->phys, (u64)map->size, &fl->cctx->perms,
- fl->cctx->vmperms, fl->cctx->vmcount);
+ err = qcom_scm_assign_mem(map->phys, (u64)map->size, &src_perms, dst_perms, 2);
if (err) {
dev_err(sess->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d",
map->phys, map->size, err);
@@ -1892,7 +1904,7 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
req.vaddrout = rsp_msg.vaddr;
/* Add memory to static PD pool, protection thru hypervisor */
- if (req.flags != ADSP_MMAP_REMOTE_HEAP_ADDR && fl->cctx->vmcount) {
+ if (req.flags == ADSP_MMAP_REMOTE_HEAP_ADDR && fl->cctx->vmcount) {
struct qcom_scm_vmperm perm;
perm.vmid = QCOM_SCM_VMID_HLOS;
@@ -2337,8 +2349,10 @@ static void fastrpc_notify_users(struct fastrpc_user *user)
struct fastrpc_invoke_ctx *ctx;
spin_lock(&user->lock);
- list_for_each_entry(ctx, &user->pending, node)
+ list_for_each_entry(ctx, &user->pending, node) {
+ ctx->retval = -EPIPE;
complete(&ctx->work);
+ }
spin_unlock(&user->lock);
}
@@ -2349,7 +2363,9 @@ static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev)
struct fastrpc_user *user;
unsigned long flags;
+ /* No invocations past this point */
spin_lock_irqsave(&cctx->lock, flags);
+ cctx->rpdev = NULL;
list_for_each_entry(user, &cctx->users, user)
fastrpc_notify_users(user);
spin_unlock_irqrestore(&cctx->lock, flags);
@@ -2368,7 +2384,6 @@ static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev)
of_platform_depopulate(&rpdev->dev);
- cctx->rpdev = NULL;
fastrpc_channel_ctx_put(cctx);
}
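Two ideas in the fastrpc removal path above: pending contexts are completed with an explicit -EPIPE so waiters see a real error, and rpdev is cleared inside the same locked section that walks the user list, closing the window for new invocations. The wake-with-error half, sketched with a stand-in context type:

#include <linux/completion.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/errno.h>

struct demo_ctx {			/* illustrative context */
	int retval;
	struct completion work;
	struct list_head node;
};

/* Set the error before complete(): the woken task is then guaranteed
 * to observe it when it reads retval.
 */
static void demo_fail_pending(struct list_head *pending, spinlock_t *lock)
{
	struct demo_ctx *ctx;

	spin_lock(lock);
	list_for_each_entry(ctx, pending, node) {
		ctx->retval = -EPIPE;	/* transport is gone */
		complete(&ctx->work);
	}
	spin_unlock(lock);
}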
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 00c33edb9fb9..6d01025f1fcf 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -264,6 +264,7 @@ static ssize_t power_ro_lock_store(struct device *dev,
goto out_put;
}
req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP;
+ req_to_mmc_queue_req(req)->drv_op_result = -EIO;
blk_execute_rq(req, false);
ret = req_to_mmc_queue_req(req)->drv_op_result;
blk_mq_free_request(req);
@@ -357,15 +358,15 @@ static const struct attribute_group *mmc_disk_attr_groups[] = {
NULL,
};
-static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
+static int mmc_blk_open(struct gendisk *disk, blk_mode_t mode)
{
- struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
+ struct mmc_blk_data *md = mmc_blk_get(disk);
int ret = -ENXIO;
mutex_lock(&block_mutex);
if (md) {
ret = 0;
- if ((mode & FMODE_WRITE) && md->read_only) {
+ if ((mode & BLK_OPEN_WRITE) && md->read_only) {
mmc_blk_put(md);
ret = -EROFS;
}
@@ -375,7 +376,7 @@ static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
return ret;
}
-static void mmc_blk_release(struct gendisk *disk, fmode_t mode)
+static void mmc_blk_release(struct gendisk *disk)
{
struct mmc_blk_data *md = disk->private_data;
@@ -651,6 +652,7 @@ static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
idatas[0] = idata;
req_to_mmc_queue_req(req)->drv_op =
rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
+ req_to_mmc_queue_req(req)->drv_op_result = -EIO;
req_to_mmc_queue_req(req)->drv_op_data = idatas;
req_to_mmc_queue_req(req)->ioc_count = 1;
blk_execute_rq(req, false);
@@ -722,6 +724,7 @@ static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
}
req_to_mmc_queue_req(req)->drv_op =
rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
+ req_to_mmc_queue_req(req)->drv_op_result = -EIO;
req_to_mmc_queue_req(req)->drv_op_data = idata;
req_to_mmc_queue_req(req)->ioc_count = n;
blk_execute_rq(req, false);
@@ -754,7 +757,7 @@ static int mmc_blk_check_blkdev(struct block_device *bdev)
return 0;
}
-static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
+static int mmc_blk_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long arg)
{
struct mmc_blk_data *md;
@@ -791,7 +794,7 @@ static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
}
#ifdef CONFIG_COMPAT
-static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
+static int mmc_blk_compat_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long arg)
{
return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
@@ -2806,6 +2809,7 @@ static int mmc_dbg_card_status_get(void *data, u64 *val)
if (IS_ERR(req))
return PTR_ERR(req);
req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS;
+ req_to_mmc_queue_req(req)->drv_op_result = -EIO;
blk_execute_rq(req, false);
ret = req_to_mmc_queue_req(req)->drv_op_result;
if (ret >= 0) {
@@ -2844,6 +2848,7 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
goto out_free;
}
req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_EXT_CSD;
+ req_to_mmc_queue_req(req)->drv_op_result = -EIO;
req_to_mmc_queue_req(req)->drv_op_data = &ext_csd;
blk_execute_rq(req, false);
err = req_to_mmc_queue_req(req)->drv_op_result;
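Every issuer in the block.c hunks presets drv_op_result to -EIO before blk_execute_rq(), so a request that completes without the worker ever writing a result reads back as an error instead of leftover memory. The defensive-default idiom in miniature, with hypothetical names:

#include <linux/errno.h>
#include <linux/printk.h>

struct demo_req { int result; };

static void demo_execute(struct demo_req *r)
{
	/* stub: a real implementation may or may not set r->result */
}

static int demo_issue(struct demo_req *r)
{
	r->result = -EIO;		/* pessimistic default */
	demo_execute(r);		/* runs in another context */
	if (r->result < 0)
		pr_err("op failed: %d\n", r->result);
	return r->result;		/* real result, or -EIO */
}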
diff --git a/drivers/mmc/core/pwrseq_sd8787.c b/drivers/mmc/core/pwrseq_sd8787.c
index 2e120ad83020..0c5f5e371e1f 100644
--- a/drivers/mmc/core/pwrseq_sd8787.c
+++ b/drivers/mmc/core/pwrseq_sd8787.c
@@ -28,7 +28,6 @@ struct mmc_pwrseq_sd8787 {
struct mmc_pwrseq pwrseq;
struct gpio_desc *reset_gpio;
struct gpio_desc *pwrdn_gpio;
- u32 reset_pwrdwn_delay_ms;
};
#define to_pwrseq_sd8787(p) container_of(p, struct mmc_pwrseq_sd8787, pwrseq)
@@ -39,7 +38,7 @@ static void mmc_pwrseq_sd8787_pre_power_on(struct mmc_host *host)
gpiod_set_value_cansleep(pwrseq->reset_gpio, 1);
- msleep(pwrseq->reset_pwrdwn_delay_ms);
+ msleep(300);
gpiod_set_value_cansleep(pwrseq->pwrdn_gpio, 1);
}
@@ -51,17 +50,37 @@ static void mmc_pwrseq_sd8787_power_off(struct mmc_host *host)
gpiod_set_value_cansleep(pwrseq->reset_gpio, 0);
}
+static void mmc_pwrseq_wilc1000_pre_power_on(struct mmc_host *host)
+{
+ struct mmc_pwrseq_sd8787 *pwrseq = to_pwrseq_sd8787(host->pwrseq);
+
+ /* The pwrdn_gpio is really CHIP_EN, reset_gpio is RESETN */
+ gpiod_set_value_cansleep(pwrseq->pwrdn_gpio, 1);
+ msleep(5);
+ gpiod_set_value_cansleep(pwrseq->reset_gpio, 1);
+}
+
+static void mmc_pwrseq_wilc1000_power_off(struct mmc_host *host)
+{
+ struct mmc_pwrseq_sd8787 *pwrseq = to_pwrseq_sd8787(host->pwrseq);
+
+ gpiod_set_value_cansleep(pwrseq->reset_gpio, 0);
+ gpiod_set_value_cansleep(pwrseq->pwrdn_gpio, 0);
+}
+
static const struct mmc_pwrseq_ops mmc_pwrseq_sd8787_ops = {
.pre_power_on = mmc_pwrseq_sd8787_pre_power_on,
.power_off = mmc_pwrseq_sd8787_power_off,
};
-static const u32 sd8787_delay_ms = 300;
-static const u32 wilc1000_delay_ms = 5;
+static const struct mmc_pwrseq_ops mmc_pwrseq_wilc1000_ops = {
+ .pre_power_on = mmc_pwrseq_wilc1000_pre_power_on,
+ .power_off = mmc_pwrseq_wilc1000_power_off,
+};
static const struct of_device_id mmc_pwrseq_sd8787_of_match[] = {
- { .compatible = "mmc-pwrseq-sd8787", .data = &sd8787_delay_ms },
- { .compatible = "mmc-pwrseq-wilc1000", .data = &wilc1000_delay_ms },
+ { .compatible = "mmc-pwrseq-sd8787", .data = &mmc_pwrseq_sd8787_ops },
+ { .compatible = "mmc-pwrseq-wilc1000", .data = &mmc_pwrseq_wilc1000_ops },
{/* sentinel */},
};
MODULE_DEVICE_TABLE(of, mmc_pwrseq_sd8787_of_match);
@@ -77,7 +96,6 @@ static int mmc_pwrseq_sd8787_probe(struct platform_device *pdev)
return -ENOMEM;
match = of_match_node(mmc_pwrseq_sd8787_of_match, pdev->dev.of_node);
- pwrseq->reset_pwrdwn_delay_ms = *(u32 *)match->data;
pwrseq->pwrdn_gpio = devm_gpiod_get(dev, "powerdown", GPIOD_OUT_LOW);
if (IS_ERR(pwrseq->pwrdn_gpio))
@@ -88,7 +106,7 @@ static int mmc_pwrseq_sd8787_probe(struct platform_device *pdev)
return PTR_ERR(pwrseq->reset_gpio);
pwrseq->pwrseq.dev = dev;
- pwrseq->pwrseq.ops = &mmc_pwrseq_sd8787_ops;
+ pwrseq->pwrseq.ops = match->data;
pwrseq->pwrseq.owner = THIS_MODULE;
platform_set_drvdata(pdev, pwrseq);
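Rather than keeping a per-compatible delay and branching at power-on time, the pwrseq match data now points at a complete mmc_pwrseq_ops, so each compatible carries its own sequence. The of_device_id .data idiom in a generic sketch, all names illustrative:

#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct demo_ops { void (*power_on)(void); };

static void chip_a_on(void) { }			/* stub */
static void chip_b_on(void) { }			/* stub */

static const struct demo_ops chip_a_ops = { .power_on = chip_a_on };
static const struct demo_ops chip_b_ops = { .power_on = chip_b_on };

static const struct of_device_id demo_of_match[] = {
	{ .compatible = "vendor,chip-a", .data = &chip_a_ops },
	{ .compatible = "vendor,chip-b", .data = &chip_b_ops },
	{ /* sentinel */ },
};

static const struct demo_ops *demo_pick_ops(struct platform_device *pdev)
{
	const struct of_device_id *match;

	match = of_match_node(demo_of_match, pdev->dev.of_node);
	return match ? match->data : NULL;	/* ops chosen by DT */
}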
diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c
index 8648f7e63ca1..eea208856ce0 100644
--- a/drivers/mmc/host/bcm2835.c
+++ b/drivers/mmc/host/bcm2835.c
@@ -1403,8 +1403,8 @@ static int bcm2835_probe(struct platform_device *pdev)
host->max_clk = clk_get_rate(clk);
host->irq = platform_get_irq(pdev, 0);
- if (host->irq <= 0) {
- ret = -EINVAL;
+ if (host->irq < 0) {
+ ret = host->irq;
goto err;
}
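The same one-line fix repeats across the mmc hosts below (meson, mtk-sd, mvsdio, omap, owl, sdhci-acpi, sdhci-spear, sh_mmcif, sunxi, usdhi6rol0): platform_get_irq() already returns a negative errno on failure, so propagating it preserves codes like -EPROBE_DEFER that a hardcoded -EINVAL or -ENXIO would destroy. In isolation:

#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	int irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* may be -EPROBE_DEFER; don't rewrite it */

	return 0;
}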
diff --git a/drivers/mmc/host/litex_mmc.c b/drivers/mmc/host/litex_mmc.c
index 39c6707fdfdb..9af6b0902efe 100644
--- a/drivers/mmc/host/litex_mmc.c
+++ b/drivers/mmc/host/litex_mmc.c
@@ -649,6 +649,7 @@ static struct platform_driver litex_mmc_driver = {
.driver = {
.name = "litex-mmc",
.of_match_table = litex_match,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
module_platform_driver(litex_mmc_driver);
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index b8514d9d5e73..ee9a25b900ae 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -991,11 +991,8 @@ static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
if (data && !cmd->error)
data->bytes_xfered = data->blksz * data->blocks;
- if (meson_mmc_bounce_buf_read(data) ||
- meson_mmc_get_next_command(cmd))
- ret = IRQ_WAKE_THREAD;
- else
- ret = IRQ_HANDLED;
+
+ return IRQ_WAKE_THREAD;
}
out:
@@ -1007,9 +1004,6 @@ out:
writel(start, host->regs + SD_EMMC_START);
}
- if (ret == IRQ_HANDLED)
- meson_mmc_request_done(host->mmc, cmd->mrq);
-
return ret;
}
@@ -1192,8 +1186,8 @@ static int meson_mmc_probe(struct platform_device *pdev)
return PTR_ERR(host->regs);
host->irq = platform_get_irq(pdev, 0);
- if (host->irq <= 0)
- return -EINVAL;
+ if (host->irq < 0)
+ return host->irq;
cd_irq = platform_get_irq_optional(pdev, 1);
mmc_gpio_set_cd_irq(mmc, cd_irq);
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index f2b2e8b0574e..696cbef3ff7d 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -1735,7 +1735,8 @@ static void mmci_set_max_busy_timeout(struct mmc_host *mmc)
return;
if (host->variant->busy_timeout && mmc->actual_clock)
- max_busy_timeout = ~0UL / (mmc->actual_clock / MSEC_PER_SEC);
+ max_busy_timeout = U32_MAX / DIV_ROUND_UP(mmc->actual_clock,
+ MSEC_PER_SEC);
mmc->max_busy_timeout = max_busy_timeout;
}
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index edade0e54a0c..9785ec91654f 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -2680,7 +2680,7 @@ static int msdc_drv_probe(struct platform_device *pdev)
host->irq = platform_get_irq(pdev, 0);
if (host->irq < 0) {
- ret = -EINVAL;
+ ret = host->irq;
goto host_free;
}
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index 629efbe639c4..b4f6a0a2fcb5 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -704,7 +704,7 @@ static int mvsd_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
if (irq < 0)
- return -ENXIO;
+ return irq;
mmc = mmc_alloc_host(sizeof(struct mvsd_host), &pdev->dev);
if (!mmc) {
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index ce78edfb402b..86454f1182bb 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -1343,7 +1343,7 @@ static int mmc_omap_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0)
- return -ENXIO;
+ return irq;
host->virt_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(host->virt_base))
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 517dde777413..1e0f2d7774bd 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -1791,9 +1791,11 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- irq = platform_get_irq(pdev, 0);
- if (res == NULL || irq < 0)
+ if (!res)
return -ENXIO;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base))
diff --git a/drivers/mmc/host/owl-mmc.c b/drivers/mmc/host/owl-mmc.c
index 6f9d31a886ba..1bf22b08b373 100644
--- a/drivers/mmc/host/owl-mmc.c
+++ b/drivers/mmc/host/owl-mmc.c
@@ -637,7 +637,7 @@ static int owl_mmc_probe(struct platform_device *pdev)
owl_host->irq = platform_get_irq(pdev, 0);
if (owl_host->irq < 0) {
- ret = -EINVAL;
+ ret = owl_host->irq;
goto err_release_channel;
}
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 8f0e639236b1..edf2e6c14dc6 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -829,7 +829,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
host->ops = &sdhci_acpi_ops_dflt;
host->irq = platform_get_irq(pdev, 0);
if (host->irq < 0) {
- err = -EINVAL;
+ err = host->irq;
goto err_free;
}
diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c
index b24aa27da50c..d2f625054689 100644
--- a/drivers/mmc/host/sdhci-cadence.c
+++ b/drivers/mmc/host/sdhci-cadence.c
@@ -540,9 +540,11 @@ static int sdhci_cdns_probe(struct platform_device *pdev)
if (host->mmc->caps & MMC_CAP_HW_RESET) {
priv->rst_hw = devm_reset_control_get_optional_exclusive(dev, NULL);
- if (IS_ERR(priv->rst_hw))
- return dev_err_probe(mmc_dev(host->mmc), PTR_ERR(priv->rst_hw),
- "reset controller error\n");
+ if (IS_ERR(priv->rst_hw)) {
+ ret = dev_err_probe(mmc_dev(host->mmc), PTR_ERR(priv->rst_hw),
+ "reset controller error\n");
+ goto free;
+ }
if (priv->rst_hw)
host->mmc_host_ops.card_hw_reset = sdhci_cdns_mmc_hw_reset;
}
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index d7c0c0b9e26c..eebf94604a7f 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -1634,6 +1634,10 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
if (ret)
return ret;
+ /* HS400/HS400ES require 8 bit bus */
+ if (!(host->mmc->caps & MMC_CAP_8_BIT_DATA))
+ host->mmc->caps2 &= ~(MMC_CAP2_HS400 | MMC_CAP2_HS400_ES);
+
if (mmc_gpio_get_cd(host->mmc) >= 0)
host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
@@ -1724,10 +1728,6 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
host->mmc_host_ops.init_card = usdhc_init_card;
}
- err = sdhci_esdhc_imx_probe_dt(pdev, host, imx_data);
- if (err)
- goto disable_ahb_clk;
-
if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
sdhci_esdhc_ops.platform_execute_tuning =
esdhc_executing_tuning;
@@ -1735,15 +1735,13 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
if (imx_data->socdata->flags & ESDHC_FLAG_ERR004536)
host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
- if (host->mmc->caps & MMC_CAP_8_BIT_DATA &&
- imx_data->socdata->flags & ESDHC_FLAG_HS400)
+ if (imx_data->socdata->flags & ESDHC_FLAG_HS400)
host->mmc->caps2 |= MMC_CAP2_HS400;
if (imx_data->socdata->flags & ESDHC_FLAG_BROKEN_AUTO_CMD23)
host->quirks2 |= SDHCI_QUIRK2_ACMD23_BROKEN;
- if (host->mmc->caps & MMC_CAP_8_BIT_DATA &&
- imx_data->socdata->flags & ESDHC_FLAG_HS400_ES) {
+ if (imx_data->socdata->flags & ESDHC_FLAG_HS400_ES) {
host->mmc->caps2 |= MMC_CAP2_HS400_ES;
host->mmc_host_ops.hs400_enhanced_strobe =
esdhc_hs400_enhanced_strobe;
@@ -1765,6 +1763,10 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
goto disable_ahb_clk;
}
+ err = sdhci_esdhc_imx_probe_dt(pdev, host, imx_data);
+ if (err)
+ goto disable_ahb_clk;
+
sdhci_esdhc_imx_hwinit(host);
err = sdhci_add_host(host);
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 8ac81d57a3df..1877d583fe8c 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -2479,6 +2479,9 @@ static inline void sdhci_msm_get_of_property(struct platform_device *pdev,
msm_host->ddr_config = DDR_CONFIG_POR_VAL;
of_property_read_u32(node, "qcom,dll-config", &msm_host->dll_config);
+
+ if (of_device_is_compatible(node, "qcom,msm8916-sdhci"))
+ host->quirks2 |= SDHCI_QUIRK2_BROKEN_64_BIT_DMA;
}
static int sdhci_msm_gcc_reset(struct device *dev, struct sdhci_host *host)
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c
index d463e2fd5b1a..c79035727b20 100644
--- a/drivers/mmc/host/sdhci-spear.c
+++ b/drivers/mmc/host/sdhci-spear.c
@@ -65,8 +65,8 @@ static int sdhci_probe(struct platform_device *pdev)
host->hw_name = "sdhci";
host->ops = &sdhci_pltfm_ops;
host->irq = platform_get_irq(pdev, 0);
- if (host->irq <= 0) {
- ret = -EINVAL;
+ if (host->irq < 0) {
+ ret = host->irq;
goto err_host;
}
host->quirks = SDHCI_QUIRK_BROKEN_ADMA;
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 0fd4c9d644dd..5cf53348372a 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -1400,7 +1400,7 @@ static int sh_mmcif_probe(struct platform_device *pdev)
irq[0] = platform_get_irq(pdev, 0);
irq[1] = platform_get_irq_optional(pdev, 1);
if (irq[0] < 0)
- return -ENXIO;
+ return irq[0];
reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg))
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index 3db9f32d6a7b..69dcb8805e05 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -1350,8 +1350,8 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
return ret;
host->irq = platform_get_irq(pdev, 0);
- if (host->irq <= 0) {
- ret = -EINVAL;
+ if (host->irq < 0) {
+ ret = host->irq;
goto error_disable_mmc;
}
diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c
index 2f59917b105e..2e17903658fc 100644
--- a/drivers/mmc/host/usdhi6rol0.c
+++ b/drivers/mmc/host/usdhi6rol0.c
@@ -1757,8 +1757,10 @@ static int usdhi6_probe(struct platform_device *pdev)
irq_cd = platform_get_irq_byname(pdev, "card detect");
irq_sd = platform_get_irq_byname(pdev, "data");
irq_sdio = platform_get_irq_byname(pdev, "SDIO");
- if (irq_sd < 0 || irq_sdio < 0)
- return -ENODEV;
+ if (irq_sd < 0)
+ return irq_sd;
+ if (irq_sdio < 0)
+ return irq_sdio;
mmc = mmc_alloc_host(sizeof(struct usdhi6_host), dev);
if (!mmc)
diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
index e4c4bfac3763..9ec593d52f0f 100644
--- a/drivers/mmc/host/vub300.c
+++ b/drivers/mmc/host/vub300.c
@@ -1713,6 +1713,9 @@ static void construct_request_response(struct vub300_mmc_host *vub300,
int bytes = 3 & less_cmd;
int words = less_cmd >> 2;
u8 *r = vub300->resp.response.command_response;
+
+ if (!resp_len)
+ return;
if (bytes == 3) {
cmd->resp[words] = (r[1 + (words << 2)] << 24)
| (r[2 + (words << 2)] << 16)
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index 4cd37ec45762..be106dc20ff3 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -209,40 +209,34 @@ static void block2mtd_free_device(struct block2mtd_dev *dev)
if (dev->blkdev) {
invalidate_mapping_pages(dev->blkdev->bd_inode->i_mapping,
0, -1);
- blkdev_put(dev->blkdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
+ blkdev_put(dev->blkdev, NULL);
}
kfree(dev);
}
-
-static struct block2mtd_dev *add_device(char *devname, int erase_size,
- char *label, int timeout)
+/*
+ * This function is marked __ref because it calls the __init-marked
+ * early_lookup_bdev() when called from the early boot code.
+ */
+static struct block_device __ref *mdtblock_early_get_bdev(const char *devname,
+ blk_mode_t mode, int timeout, struct block2mtd_dev *dev)
{
+ struct block_device *bdev = ERR_PTR(-ENODEV);
#ifndef MODULE
int i;
-#endif
- const fmode_t mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
- struct block_device *bdev;
- struct block2mtd_dev *dev;
- char *name;
- if (!devname)
- return NULL;
-
- dev = kzalloc(sizeof(struct block2mtd_dev), GFP_KERNEL);
- if (!dev)
- return NULL;
-
- /* Get a handle on the device */
- bdev = blkdev_get_by_path(devname, mode, dev);
+ /*
+ * We can't use early_lookup_bdev from a running system.
+ */
+ if (system_state >= SYSTEM_RUNNING)
+ return bdev;
-#ifndef MODULE
/*
* We might not have the root device mounted at this point.
* Try to resolve the device name by other means.
*/
- for (i = 0; IS_ERR(bdev) && i <= timeout; i++) {
+ for (i = 0; i <= timeout; i++) {
dev_t devt;
if (i)
@@ -254,13 +248,35 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size,
msleep(1000);
wait_for_device_probe();
- devt = name_to_dev_t(devname);
- if (!devt)
- continue;
- bdev = blkdev_get_by_dev(devt, mode, dev);
+ if (!early_lookup_bdev(devname, &devt)) {
+ bdev = blkdev_get_by_dev(devt, mode, dev, NULL);
+ if (!IS_ERR(bdev))
+ break;
+ }
}
#endif
+ return bdev;
+}
+static struct block2mtd_dev *add_device(char *devname, int erase_size,
+ char *label, int timeout)
+{
+ const blk_mode_t mode = BLK_OPEN_READ | BLK_OPEN_WRITE;
+ struct block_device *bdev;
+ struct block2mtd_dev *dev;
+ char *name;
+
+ if (!devname)
+ return NULL;
+
+ dev = kzalloc(sizeof(struct block2mtd_dev), GFP_KERNEL);
+ if (!dev)
+ return NULL;
+
+ /* Get a handle on the device */
+ bdev = blkdev_get_by_path(devname, mode, dev, NULL);
+ if (IS_ERR(bdev))
+ bdev = mdtblock_early_get_bdev(devname, mode, timeout, dev);
if (IS_ERR(bdev)) {
pr_err("error: cannot open device %s\n", devname);
goto err_free_block2mtd;
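block2mtd now opens by path first and only falls back to the early-boot dev_t lookup when that fails before the system is fully up; early_lookup_bdev() is __init, which is why the fallback is fenced by system_state and its caller is __ref. A condensed sketch of the gate (retry loop omitted), assuming the v6.5-era blkdev_get signatures:

#include <linux/blkdev.h>
#include <linux/err.h>
#include <linux/kernel.h>

static struct block_device *demo_open(const char *devname, blk_mode_t mode,
				      void *holder)
{
	struct block_device *bdev;
	dev_t devt;

	bdev = blkdev_get_by_path(devname, mode, holder, NULL);
	if (!IS_ERR(bdev))
		return bdev;

	/* early_lookup_bdev() lives in __init memory: never call it
	 * once boot has finished.
	 */
	if (system_state >= SYSTEM_RUNNING)
		return bdev;

	if (early_lookup_bdev(devname, &devt))
		return ERR_PTR(-ENODEV);
	return blkdev_get_by_dev(devt, mode, holder, NULL);
}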
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 60b222799871..ff18636e0889 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -182,9 +182,9 @@ static blk_status_t mtd_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_OK;
}
-static int blktrans_open(struct block_device *bdev, fmode_t mode)
+static int blktrans_open(struct gendisk *disk, blk_mode_t mode)
{
- struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;
+ struct mtd_blktrans_dev *dev = disk->private_data;
int ret = 0;
kref_get(&dev->ref);
@@ -208,7 +208,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
ret = __get_mtd_device(dev->mtd);
if (ret)
goto error_release;
- dev->file_mode = mode;
+ dev->writable = mode & BLK_OPEN_WRITE;
unlock:
dev->open++;
@@ -225,7 +225,7 @@ error_put:
return ret;
}
-static void blktrans_release(struct gendisk *disk, fmode_t mode)
+static void blktrans_release(struct gendisk *disk)
{
struct mtd_blktrans_dev *dev = disk->private_data;
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
index a0a1194dc1d9..fa476fb4dffb 100644
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -294,7 +294,7 @@ static void mtdblock_release(struct mtd_blktrans_dev *mbd)
* It was the last usage. Free the cache, but only sync if
* opened for writing.
*/
- if (mbd->file_mode & FMODE_WRITE)
+ if (mbd->writable)
mtd_sync(mbd->mtd);
vfree(mtdblk->cache_data);
}
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 01f1c6792df9..8dc4f5c493fc 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -590,8 +590,8 @@ static void adjust_oob_length(struct mtd_info *mtd, uint64_t start,
(end_page - start_page + 1) * oob_per_page);
}
-static int mtdchar_write_ioctl(struct mtd_info *mtd,
- struct mtd_write_req __user *argp)
+static noinline_for_stack int
+mtdchar_write_ioctl(struct mtd_info *mtd, struct mtd_write_req __user *argp)
{
struct mtd_info *master = mtd_get_master(mtd);
struct mtd_write_req req;
@@ -688,8 +688,8 @@ static int mtdchar_write_ioctl(struct mtd_info *mtd,
return ret;
}
-static int mtdchar_read_ioctl(struct mtd_info *mtd,
- struct mtd_read_req __user *argp)
+static noinline_for_stack int
+mtdchar_read_ioctl(struct mtd_info *mtd, struct mtd_read_req __user *argp)
{
struct mtd_info *master = mtd_get_master(mtd);
struct mtd_read_req req;
diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.h b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.h
index 2cda439b5e11..017868f59f22 100644
--- a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.h
+++ b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.h
@@ -36,25 +36,25 @@ int ingenic_ecc_correct(struct ingenic_ecc *ecc,
void ingenic_ecc_release(struct ingenic_ecc *ecc);
struct ingenic_ecc *of_ingenic_ecc_get(struct device_node *np);
#else /* CONFIG_MTD_NAND_INGENIC_ECC */
-int ingenic_ecc_calculate(struct ingenic_ecc *ecc,
+static inline int ingenic_ecc_calculate(struct ingenic_ecc *ecc,
struct ingenic_ecc_params *params,
const u8 *buf, u8 *ecc_code)
{
return -ENODEV;
}
-int ingenic_ecc_correct(struct ingenic_ecc *ecc,
+static inline int ingenic_ecc_correct(struct ingenic_ecc *ecc,
struct ingenic_ecc_params *params, u8 *buf,
u8 *ecc_code)
{
return -ENODEV;
}
-void ingenic_ecc_release(struct ingenic_ecc *ecc)
+static inline void ingenic_ecc_release(struct ingenic_ecc *ecc)
{
}
-struct ingenic_ecc *of_ingenic_ecc_get(struct device_node *np)
+static inline struct ingenic_ecc *of_ingenic_ecc_get(struct device_node *np)
{
return ERR_PTR(-ENODEV);
}
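The ingenic_ecc.h fix is the standard header-stub rule: fallbacks for a disabled Kconfig option must be static inline, otherwise every file including the header emits its own global definition and the link fails. Generic form, with a hypothetical option name:

#include <linux/errno.h>

#ifdef CONFIG_DEMO_FEATURE
int demo_op(int x);		/* real implementation in a .c file */
#else
static inline int demo_op(int x)
{
	return -ENODEV;		/* feature compiled out; no symbol emitted */
}
#endif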
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index afb424579f0b..30c15e4e1cc0 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -2457,6 +2457,12 @@ static int marvell_nfc_setup_interface(struct nand_chip *chip, int chipnr,
NDTR1_WAIT_MODE;
}
+ /*
+ * Reset nfc->selected_chip so the next command will cause the timing
+ * registers to be updated in marvell_nfc_select_target().
+ */
+ nfc->selected_chip = NULL;
+
return 0;
}
@@ -2894,10 +2900,6 @@ static int marvell_nfc_init(struct marvell_nfc *nfc)
regmap_update_bits(sysctrl_base, GENCONF_CLK_GATING_CTRL,
GENCONF_CLK_GATING_CTRL_ND_GATE,
GENCONF_CLK_GATING_CTRL_ND_GATE);
-
- regmap_update_bits(sysctrl_base, GENCONF_ND_CLK_CTRL,
- GENCONF_ND_CLK_CTRL_EN,
- GENCONF_ND_CLK_CTRL_EN);
}
/* Configure the DMA if appropriate */
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
index 0bb0ad14a2fc..5f29fac8669a 100644
--- a/drivers/mtd/spi-nor/core.c
+++ b/drivers/mtd/spi-nor/core.c
@@ -2018,6 +2018,7 @@ static const struct spi_nor_manufacturer *manufacturers[] = {
static const struct flash_info spi_nor_generic_flash = {
.name = "spi-nor-generic",
+ .n_banks = 1,
/*
* JESD216 rev A doesn't specify the page size, therefore we need a
* sane default.
@@ -2921,7 +2922,8 @@ static void spi_nor_late_init_params(struct spi_nor *nor)
if (nor->flags & SNOR_F_HAS_LOCK && !nor->params->locking_ops)
spi_nor_init_default_locking_ops(nor);
- nor->params->bank_size = div64_u64(nor->params->size, nor->info->n_banks);
+ if (nor->info->n_banks > 1)
+ params->bank_size = div64_u64(params->size, nor->info->n_banks);
}
/**
@@ -2987,6 +2989,7 @@ static void spi_nor_init_default_params(struct spi_nor *nor)
/* Set SPI NOR sizes. */
params->writesize = 1;
params->size = (u64)info->sector_size * info->n_sectors;
+ params->bank_size = params->size;
params->page_size = info->page_size;
if (!(info->flags & SPI_NOR_NO_FR)) {
diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c
index 15f9a80c10b9..36876aa849ed 100644
--- a/drivers/mtd/spi-nor/spansion.c
+++ b/drivers/mtd/spi-nor/spansion.c
@@ -361,7 +361,7 @@ static int cypress_nor_determine_addr_mode_by_sr1(struct spi_nor *nor,
*/
static int cypress_nor_set_addr_mode_nbytes(struct spi_nor *nor)
{
- struct spi_mem_op op;
+ struct spi_mem_op op = {};
u8 addr_mode;
int ret;
@@ -492,7 +492,7 @@ s25fs256t_post_bfpt_fixup(struct spi_nor *nor,
const struct sfdp_parameter_header *bfpt_header,
const struct sfdp_bfpt *bfpt)
{
- struct spi_mem_op op;
+ struct spi_mem_op op = {};
int ret;
ret = cypress_nor_set_addr_mode_nbytes(nor);
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index 3711d7f74600..437c5b83ffe5 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -227,9 +227,9 @@ static blk_status_t ubiblock_read(struct request *req)
return BLK_STS_OK;
}
-static int ubiblock_open(struct block_device *bdev, fmode_t mode)
+static int ubiblock_open(struct gendisk *disk, blk_mode_t mode)
{
- struct ubiblock *dev = bdev->bd_disk->private_data;
+ struct ubiblock *dev = disk->private_data;
int ret;
mutex_lock(&dev->dev_mutex);
@@ -246,11 +246,10 @@ static int ubiblock_open(struct block_device *bdev, fmode_t mode)
* It's just a paranoid check, as write requests will get rejected
* in any case.
*/
- if (mode & FMODE_WRITE) {
+ if (mode & BLK_OPEN_WRITE) {
ret = -EROFS;
goto out_unlock;
}
-
dev->desc = ubi_open_volume(dev->ubi_num, dev->vol_id, UBI_READONLY);
if (IS_ERR(dev->desc)) {
dev_err(disk_to_dev(dev->gd), "failed to open ubi volume %d_%d",
@@ -270,7 +269,7 @@ out_unlock:
return ret;
}
-static void ubiblock_release(struct gendisk *gd, fmode_t mode)
+static void ubiblock_release(struct gendisk *gd)
{
struct ubiblock *dev = gd->private_data;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 3fed888629f7..edbaa1444f8e 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3947,7 +3947,11 @@ static int bond_slave_netdev_event(unsigned long event,
unblock_netpoll_tx();
break;
case NETDEV_FEAT_CHANGE:
- bond_compute_features(bond);
+ if (!bond->notifier_ctx) {
+ bond->notifier_ctx = true;
+ bond_compute_features(bond);
+ bond->notifier_ctx = false;
+ }
break;
case NETDEV_RESEND_IGMP:
/* Propagate to master device */
@@ -6342,6 +6346,8 @@ static int bond_init(struct net_device *bond_dev)
if (!bond->wq)
return -ENOMEM;
+ bond->notifier_ctx = false;
+
spin_lock_init(&bond->stats_lock);
netdev_lockdep_set_classes(bond_dev);
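NETDEV_FEAT_CHANGE can be re-raised while bond_compute_features() is itself adjusting features, so the new notifier_ctx flag latches the first entry and drops the echo. Because notifier delivery here runs under rtnl, a plain bool suffices; a sketch with stand-in names:

static bool in_recompute;			/* illustrative latch */

static void demo_recompute_features(void)
{
	/* stub: a real implementation may re-raise the event */
}

static void demo_feat_change(void)
{
	if (in_recompute)
		return;			/* recursive notification: ignore */
	in_recompute = true;
	demo_recompute_features();
	in_recompute = false;
}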
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 3ceccafd701b..b190007c01be 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -95,7 +95,7 @@ config CAN_AT91
config CAN_BXCAN
tristate "STM32 Basic Extended CAN (bxCAN) devices"
- depends on OF || ARCH_STM32 || COMPILE_TEST
+ depends on ARCH_STM32 || COMPILE_TEST
depends on HAS_IOMEM
select CAN_RX_OFFLOAD
help
diff --git a/drivers/net/can/bxcan.c b/drivers/net/can/bxcan.c
index e26ccd41e3cb..027a8a162fe4 100644
--- a/drivers/net/can/bxcan.c
+++ b/drivers/net/can/bxcan.c
@@ -118,7 +118,7 @@
#define BXCAN_FiR1_REG(b) (0x40 + (b) * 8)
#define BXCAN_FiR2_REG(b) (0x44 + (b) * 8)
-#define BXCAN_FILTER_ID(primary) (primary ? 0 : 14)
+#define BXCAN_FILTER_ID(cfg) ((cfg) == BXCAN_CFG_DUAL_SECONDARY ? 14 : 0)
/* Filter primary register (FMR) bits */
#define BXCAN_FMR_CANSB_MASK GENMASK(13, 8)
@@ -135,6 +135,12 @@ enum bxcan_lec_code {
BXCAN_LEC_UNUSED
};
+enum bxcan_cfg {
+ BXCAN_CFG_SINGLE = 0,
+ BXCAN_CFG_DUAL_PRIMARY,
+ BXCAN_CFG_DUAL_SECONDARY
+};
+
/* Structure of the message buffer */
struct bxcan_mb {
u32 id; /* can identifier */
@@ -167,7 +173,7 @@ struct bxcan_priv {
struct regmap *gcan;
int tx_irq;
int sce_irq;
- bool primary;
+ enum bxcan_cfg cfg;
struct clk *clk;
spinlock_t rmw_lock; /* lock for read-modify-write operations */
unsigned int tx_head;
@@ -202,17 +208,17 @@ static inline void bxcan_rmw(struct bxcan_priv *priv, void __iomem *addr,
spin_unlock_irqrestore(&priv->rmw_lock, flags);
}
-static void bxcan_disable_filters(struct bxcan_priv *priv, bool primary)
+static void bxcan_disable_filters(struct bxcan_priv *priv, enum bxcan_cfg cfg)
{
- unsigned int fid = BXCAN_FILTER_ID(primary);
+ unsigned int fid = BXCAN_FILTER_ID(cfg);
u32 fmask = BIT(fid);
regmap_update_bits(priv->gcan, BXCAN_FA1R_REG, fmask, 0);
}
-static void bxcan_enable_filters(struct bxcan_priv *priv, bool primary)
+static void bxcan_enable_filters(struct bxcan_priv *priv, enum bxcan_cfg cfg)
{
- unsigned int fid = BXCAN_FILTER_ID(primary);
+ unsigned int fid = BXCAN_FILTER_ID(cfg);
u32 fmask = BIT(fid);
/* Filter settings:
@@ -680,7 +686,7 @@ static int bxcan_chip_start(struct net_device *ndev)
BXCAN_BTR_BRP_MASK | BXCAN_BTR_TS1_MASK | BXCAN_BTR_TS2_MASK |
BXCAN_BTR_SJW_MASK, set);
- bxcan_enable_filters(priv, priv->primary);
+ bxcan_enable_filters(priv, priv->cfg);
/* Clear all internal status */
priv->tx_head = 0;
@@ -806,7 +812,7 @@ static void bxcan_chip_stop(struct net_device *ndev)
BXCAN_IER_EPVIE | BXCAN_IER_EWGIE | BXCAN_IER_FOVIE1 |
BXCAN_IER_FFIE1 | BXCAN_IER_FMPIE1 | BXCAN_IER_FOVIE0 |
BXCAN_IER_FFIE0 | BXCAN_IER_FMPIE0 | BXCAN_IER_TMEIE, 0);
- bxcan_disable_filters(priv, priv->primary);
+ bxcan_disable_filters(priv, priv->cfg);
bxcan_enter_sleep_mode(priv);
priv->can.state = CAN_STATE_STOPPED;
}
@@ -931,7 +937,7 @@ static int bxcan_probe(struct platform_device *pdev)
struct clk *clk = NULL;
void __iomem *regs;
struct regmap *gcan;
- bool primary;
+ enum bxcan_cfg cfg;
int err, rx_irq, tx_irq, sce_irq;
regs = devm_platform_ioremap_resource(pdev, 0);
@@ -946,7 +952,13 @@ static int bxcan_probe(struct platform_device *pdev)
return PTR_ERR(gcan);
}
- primary = of_property_read_bool(np, "st,can-primary");
+ if (of_property_read_bool(np, "st,can-primary"))
+ cfg = BXCAN_CFG_DUAL_PRIMARY;
+ else if (of_property_read_bool(np, "st,can-secondary"))
+ cfg = BXCAN_CFG_DUAL_SECONDARY;
+ else
+ cfg = BXCAN_CFG_SINGLE;
+
clk = devm_clk_get(dev, NULL);
if (IS_ERR(clk)) {
dev_err(dev, "failed to get clock\n");
@@ -992,7 +1004,7 @@ static int bxcan_probe(struct platform_device *pdev)
priv->clk = clk;
priv->tx_irq = tx_irq;
priv->sce_irq = sce_irq;
- priv->primary = primary;
+ priv->cfg = cfg;
priv->can.clock.freq = clk_get_rate(clk);
spin_lock_init(&priv->rmw_lock);
priv->tx_head = 0;
diff --git a/drivers/net/can/dev/skb.c b/drivers/net/can/dev/skb.c
index 241ec636e91f..f6d05b3ef59a 100644
--- a/drivers/net/can/dev/skb.c
+++ b/drivers/net/can/dev/skb.c
@@ -54,7 +54,8 @@ int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
/* check flag whether this packet has to be looped back */
if (!(dev->flags & IFF_ECHO) ||
(skb->protocol != htons(ETH_P_CAN) &&
- skb->protocol != htons(ETH_P_CANFD))) {
+ skb->protocol != htons(ETH_P_CANFD) &&
+ skb->protocol != htons(ETH_P_CANXL))) {
kfree_skb(skb);
return 0;
}
diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
index 53e8a914c88b..be189edb256c 100644
--- a/drivers/net/can/kvaser_pciefd.c
+++ b/drivers/net/can/kvaser_pciefd.c
@@ -71,10 +71,12 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
#define KVASER_PCIEFD_SYSID_BUILD_REG (KVASER_PCIEFD_SYSID_BASE + 0x14)
/* Shared receive buffer registers */
#define KVASER_PCIEFD_SRB_BASE 0x1f200
+#define KVASER_PCIEFD_SRB_FIFO_LAST_REG (KVASER_PCIEFD_SRB_BASE + 0x1f4)
#define KVASER_PCIEFD_SRB_CMD_REG (KVASER_PCIEFD_SRB_BASE + 0x200)
#define KVASER_PCIEFD_SRB_IEN_REG (KVASER_PCIEFD_SRB_BASE + 0x204)
#define KVASER_PCIEFD_SRB_IRQ_REG (KVASER_PCIEFD_SRB_BASE + 0x20c)
#define KVASER_PCIEFD_SRB_STAT_REG (KVASER_PCIEFD_SRB_BASE + 0x210)
+#define KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG (KVASER_PCIEFD_SRB_BASE + 0x214)
#define KVASER_PCIEFD_SRB_CTRL_REG (KVASER_PCIEFD_SRB_BASE + 0x218)
/* EPCS flash controller registers */
#define KVASER_PCIEFD_SPI_BASE 0x1fc00
@@ -111,6 +113,9 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
/* DMA support */
#define KVASER_PCIEFD_SRB_STAT_DMA BIT(24)
+/* SRB current packet level */
+#define KVASER_PCIEFD_SRB_RX_NR_PACKETS_MASK 0xff
+
/* DMA Enable */
#define KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE BIT(0)
@@ -526,7 +531,7 @@ static int kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can)
KVASER_PCIEFD_KCAN_IRQ_TOF | KVASER_PCIEFD_KCAN_IRQ_ABD |
KVASER_PCIEFD_KCAN_IRQ_TAE | KVASER_PCIEFD_KCAN_IRQ_TAL |
KVASER_PCIEFD_KCAN_IRQ_FDIC | KVASER_PCIEFD_KCAN_IRQ_BPP |
- KVASER_PCIEFD_KCAN_IRQ_TAR | KVASER_PCIEFD_KCAN_IRQ_TFD;
+ KVASER_PCIEFD_KCAN_IRQ_TAR;
iowrite32(msk, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
@@ -554,6 +559,8 @@ static void kvaser_pciefd_setup_controller(struct kvaser_pciefd_can *can)
if (can->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
mode |= KVASER_PCIEFD_KCAN_MODE_LOM;
+ else
+ mode &= ~KVASER_PCIEFD_KCAN_MODE_LOM;
mode |= KVASER_PCIEFD_KCAN_MODE_EEN;
mode |= KVASER_PCIEFD_KCAN_MODE_EPEN;
@@ -572,7 +579,7 @@ static void kvaser_pciefd_start_controller_flush(struct kvaser_pciefd_can *can)
spin_lock_irqsave(&can->lock, irq);
iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
- iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD,
+ iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
@@ -615,7 +622,7 @@ static int kvaser_pciefd_bus_on(struct kvaser_pciefd_can *can)
iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
- iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD,
+ iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
@@ -719,6 +726,7 @@ static int kvaser_pciefd_stop(struct net_device *netdev)
iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
del_timer(&can->bec_poll_timer);
}
+ can->can.state = CAN_STATE_STOPPED;
close_candev(netdev);
return ret;
@@ -1007,8 +1015,7 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
SET_NETDEV_DEV(netdev, &pcie->pci->dev);
iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
- iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD |
- KVASER_PCIEFD_KCAN_IRQ_TFD,
+ iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
pcie->can[i] = can;
@@ -1058,6 +1065,7 @@ static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
{
int i;
u32 srb_status;
+ u32 srb_packet_count;
dma_addr_t dma_addr[KVASER_PCIEFD_DMA_COUNT];
/* Disable the DMA */
@@ -1085,6 +1093,15 @@ static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
KVASER_PCIEFD_SRB_CMD_RDB1,
pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
+ /* Empty Rx FIFO */
+ srb_packet_count = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG) &
+ KVASER_PCIEFD_SRB_RX_NR_PACKETS_MASK;
+ while (srb_packet_count) {
+ /* Drop current packet in FIFO */
+ ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_FIFO_LAST_REG);
+ srb_packet_count--;
+ }
+
srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG);
if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DI)) {
dev_err(&pcie->pci->dev, "DMA not idle before enabling\n");
@@ -1425,9 +1442,6 @@ static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie,
cmd = KVASER_PCIEFD_KCAN_CMD_AT;
cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
-
- iowrite32(KVASER_PCIEFD_KCAN_IRQ_TFD,
- can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
} else if (p->header[0] & KVASER_PCIEFD_SPACK_IDET &&
p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK) &&
@@ -1714,15 +1728,6 @@ static int kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
if (irq & KVASER_PCIEFD_KCAN_IRQ_TOF)
netdev_err(can->can.dev, "Tx FIFO overflow\n");
- if (irq & KVASER_PCIEFD_KCAN_IRQ_TFD) {
- u8 count = ioread32(can->reg_base +
- KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;
-
- if (count == 0)
- iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
- can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
- }
-
if (irq & KVASER_PCIEFD_KCAN_IRQ_BPP)
netdev_err(can->can.dev,
"Fail to change bittiming, when not in reset mode\n");
@@ -1824,6 +1829,11 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
if (err)
goto err_teardown_can_ctrls;
+ err = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler,
+ IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie);
+ if (err)
+ goto err_teardown_can_ctrls;
+
iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1,
pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);
@@ -1844,11 +1854,6 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
- err = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler,
- IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie);
- if (err)
- goto err_teardown_can_ctrls;
-
err = kvaser_pciefd_reg_candev(pcie);
if (err)
goto err_free_irq;
@@ -1856,6 +1861,8 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
return 0;
err_free_irq:
+ /* Disable PCI interrupts */
+ iowrite32(0, pcie->reg_base + KVASER_PCIEFD_IEN_REG);
free_irq(pcie->pci->irq, pcie);
err_teardown_can_ctrls:
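The kvaser_pciefd probe reordering follows one rule in both directions: the handler must be installed before any interrupt source is unmasked, and on the error path the sources are masked again before free_irq(). Sketched with hypothetical registers and names:

#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/io.h>

#define DEMO_IEN_REG	0x0		/* illustrative offset/value */
#define DEMO_IEN_ALL	(~0u)

struct demo_pcie { void __iomem *reg_base; };

static irqreturn_t demo_irq_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;		/* stub */
}

static int demo_irq_setup(struct pci_dev *pdev, struct demo_pcie *pcie)
{
	int ret;

	ret = request_irq(pdev->irq, demo_irq_handler, IRQF_SHARED,
			  "demo", pcie);
	if (ret)
		return ret;
	iowrite32(DEMO_IEN_ALL, pcie->reg_base + DEMO_IEN_REG); /* unmask last */
	return 0;
}

static void demo_irq_teardown(struct pci_dev *pdev, struct demo_pcie *pcie)
{
	iowrite32(0, pcie->reg_base + DEMO_IEN_REG);	/* mask first */
	free_irq(pdev->irq, pcie);
}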
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index cbe831875347..c0215a8770f4 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -1188,8 +1188,6 @@ static int lan9303_port_fdb_add(struct dsa_switch *ds, int port,
struct lan9303 *chip = ds->priv;
dev_dbg(chip->dev, "%s(%d, %pM, %d)\n", __func__, port, addr, vid);
- if (vid)
- return -EOPNOTSUPP;
return lan9303_alr_add_port(chip, addr, port, false);
}
@@ -1201,8 +1199,6 @@ static int lan9303_port_fdb_del(struct dsa_switch *ds, int port,
struct lan9303 *chip = ds->priv;
dev_dbg(chip->dev, "%s(%d, %pM, %d)\n", __func__, port, addr, vid);
- if (vid)
- return -EOPNOTSUPP;
lan9303_alr_del_port(chip, addr, port);
return 0;
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 9bc54e1348cb..7e773c4ba046 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -399,6 +399,20 @@ static void mt7530_pll_setup(struct mt7530_priv *priv)
core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
}
+/* If port 6 is available as a CPU port, always prefer that as the default,
+ * otherwise don't care.
+ */
+static struct dsa_port *
+mt753x_preferred_default_local_cpu_port(struct dsa_switch *ds)
+{
+ struct dsa_port *cpu_dp = dsa_to_port(ds, 6);
+
+ if (dsa_port_is_cpu(cpu_dp))
+ return cpu_dp;
+
+ return NULL;
+}
+
/* Setup port 6 interface mode and TRGMII TX circuit */
static int
mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
@@ -985,6 +999,18 @@ unlock_exit:
mutex_unlock(&priv->reg_mutex);
}
+static void
+mt753x_trap_frames(struct mt7530_priv *priv)
+{
+ /* Trap BPDUs to the CPU port(s) */
+ mt7530_rmw(priv, MT753X_BPC, MT753X_BPDU_PORT_FW_MASK,
+ MT753X_BPDU_CPU_ONLY);
+
+ /* Trap LLDP frames with :0E MAC DA to the CPU port(s) */
+ mt7530_rmw(priv, MT753X_RGAC2, MT753X_R0E_PORT_FW_MASK,
+ MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY));
+}
+
static int
mt753x_cpu_port_enable(struct dsa_switch *ds, int port)
{
@@ -1007,9 +1033,16 @@ mt753x_cpu_port_enable(struct dsa_switch *ds, int port)
UNU_FFP(BIT(port)));
/* Set CPU port number */
- if (priv->id == ID_MT7621)
+ if (priv->id == ID_MT7530 || priv->id == ID_MT7621)
mt7530_rmw(priv, MT7530_MFC, CPU_MASK, CPU_EN | CPU_PORT(port));
+ /* Add the CPU port to the CPU port bitmap for MT7531 and the switch on
+ * the MT7988 SoC. Trapped frames will be forwarded to the CPU port that
+ * is affine to the inbound user port.
+ */
+ if (priv->id == ID_MT7531 || priv->id == ID_MT7988)
+ mt7530_set(priv, MT7531_CFC, MT7531_CPU_PMAP(BIT(port)));
+
/* CPU port gets connected to all user ports of
* the switch.
*/
@@ -2255,6 +2288,8 @@ mt7530_setup(struct dsa_switch *ds)
priv->p6_interface = PHY_INTERFACE_MODE_NA;
+ mt753x_trap_frames(priv);
+
/* Enable and reset MIB counters */
mt7530_mib_reset(ds);
@@ -2352,17 +2387,9 @@ static int
mt7531_setup_common(struct dsa_switch *ds)
{
struct mt7530_priv *priv = ds->priv;
- struct dsa_port *cpu_dp;
int ret, i;
- /* BPDU to CPU port */
- dsa_switch_for_each_cpu_port(cpu_dp, ds) {
- mt7530_rmw(priv, MT7531_CFC, MT7531_CPU_PMAP_MASK,
- BIT(cpu_dp->index));
- break;
- }
- mt7530_rmw(priv, MT753X_BPC, MT753X_BPDU_PORT_FW_MASK,
- MT753X_BPDU_CPU_ONLY);
+ mt753x_trap_frames(priv);
/* Enable and reset MIB counters */
mt7530_mib_reset(ds);
@@ -3085,6 +3112,7 @@ static int mt7988_setup(struct dsa_switch *ds)
const struct dsa_switch_ops mt7530_switch_ops = {
.get_tag_protocol = mtk_get_tag_protocol,
.setup = mt753x_setup,
+ .preferred_default_local_cpu_port = mt753x_preferred_default_local_cpu_port,
.get_strings = mt7530_get_strings,
.get_ethtool_stats = mt7530_get_ethtool_stats,
.get_sset_count = mt7530_get_sset_count,
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index 5084f48a8869..08045b035e6a 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -54,6 +54,7 @@ enum mt753x_id {
#define MT7531_MIRROR_PORT_GET(x) (((x) >> 16) & MIRROR_MASK)
#define MT7531_MIRROR_PORT_SET(x) (((x) & MIRROR_MASK) << 16)
#define MT7531_CPU_PMAP_MASK GENMASK(7, 0)
+#define MT7531_CPU_PMAP(x) FIELD_PREP(MT7531_CPU_PMAP_MASK, x)
#define MT753X_MIRROR_REG(id) ((((id) == ID_MT7531) || ((id) == ID_MT7988)) ? \
MT7531_CFC : MT7530_MFC)
@@ -66,6 +67,11 @@ enum mt753x_id {
#define MT753X_BPC 0x24
#define MT753X_BPDU_PORT_FW_MASK GENMASK(2, 0)
+/* Register for :03 and :0E MAC DA frame control */
+#define MT753X_RGAC2 0x2c
+#define MT753X_R0E_PORT_FW_MASK GENMASK(18, 16)
+#define MT753X_R0E_PORT_FW(x) FIELD_PREP(MT753X_R0E_PORT_FW_MASK, x)
+
enum mt753x_bpdu_port_fw {
MT753X_BPDU_FOLLOW_MFC,
MT753X_BPDU_CPU_EXCLUDE = 4,
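MT753X_RGAC2's new field follows the kernel's usual bitfield idiom: GENMASK() names the bit span once, FIELD_PREP() shifts a value into it, and FIELD_GET() extracts it again. A minimal demonstration with an illustrative field:

#include <linux/bits.h>
#include <linux/bitfield.h>

#define DEMO_PORT_FW_MASK	GENMASK(18, 16)		/* 3-bit field */
#define DEMO_PORT_FW(x)		FIELD_PREP(DEMO_PORT_FW_MASK, (x))

/* DEMO_PORT_FW(4) evaluates to 4 << 16;
 * FIELD_GET(DEMO_PORT_FW_MASK, reg) recovers the 4.
 */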
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 64a2f2f83735..08a46ffd53af 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -7170,7 +7170,7 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
goto out;
}
if (chip->reset)
- usleep_range(1000, 2000);
+ usleep_range(10000, 20000);
/* Detect if the device is configured in single chip addressing mode,
* otherwise continue with address specific smi init/detection.
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index aec9d4fd20e3..d19b6303b91f 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -276,7 +276,7 @@
/* Offset 0x10: Extended Port Control Command */
#define MV88E6393X_PORT_EPC_CMD 0x10
#define MV88E6393X_PORT_EPC_CMD_BUSY 0x8000
-#define MV88E6393X_PORT_EPC_CMD_WRITE 0x0300
+#define MV88E6393X_PORT_EPC_CMD_WRITE 0x3000
#define MV88E6393X_PORT_EPC_INDEX_PORT_ETYPE 0x02
/* Offset 0x11: Extended Port Control Data */
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index cfb3faeaa5bf..d172a3e9736c 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -1263,7 +1263,7 @@ static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
/* Consider the standard Ethernet overhead of 8 octets preamble+SFD,
* 4 octets FCS, 12 octets IFG.
*/
- needed_bit_time_ps = (maxlen + 24) * picos_per_byte;
+ needed_bit_time_ps = (u64)(maxlen + 24) * picos_per_byte;
dev_dbg(ocelot->dev,
"port %d: max frame size %d needs %llu ps at speed %d\n",
diff --git a/drivers/net/dsa/qca/Kconfig b/drivers/net/dsa/qca/Kconfig
index 4347b42c50fd..de9da469908b 100644
--- a/drivers/net/dsa/qca/Kconfig
+++ b/drivers/net/dsa/qca/Kconfig
@@ -20,6 +20,7 @@ config NET_DSA_QCA8K_LEDS_SUPPORT
bool "Qualcomm Atheros QCA8K Ethernet switch family LEDs support"
depends on NET_DSA_QCA8K
depends on LEDS_CLASS=y || LEDS_CLASS=NET_DSA_QCA8K
+ depends on LEDS_TRIGGERS
help
This enables support for LEDs present on the Qualcomm Atheros

QCA8K Ethernet switch chips.
diff --git a/drivers/net/dsa/rzn1_a5psw.c b/drivers/net/dsa/rzn1_a5psw.c
index 919027cf2012..c37d2e537230 100644
--- a/drivers/net/dsa/rzn1_a5psw.c
+++ b/drivers/net/dsa/rzn1_a5psw.c
@@ -120,6 +120,22 @@ static void a5psw_port_mgmtfwd_set(struct a5psw *a5psw, int port, bool enable)
a5psw_port_pattern_set(a5psw, port, A5PSW_PATTERN_MGMTFWD, enable);
}
+static void a5psw_port_tx_enable(struct a5psw *a5psw, int port, bool enable)
+{
+ u32 mask = A5PSW_PORT_ENA_TX(port);
+ u32 reg = enable ? mask : 0;
+
+ /* Even though the port TX is disabled through TXENA bit in the
+ * PORT_ENA register, it can still send BPDUs. This depends on the tag
+ * configuration added when sending packets from the CPU port to the
+ * switch port. Indeed, when using forced forwarding without filtering,
+ * even disabled ports will be able to send packets that are tagged.
+ * This allows implementing STP support when ports are in a state where
+ * forwarding traffic should be stopped but BPDUs should still be sent.
+ */
+ a5psw_reg_rmw(a5psw, A5PSW_PORT_ENA, mask, reg);
+}
+
static void a5psw_port_enable_set(struct a5psw *a5psw, int port, bool enable)
{
u32 port_ena = 0;
@@ -292,6 +308,22 @@ static int a5psw_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
return 0;
}
+static void a5psw_port_learning_set(struct a5psw *a5psw, int port, bool learn)
+{
+ u32 mask = A5PSW_INPUT_LEARN_DIS(port);
+ u32 reg = !learn ? mask : 0;
+
+ a5psw_reg_rmw(a5psw, A5PSW_INPUT_LEARN, mask, reg);
+}
+
+static void a5psw_port_rx_block_set(struct a5psw *a5psw, int port, bool block)
+{
+ u32 mask = A5PSW_INPUT_LEARN_BLOCK(port);
+ u32 reg = block ? mask : 0;
+
+ a5psw_reg_rmw(a5psw, A5PSW_INPUT_LEARN, mask, reg);
+}
+
static void a5psw_flooding_set_resolution(struct a5psw *a5psw, int port,
bool set)
{
@@ -308,6 +340,14 @@ static void a5psw_flooding_set_resolution(struct a5psw *a5psw, int port,
a5psw_reg_writel(a5psw, offsets[i], a5psw->bridged_ports);
}
+static void a5psw_port_set_standalone(struct a5psw *a5psw, int port,
+ bool standalone)
+{
+ a5psw_port_learning_set(a5psw, port, !standalone);
+ a5psw_flooding_set_resolution(a5psw, port, !standalone);
+ a5psw_port_mgmtfwd_set(a5psw, port, standalone);
+}
+
static int a5psw_port_bridge_join(struct dsa_switch *ds, int port,
struct dsa_bridge bridge,
bool *tx_fwd_offload,
@@ -323,8 +363,7 @@ static int a5psw_port_bridge_join(struct dsa_switch *ds, int port,
}
a5psw->br_dev = bridge.dev;
- a5psw_flooding_set_resolution(a5psw, port, true);
- a5psw_port_mgmtfwd_set(a5psw, port, false);
+ a5psw_port_set_standalone(a5psw, port, false);
return 0;
}
@@ -334,8 +373,7 @@ static void a5psw_port_bridge_leave(struct dsa_switch *ds, int port,
{
struct a5psw *a5psw = ds->priv;
- a5psw_flooding_set_resolution(a5psw, port, false);
- a5psw_port_mgmtfwd_set(a5psw, port, true);
+ a5psw_port_set_standalone(a5psw, port, true);
/* No more ports bridged */
if (a5psw->bridged_ports == BIT(A5PSW_CPU_PORT))
@@ -344,28 +382,35 @@ static void a5psw_port_bridge_leave(struct dsa_switch *ds, int port,
static void a5psw_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
{
- u32 mask = A5PSW_INPUT_LEARN_DIS(port) | A5PSW_INPUT_LEARN_BLOCK(port);
+ bool learning_enabled, rx_enabled, tx_enabled;
struct a5psw *a5psw = ds->priv;
- u32 reg = 0;
switch (state) {
case BR_STATE_DISABLED:
case BR_STATE_BLOCKING:
- reg |= A5PSW_INPUT_LEARN_DIS(port);
- reg |= A5PSW_INPUT_LEARN_BLOCK(port);
- break;
case BR_STATE_LISTENING:
- reg |= A5PSW_INPUT_LEARN_DIS(port);
+ rx_enabled = false;
+ tx_enabled = false;
+ learning_enabled = false;
break;
case BR_STATE_LEARNING:
- reg |= A5PSW_INPUT_LEARN_BLOCK(port);
+ rx_enabled = false;
+ tx_enabled = false;
+ learning_enabled = true;
break;
case BR_STATE_FORWARDING:
- default:
+ rx_enabled = true;
+ tx_enabled = true;
+ learning_enabled = true;
break;
+ default:
+ dev_err(ds->dev, "invalid STP state: %d\n", state);
+ return;
}
- a5psw_reg_rmw(a5psw, A5PSW_INPUT_LEARN, mask, reg);
+ a5psw_port_learning_set(a5psw, port, learning_enabled);
+ a5psw_port_rx_block_set(a5psw, port, !rx_enabled);
+ a5psw_port_tx_enable(a5psw, port, tx_enabled);
}
static void a5psw_port_fast_age(struct dsa_switch *ds, int port)
@@ -673,7 +718,7 @@ static int a5psw_setup(struct dsa_switch *ds)
}
/* Configure management port */
- reg = A5PSW_CPU_PORT | A5PSW_MGMT_CFG_DISCARD;
+ reg = A5PSW_CPU_PORT | A5PSW_MGMT_CFG_ENABLE;
a5psw_reg_writel(a5psw, A5PSW_MGMT_CFG, reg);
/* Set pattern 0 to forward all frame to mgmt port */
@@ -722,13 +767,15 @@ static int a5psw_setup(struct dsa_switch *ds)
if (dsa_port_is_unused(dp))
continue;
- /* Enable egress flooding for CPU port */
- if (dsa_port_is_cpu(dp))
+ /* Enable egress flooding and learning for CPU port */
+ if (dsa_port_is_cpu(dp)) {
a5psw_flooding_set_resolution(a5psw, port, true);
+ a5psw_port_learning_set(a5psw, port, true);
+ }
- /* Enable management forward only for user ports */
+ /* Enable standalone mode for user ports */
if (dsa_port_is_user(dp))
- a5psw_port_mgmtfwd_set(a5psw, port, true);
+ a5psw_port_set_standalone(a5psw, port, true);
}
return 0;
diff --git a/drivers/net/dsa/rzn1_a5psw.h b/drivers/net/dsa/rzn1_a5psw.h
index c67abd49c013..b869192eef3f 100644
--- a/drivers/net/dsa/rzn1_a5psw.h
+++ b/drivers/net/dsa/rzn1_a5psw.h
@@ -19,6 +19,7 @@
#define A5PSW_PORT_OFFSET(port) (0x400 * (port))
#define A5PSW_PORT_ENA 0x8
+#define A5PSW_PORT_ENA_TX(port) BIT(port)
#define A5PSW_PORT_ENA_RX_SHIFT 16
#define A5PSW_PORT_ENA_TX_RX(port) (BIT((port) + A5PSW_PORT_ENA_RX_SHIFT) | \
BIT(port))
@@ -36,7 +37,7 @@
#define A5PSW_INPUT_LEARN_BLOCK(p) BIT(p)
#define A5PSW_MGMT_CFG 0x20
-#define A5PSW_MGMT_CFG_DISCARD BIT(7)
+#define A5PSW_MGMT_CFG_ENABLE BIT(6)
#define A5PSW_MODE_CFG 0x24
#define A5PSW_MODE_STATS_RESET BIT(31)
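
The rewritten a5psw_port_stp_state_set() reduces each bridge STP state to three independent knobs: learning, RX blocking, and TX enable. A self-contained sketch of that mapping, with the BR_STATE_* values mirrored from linux/if_bridge.h and the register writes replaced by a plain struct:

#include <stdbool.h>
#include <stdio.h>

enum { BR_STATE_DISABLED, BR_STATE_LISTENING, BR_STATE_LEARNING,
       BR_STATE_FORWARDING, BR_STATE_BLOCKING }; /* as in linux/if_bridge.h */

struct port_cfg { bool learn, rx, tx; };

/* Returns false for an unknown state, like the dev_err()+return path. */
static bool stp_state_to_cfg(unsigned int state, struct port_cfg *cfg)
{
	switch (state) {
	case BR_STATE_DISABLED:
	case BR_STATE_BLOCKING:
	case BR_STATE_LISTENING:
		*cfg = (struct port_cfg){ false, false, false };
		return true;
	case BR_STATE_LEARNING:
		*cfg = (struct port_cfg){ true, false, false };
		return true;
	case BR_STATE_FORWARDING:
		*cfg = (struct port_cfg){ true, true, true };
		return true;
	default:
		return false;
	}
}

int main(void)
{
	struct port_cfg cfg;

	if (stp_state_to_cfg(BR_STATE_LEARNING, &cfg))
		printf("learn=%d rx=%d tx=%d\n", cfg.learn, cfg.rx, cfg.tx);
	return 0;
}
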
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index d2f4358cc550..ba3e7aa1a28f 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -66,8 +66,10 @@ static int max_interrupt_work = 20;
#include <linux/timer.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
-
#include <linux/uaccess.h>
+
+#include <net/Space.h>
+
#include <asm/io.h>
#include <asm/dma.h>
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index 82f94b1635bf..5267e9dcd87e 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -195,6 +195,7 @@ static int tc589_probe(struct pcmcia_device *link)
{
struct el3_private *lp;
struct net_device *dev;
+ int ret;
dev_dbg(&link->dev, "3c589_attach()\n");
@@ -218,7 +219,15 @@ static int tc589_probe(struct pcmcia_device *link)
dev->ethtool_ops = &netdev_ethtool_ops;
- return tc589_config(link);
+ ret = tc589_config(link);
+ if (ret)
+ goto err_free_netdev;
+
+ return 0;
+
+err_free_netdev:
+ free_netdev(dev);
+ return ret;
}
static void tc589_detach(struct pcmcia_device *link)
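
The tc589_probe() fix is the standard goto-unwind shape: anything allocated before the failing call must be released on the error path before the error is returned. A generic user-space sketch of the pattern (all names hypothetical):

#include <stdio.h>
#include <stdlib.h>

struct dev { int dummy; };

static struct dev *alloc_dev(void) { return calloc(1, sizeof(struct dev)); }
static void free_dev(struct dev *d) { free(d); }
static int config_dev(struct dev *d) { (void)d; return -1; /* pretend failure */ }

static int probe(void)
{
	struct dev *d = alloc_dev();
	int ret;

	if (!d)
		return -12; /* -ENOMEM */

	ret = config_dev(d);
	if (ret)
		goto err_free_dev; /* undo the allocation on failure */

	return 0;

err_free_dev:
	free_dev(d);
	return ret;
}

int main(void)
{
	printf("probe() = %d\n", probe());
	return 0;
}
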
diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c
index 0a9118b8be0c..bc9c81dc00fd 100644
--- a/drivers/net/ethernet/8390/ne.c
+++ b/drivers/net/ethernet/8390/ne.c
@@ -52,6 +52,7 @@ static const char version2[] =
#include <linux/etherdevice.h>
#include <linux/jiffies.h>
#include <linux/platform_device.h>
+#include <net/Space.h>
#include <asm/io.h>
diff --git a/drivers/net/ethernet/8390/smc-ultra.c b/drivers/net/ethernet/8390/smc-ultra.c
index 6e62c37c9400..7465650c8078 100644
--- a/drivers/net/ethernet/8390/smc-ultra.c
+++ b/drivers/net/ethernet/8390/smc-ultra.c
@@ -66,6 +66,7 @@ static const char version[] =
#include <linux/isapnp.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <net/Space.h>
#include <asm/io.h>
#include <asm/irq.h>
diff --git a/drivers/net/ethernet/8390/wd.c b/drivers/net/ethernet/8390/wd.c
index 5b00c452bede..119021d41451 100644
--- a/drivers/net/ethernet/8390/wd.c
+++ b/drivers/net/ethernet/8390/wd.c
@@ -37,6 +37,7 @@ static const char version[] =
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <net/Space.h>
#include <asm/io.h>
diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
index 8971665a4b2a..6cf38180cc01 100644
--- a/drivers/net/ethernet/amd/lance.c
+++ b/drivers/net/ethernet/amd/lance.c
@@ -59,6 +59,7 @@ static const char version[] = "lance.c:v1.16 2006/11/09 dplatt@3do.com, becker@c
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/bitops.h>
+#include <net/Space.h>
#include <asm/io.h>
#include <asm/dma.h>
diff --git a/drivers/net/ethernet/amd/pds_core/dev.c b/drivers/net/ethernet/amd/pds_core/dev.c
index f7c597ea5daf..debe5216fe29 100644
--- a/drivers/net/ethernet/amd/pds_core/dev.c
+++ b/drivers/net/ethernet/amd/pds_core/dev.c
@@ -68,9 +68,15 @@ bool pdsc_is_fw_running(struct pdsc *pdsc)
bool pdsc_is_fw_good(struct pdsc *pdsc)
{
- u8 gen = pdsc->fw_status & PDS_CORE_FW_STS_F_GENERATION;
+ bool fw_running = pdsc_is_fw_running(pdsc);
+ u8 gen;
- return pdsc_is_fw_running(pdsc) && gen == pdsc->fw_generation;
+ /* Make sure to update the cached fw_status by calling
+ * pdsc_is_fw_running() before getting the generation
+ */
+ gen = pdsc->fw_status & PDS_CORE_FW_STS_F_GENERATION;
+
+ return fw_running && gen == pdsc->fw_generation;
}
static u8 pdsc_devcmd_status(struct pdsc *pdsc)
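
The pds_core fix is purely about evaluation order: the old code sampled the generation bits from the cached fw_status before pdsc_is_fw_running() had refreshed that cache. A hypothetical stand-alone sketch of the refresh-then-sample pattern (field layout invented for illustration):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FW_STS_RUNNING    0x01
#define FW_STS_GENERATION 0xf0

struct dev {
	uint8_t hw_reg;    /* stands in for the device register */
	uint8_t fw_status; /* cached copy, refreshed below */
	uint8_t fw_generation;
};

static bool fw_running(struct dev *d)
{
	d->fw_status = d->hw_reg; /* refresh the cache from "hardware" */
	return d->fw_status & FW_STS_RUNNING;
}

static bool fw_good(struct dev *d)
{
	bool running = fw_running(d); /* refresh first ... */
	uint8_t gen = d->fw_status & FW_STS_GENERATION; /* ... then sample */

	return running && gen == d->fw_generation;
}

int main(void)
{
	struct dev d = { .hw_reg = 0x11, .fw_status = 0, .fw_generation = 0x10 };

	printf("fw_good = %d\n", fw_good(&d)); /* 1: gen matches after refresh */
	return 0;
}
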
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 33a9574e9e04..32d2c6fac652 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -1329,7 +1329,7 @@ static enum xgbe_mode xgbe_phy_status_aneg(struct xgbe_prv_data *pdata)
return pdata->phy_if.phy_impl.an_outcome(pdata);
}
-static void xgbe_phy_status_result(struct xgbe_prv_data *pdata)
+static bool xgbe_phy_status_result(struct xgbe_prv_data *pdata)
{
struct ethtool_link_ksettings *lks = &pdata->phy.lks;
enum xgbe_mode mode;
@@ -1367,8 +1367,13 @@ static void xgbe_phy_status_result(struct xgbe_prv_data *pdata)
pdata->phy.duplex = DUPLEX_FULL;
- if (xgbe_set_mode(pdata, mode) && pdata->an_again)
+ if (!xgbe_set_mode(pdata, mode))
+ return false;
+
+ if (pdata->an_again)
xgbe_phy_reconfig_aneg(pdata);
+
+ return true;
}
static void xgbe_phy_status(struct xgbe_prv_data *pdata)
@@ -1398,7 +1403,8 @@ static void xgbe_phy_status(struct xgbe_prv_data *pdata)
return;
}
- xgbe_phy_status_result(pdata);
+ if (xgbe_phy_status_result(pdata))
+ return;
if (test_bit(XGBE_LINK_INIT, &pdata->dev_state))
clear_bit(XGBE_LINK_INIT, &pdata->dev_state);
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 38d0cdaf22a5..bf1611cce974 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -2531,9 +2531,9 @@ static int bcm_sysport_probe(struct platform_device *pdev)
priv->irq0 = platform_get_irq(pdev, 0);
if (!priv->is_lite) {
priv->irq1 = platform_get_irq(pdev, 1);
- priv->wol_irq = platform_get_irq(pdev, 2);
+ priv->wol_irq = platform_get_irq_optional(pdev, 2);
} else {
- priv->wol_irq = platform_get_irq(pdev, 1);
+ priv->wol_irq = platform_get_irq_optional(pdev, 1);
}
if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) {
ret = -EINVAL;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 637d162bbcfa..1e7a6f1d4223 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -14294,11 +14294,16 @@ static void bnx2x_io_resume(struct pci_dev *pdev)
bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
DRV_MSG_SEQ_NUMBER_MASK;
- if (netif_running(dev))
- bnx2x_nic_load(bp, LOAD_NORMAL);
+ if (netif_running(dev)) {
+ if (bnx2x_nic_load(bp, LOAD_NORMAL)) {
+ netdev_err(bp->dev, "Error during driver initialization, try unloading/reloading the driver\n");
+ goto done;
+ }
+ }
netif_device_attach(dev);
+done:
rtnl_unlock();
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index dcd9367f05af..b499bc9c4e06 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -692,7 +692,7 @@ next_tx_int:
__netif_txq_completed_wake(txq, nr_pkts, tx_bytes,
bnxt_tx_avail(bp, txr), bp->tx_wake_thresh,
- READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING);
+ READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING);
}
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
@@ -2365,6 +2365,9 @@ static int bnxt_async_event_process(struct bnxt *bp,
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
u64 ns;
+ if (!ptp)
+ goto async_event_process_exit;
+
spin_lock_bh(&ptp->ptp_lock);
bnxt_ptp_update_current_time(bp);
ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) <<
@@ -4763,6 +4766,9 @@ int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
continue;
+ if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE &&
+ !bp->ptp_cfg)
+ continue;
__set_bit(bnxt_async_events_arr[i], async_events_bmap);
}
if (bmap && bmap_size) {
@@ -5350,6 +5356,7 @@ static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp)
if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_QCFG))
return;
+ req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
/* all contexts configured to same hash_type, zero always exists */
req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
resp = hwrm_req_hold(bp, req);
@@ -8812,6 +8819,9 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
goto err_out;
}
+ if (BNXT_VF(bp))
+ bnxt_hwrm_func_qcfg(bp);
+
rc = bnxt_setup_vnic(bp, 0);
if (rc)
goto err_out;
@@ -11598,6 +11608,7 @@ static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
static void bnxt_fw_health_check(struct bnxt *bp)
{
struct bnxt_fw_health *fw_health = bp->fw_health;
+ struct pci_dev *pdev = bp->pdev;
u32 val;
if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
@@ -11611,7 +11622,7 @@ static void bnxt_fw_health_check(struct bnxt *bp)
}
val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
- if (val == fw_health->last_fw_heartbeat) {
+ if (val == fw_health->last_fw_heartbeat && pci_device_is_present(pdev)) {
fw_health->arrests++;
goto fw_reset;
}
@@ -11619,7 +11630,7 @@ static void bnxt_fw_health_check(struct bnxt *bp)
fw_health->last_fw_heartbeat = val;
val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
- if (val != fw_health->last_fw_reset_cnt) {
+ if (val != fw_health->last_fw_reset_cnt && pci_device_is_present(pdev)) {
fw_health->discoveries++;
goto fw_reset;
}
@@ -13025,26 +13036,37 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp)
#endif /* CONFIG_RFS_ACCEL */
-static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
+static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
+ unsigned int entry, struct udp_tunnel_info *ti)
{
struct bnxt *bp = netdev_priv(netdev);
- struct udp_tunnel_info ti;
unsigned int cmd;
- udp_tunnel_nic_get_port(netdev, table, 0, &ti);
- if (ti.type == UDP_TUNNEL_TYPE_VXLAN)
+ if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
else
cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
- if (ti.port)
- return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd);
+ return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd);
+}
+
+static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
+ unsigned int entry, struct udp_tunnel_info *ti)
+{
+ struct bnxt *bp = netdev_priv(netdev);
+ unsigned int cmd;
+
+ if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
+ cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
+ else
+ cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
}
static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
- .sync_table = bnxt_udp_tunnel_sync,
+ .set_port = bnxt_udp_tunnel_set_port,
+ .unset_port = bnxt_udp_tunnel_unset_port,
.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
.tables = {
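
bnxt moves from the table-wide sync_table callback to per-entry set_port/unset_port callbacks, so each add or delete receives the affected udp_tunnel_info directly instead of re-reading it with udp_tunnel_nic_get_port(). A generic sketch of the callback-pair shape (types simplified; this is not the real udp_tunnel_nic API):

#include <stdio.h>

struct tunnel_info { int type, port; };

/* Per-entry callbacks: the core passes the affected entry directly. */
struct tunnel_ops {
	int (*set_port)(unsigned int table, unsigned int entry,
			struct tunnel_info *ti);
	int (*unset_port)(unsigned int table, unsigned int entry,
			  struct tunnel_info *ti);
};

static int my_set(unsigned int t, unsigned int e, struct tunnel_info *ti)
{
	printf("alloc fw filter: table %u entry %u port %d\n", t, e, ti->port);
	return 0;
}

static int my_unset(unsigned int t, unsigned int e, struct tunnel_info *ti)
{
	printf("free fw filter: table %u entry %u port %d\n", t, e, ti->port);
	return 0;
}

static const struct tunnel_ops ops = {
	.set_port = my_set,
	.unset_port = my_unset,
};

int main(void)
{
	struct tunnel_info vxlan = { .type = 1, .port = 4789 };

	ops.set_port(0, 0, &vxlan);
	ops.unset_port(0, 0, &vxlan);
	return 0;
}
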
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 2dd8ee4a6f75..8fd5071d8b09 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -3831,7 +3831,7 @@ static int bnxt_reset(struct net_device *dev, u32 *flags)
}
}
- if (req & BNXT_FW_RESET_AP) {
+ if (!BNXT_CHIP_P4_PLUS(bp) && (req & BNXT_FW_RESET_AP)) {
/* This feature is not supported in older firmware versions */
if (bp->hwrm_spec_code >= 0x10803) {
if (!bnxt_firmware_reset_ap(dev)) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index e46689128e32..f3886710e778 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -952,6 +952,7 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg)
bnxt_ptp_timecounter_init(bp, true);
bnxt_ptp_adjfine_rtc(bp, 0);
}
+ bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, true);
ptp->ptp_info = bnxt_ptp_caps;
if ((bp->fw_cap & BNXT_FW_CAP_PTP_PPS)) {
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index f28ffc31df22..2b5761ad2f92 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1272,7 +1272,8 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev,
}
}
-static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
+void bcmgenet_eee_enable_set(struct net_device *dev, bool enable,
+ bool tx_lpi_enabled)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL;
@@ -1292,7 +1293,7 @@ static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
/* Enable EEE and switch to a 27Mhz clock automatically */
reg = bcmgenet_readl(priv->base + off);
- if (enable)
+ if (tx_lpi_enabled)
reg |= TBUF_EEE_EN | TBUF_PM_EN;
else
reg &= ~(TBUF_EEE_EN | TBUF_PM_EN);
@@ -1313,6 +1314,7 @@ static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
priv->eee.eee_enabled = enable;
priv->eee.eee_active = enable;
+ priv->eee.tx_lpi_enabled = tx_lpi_enabled;
}
static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
@@ -1328,6 +1330,7 @@ static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
e->eee_enabled = p->eee_enabled;
e->eee_active = p->eee_active;
+ e->tx_lpi_enabled = p->tx_lpi_enabled;
e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);
return phy_ethtool_get_eee(dev->phydev, e);
@@ -1337,7 +1340,6 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct ethtool_eee *p = &priv->eee;
- int ret = 0;
if (GENET_IS_V1(priv))
return -EOPNOTSUPP;
@@ -1348,16 +1350,11 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
p->eee_enabled = e->eee_enabled;
if (!p->eee_enabled) {
- bcmgenet_eee_enable_set(dev, false);
+ bcmgenet_eee_enable_set(dev, false, false);
} else {
- ret = phy_init_eee(dev->phydev, false);
- if (ret) {
- netif_err(priv, hw, dev, "EEE initialization failed\n");
- return ret;
- }
-
+ p->eee_active = phy_init_eee(dev->phydev, false) >= 0;
bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
- bcmgenet_eee_enable_set(dev, true);
+ bcmgenet_eee_enable_set(dev, p->eee_active, e->tx_lpi_enabled);
}
return phy_ethtool_set_eee(dev->phydev, e);
@@ -3450,7 +3447,7 @@ err_clk_disable:
return ret;
}
-static void bcmgenet_netif_stop(struct net_device *dev)
+static void bcmgenet_netif_stop(struct net_device *dev, bool stop_phy)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
@@ -3465,6 +3462,8 @@ static void bcmgenet_netif_stop(struct net_device *dev)
/* Disable MAC transmit. TX DMA disabled must be done before this */
umac_enable_set(priv, CMD_TX_EN, false);
+ if (stop_phy)
+ phy_stop(dev->phydev);
bcmgenet_disable_rx_napi(priv);
bcmgenet_intr_disable(priv);
@@ -3485,7 +3484,7 @@ static int bcmgenet_close(struct net_device *dev)
netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");
- bcmgenet_netif_stop(dev);
+ bcmgenet_netif_stop(dev, false);
/* Really kill the PHY state machine and disconnect from it */
phy_disconnect(dev->phydev);
@@ -4277,9 +4276,6 @@ static int bcmgenet_resume(struct device *d)
if (!device_may_wakeup(d))
phy_resume(dev->phydev);
- if (priv->eee.eee_enabled)
- bcmgenet_eee_enable_set(dev, true);
-
bcmgenet_netif_start(dev);
netif_device_attach(dev);
@@ -4303,7 +4299,7 @@ static int bcmgenet_suspend(struct device *d)
netif_device_detach(dev);
- bcmgenet_netif_stop(dev);
+ bcmgenet_netif_stop(dev, true);
if (!device_may_wakeup(d))
phy_suspend(dev->phydev);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 946f6e283c4e..1985c0ec4da2 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -703,4 +703,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
enum bcmgenet_power_mode mode);
+void bcmgenet_eee_enable_set(struct net_device *dev, bool enable,
+ bool tx_lpi_enabled);
+
#endif /* __BCMGENET_H__ */
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index be042905ada2..c15ed0acdb77 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -87,6 +87,11 @@ static void bcmgenet_mac_config(struct net_device *dev)
reg |= CMD_TX_EN | CMD_RX_EN;
}
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+
+ priv->eee.eee_active = phy_init_eee(phydev, 0) >= 0;
+ bcmgenet_eee_enable_set(dev,
+ priv->eee.eee_enabled && priv->eee.eee_active,
+ priv->eee.tx_lpi_enabled);
}
/* setup netdev link state when PHY link status change and
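
The genet EEE rework keeps three flags distinct: eee_enabled (what the user asked for), eee_active (what phy_init_eee() reports as negotiated), and tx_lpi_enabled (whether TX may actually enter LPI). A small sketch of how the call site combines them (logic paraphrased from the diff; the printout stands in for the register writes):

#include <stdbool.h>
#include <stdio.h>

struct eee_state { bool eee_enabled, eee_active, tx_lpi_enabled; };

/* Paraphrases bcmgenet_eee_enable_set(dev, eee_enabled && eee_active,
 * tx_lpi_enabled): the MAC-level enable needs both the user request
 * and a successful negotiation; TX LPI is a further, separate knob. */
static void apply_eee(const struct eee_state *p)
{
	bool enable = p->eee_enabled && p->eee_active;
	bool tx_lpi = enable && p->tx_lpi_enabled;

	printf("EEE %s, TX LPI %s\n", enable ? "on" : "off",
	       tx_lpi ? "on" : "off");
}

int main(void)
{
	struct eee_state p = { .eee_enabled = true, .eee_active = false,
			       .tx_lpi_enabled = true };

	apply_eee(&p); /* negotiation failed: everything stays off */
	p.eee_active = true;
	apply_eee(&p); /* negotiated: EEE and TX LPI come on */
	return 0;
}
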
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index 06a0c00af99c..276c32c3926a 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -72,6 +72,8 @@
#include <linux/gfp.h>
#include <linux/io.h>
+#include <net/Space.h>
+
#include <asm/irq.h>
#include <linux/atomic.h>
#if ALLOW_DMA
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 7e408bcc88de..0defd519ba62 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1135,8 +1135,8 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
VLAN_ETH_HLEN : ETH_HLEN;
if (skb->len <= 60 &&
- (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
- is_ipv4_pkt(skb)) {
+ (lancer_chip(adapter) || BE3_chip(adapter) ||
+ skb_vlan_tag_present(skb)) && is_ipv4_pkt(skb)) {
ip = (struct iphdr *)ip_hdr(skb);
pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
index b1871e6c4006..00e50bd30189 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
@@ -54,6 +54,9 @@ static int phy_mode(enum dpmac_eth_if eth_if, phy_interface_t *if_mode)
case DPMAC_ETH_IF_XFI:
*if_mode = PHY_INTERFACE_MODE_10GBASER;
break;
+ case DPMAC_ETH_IF_CAUI:
+ *if_mode = PHY_INTERFACE_MODE_25GBASER;
+ break;
default:
return -EINVAL;
}
@@ -79,6 +82,8 @@ static enum dpmac_eth_if dpmac_eth_if_mode(phy_interface_t if_mode)
return DPMAC_ETH_IF_XFI;
case PHY_INTERFACE_MODE_1000BASEX:
return DPMAC_ETH_IF_1000BASEX;
+ case PHY_INTERFACE_MODE_25GBASER:
+ return DPMAC_ETH_IF_CAUI;
default:
return DPMAC_ETH_IF_MII;
}
@@ -418,7 +423,7 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac)
mac->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
MAC_10FD | MAC_100FD | MAC_1000FD | MAC_2500FD | MAC_5000FD |
- MAC_10000FD;
+ MAC_10000FD | MAC_25000FD;
dpaa2_mac_set_supported_interfaces(mac);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 3c4fa26f0f9b..9e1b2536e9a9 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -1229,7 +1229,13 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
if (!skb)
break;
- rx_byte_cnt += skb->len;
+ /* When set, the outer VLAN header is extracted and reported
+ * in the receive buffer descriptor. So rx_byte_cnt should
+ * include the length of the extracted VLAN header.
+ */
+ if (bd_status & ENETC_RXBD_FLAG_VLAN)
+ rx_byte_cnt += VLAN_HLEN;
+ rx_byte_cnt += skb->len + ETH_HLEN;
rx_frm_cnt++;
napi_gro_receive(napi, skb);
@@ -1565,6 +1571,14 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
enetc_build_xdp_buff(rx_ring, bd_status, &rxbd, &i,
&cleaned_cnt, &xdp_buff);
+ /* When set, the outer VLAN header is extracted and reported
+ * in the receive buffer descriptor. So rx_byte_cnt should
+ * include the length of the extracted VLAN header.
+ */
+ if (bd_status & ENETC_RXBD_FLAG_VLAN)
+ rx_byte_cnt += VLAN_HLEN;
+ rx_byte_cnt += xdp_get_buff_len(&xdp_buff);
+
xdp_act = bpf_prog_run_xdp(prog, &xdp_buff);
switch (xdp_act) {
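
Both enetc RX paths now account for the VLAN tag that hardware stripped before the stack saw the frame, since neither skb->len nor the XDP buffer length includes it. A tiny sketch of the byte accounting (header lengths mirrored from if_ether.h/if_vlan.h):

#include <stdbool.h>
#include <stdio.h>

#define ETH_HLEN  14 /* destination MAC + source MAC + EtherType */
#define VLAN_HLEN  4 /* 802.1Q tag stripped by the hardware */

/* Bytes the port really received for one frame, given the payload
 * length the stack sees and whether a VLAN tag was extracted. */
static unsigned int rx_wire_bytes(unsigned int skb_len, bool vlan_extracted)
{
	unsigned int bytes = skb_len + ETH_HLEN;

	if (vlan_extracted)
		bytes += VLAN_HLEN;
	return bytes;
}

int main(void)
{
	printf("%u\n", rx_wire_bytes(1500, true));  /* 1518 on the wire */
	printf("%u\n", rx_wire_bytes(1500, false)); /* 1514 on the wire */
	return 0;
}
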
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index 83c27bbbc6ed..126007ab70f6 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -181,8 +181,8 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
int bw_sum = 0;
u8 bw;
- prio_top = netdev_get_prio_tc_map(ndev, tc_nums - 1);
- prio_next = netdev_get_prio_tc_map(ndev, tc_nums - 2);
+ prio_top = tc_nums - 1;
+ prio_next = tc_nums - 2;
/* Support highest prio and second prio tc in cbs mode */
if (tc != prio_top && tc != prio_next)
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 42ec6ca3bf03..38e5b5abe067 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -3798,7 +3798,6 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
entries_free = fec_enet_get_free_txdesc_num(txq);
if (entries_free < MAX_SKB_FRAGS + 1) {
netdev_err(fep->netdev, "NOT enough BD for SG!\n");
- xdp_return_frame(frame);
return NETDEV_TX_BUSY;
}
@@ -3835,6 +3834,11 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
index = fec_enet_get_bd_index(last_bdp, &txq->bd);
txq->tx_skbuff[index] = NULL;
+ /* Make sure the updates to the rest of the descriptor are performed before
+ * transferring ownership.
+ */
+ dma_wmb();
+
/* Send it on its way. Tell FEC it's ready, interrupt when done,
* it's the last BD of the frame, and to put the CRC on the end.
*/
@@ -3844,8 +3848,14 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
/* If this was the last BD in the ring, start at the beginning again. */
bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd);
+ /* Make sure the update to bdp is performed before updating txq->bd.cur. */
+ dma_wmb();
+
txq->bd.cur = bdp;
+ /* Trigger transmission start */
+ writel(0, txq->bd.reg_desc_active);
+
return 0;
}
@@ -3874,12 +3884,6 @@ static int fec_enet_xdp_xmit(struct net_device *dev,
sent_frames++;
}
- /* Make sure the update to bdp and tx_skbuff are performed. */
- wmb();
-
- /* Trigger transmission start */
- writel(0, txq->bd.reg_desc_active);
-
__netif_tx_unlock(nq);
return sent_frames;
@@ -4478,9 +4482,11 @@ fec_drv_remove(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
int ret;
- ret = pm_runtime_resume_and_get(&pdev->dev);
+ ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0)
- return ret;
+ dev_err(&pdev->dev,
+ "Failed to resume device in remove callback (%pe)\n",
+ ERR_PTR(ret));
cancel_work_sync(&fep->tx_timeout_work);
fec_ptp_stop(pdev);
@@ -4493,8 +4499,13 @@ fec_drv_remove(struct platform_device *pdev)
of_phy_deregister_fixed_link(np);
of_node_put(fep->phy_node);
- clk_disable_unprepare(fep->clk_ahb);
- clk_disable_unprepare(fep->clk_ipg);
+ /* After pm_runtime_get_sync() failed, the clks are still off, so skip
+ * disabling them again.
+ */
+ if (ret >= 0) {
+ clk_disable_unprepare(fep->clk_ahb);
+ clk_disable_unprepare(fep->clk_ipg);
+ }
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
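
The fec changes add dma_wmb() so the descriptor body is visible to the device before the ownership bit flips and before the doorbell write. User space has no DMA barriers, but C11 release ordering sketches the same publish-data-then-flag idea (an analogy only, not a substitute for dma_wmb()):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct desc {
	uint32_t addr, len;
	atomic_uint status; /* ownership bit lives here */
};

#define OWN 0x80000000u

static void publish(struct desc *d, uint32_t addr, uint32_t len)
{
	d->addr = addr; /* fill the descriptor body first */
	d->len = len;
	/* Release store: the body writes cannot be reordered past this,
	 * much like dma_wmb() before handing the BD to the MAC. */
	atomic_store_explicit(&d->status, OWN, memory_order_release);
}

int main(void)
{
	struct desc d = { 0 };

	publish(&d, 0x1000, 64);
	printf("status=0x%x len=%u\n",
	       atomic_load_explicit(&d.status, memory_order_acquire), d.len);
	return 0;
}
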
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
index cbbab5b2b402..b85c412683dd 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
@@ -331,9 +331,25 @@ static int hclge_comm_cmd_csq_done(struct hclge_comm_hw *hw)
return head == hw->cmq.csq.next_to_use;
}
-static void hclge_comm_wait_for_resp(struct hclge_comm_hw *hw,
+static u32 hclge_get_cmdq_tx_timeout(u16 opcode, u32 tx_timeout)
+{
+ static const struct hclge_cmdq_tx_timeout_map cmdq_tx_timeout_map[] = {
+ {HCLGE_OPC_CFG_RST_TRIGGER, HCLGE_COMM_CMDQ_TX_TIMEOUT_500MS},
+ };
+ u32 i;
+
+ for (i = 0; i < ARRAY_SIZE(cmdq_tx_timeout_map); i++)
+ if (cmdq_tx_timeout_map[i].opcode == opcode)
+ return cmdq_tx_timeout_map[i].tx_timeout;
+
+ return tx_timeout;
+}
+
+static void hclge_comm_wait_for_resp(struct hclge_comm_hw *hw, u16 opcode,
bool *is_completed)
{
+ u32 cmdq_tx_timeout = hclge_get_cmdq_tx_timeout(opcode,
+ hw->cmq.tx_timeout);
u32 timeout = 0;
do {
@@ -343,7 +359,7 @@ static void hclge_comm_wait_for_resp(struct hclge_comm_hw *hw,
}
udelay(1);
timeout++;
- } while (timeout < hw->cmq.tx_timeout);
+ } while (timeout < cmdq_tx_timeout);
}
static int hclge_comm_cmd_convert_err_code(u16 desc_ret)
@@ -407,7 +423,8 @@ static int hclge_comm_cmd_check_result(struct hclge_comm_hw *hw,
* if multi descriptors to be sent, use the first one to check
*/
if (HCLGE_COMM_SEND_SYNC(le16_to_cpu(desc->flag)))
- hclge_comm_wait_for_resp(hw, &is_completed);
+ hclge_comm_wait_for_resp(hw, le16_to_cpu(desc->opcode),
+ &is_completed);
if (!is_completed)
ret = -EBADE;
@@ -529,7 +546,7 @@ int hclge_comm_cmd_queue_init(struct pci_dev *pdev, struct hclge_comm_hw *hw)
cmdq->crq.desc_num = HCLGE_COMM_NIC_CMQ_DESC_NUM;
/* Setup Tx write back timeout */
- cmdq->tx_timeout = HCLGE_COMM_CMDQ_TX_TIMEOUT;
+ cmdq->tx_timeout = HCLGE_COMM_CMDQ_TX_TIMEOUT_DEFAULT;
/* Setup queue rings */
ret = hclge_comm_alloc_cmd_queue(hw, HCLGE_COMM_TYPE_CSQ);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
index de72ecbfd5ad..18f1b4bf362d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
@@ -54,7 +54,8 @@
#define HCLGE_COMM_NIC_SW_RST_RDY BIT(HCLGE_COMM_NIC_SW_RST_RDY_B)
#define HCLGE_COMM_NIC_CMQ_DESC_NUM_S 3
#define HCLGE_COMM_NIC_CMQ_DESC_NUM 1024
-#define HCLGE_COMM_CMDQ_TX_TIMEOUT 30000
+#define HCLGE_COMM_CMDQ_TX_TIMEOUT_DEFAULT 30000
+#define HCLGE_COMM_CMDQ_TX_TIMEOUT_500MS 500000
enum hclge_opcode_type {
/* Generic commands */
@@ -360,6 +361,11 @@ struct hclge_comm_caps_bit_map {
u16 local_bit;
};
+struct hclge_cmdq_tx_timeout_map {
+ u32 opcode;
+ u32 tx_timeout;
+};
+
struct hclge_comm_firmware_compat_cmd {
__le32 compat;
u8 rsv[20];
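
hclge now consults a per-opcode override table for the command-queue timeout, falling back to the queue-wide default for unlisted opcodes. A self-contained sketch of the lookup (the opcode constant's numeric value is invented; the timeouts are the ones from the diff):

#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

#define OPC_CFG_RST_TRIGGER 0x0706 /* hypothetical opcode value */
#define TX_TIMEOUT_DEFAULT  30000  /* in 1 us polling steps */
#define TX_TIMEOUT_500MS    500000

struct timeout_map { uint32_t opcode, tx_timeout; };

static uint32_t get_tx_timeout(uint16_t opcode, uint32_t dflt)
{
	static const struct timeout_map map[] = {
		{ OPC_CFG_RST_TRIGGER, TX_TIMEOUT_500MS },
	};
	uint32_t i;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (map[i].opcode == opcode)
			return map[i].tx_timeout;
	return dflt; /* unlisted opcodes keep the queue-wide default */
}

int main(void)
{
	printf("%u\n", get_tx_timeout(OPC_CFG_RST_TRIGGER, TX_TIMEOUT_DEFAULT));
	printf("%u\n", get_tx_timeout(0x0001, TX_TIMEOUT_DEFAULT));
	return 0;
}
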
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 4c3e90a1c4d0..d385ffc21876 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -130,7 +130,7 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
.name = "tx_bd_queue",
.cmd = HNAE3_DBG_CMD_TX_BD,
.dentry = HNS3_DBG_DENTRY_TX_BD,
- .buf_len = HNS3_DBG_READ_LEN_4MB,
+ .buf_len = HNS3_DBG_READ_LEN_5MB,
.init = hns3_dbg_bd_file_init,
},
{
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
index 97578eabb7d8..4a5ef8a90a10 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
@@ -10,6 +10,7 @@
#define HNS3_DBG_READ_LEN_128KB 0x20000
#define HNS3_DBG_READ_LEN_1MB 0x100000
#define HNS3_DBG_READ_LEN_4MB 0x400000
+#define HNS3_DBG_READ_LEN_5MB 0x500000
#define HNS3_DBG_WRITE_LEN 1024
#define HNS3_DBG_DATA_STR_LEN 32
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 4fb5406c1951..2689b108f7df 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -8053,12 +8053,15 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
/* If it is not PF reset or FLR, the firmware will disable the MAC,
* so it only need to stop phy here.
*/
- if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
- hdev->reset_type != HNAE3_FUNC_RESET &&
- hdev->reset_type != HNAE3_FLR_RESET) {
- hclge_mac_stop_phy(hdev);
- hclge_update_link_status(hdev);
- return;
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) {
+ hclge_pfc_pause_en_cfg(hdev, HCLGE_PFC_TX_RX_DISABLE,
+ HCLGE_PFC_DISABLE);
+ if (hdev->reset_type != HNAE3_FUNC_RESET &&
+ hdev->reset_type != HNAE3_FLR_RESET) {
+ hclge_mac_stop_phy(hdev);
+ hclge_update_link_status(hdev);
+ return;
+ }
}
hclge_reset_tqp(handle);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 4a33f65190e2..922c0da3660c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -171,8 +171,8 @@ int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
-static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
- u8 pfc_bitmap)
+int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
+ u8 pfc_bitmap)
{
struct hclge_desc desc;
struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index 68f28a98e380..dd6f1fd486cf 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -164,6 +164,9 @@ struct hclge_bp_to_qs_map_cmd {
u32 rsvd1;
};
+#define HCLGE_PFC_DISABLE 0
+#define HCLGE_PFC_TX_RX_DISABLE 0
+
struct hclge_pfc_en_cmd {
u8 tx_rx_en_bitmap;
u8 pri_en_bitmap;
@@ -235,6 +238,8 @@ void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc);
void hclge_tm_pfc_info_update(struct hclge_dev *hdev);
int hclge_tm_dwrr_cfg(struct hclge_dev *hdev);
int hclge_tm_init_hw(struct hclge_dev *hdev, bool init);
+int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
+ u8 pfc_bitmap);
int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx);
int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr);
void hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index f24046250341..dd08989a4c7c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -1436,7 +1436,10 @@ static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
* might happen in case the reset assertion was made by the PF. Yes, this also
* means we might end up waiting a bit more even for a VF reset.
*/
- msleep(5000);
+ if (hdev->reset_type == HNAE3_VF_FULL_RESET)
+ msleep(5000);
+ else
+ msleep(500);
return 0;
}
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 9abaff1f2aff..39d0fe76a38f 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -525,7 +525,7 @@ void iavf_set_ethtool_ops(struct net_device *netdev);
void iavf_update_stats(struct iavf_adapter *adapter);
void iavf_reset_interrupt_capability(struct iavf_adapter *adapter);
int iavf_init_interrupt_scheme(struct iavf_adapter *adapter);
-void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask);
+void iavf_irq_enable_queues(struct iavf_adapter *adapter);
void iavf_free_all_tx_resources(struct iavf_adapter *adapter);
void iavf_free_all_rx_resources(struct iavf_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 2de4baff4c20..4a66873882d1 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -359,21 +359,18 @@ static void iavf_irq_disable(struct iavf_adapter *adapter)
}
/**
- * iavf_irq_enable_queues - Enable interrupt for specified queues
+ * iavf_irq_enable_queues - Enable interrupt for all queues
* @adapter: board private structure
- * @mask: bitmap of queues to enable
**/
-void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask)
+void iavf_irq_enable_queues(struct iavf_adapter *adapter)
{
struct iavf_hw *hw = &adapter->hw;
int i;
for (i = 1; i < adapter->num_msix_vectors; i++) {
- if (mask & BIT(i - 1)) {
- wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1),
- IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
- IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
- }
+ wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1),
+ IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
+ IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
}
}
@@ -387,7 +384,7 @@ void iavf_irq_enable(struct iavf_adapter *adapter, bool flush)
struct iavf_hw *hw = &adapter->hw;
iavf_misc_irq_enable(adapter);
- iavf_irq_enable_queues(adapter, ~0);
+ iavf_irq_enable_queues(adapter);
if (flush)
iavf_flush(hw);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_register.h b/drivers/net/ethernet/intel/iavf/iavf_register.h
index bf793332fc9d..a19e88898a0b 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_register.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_register.h
@@ -40,7 +40,7 @@
#define IAVF_VFINT_DYN_CTL01_INTENA_MASK IAVF_MASK(0x1, IAVF_VFINT_DYN_CTL01_INTENA_SHIFT)
#define IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3
#define IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK IAVF_MASK(0x3, IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT)
-#define IAVF_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */
+#define IAVF_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...63 */ /* Reset: VFR */
#define IAVF_VFINT_DYN_CTLN1_INTENA_SHIFT 0
#define IAVF_VFINT_DYN_CTLN1_INTENA_MASK IAVF_MASK(0x1, IAVF_VFINT_DYN_CTLN1_INTENA_SHIFT)
#define IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 9afbbdac3590..7c0578b5457b 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -2238,11 +2238,6 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
iavf_process_config(adapter);
adapter->flags |= IAVF_FLAG_SETUP_NETDEV_FEATURES;
- /* Request VLAN offload settings */
- if (VLAN_V2_ALLOWED(adapter))
- iavf_set_vlan_offload_features(adapter, 0,
- netdev->features);
-
iavf_set_queue_vlan_tag_loc(adapter);
was_mac_changed = !ether_addr_equal(netdev->dev_addr,
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 0157f6e98d3e..eb2dc0983776 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -5160,7 +5160,7 @@ ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
*/
int
ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
- u16 bus_addr, __le16 addr, u8 params, u8 *data,
+ u16 bus_addr, __le16 addr, u8 params, const u8 *data,
struct ice_sq_cd *cd)
{
struct ice_aq_desc desc = { 0 };
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 8ba5f935a092..81961a7d6598 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -229,7 +229,7 @@ ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
struct ice_sq_cd *cd);
int
ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
- u16 bus_addr, __le16 addr, u8 params, u8 *data,
+ u16 bus_addr, __le16 addr, u8 params, const u8 *data,
struct ice_sq_cd *cd);
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw);
#endif /* _ICE_COMMON_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index c6d4926f0fcf..850db8e0e6b0 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -932,10 +932,9 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring,
if ((first->tx_flags & ICE_TX_FLAGS_HW_VLAN ||
first->tx_flags & ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN) ||
skb->priority != TC_PRIO_CONTROL) {
- first->tx_flags &= ~ICE_TX_FLAGS_VLAN_PR_M;
+ first->vid &= ~VLAN_PRIO_MASK;
/* Mask the lower 3 bits to set the 802.1p priority */
- first->tx_flags |= (skb->priority & 0x7) <<
- ICE_TX_FLAGS_VLAN_PR_S;
+ first->vid |= (skb->priority << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK;
/* if this is not already set it means a VLAN 0 + priority needs
* to be offloaded
*/
diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.c b/drivers/net/ethernet/intel/ice/ice_gnss.c
index 2ea8a2b11bcd..75c9de675f20 100644
--- a/drivers/net/ethernet/intel/ice/ice_gnss.c
+++ b/drivers/net/ethernet/intel/ice/ice_gnss.c
@@ -16,8 +16,8 @@
* * number of bytes written - success
* * negative - error code
*/
-static unsigned int
-ice_gnss_do_write(struct ice_pf *pf, unsigned char *buf, unsigned int size)
+static int
+ice_gnss_do_write(struct ice_pf *pf, const unsigned char *buf, unsigned int size)
{
struct ice_aqc_link_topo_addr link_topo;
struct ice_hw *hw = &pf->hw;
@@ -72,39 +72,7 @@ err_out:
dev_err(ice_pf_to_dev(pf), "GNSS failed to write, offset=%u, size=%u, err=%d\n",
offset, size, err);
- return offset;
-}
-
-/**
- * ice_gnss_write_pending - Write all pending data to internal GNSS
- * @work: GNSS write work structure
- */
-static void ice_gnss_write_pending(struct kthread_work *work)
-{
- struct gnss_serial *gnss = container_of(work, struct gnss_serial,
- write_work);
- struct ice_pf *pf = gnss->back;
-
- if (!pf)
- return;
-
- if (!test_bit(ICE_FLAG_GNSS, pf->flags))
- return;
-
- if (!list_empty(&gnss->queue)) {
- struct gnss_write_buf *write_buf = NULL;
- unsigned int bytes;
-
- write_buf = list_first_entry(&gnss->queue,
- struct gnss_write_buf, queue);
-
- bytes = ice_gnss_do_write(pf, write_buf->buf, write_buf->size);
- dev_dbg(ice_pf_to_dev(pf), "%u bytes written to GNSS\n", bytes);
-
- list_del(&write_buf->queue);
- kfree(write_buf->buf);
- kfree(write_buf);
- }
+ return err;
}
/**
@@ -128,12 +96,7 @@ static void ice_gnss_read(struct kthread_work *work)
int err = 0;
pf = gnss->back;
- if (!pf) {
- err = -EFAULT;
- goto exit;
- }
-
- if (!test_bit(ICE_FLAG_GNSS, pf->flags))
+ if (!pf || !test_bit(ICE_FLAG_GNSS, pf->flags))
return;
hw = &pf->hw;
@@ -191,7 +154,6 @@ free_buf:
free_page((unsigned long)buf);
requeue:
kthread_queue_delayed_work(gnss->kworker, &gnss->read_work, delay);
-exit:
if (err)
dev_dbg(ice_pf_to_dev(pf), "GNSS failed to read err=%d\n", err);
}
@@ -220,8 +182,6 @@ static struct gnss_serial *ice_gnss_struct_init(struct ice_pf *pf)
pf->gnss_serial = gnss;
kthread_init_delayed_work(&gnss->read_work, ice_gnss_read);
- INIT_LIST_HEAD(&gnss->queue);
- kthread_init_work(&gnss->write_work, ice_gnss_write_pending);
kworker = kthread_create_worker(0, "ice-gnss-%s", dev_name(dev));
if (IS_ERR(kworker)) {
kfree(gnss);
@@ -281,7 +241,6 @@ static void ice_gnss_close(struct gnss_device *gdev)
if (!gnss)
return;
- kthread_cancel_work_sync(&gnss->write_work);
kthread_cancel_delayed_work_sync(&gnss->read_work);
}
@@ -300,10 +259,7 @@ ice_gnss_write(struct gnss_device *gdev, const unsigned char *buf,
size_t count)
{
struct ice_pf *pf = gnss_get_drvdata(gdev);
- struct gnss_write_buf *write_buf;
struct gnss_serial *gnss;
- unsigned char *cmd_buf;
- int err = count;
/* We cannot write a single byte using our I2C implementation. */
if (count <= 1 || count > ICE_GNSS_TTY_WRITE_BUF)
@@ -319,24 +275,7 @@ ice_gnss_write(struct gnss_device *gdev, const unsigned char *buf,
if (!gnss)
return -ENODEV;
- cmd_buf = kcalloc(count, sizeof(*buf), GFP_KERNEL);
- if (!cmd_buf)
- return -ENOMEM;
-
- memcpy(cmd_buf, buf, count);
- write_buf = kzalloc(sizeof(*write_buf), GFP_KERNEL);
- if (!write_buf) {
- kfree(cmd_buf);
- return -ENOMEM;
- }
-
- write_buf->buf = cmd_buf;
- write_buf->size = count;
- INIT_LIST_HEAD(&write_buf->queue);
- list_add_tail(&write_buf->queue, &gnss->queue);
- kthread_queue_work(gnss->kworker, &gnss->write_work);
-
- return err;
+ return ice_gnss_do_write(pf, buf, count);
}
static const struct gnss_operations ice_gnss_ops = {
@@ -432,7 +371,6 @@ void ice_gnss_exit(struct ice_pf *pf)
if (pf->gnss_serial) {
struct gnss_serial *gnss = pf->gnss_serial;
- kthread_cancel_work_sync(&gnss->write_work);
kthread_cancel_delayed_work_sync(&gnss->read_work);
kthread_destroy_worker(gnss->kworker);
gnss->kworker = NULL;
diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.h b/drivers/net/ethernet/intel/ice/ice_gnss.h
index b8bb8b63d081..75e567ad7059 100644
--- a/drivers/net/ethernet/intel/ice/ice_gnss.h
+++ b/drivers/net/ethernet/intel/ice/ice_gnss.h
@@ -22,26 +22,16 @@
*/
#define ICE_GNSS_UBX_WRITE_BYTES (ICE_MAX_I2C_WRITE_BYTES + 1)
-struct gnss_write_buf {
- struct list_head queue;
- unsigned int size;
- unsigned char *buf;
-};
-
/**
* struct gnss_serial - data used to initialize GNSS TTY port
* @back: back pointer to PF
* @kworker: kwork thread for handling periodic work
* @read_work: read_work function for handling GNSS reads
- * @write_work: write_work function for handling GNSS writes
- * @queue: write buffers queue
*/
struct gnss_serial {
struct ice_pf *back;
struct kthread_worker *kworker;
struct kthread_delayed_work read_work;
- struct kthread_work write_work;
- struct list_head queue;
};
#if IS_ENABLED(CONFIG_GNSS)
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 450317dfcca7..11ae0e41f518 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -2745,6 +2745,8 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
goto unroll_vector_base;
ice_vsi_map_rings_to_vectors(vsi);
+ vsi->stat_offsets_loaded = false;
+
if (ice_is_xdp_ena_vsi(vsi)) {
ret = ice_vsi_determine_xdp_res(vsi);
if (ret)
@@ -2793,6 +2795,9 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
ret = ice_vsi_alloc_ring_stats(vsi);
if (ret)
goto unroll_vector_base;
+
+ vsi->stat_offsets_loaded = false;
+
/* Do not exit if configuring RSS had an issue, at least
* receive traffic on first queue. Hence no need to capture
* return value
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index a1f7c8edc22f..42c318ceff61 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -4802,9 +4802,13 @@ err_init_pf:
static void ice_deinit_dev(struct ice_pf *pf)
{
ice_free_irq_msix_misc(pf);
- ice_clear_interrupt_scheme(pf);
ice_deinit_pf(pf);
ice_deinit_hw(&pf->hw);
+
+ /* Service task is already stopped, so call reset directly. */
+ ice_reset(&pf->hw, ICE_RESET_PFR);
+ pci_wait_for_pending_transaction(pf->pdev);
+ ice_clear_interrupt_scheme(pf);
}
static void ice_init_features(struct ice_pf *pf)
@@ -5094,10 +5098,6 @@ int ice_load(struct ice_pf *pf)
struct ice_vsi *vsi;
int err;
- err = ice_reset(&pf->hw, ICE_RESET_PFR);
- if (err)
- return err;
-
err = ice_init_dev(pf);
if (err)
return err;
@@ -5354,12 +5354,6 @@ static void ice_remove(struct pci_dev *pdev)
ice_setup_mc_magic_wake(pf);
ice_set_wake(pf);
- /* Issue a PFR as part of the prescribed driver unload flow. Do not
- * do it via ice_schedule_reset() since there is no need to rebuild
- * and the service task is already stopped.
- */
- ice_reset(&pf->hw, ICE_RESET_PFR);
- pci_wait_for_pending_transaction(pdev);
pci_disable_device(pdev);
}
@@ -7056,6 +7050,10 @@ int ice_down(struct ice_vsi *vsi)
ice_for_each_txq(vsi, i)
ice_clean_tx_ring(vsi->tx_rings[i]);
+ if (ice_is_xdp_ena_vsi(vsi))
+ ice_for_each_xdp_txq(vsi, i)
+ ice_clean_tx_ring(vsi->xdp_rings[i]);
+
ice_for_each_rxq(vsi, i)
ice_clean_rx_ring(vsi->rx_rings[i]);
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index f1dca59bd844..588ad8696756 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -1171,7 +1171,7 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
if (!vf)
return -EINVAL;
- ret = ice_check_vf_ready_for_cfg(vf);
+ ret = ice_check_vf_ready_for_reset(vf);
if (ret)
goto out_put_vf;
@@ -1286,7 +1286,7 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
goto out_put_vf;
}
- ret = ice_check_vf_ready_for_cfg(vf);
+ ret = ice_check_vf_ready_for_reset(vf);
if (ret)
goto out_put_vf;
@@ -1340,7 +1340,7 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
return -EOPNOTSUPP;
}
- ret = ice_check_vf_ready_for_cfg(vf);
+ ret = ice_check_vf_ready_for_reset(vf);
if (ret)
goto out_put_vf;
@@ -1653,7 +1653,7 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
if (!vf)
return -EINVAL;
- ret = ice_check_vf_ready_for_cfg(vf);
+ ret = ice_check_vf_ready_for_reset(vf);
if (ret)
goto out_put_vf;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 4fcf2d07eb85..52d0a126eb61 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -1152,11 +1152,11 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
unsigned int offset = rx_ring->rx_offset;
struct xdp_buff *xdp = &rx_ring->xdp;
+ u32 cached_ntc = rx_ring->first_desc;
struct ice_tx_ring *xdp_ring = NULL;
struct bpf_prog *xdp_prog = NULL;
u32 ntc = rx_ring->next_to_clean;
u32 cnt = rx_ring->count;
- u32 cached_ntc = ntc;
u32 xdp_xmit = 0;
u32 cached_ntu;
bool failure;
@@ -1664,8 +1664,7 @@ ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
- td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
- ICE_TX_FLAGS_VLAN_S;
+ td_tag = first->vid;
}
dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
@@ -1998,7 +1997,7 @@ ice_tx_prepare_vlan_flags(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first)
* VLAN offloads exclusively so we only care about the VLAN ID here
*/
if (skb_vlan_tag_present(skb)) {
- first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
+ first->vid = skb_vlan_tag_get(skb);
if (tx_ring->flags & ICE_TX_FLAGS_RING_VLAN_L2TAG2)
first->tx_flags |= ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN;
else
@@ -2388,8 +2387,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
(ICE_TX_CTX_DESC_IL2TAG2 <<
ICE_TXD_CTX_QW1_CMD_S));
- offload.cd_l2tag2 = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
- ICE_TX_FLAGS_VLAN_S;
+ offload.cd_l2tag2 = first->vid;
}
/* set up TSO offload */
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index fff0efe28373..166413fc33f4 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -127,10 +127,6 @@ static inline int ice_skb_pad(void)
#define ICE_TX_FLAGS_IPV6 BIT(6)
#define ICE_TX_FLAGS_TUNNEL BIT(7)
#define ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN BIT(8)
-#define ICE_TX_FLAGS_VLAN_M 0xffff0000
-#define ICE_TX_FLAGS_VLAN_PR_M 0xe0000000
-#define ICE_TX_FLAGS_VLAN_PR_S 29
-#define ICE_TX_FLAGS_VLAN_S 16
#define ICE_XDP_PASS 0
#define ICE_XDP_CONSUMED BIT(0)
@@ -182,8 +178,9 @@ struct ice_tx_buf {
unsigned int gso_segs;
unsigned int nr_frags; /* used for mbuf XDP */
};
- u32 type:16; /* &ice_tx_buf_type */
- u32 tx_flags:16;
+ u32 tx_flags:12;
+ u32 type:4; /* &ice_tx_buf_type */
+ u32 vid:16;
DEFINE_DMA_UNMAP_LEN(len);
DEFINE_DMA_UNMAP_ADDR(dma);
};
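
The ice_tx_buf repack shrinks tx_flags to 12 bits and type to 4, freeing a dedicated 16-bit vid field in the same 32-bit word, which is why the ICE_TX_FLAGS_VLAN_* shift/mask constants could be deleted. A sketch of the packing (field values arbitrary):

#include <stdint.h>
#include <stdio.h>

struct tx_buf {
	uint32_t tx_flags : 12; /* per-frame flag bits */
	uint32_t type     : 4;  /* buffer type enum */
	uint32_t vid      : 16; /* VLAN tag, stored directly */
};

int main(void)
{
	struct tx_buf b = { .tx_flags = 0x001, .type = 2, .vid = 100 };

	/* Before the repack the VLAN ID lived in the upper bits of
	 * tx_flags and needed ICE_TX_FLAGS_VLAN_S shifts to extract;
	 * now it is read directly, still within one 32-bit word. */
	printf("sizeof = %zu, vid = %u\n", sizeof(b), (unsigned int)b.vid);
	return 0;
}
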
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index 89fd6982df09..bf74a2f3a4f8 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -186,6 +186,25 @@ int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
}
/**
+ * ice_check_vf_ready_for_reset - check if VF is ready to be reset
+ * @vf: VF to check if it's ready to be reset
+ *
+ * The purpose of this function is to ensure that the VF is not in reset, not
+ * disabled, and is both initialized and active, so it is safe to initiate
+ * another reset.
+ */
+int ice_check_vf_ready_for_reset(struct ice_vf *vf)
+{
+ int ret;
+
+ ret = ice_check_vf_ready_for_cfg(vf);
+ if (!ret && !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ ret = -EAGAIN;
+
+ return ret;
+}
+
+/**
* ice_trigger_vf_reset - Reset a VF on HW
* @vf: pointer to the VF structure
* @is_vflr: true if VFLR was issued, false if not
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
index e3cda6fb71ab..a38ef00a3679 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
@@ -215,6 +215,7 @@ u16 ice_get_num_vfs(struct ice_pf *pf);
struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf);
bool ice_is_vf_disabled(struct ice_vf *vf);
int ice_check_vf_ready_for_cfg(struct ice_vf *vf);
+int ice_check_vf_ready_for_reset(struct ice_vf *vf);
void ice_set_vf_state_dis(struct ice_vf *vf);
bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf);
void
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index 97243c616d5d..f4a524f80b11 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -3955,6 +3955,7 @@ error_handler:
ice_vc_notify_vf_link_state(vf);
break;
case VIRTCHNL_OP_RESET_VF:
+ clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
ops->reset_vf(vf);
break;
case VIRTCHNL_OP_ADD_ETH_ADDR:
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 205d577bdbba..caf91c6f52b4 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -426,7 +426,7 @@ void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
{
u32 hash_value, hash_mask;
- u8 bit_shift = 0;
+ u8 bit_shift = 1;
/* Register count multiplied by bits per register */
hash_mask = (hw->mac.mta_reg_count * 32) - 1;
@@ -434,7 +434,7 @@ static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
/* For a mc_filter_type of 0, bit_shift is the number of left-shifts
* where 0xFF would still fall within the hash mask.
*/
- while (hash_mask >> bit_shift != 0xFF)
+ while (hash_mask >> bit_shift != 0xFF && bit_shift < 4)
bit_shift++;
/* The portion of the address that is used for the hash table
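A worked example of the new loop bound, with an illustrative register count:

	/*
	 * With mta_reg_count == 128: hash_mask = 128 * 32 - 1 = 0xFFF, and
	 * 0xFFF >> 4 == 0xFF, so the loop exits with bit_shift == 4.  With a
	 * hypothetical hash_mask below 0xFF (e.g. 0x7F), no right shift ever
	 * equals 0xFF and the old loop never terminated; starting at 1 and
	 * capping the shift at 4 bounds the loop in all cases.
	 */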
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 7d60da1b7bf4..319ed601eaa1 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -822,6 +822,8 @@ static int igb_set_eeprom(struct net_device *netdev,
*/
ret_val = hw->nvm.ops.read(hw, last_word, 1,
&eeprom_buff[last_word - first_word]);
+ if (ret_val)
+ goto out;
}
/* Device's eeprom is always little-endian, word addressable */
@@ -841,6 +843,7 @@ static int igb_set_eeprom(struct net_device *netdev,
hw->nvm.ops.update(hw);
igb_set_fw_version(adapter);
+out:
kfree(eeprom_buff);
return ret_val;
}
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 58872a4c2540..bb3db387d49c 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -6947,6 +6947,7 @@ static void igb_extts(struct igb_adapter *adapter, int tsintr_tt)
struct e1000_hw *hw = &adapter->hw;
struct ptp_clock_event event;
struct timespec64 ts;
+ unsigned long flags;
if (pin < 0 || pin >= IGB_N_SDP)
return;
@@ -6954,9 +6955,12 @@ static void igb_extts(struct igb_adapter *adapter, int tsintr_tt)
if (hw->mac.type == e1000_82580 ||
hw->mac.type == e1000_i354 ||
hw->mac.type == e1000_i350) {
- s64 ns = rd32(auxstmpl);
+ u64 ns = rd32(auxstmpl);
- ns += ((s64)(rd32(auxstmph) & 0xFF)) << 32;
+ ns += ((u64)(rd32(auxstmph) & 0xFF)) << 32;
+ spin_lock_irqsave(&adapter->tmreg_lock, flags);
+ ns = timecounter_cyc2time(&adapter->tc, ns);
+ spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
ts = ns_to_timespec64(ns);
} else {
ts.tv_nsec = rd32(auxstmpl);
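The fix above treats the aux timestamp as a raw 40-bit cycle count and converts it through the timecounter under tmreg_lock before building the timespec. A self-contained sketch of that pattern (raw40_to_ns() and its parameters are hypothetical):

	#include <linux/spinlock.h>
	#include <linux/timecounter.h>

	static u64 raw40_to_ns(struct timecounter *tc, spinlock_t *lock,
			       u32 lo, u32 hi)
	{
		u64 cycles = ((u64)(hi & 0xFF) << 32) | lo;	/* 40-bit counter */
		unsigned long flags;
		u64 ns;

		spin_lock_irqsave(lock, flags);		/* timecounter state is shared */
		ns = timecounter_cyc2time(tc, cycles);	/* cycles -> absolute ns */
		spin_unlock_irqrestore(lock, flags);
		return ns;
	}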
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 1c4676882082..fa764190f270 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -254,6 +254,13 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring)
/* reset BQL for queue */
netdev_tx_reset_queue(txring_txq(tx_ring));
+ /* Zero out the buffer ring */
+ memset(tx_ring->tx_buffer_info, 0,
+ sizeof(*tx_ring->tx_buffer_info) * tx_ring->count);
+
+ /* Zero out the descriptor ring */
+ memset(tx_ring->desc, 0, tx_ring->size);
+
/* reset next_to_use and next_to_clean */
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
@@ -267,7 +274,7 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring)
*/
void igc_free_tx_resources(struct igc_ring *tx_ring)
{
- igc_clean_tx_ring(tx_ring);
+ igc_disable_tx_ring(tx_ring);
vfree(tx_ring->tx_buffer_info);
tx_ring->tx_buffer_info = NULL;
@@ -6723,6 +6730,9 @@ static void igc_remove(struct pci_dev *pdev)
igc_ptp_stop(adapter);
+ pci_disable_ptm(pdev);
+ pci_clear_master(pdev);
+
set_bit(__IGC_DOWN, &adapter->state);
del_timer_sync(&adapter->watchdog_timer);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 5d83c887a3fc..1726297f2e0d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1256,7 +1256,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
if (!__netif_txq_completed_wake(txq, total_packets, total_bytes,
ixgbe_desc_unused(tx_ring),
TX_WAKE_THRESHOLD,
- netif_carrier_ok(tx_ring->netdev) &&
+ !netif_carrier_ok(tx_ring->netdev) ||
test_bit(__IXGBE_DOWN, &adapter->state)))
++tx_ring->tx_stats.restart_queue;
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
index e1853da280f9..43eb6e871351 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
@@ -981,6 +981,9 @@ int octep_device_setup(struct octep_device *oct)
oct->mmio[i].hw_addr =
ioremap(pci_resource_start(oct->pdev, i * 2),
pci_resource_len(oct->pdev, i * 2));
+ if (!oct->mmio[i].hw_addr)
+ goto unmap_prev;
+
oct->mmio[i].mapped = 1;
}
@@ -1015,7 +1018,9 @@ int octep_device_setup(struct octep_device *oct)
return 0;
unsupported_dev:
- for (i = 0; i < OCTEP_MMIO_REGIONS; i++)
+ i = OCTEP_MMIO_REGIONS;
+unmap_prev:
+ while (i--)
iounmap(oct->mmio[i].hw_addr);
kfree(oct->conf);
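The `while (i--)` above is the standard partial-unwind idiom: on failure at index i, release exactly the 0..i-1 resources already acquired; falling through from unsupported_dev with i preset releases them all. A generic sketch (acquire()/release() are hypothetical):

	static int setup_all(void *res[], int n)	/* illustrative only */
	{
		int i;

		for (i = 0; i < n; i++) {
			res[i] = acquire(i);
			if (!res[i])
				goto unwind;	/* res[i] itself was not acquired */
		}
		return 0;

	unwind:
		while (i--)			/* frees i-1 down to 0 */
			release(res[i]);
		return -ENOMEM;
	}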
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 4ad707e758b9..f01d057ad025 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -1878,7 +1878,8 @@ static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
free_cnt = rvu_rsrc_free_count(&txsch->schq);
}
- if (free_cnt < req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
+ if (free_cnt < req_schq || req->schq[lvl] > MAX_TXSCHQ_PER_FUNC ||
+ req->schq_contig[lvl] > MAX_TXSCHQ_PER_FUNC)
return NIX_AF_ERR_TLX_ALLOC_FAIL;
/* If contiguous queues are needed, check for availability */
@@ -4080,10 +4081,6 @@ int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs)
{
- /* CN10k supports 72KB FIFO size and max packet size of 64k */
- if (rvu->hw->lbk_bufsize == 0x12000)
- return (rvu->hw->lbk_bufsize - lbk_max_frs) / 16;
-
return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
index 51209119f0f2..9f11c1e40737 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
@@ -1164,10 +1164,8 @@ static u16 __rvu_npc_exact_cmd_rules_cnt_update(struct rvu *rvu, int drop_mcam_i
{
struct npc_exact_table *table;
u16 *cnt, old_cnt;
- bool promisc;
table = rvu->hw->table;
- promisc = table->promisc_mode[drop_mcam_idx];
cnt = &table->cnt_cmd_rules[drop_mcam_idx];
old_cnt = *cnt;
@@ -1179,16 +1177,13 @@ static u16 __rvu_npc_exact_cmd_rules_cnt_update(struct rvu *rvu, int drop_mcam_i
*enable_or_disable_cam = false;
- if (promisc)
- goto done;
-
- /* If all rules are deleted and not already in promisc mode; disable cam */
+ /* If all rules are deleted, disable cam */
if (!*cnt && val < 0) {
*enable_or_disable_cam = true;
goto done;
}
- /* If rule got added and not already in promisc mode; enable cam */
+ /* If rule got added, enable cam */
if (!old_cnt && val > 0) {
*enable_or_disable_cam = true;
goto done;
@@ -1443,7 +1438,6 @@ int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc)
u32 drop_mcam_idx;
bool *promisc;
bool rc;
- u32 cnt;
table = rvu->hw->table;
@@ -1466,17 +1460,8 @@ int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc)
return LMAC_AF_ERR_INVALID_PARAM;
}
*promisc = false;
- cnt = __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, 0, NULL);
mutex_unlock(&table->lock);
- /* If no dmac filter entries configured, disable drop rule */
- if (!cnt)
- rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false);
- else
- rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, !*promisc);
-
- dev_dbg(rvu->dev, "%s: disabled promisc mode (cgx=%d lmac=%d, cnt=%d)\n",
- __func__, cgx_id, lmac_id, cnt);
return 0;
}
@@ -1494,7 +1479,6 @@ int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc)
u32 drop_mcam_idx;
bool *promisc;
bool rc;
- u32 cnt;
table = rvu->hw->table;
@@ -1517,17 +1501,8 @@ int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc)
return LMAC_AF_ERR_INVALID_PARAM;
}
*promisc = true;
- cnt = __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, 0, NULL);
mutex_unlock(&table->lock);
- /* If no dmac filter entries configured, disable drop rule */
- if (!cnt)
- rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false);
- else
- rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, !*promisc);
-
- dev_dbg(rvu->dev, "%s: Enabled promisc mode (cgx=%d lmac=%d cnt=%d)\n",
- __func__, cgx_id, lmac_id, cnt);
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 7045fedfd73a..7af223b0a37f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -652,9 +652,7 @@ static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
htons(ext->lso_sb - skb_network_offset(skb));
} else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
ext->lso_format = pfvf->hw.lso_tsov6_idx;
-
- ipv6_hdr(skb)->payload_len =
- htons(ext->lso_sb - skb_network_offset(skb));
+ ipv6_hdr(skb)->payload_len = htons(tcp_hdrlen(skb));
} else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
__be16 l3_proto = vlan_get_protocol(skb);
struct udphdr *udph = udp_hdr(skb);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index a75fd072082c..834c644b67db 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -3269,18 +3269,14 @@ static int mtk_open(struct net_device *dev)
eth->dsa_meta[i] = md_dst;
}
} else {
- /* Hardware special tag parsing needs to be disabled if at least
- * one MAC does not use DSA.
+ /* Hardware DSA untagging and VLAN RX offloading need to be
+ * disabled if at least one MAC does not use DSA.
*/
u32 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
val &= ~MTK_CDMP_STAG_EN;
mtk_w32(eth, val, MTK_CDMP_IG_CTRL);
- val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
- val &= ~MTK_CDMQ_STAG_EN;
- mtk_w32(eth, val, MTK_CDMQ_IG_CTRL);
-
mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index d53de39539a8..d532883b42d7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -1920,9 +1920,10 @@ static void mlx5_cmd_err_trace(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod
static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
u32 syndrome, int err)
{
+ const char *namep = mlx5_command_str(opcode);
struct mlx5_cmd_stats *stats;
- if (!err)
+ if (!err || !(strcmp(namep, "unknown command opcode")))
return;
stats = &dev->cmd.stats[opcode];
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index f40497823e65..7c0f2adbea00 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -490,7 +490,7 @@ static void poll_trace(struct mlx5_fw_tracer *tracer,
(u64)timestamp_low;
break;
default:
- if (tracer_event->event_id >= tracer->str_db.first_string_trace ||
+ if (tracer_event->event_id >= tracer->str_db.first_string_trace &&
tracer_event->event_id <= tracer->str_db.first_string_trace +
tracer->str_db.num_string_trace) {
tracer_event->type = TRACER_EVENT_TYPE_STRING;
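The operator fix matters because a closed-interval test needs both bounds ANDed; with `||`, any event id at or above the lower bound or below the upper bound — i.e. every id — matched, so all unknown events were classified as string traces. The shape of the correct test:

	static bool id_in_range(u32 id, u32 lo, u32 hi)	/* illustrative */
	{
		return id >= lo && id <= hi;	/* with ||, this is always true */
	}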
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index b8987a404d75..8e999f238194 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -327,6 +327,7 @@ struct mlx5e_params {
unsigned int sw_mtu;
int hard_mtu;
bool ptp_rx;
+ __be32 terminate_lkey_be;
};
static inline u8 mlx5e_get_dcb_num_tc(struct mlx5e_params *params)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 9c94807097cb..5ce28ff7685f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -732,7 +732,8 @@ static void mlx5e_rx_compute_wqe_bulk_params(struct mlx5e_params *params,
static int mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
- struct mlx5e_rq_frags_info *info)
+ struct mlx5e_rq_frags_info *info,
+ u32 *xdp_frag_size)
{
u32 byte_count = MLX5E_SW2HW_MTU(params, params->sw_mtu);
int frag_size_max = DEFAULT_FRAG_SIZE;
@@ -845,6 +846,8 @@ out:
info->log_num_frags = order_base_2(info->num_frags);
+ *xdp_frag_size = info->num_frags > 1 && params->xdp_prog ? PAGE_SIZE : 0;
+
return 0;
}
@@ -989,7 +992,8 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
}
default: /* MLX5_WQ_TYPE_CYCLIC */
MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
- err = mlx5e_build_rq_frags_info(mdev, params, xsk, &param->frags_info);
+ err = mlx5e_build_rq_frags_info(mdev, params, xsk, &param->frags_info,
+ &param->xdp_frag_size);
if (err)
return err;
ndsegs = param->frags_info.num_frags;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index a5d20f6d6d9c..6800949dafbc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -24,6 +24,7 @@ struct mlx5e_rq_param {
u32 rqc[MLX5_ST_SZ_DW(rqc)];
struct mlx5_wq_param wq;
struct mlx5e_rq_frags_info frags_info;
+ u32 xdp_frag_size;
};
struct mlx5e_sq_param {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
index 7ac1ad9c46de..7e8e96cc5cd0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
@@ -51,7 +51,7 @@ int mlx5e_port_query_buffer(struct mlx5e_priv *priv,
if (err)
goto out;
- for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
+ for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) {
buffer = MLX5_ADDR_OF(pbmc_reg, out, buffer[i]);
port_buffer->buffer[i].lossy =
MLX5_GET(bufferx_reg, buffer, lossy);
@@ -73,14 +73,24 @@ int mlx5e_port_query_buffer(struct mlx5e_priv *priv,
port_buffer->buffer[i].lossy);
}
- port_buffer->headroom_size = total_used;
+ port_buffer->internal_buffers_size = 0;
+ for (i = MLX5E_MAX_NETWORK_BUFFER; i < MLX5E_TOTAL_BUFFERS; i++) {
+ buffer = MLX5_ADDR_OF(pbmc_reg, out, buffer[i]);
+ port_buffer->internal_buffers_size +=
+ MLX5_GET(bufferx_reg, buffer, size) * port_buff_cell_sz;
+ }
+
port_buffer->port_buffer_size =
MLX5_GET(pbmc_reg, out, port_buffer_size) * port_buff_cell_sz;
- port_buffer->spare_buffer_size =
- port_buffer->port_buffer_size - total_used;
-
- mlx5e_dbg(HW, priv, "total buffer size=%d, spare buffer size=%d\n",
- port_buffer->port_buffer_size,
+ port_buffer->headroom_size = total_used;
+ port_buffer->spare_buffer_size = port_buffer->port_buffer_size -
+ port_buffer->internal_buffers_size -
+ port_buffer->headroom_size;
+
+ mlx5e_dbg(HW, priv,
+ "total buffer size=%u, headroom buffer size=%u, internal buffers size=%u, spare buffer size=%u\n",
+ port_buffer->port_buffer_size, port_buffer->headroom_size,
+ port_buffer->internal_buffers_size,
port_buffer->spare_buffer_size);
out:
kfree(out);
@@ -206,11 +216,11 @@ static int port_update_pool_cfg(struct mlx5_core_dev *mdev,
if (!MLX5_CAP_GEN(mdev, sbcam_reg))
return 0;
- for (i = 0; i < MLX5E_MAX_BUFFER; i++)
+ for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++)
lossless_buff_count += ((port_buffer->buffer[i].size) &&
(!(port_buffer->buffer[i].lossy)));
- for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
+ for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) {
p = select_sbcm_params(&port_buffer->buffer[i], lossless_buff_count);
err = mlx5e_port_set_sbcm(mdev, 0, i,
MLX5_INGRESS_DIR,
@@ -293,7 +303,7 @@ static int port_set_buffer(struct mlx5e_priv *priv,
if (err)
goto out;
- for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
+ for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) {
void *buffer = MLX5_ADDR_OF(pbmc_reg, in, buffer[i]);
u64 size = port_buffer->buffer[i].size;
u64 xoff = port_buffer->buffer[i].xoff;
@@ -351,7 +361,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
{
int i;
- for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
+ for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) {
if (port_buffer->buffer[i].lossy) {
port_buffer->buffer[i].xoff = 0;
port_buffer->buffer[i].xon = 0;
@@ -408,7 +418,7 @@ static int update_buffer_lossy(struct mlx5_core_dev *mdev,
int err;
int i;
- for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
+ for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) {
prio_count = 0;
lossy_count = 0;
@@ -432,11 +442,11 @@ static int update_buffer_lossy(struct mlx5_core_dev *mdev,
}
if (changed) {
- err = port_update_pool_cfg(mdev, port_buffer);
+ err = update_xoff_threshold(port_buffer, xoff, max_mtu, port_buff_cell_sz);
if (err)
return err;
- err = update_xoff_threshold(port_buffer, xoff, max_mtu, port_buff_cell_sz);
+ err = port_update_pool_cfg(mdev, port_buffer);
if (err)
return err;
@@ -515,7 +525,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
if (change & MLX5E_PORT_BUFFER_PRIO2BUFFER) {
update_prio2buffer = true;
- for (i = 0; i < MLX5E_MAX_BUFFER; i++)
+ for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++)
mlx5e_dbg(HW, priv, "%s: requested to map prio[%d] to buffer %d\n",
__func__, i, prio2buffer[i]);
@@ -530,7 +540,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
}
if (change & MLX5E_PORT_BUFFER_SIZE) {
- for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
+ for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) {
mlx5e_dbg(HW, priv, "%s: buffer[%d]=%d\n", __func__, i, buffer_size[i]);
if (!port_buffer.buffer[i].lossy && !buffer_size[i]) {
mlx5e_dbg(HW, priv, "%s: lossless buffer[%d] size cannot be zero\n",
@@ -544,7 +554,9 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
mlx5e_dbg(HW, priv, "%s: total buffer requested=%d\n", __func__, total_used);
- if (total_used > port_buffer.port_buffer_size)
+ if (total_used > port_buffer.headroom_size &&
+ (total_used - port_buffer.headroom_size) >
+ port_buffer.spare_buffer_size)
return -EINVAL;
update_buffer = true;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h
index a6ef118de758..f4a19ffbb641 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h
@@ -35,7 +35,8 @@
#include "en.h"
#include "port.h"
-#define MLX5E_MAX_BUFFER 8
+#define MLX5E_MAX_NETWORK_BUFFER 8
+#define MLX5E_TOTAL_BUFFERS 10
#define MLX5E_DEFAULT_CABLE_LEN 7 /* 7 meters */
#define MLX5_BUFFER_SUPPORTED(mdev) (MLX5_CAP_GEN(mdev, pcam_reg) && \
@@ -60,8 +61,9 @@ struct mlx5e_bufferx_reg {
struct mlx5e_port_buffer {
u32 port_buffer_size;
u32 spare_buffer_size;
- u32 headroom_size;
- struct mlx5e_bufferx_reg buffer[MLX5E_MAX_BUFFER];
+ u32 headroom_size; /* Buffers 0-7 */
+ u32 internal_buffers_size; /* Buffers 8-9 */
+ struct mlx5e_bufferx_reg buffer[MLX5E_MAX_NETWORK_BUFFER];
};
int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
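With the split above, spare space is what remains after both the eight network buffers (headroom) and the two internal buffers are subtracted from the total. A worked example with made-up sizes:

	/*
	 * port_buffer_size      = 512 KB   (total, from PBMC)
	 * headroom_size         = 256 KB   (buffers 0-7)
	 * internal_buffers_size =  64 KB   (buffers 8-9)
	 * spare_buffer_size     = 512 - 256 - 64 = 192 KB
	 *
	 * Previously the internal buffers were silently counted as spare.
	 */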
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index eb5abd0e55d9..3cbebfba582b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -175,6 +175,8 @@ static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
/* ensure cq space is freed before enabling more cqes */
wmb();
+ mlx5e_txqsq_wake(&ptpsq->txqsq);
+
return work_done == budget;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
index fc923a99b6a4..0380a04c3691 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
@@ -84,7 +84,7 @@ mlx5e_tc_act_init_parse_state(struct mlx5e_tc_act_parse_state *parse_state,
int
mlx5e_tc_act_post_parse(struct mlx5e_tc_act_parse_state *parse_state,
- struct flow_action *flow_action,
+ struct flow_action *flow_action, int from, int to,
struct mlx5_flow_attr *attr,
enum mlx5_flow_namespace_type ns_type)
{
@@ -96,6 +96,11 @@ mlx5e_tc_act_post_parse(struct mlx5e_tc_act_parse_state *parse_state,
priv = parse_state->flow->priv;
flow_action_for_each(i, act, flow_action) {
+ if (i < from)
+ continue;
+ else if (i > to)
+ break;
+
tc_act = mlx5e_tc_act_get(act->id, ns_type);
if (!tc_act || !tc_act->post_parse)
continue;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
index 0e6e1872ac62..d6c12d0ea55b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
@@ -112,7 +112,7 @@ mlx5e_tc_act_init_parse_state(struct mlx5e_tc_act_parse_state *parse_state,
int
mlx5e_tc_act_post_parse(struct mlx5e_tc_act_parse_state *parse_state,
- struct flow_action *flow_action,
+ struct flow_action *flow_action, int from, int to,
struct mlx5_flow_attr *attr,
enum mlx5_flow_namespace_type ns_type);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index ead38ef69483..a254e728ac95 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -2021,6 +2021,8 @@ void
mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *priv,
struct mlx5_flow_attr *attr)
{
+ if (!attr->ct_attr.ft) /* no ct action, return */
+ return;
if (!attr->ct_attr.nf_ft) /* means only ct clear action, and not ct_clear,ct() */
return;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
index 20c2d2ecaf93..f0c3464f037f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
@@ -492,6 +492,19 @@ void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
mlx5e_encap_dealloc(priv, e);
}
+static void mlx5e_encap_put_locked(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+ lockdep_assert_held(&esw->offloads.encap_tbl_lock);
+
+ if (!refcount_dec_and_test(&e->refcnt))
+ return;
+ list_del(&e->route_list);
+ hash_del_rcu(&e->encap_hlist);
+ mlx5e_encap_dealloc(priv, e);
+}
+
static void mlx5e_decap_put(struct mlx5e_priv *priv, struct mlx5e_decap_entry *d)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
@@ -816,6 +829,8 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
uintptr_t hash_key;
int err = 0;
+ lockdep_assert_held(&esw->offloads.encap_tbl_lock);
+
parse_attr = attr->parse_attr;
tun_info = parse_attr->tun_info[out_index];
mpls_info = &parse_attr->mpls_info[out_index];
@@ -829,7 +844,6 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
hash_key = hash_encap_info(&key);
- mutex_lock(&esw->offloads.encap_tbl_lock);
e = mlx5e_encap_get(priv, &key, hash_key);
/* must verify if encap is valid or not */
@@ -840,15 +854,6 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
goto out_err;
}
- mutex_unlock(&esw->offloads.encap_tbl_lock);
- wait_for_completion(&e->res_ready);
-
- /* Protect against concurrent neigh update. */
- mutex_lock(&esw->offloads.encap_tbl_lock);
- if (e->compl_result < 0) {
- err = -EREMOTEIO;
- goto out_err;
- }
goto attach_flow;
}
@@ -877,15 +882,12 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
INIT_LIST_HEAD(&e->flows);
hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
tbl_time_before = mlx5e_route_tbl_get_last_update(priv);
- mutex_unlock(&esw->offloads.encap_tbl_lock);
if (family == AF_INET)
err = mlx5e_tc_tun_create_header_ipv4(priv, mirred_dev, e);
else if (family == AF_INET6)
err = mlx5e_tc_tun_create_header_ipv6(priv, mirred_dev, e);
- /* Protect against concurrent neigh update. */
- mutex_lock(&esw->offloads.encap_tbl_lock);
complete_all(&e->res_ready);
if (err) {
e->compl_result = err;
@@ -920,18 +922,15 @@ attach_flow:
} else {
flow_flag_set(flow, SLOW);
}
- mutex_unlock(&esw->offloads.encap_tbl_lock);
return err;
out_err:
- mutex_unlock(&esw->offloads.encap_tbl_lock);
if (e)
- mlx5e_encap_put(priv, e);
+ mlx5e_encap_put_locked(priv, e);
return err;
out_err_init:
- mutex_unlock(&esw->offloads.encap_tbl_lock);
kfree(tun_info);
kfree(e);
return err;
@@ -1016,6 +1015,93 @@ out_err:
return err;
}
+int mlx5e_tc_tun_encap_dests_set(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr,
+ struct netlink_ext_ack *extack,
+ bool *vf_tun)
+{
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
+ struct mlx5_esw_flow_attr *esw_attr;
+ struct net_device *encap_dev = NULL;
+ struct mlx5e_rep_priv *rpriv;
+ struct mlx5e_priv *out_priv;
+ struct mlx5_eswitch *esw;
+ int out_index;
+ int err = 0;
+
+ if (!mlx5e_is_eswitch_flow(flow))
+ return 0;
+
+ parse_attr = attr->parse_attr;
+ esw_attr = attr->esw_attr;
+ *vf_tun = false;
+
+ esw = priv->mdev->priv.eswitch;
+ mutex_lock(&esw->offloads.encap_tbl_lock);
+ for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
+ struct net_device *out_dev;
+ int mirred_ifindex;
+
+ if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
+ continue;
+
+ mirred_ifindex = parse_attr->mirred_ifindex[out_index];
+ out_dev = dev_get_by_index(dev_net(priv->netdev), mirred_ifindex);
+ if (!out_dev) {
+ NL_SET_ERR_MSG_MOD(extack, "Requested mirred device not found");
+ err = -ENODEV;
+ goto out;
+ }
+ err = mlx5e_attach_encap(priv, flow, attr, out_dev, out_index,
+ extack, &encap_dev);
+ dev_put(out_dev);
+ if (err)
+ goto out;
+
+ if (esw_attr->dests[out_index].flags &
+ MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE &&
+ !esw_attr->dest_int_port)
+ *vf_tun = true;
+
+ out_priv = netdev_priv(encap_dev);
+ rpriv = out_priv->ppriv;
+ esw_attr->dests[out_index].rep = rpriv->rep;
+ esw_attr->dests[out_index].mdev = out_priv->mdev;
+ }
+
+ if (*vf_tun && esw_attr->out_count > 1) {
+ NL_SET_ERR_MSG_MOD(extack, "VF tunnel encap with mirroring is not supported");
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+out:
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+ return err;
+}
+
+void mlx5e_tc_tun_encap_dests_unset(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5_esw_flow_attr *esw_attr;
+ int out_index;
+
+ if (!mlx5e_is_eswitch_flow(flow))
+ return;
+
+ esw_attr = attr->esw_attr;
+
+ for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
+ if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
+ continue;
+
+ mlx5e_detach_encap(flow->priv, flow, attr, out_index);
+ kfree(attr->parse_attr->tun_info[out_index]);
+ }
+}
+
static int cmp_route_info(struct mlx5e_route_key *a,
struct mlx5e_route_key *b)
{
@@ -1369,11 +1455,13 @@ static void mlx5e_invalidate_encap(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow;
list_for_each_entry(flow, encap_flows, tmp_list) {
- struct mlx5_flow_attr *attr = flow->attr;
struct mlx5_esw_flow_attr *esw_attr;
+ struct mlx5_flow_attr *attr;
if (!mlx5e_is_offloaded_flow(flow))
continue;
+
+ attr = mlx5e_tc_get_encap_attr(flow);
esw_attr = attr->esw_attr;
if (flow_flag_test(flow, SLOW))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h
index 8ad273dde40e..5d7d67687cbc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h
@@ -30,6 +30,15 @@ int mlx5e_attach_decap_route(struct mlx5e_priv *priv,
void mlx5e_detach_decap_route(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow);
+int mlx5e_tc_tun_encap_dests_set(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr,
+ struct netlink_ext_ack *extack,
+ bool *vf_tun);
+void mlx5e_tc_tun_encap_dests_unset(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr);
+
struct ip_tunnel_info *mlx5e_dup_tun_info(const struct ip_tunnel_info *tun_info);
int mlx5e_tc_set_attr_rx_tun(struct mlx5e_tc_flow *flow,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index 47381e949f1f..879d698b6119 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -193,6 +193,8 @@ static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size)
return pi;
}
+void mlx5e_txqsq_wake(struct mlx5e_txqsq *sq);
+
static inline u16 mlx5e_shampo_get_cqe_header_index(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
return be16_to_cpu(cqe->shampo.header_entry_index) & (rq->mpwqe.shampo->hd_per_wq - 1);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index ed279f450976..36826b582484 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -86,7 +86,7 @@ static int mlx5e_init_xsk_rq(struct mlx5e_channel *c,
if (err)
return err;
- return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq_xdp_ix, 0);
+ return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq_xdp_ix, c->napi.napi_id);
}
static int mlx5e_open_xsk_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 55b38544422f..891d39b4bfd4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -61,16 +61,19 @@ static void mlx5e_ipsec_handle_tx_limit(struct work_struct *_work)
struct mlx5e_ipsec_sa_entry *sa_entry = dwork->sa_entry;
struct xfrm_state *x = sa_entry->x;
- spin_lock(&x->lock);
+ if (sa_entry->attrs.drop)
+ return;
+
+ spin_lock_bh(&x->lock);
xfrm_state_check_expire(x);
if (x->km.state == XFRM_STATE_EXPIRED) {
sa_entry->attrs.drop = true;
- mlx5e_accel_ipsec_fs_modify(sa_entry);
- }
- spin_unlock(&x->lock);
+ spin_unlock_bh(&x->lock);
- if (sa_entry->attrs.drop)
+ mlx5e_accel_ipsec_fs_modify(sa_entry);
return;
+ }
+ spin_unlock_bh(&x->lock);
queue_delayed_work(sa_entry->ipsec->wq, &dwork->dwork,
MLX5_IPSEC_RESCHED);
@@ -1040,11 +1043,17 @@ err_fs:
return err;
}
-static void mlx5e_xfrm_free_policy(struct xfrm_policy *x)
+static void mlx5e_xfrm_del_policy(struct xfrm_policy *x)
{
struct mlx5e_ipsec_pol_entry *pol_entry = to_ipsec_pol_entry(x);
mlx5e_accel_ipsec_fs_del_pol(pol_entry);
+}
+
+static void mlx5e_xfrm_free_policy(struct xfrm_policy *x)
+{
+ struct mlx5e_ipsec_pol_entry *pol_entry = to_ipsec_pol_entry(x);
+
kfree(pol_entry);
}
@@ -1065,6 +1074,7 @@ static const struct xfrmdev_ops mlx5e_ipsec_packet_xfrmdev_ops = {
.xdo_dev_state_update_curlft = mlx5e_xfrm_update_curlft,
.xdo_dev_policy_add = mlx5e_xfrm_add_policy,
+ .xdo_dev_policy_delete = mlx5e_xfrm_del_policy,
.xdo_dev_policy_free = mlx5e_xfrm_free_policy,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
index df90e19066bc..a3554bde3e07 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
@@ -305,7 +305,17 @@ static void mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry,
}
mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &attrs);
+
+ /* It is safe to execute the modify below unlocked since the only flows
+ * that could affect this HW object are create, destroy and this work.
+ *
+ * The creation flow can't co-exist with this modify work, the
+ * destruction flow would cancel this work, and this work is a single
+ * entity that can't conflict with itself.
+ */
+ spin_unlock_bh(&sa_entry->x->lock);
mlx5_accel_esp_modify_xfrm(sa_entry, &attrs);
+ spin_lock_bh(&sa_entry->x->lock);
data.data_offset_condition_operand =
MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET;
@@ -431,7 +441,7 @@ static void mlx5e_ipsec_handle_event(struct work_struct *_work)
aso = sa_entry->ipsec->aso;
attrs = &sa_entry->attrs;
- spin_lock(&sa_entry->x->lock);
+ spin_lock_bh(&sa_entry->x->lock);
ret = mlx5e_ipsec_aso_query(sa_entry, NULL);
if (ret)
goto unlock;
@@ -447,7 +457,7 @@ static void mlx5e_ipsec_handle_event(struct work_struct *_work)
mlx5e_ipsec_handle_limits(sa_entry);
unlock:
- spin_unlock(&sa_entry->x->lock);
+ spin_unlock_bh(&sa_entry->x->lock);
kfree(work);
}
@@ -596,7 +606,8 @@ int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry,
do {
ret = mlx5_aso_poll_cq(aso->aso, false);
if (ret)
- usleep_range(2, 10);
+ /* We are in atomic context */
+ udelay(10);
} while (ret && time_is_after_jiffies(expires));
spin_unlock_bh(&aso->lock);
return ret;
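Two locking rules drive the ipsec_offload.c hunks above: the xfrm state lock is now taken _bh (it is presumably also taken from softirq context), so the poll loop can no longer sleep and usleep_range() becomes udelay(), and the slow firmware modify is bracketed by an unlock/lock pair as the in-code comment justifies. A sketch of that bracket, with hw_modify() hypothetical:

	static void modify_outside_lock(struct xfrm_state *x, void *attrs)
	{
		/* caller holds x->lock (bh-disabled) */
		spin_unlock_bh(&x->lock);	/* FW command may poll for a while */
		hw_modify(attrs);		/* hypothetical slow firmware call */
		spin_lock_bh(&x->lock);		/* re-take before touching state */
	}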
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index 1f90594499c6..41c396e76457 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -150,10 +150,8 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
in = kvzalloc(inlen, GFP_KERNEL);
- if (!in) {
- err = -ENOMEM;
- goto out;
- }
+ if (!in)
+ return -ENOMEM;
if (enable_uc_lb)
lb_flags = MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
@@ -171,14 +169,13 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
tirn = tir->tirn;
err = mlx5_core_modify_tir(mdev, tirn, in);
if (err)
- goto out;
+ break;
}
+ mutex_unlock(&mdev->mlx5e_res.hw_objs.td.list_lock);
-out:
kvfree(in);
if (err)
netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err);
- mutex_unlock(&mdev->mlx5e_res.hw_objs.td.list_lock);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 89de92d06483..ebee52a8361a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -926,9 +926,10 @@ static int mlx5e_dcbnl_getbuffer(struct net_device *dev,
if (err)
return err;
- for (i = 0; i < MLX5E_MAX_BUFFER; i++)
+ for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++)
dcb_buffer->buffer_size[i] = port_buffer.buffer[i].size;
- dcb_buffer->total_size = port_buffer.port_buffer_size;
+ dcb_buffer->total_size = port_buffer.port_buffer_size -
+ port_buffer.internal_buffers_size;
return 0;
}
@@ -970,7 +971,7 @@ static int mlx5e_dcbnl_setbuffer(struct net_device *dev,
if (err)
return err;
- for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
+ for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) {
if (port_buffer.buffer[i].size != dcb_buffer->buffer_size[i]) {
changed |= MLX5E_PORT_BUFFER_SIZE;
buffer_size = dcb_buffer->buffer_size;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 2944691f06ad..a5bdf78955d7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -641,7 +641,7 @@ static void mlx5e_free_mpwqe_rq_drop_page(struct mlx5e_rq *rq)
}
static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
- struct mlx5e_rq *rq)
+ u32 xdp_frag_size, struct mlx5e_rq *rq)
{
struct mlx5_core_dev *mdev = c->mdev;
int err;
@@ -665,7 +665,8 @@ static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param
if (err)
return err;
- return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix, c->napi.napi_id);
+ return __xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix, c->napi.napi_id,
+ xdp_frag_size);
}
static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
@@ -727,26 +728,6 @@ static void mlx5e_rq_free_shampo(struct mlx5e_rq *rq)
mlx5e_rq_shampo_hd_free(rq);
}
-static __be32 mlx5e_get_terminate_scatter_list_mkey(struct mlx5_core_dev *dev)
-{
- u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {};
- u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {};
- int res;
-
- if (!MLX5_CAP_GEN(dev, terminate_scatter_list_mkey))
- return MLX5_TERMINATE_SCATTER_LIST_LKEY;
-
- MLX5_SET(query_special_contexts_in, in, opcode,
- MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
- res = mlx5_cmd_exec_inout(dev, query_special_contexts, in, out);
- if (res)
- return MLX5_TERMINATE_SCATTER_LIST_LKEY;
-
- res = MLX5_GET(query_special_contexts_out, out,
- terminate_scatter_list_mkey);
- return cpu_to_be32(res);
-}
-
static int mlx5e_alloc_rq(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
struct mlx5e_rq_param *rqp,
@@ -908,7 +889,7 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
/* check if num_frags is not a pow of two */
if (rq->wqe.info.num_frags < (1 << rq->wqe.info.log_num_frags)) {
wqe->data[f].byte_count = 0;
- wqe->data[f].lkey = mlx5e_get_terminate_scatter_list_mkey(mdev);
+ wqe->data[f].lkey = params->terminate_lkey_be;
wqe->data[f].addr = 0;
}
}
@@ -2260,7 +2241,7 @@ static int mlx5e_open_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param
{
int err;
- err = mlx5e_init_rxq_rq(c, params, &c->rq);
+ err = mlx5e_init_rxq_rq(c, params, rq_params->xdp_frag_size, &c->rq);
if (err)
return err;
@@ -5007,6 +4988,8 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
/* RQ */
mlx5e_build_rq_params(mdev, params);
+ params->terminate_lkey_be = mlx5_core_get_terminate_scatter_list_mkey(mdev);
+
params->packet_merge.timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
/* CQ moderation params */
@@ -5279,12 +5262,16 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
mlx5e_timestamp_init(priv);
+ priv->dfs_root = debugfs_create_dir("nic",
+ mlx5_debugfs_get_dev_root(mdev));
+
fs = mlx5e_fs_init(priv->profile, mdev,
!test_bit(MLX5E_STATE_DESTROYING, &priv->state),
priv->dfs_root);
if (!fs) {
err = -ENOMEM;
mlx5_core_err(mdev, "FS initialization failed, %d\n", err);
+ debugfs_remove_recursive(priv->dfs_root);
return err;
}
priv->fs = fs;
@@ -5305,6 +5292,7 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
mlx5e_health_destroy_reporters(priv);
mlx5e_ktls_cleanup(priv);
mlx5e_fs_cleanup(priv->fs);
+ debugfs_remove_recursive(priv->dfs_root);
priv->fs = NULL;
}
@@ -5851,8 +5839,8 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv)
}
static int
-mlx5e_netdev_attach_profile(struct net_device *netdev, struct mlx5_core_dev *mdev,
- const struct mlx5e_profile *new_profile, void *new_ppriv)
+mlx5e_netdev_init_profile(struct net_device *netdev, struct mlx5_core_dev *mdev,
+ const struct mlx5e_profile *new_profile, void *new_ppriv)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
int err;
@@ -5868,6 +5856,25 @@ mlx5e_netdev_attach_profile(struct net_device *netdev, struct mlx5_core_dev *mde
err = new_profile->init(priv->mdev, priv->netdev);
if (err)
goto priv_cleanup;
+
+ return 0;
+
+priv_cleanup:
+ mlx5e_priv_cleanup(priv);
+ return err;
+}
+
+static int
+mlx5e_netdev_attach_profile(struct net_device *netdev, struct mlx5_core_dev *mdev,
+ const struct mlx5e_profile *new_profile, void *new_ppriv)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ int err;
+
+ err = mlx5e_netdev_init_profile(netdev, mdev, new_profile, new_ppriv);
+ if (err)
+ return err;
+
err = mlx5e_attach_netdev(priv);
if (err)
goto profile_cleanup;
@@ -5875,7 +5882,6 @@ mlx5e_netdev_attach_profile(struct net_device *netdev, struct mlx5_core_dev *mde
profile_cleanup:
new_profile->cleanup(priv);
-priv_cleanup:
mlx5e_priv_cleanup(priv);
return err;
}
@@ -5894,6 +5900,12 @@ int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
priv->profile->cleanup(priv);
mlx5e_priv_cleanup(priv);
+ if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ mlx5e_netdev_init_profile(netdev, mdev, new_profile, new_ppriv);
+ set_bit(MLX5E_STATE_DESTROYING, &priv->state);
+ return -EIO;
+ }
+
err = mlx5e_netdev_attach_profile(netdev, mdev, new_profile, new_ppriv);
if (err) { /* roll back to original profile */
netdev_warn(netdev, "%s: new profile init failed, %d\n", __func__, err);
@@ -5955,8 +5967,11 @@ static int mlx5e_suspend(struct auxiliary_device *adev, pm_message_t state)
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
- if (!netif_device_present(netdev))
+ if (!netif_device_present(netdev)) {
+ if (test_bit(MLX5E_STATE_DESTROYING, &priv->state))
+ mlx5e_destroy_mdev_resources(mdev);
return -ENODEV;
+ }
mlx5e_detach_netdev(priv);
mlx5e_destroy_mdev_resources(mdev);
@@ -6002,9 +6017,6 @@ static int mlx5e_probe(struct auxiliary_device *adev,
priv->profile = profile;
priv->ppriv = NULL;
- priv->dfs_root = debugfs_create_dir("nic",
- mlx5_debugfs_get_dev_root(priv->mdev));
-
err = profile->init(mdev, netdev);
if (err) {
mlx5_core_err(mdev, "mlx5e_nic_profile init failed, %d\n", err);
@@ -6033,7 +6045,6 @@ err_resume:
err_profile_cleanup:
profile->cleanup(priv);
err_destroy_netdev:
- debugfs_remove_recursive(priv->dfs_root);
mlx5e_destroy_netdev(priv);
err_devlink_port_unregister:
mlx5e_devlink_port_unregister(mlx5e_dev);
@@ -6053,7 +6064,6 @@ static void mlx5e_remove(struct auxiliary_device *adev)
unregister_netdev(priv->netdev);
mlx5e_suspend(adev, state);
priv->profile->cleanup(priv);
- debugfs_remove_recursive(priv->dfs_root);
mlx5e_destroy_netdev(priv);
mlx5e_devlink_port_unregister(mlx5e_dev);
mlx5e_destroy_devlink(mlx5e_dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 1fc386eccaf8..3e7041bd5705 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -30,6 +30,7 @@
* SOFTWARE.
*/
+#include <linux/debugfs.h>
#include <linux/mlx5/fs.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
@@ -812,11 +813,15 @@ static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ priv->dfs_root = debugfs_create_dir("nic",
+ mlx5_debugfs_get_dev_root(mdev));
+
priv->fs = mlx5e_fs_init(priv->profile, mdev,
!test_bit(MLX5E_STATE_DESTROYING, &priv->state),
priv->dfs_root);
if (!priv->fs) {
netdev_err(priv->netdev, "FS allocation failed\n");
+ debugfs_remove_recursive(priv->dfs_root);
return -ENOMEM;
}
@@ -829,6 +834,7 @@ static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev,
static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
{
mlx5e_fs_cleanup(priv->fs);
+ debugfs_remove_recursive(priv->dfs_root);
priv->fs = NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 728b82ce4031..b9b1da751a3b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1439,6 +1439,7 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
mlx5e_hairpin_flow_del(priv, flow);
free_flow_post_acts(flow);
+ mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), attr);
kvfree(attr->parse_attr);
kfree(flow->attr);
@@ -1665,11 +1666,9 @@ bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_
int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev, u16 *vport)
{
struct mlx5e_priv *out_priv, *route_priv;
- struct mlx5_devcom *devcom = NULL;
struct mlx5_core_dev *route_mdev;
struct mlx5_eswitch *esw;
u16 vhca_id;
- int err;
out_priv = netdev_priv(out_dev);
esw = out_priv->mdev->priv.eswitch;
@@ -1678,6 +1677,9 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro
vhca_id = MLX5_CAP_GEN(route_mdev, vhca_id);
if (mlx5_lag_is_active(out_priv->mdev)) {
+ struct mlx5_devcom *devcom;
+ int err;
+
/* In lag case we may get devices from different eswitch instances.
* If we failed to get the vport num, it most likely means we are on the
* wrong eswitch.
@@ -1686,101 +1688,16 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro
if (err != -ENOENT)
return err;
+ rcu_read_lock();
devcom = out_priv->mdev->priv.devcom;
- esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
- if (!esw)
- return -ENODEV;
- }
-
- err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
- if (devcom)
- mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
- return err;
-}
-
-static int
-set_encap_dests(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow,
- struct mlx5_flow_attr *attr,
- struct netlink_ext_ack *extack,
- bool *vf_tun)
-{
- struct mlx5e_tc_flow_parse_attr *parse_attr;
- struct mlx5_esw_flow_attr *esw_attr;
- struct net_device *encap_dev = NULL;
- struct mlx5e_rep_priv *rpriv;
- struct mlx5e_priv *out_priv;
- int out_index;
- int err = 0;
-
- if (!mlx5e_is_eswitch_flow(flow))
- return 0;
-
- parse_attr = attr->parse_attr;
- esw_attr = attr->esw_attr;
- *vf_tun = false;
-
- for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
- struct net_device *out_dev;
- int mirred_ifindex;
-
- if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
- continue;
-
- mirred_ifindex = parse_attr->mirred_ifindex[out_index];
- out_dev = dev_get_by_index(dev_net(priv->netdev), mirred_ifindex);
- if (!out_dev) {
- NL_SET_ERR_MSG_MOD(extack, "Requested mirred device not found");
- err = -ENODEV;
- goto out;
- }
- err = mlx5e_attach_encap(priv, flow, attr, out_dev, out_index,
- extack, &encap_dev);
- dev_put(out_dev);
- if (err)
- goto out;
-
- if (esw_attr->dests[out_index].flags &
- MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE &&
- !esw_attr->dest_int_port)
- *vf_tun = true;
+ esw = mlx5_devcom_get_peer_data_rcu(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+ err = esw ? mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport) : -ENODEV;
+ rcu_read_unlock();
- out_priv = netdev_priv(encap_dev);
- rpriv = out_priv->ppriv;
- esw_attr->dests[out_index].rep = rpriv->rep;
- esw_attr->dests[out_index].mdev = out_priv->mdev;
- }
-
- if (*vf_tun && esw_attr->out_count > 1) {
- NL_SET_ERR_MSG_MOD(extack, "VF tunnel encap with mirroring is not supported");
- err = -EOPNOTSUPP;
- goto out;
+ return err;
}
-out:
- return err;
-}
-
-static void
-clean_encap_dests(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow,
- struct mlx5_flow_attr *attr)
-{
- struct mlx5_esw_flow_attr *esw_attr;
- int out_index;
-
- if (!mlx5e_is_eswitch_flow(flow))
- return;
-
- esw_attr = attr->esw_attr;
-
- for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
- if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
- continue;
-
- mlx5e_detach_encap(priv, flow, attr, out_index);
- kfree(attr->parse_attr->tun_info[out_index]);
- }
+ return mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
}
static int
@@ -1819,7 +1736,7 @@ post_process_attr(struct mlx5e_tc_flow *flow,
if (err)
goto err_out;
- err = set_encap_dests(flow->priv, flow, attr, extack, &vf_tun);
+ err = mlx5e_tc_tun_encap_dests_set(flow->priv, flow, attr, extack, &vf_tun);
if (err)
goto err_out;
@@ -3943,8 +3860,8 @@ parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5_flow_attr *prev_attr;
struct flow_action_entry *act;
struct mlx5e_tc_act *tc_act;
+ int err, i, i_split = 0;
bool is_missable;
- int err, i;
ns_type = mlx5e_get_flow_namespace(flow);
list_add(&attr->list, &flow->attrs);
@@ -3985,7 +3902,8 @@ parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
i < flow_action->num_entries - 1)) {
is_missable = tc_act->is_missable ? tc_act->is_missable(act) : false;
- err = mlx5e_tc_act_post_parse(parse_state, flow_action, attr, ns_type);
+ err = mlx5e_tc_act_post_parse(parse_state, flow_action, i_split, i, attr,
+ ns_type);
if (err)
goto out_free_post_acts;
@@ -3995,6 +3913,7 @@ parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
goto out_free_post_acts;
}
+ i_split = i + 1;
list_add(&attr->list, &flow->attrs);
}
@@ -4009,7 +3928,7 @@ parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
}
}
- err = mlx5e_tc_act_post_parse(parse_state, flow_action, attr, ns_type);
+ err = mlx5e_tc_act_post_parse(parse_state, flow_action, i_split, i, attr, ns_type);
if (err)
goto out_free_post_acts;
@@ -4323,7 +4242,7 @@ mlx5_free_flow_attr_actions(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *a
if (attr->post_act_handle)
mlx5e_tc_post_act_del(get_post_action(flow->priv), attr->post_act_handle);
- clean_encap_dests(flow->priv, flow, attr);
+ mlx5e_tc_tun_encap_dests_unset(flow->priv, flow, attr);
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
mlx5_fc_destroy(counter_dev, attr->counter);
@@ -5301,6 +5220,8 @@ int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv)
goto err_action_counter;
}
+ mlx5_esw_offloads_devcom_init(esw);
+
return 0;
err_action_counter:
@@ -5329,7 +5250,7 @@ void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv)
priv = netdev_priv(rpriv->netdev);
esw = priv->mdev->priv.eswitch;
- mlx5e_tc_clean_fdb_peer_flows(esw);
+ mlx5_esw_offloads_devcom_cleanup(esw);
mlx5e_tc_tun_cleanup(uplink_priv->encap);
@@ -5643,22 +5564,43 @@ bool mlx5e_tc_update_skb_nic(struct mlx5_cqe64 *cqe, struct sk_buff *skb)
0, NULL);
}
+static struct mapping_ctx *
+mlx5e_get_priv_obj_mapping(struct mlx5e_priv *priv)
+{
+ struct mlx5e_tc_table *tc;
+ struct mlx5_eswitch *esw;
+ struct mapping_ctx *ctx;
+
+ if (is_mdev_switchdev_mode(priv->mdev)) {
+ esw = priv->mdev->priv.eswitch;
+ ctx = esw->offloads.reg_c0_obj_pool;
+ } else {
+ tc = mlx5e_fs_get_tc(priv->fs);
+ ctx = tc->mapping;
+ }
+
+ return ctx;
+}
+
int mlx5e_tc_action_miss_mapping_get(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr,
u64 act_miss_cookie, u32 *act_miss_mapping)
{
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_mapped_obj mapped_obj = {};
+ struct mlx5_eswitch *esw;
struct mapping_ctx *ctx;
int err;
- ctx = esw->offloads.reg_c0_obj_pool;
-
+ ctx = mlx5e_get_priv_obj_mapping(priv);
mapped_obj.type = MLX5_MAPPED_OBJ_ACT_MISS;
mapped_obj.act_miss_cookie = act_miss_cookie;
err = mapping_add(ctx, &mapped_obj, act_miss_mapping);
if (err)
return err;
+ if (!is_mdev_switchdev_mode(priv->mdev))
+ return 0;
+
+ esw = priv->mdev->priv.eswitch;
attr->act_id_restore_rule = esw_add_restore_rule(esw, *act_miss_mapping);
if (IS_ERR(attr->act_id_restore_rule))
goto err_rule;
@@ -5673,10 +5615,9 @@ err_rule:
void mlx5e_tc_action_miss_mapping_put(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr,
u32 act_miss_mapping)
{
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mapping_ctx *ctx;
+ struct mapping_ctx *ctx = mlx5e_get_priv_obj_mapping(priv);
- ctx = esw->offloads.reg_c0_obj_pool;
- mlx5_del_flow_rules(attr->act_id_restore_rule);
+ if (is_mdev_switchdev_mode(priv->mdev))
+ mlx5_del_flow_rules(attr->act_id_restore_rule);
mapping_remove(ctx, act_miss_mapping);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index df5e780e8e6a..c7eb6b238c2b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -762,6 +762,17 @@ static void mlx5e_tx_wi_consume_fifo_skbs(struct mlx5e_txqsq *sq, struct mlx5e_t
}
}
+void mlx5e_txqsq_wake(struct mlx5e_txqsq *sq)
+{
+ if (netif_tx_queue_stopped(sq->txq) &&
+ mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) &&
+ mlx5e_ptpsq_fifo_has_room(sq) &&
+ !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
+ netif_tx_wake_queue(sq->txq);
+ sq->stats->wake++;
+ }
+}
+
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
struct mlx5e_sq_stats *stats;
@@ -861,13 +872,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
netdev_tx_completed_queue(sq->txq, npkts, nbytes);
- if (netif_tx_queue_stopped(sq->txq) &&
- mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) &&
- mlx5e_ptpsq_fifo_has_room(sq) &&
- !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
- netif_tx_wake_queue(sq->txq);
- stats->wake++;
- }
+ mlx5e_txqsq_wake(sq);
return (i == MLX5E_TX_CQ_POLL_BUDGET);
}
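Extracting the wake check into mlx5e_txqsq_wake() lets the PTP timestamp CQ path share it with the regular TX CQ path. For reference, the two call sites in this patch:

	/* en/ptp.c, after freeing CQ space */
	wmb();
	mlx5e_txqsq_wake(&ptpsq->txqsq);

	/* en_tx.c, at the end of TX CQ polling */
	netdev_tx_completed_queue(sq->txq, npkts, nbytes);
	mlx5e_txqsq_wake(sq);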
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index a50bfda18e96..fbb2d963fb7e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -161,20 +161,22 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
}
}
+ /* budget=0 means we may be in IRQ context, do as little as possible */
+ if (unlikely(!budget))
+ goto out;
+
busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq.cq);
if (c->xdp)
busy |= mlx5e_poll_xdpsq_cq(&c->rq_xdpsq.cq);
- if (likely(budget)) { /* budget=0 means: don't poll rx rings */
- if (xsk_open)
- work_done = mlx5e_poll_rx_cq(&xskrq->cq, budget);
+ if (xsk_open)
+ work_done = mlx5e_poll_rx_cq(&xskrq->cq, budget);
- if (likely(budget - work_done))
- work_done += mlx5e_poll_rx_cq(&rq->cq, budget - work_done);
+ if (likely(budget - work_done))
+ work_done += mlx5e_poll_rx_cq(&rq->cq, budget - work_done);
- busy |= work_done == budget;
- }
+ busy |= work_done == budget;
mlx5e_poll_ico_cq(&c->icosq.cq);
if (mlx5e_poll_ico_cq(&c->async_icosq.cq))
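The rewrite encodes the napi contract that budget == 0 (e.g. netpoll running in IRQ-like context) permits TX completion work only — no XDP SQ or RX polling and no napi_complete. A compressed sketch of that contract (helper names hypothetical):

	static int napi_poll_sketch(struct napi_struct *napi, int budget)
	{
		poll_tx_cqs();			/* always safe, frees TX descriptors */

		if (unlikely(!budget))
			return 0;		/* IRQ context: stop here */

		poll_xdp_sq_cqs();
		return poll_rx_cqs(budget);	/* caller handles napi_complete */
	}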
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 1c35d721a31d..3db4866d7880 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -824,7 +824,7 @@ static int comp_irqs_request_pci(struct mlx5_core_dev *dev)
ncomp_eqs = table->num_comp_eqs;
cpus = kcalloc(ncomp_eqs, sizeof(*cpus), GFP_KERNEL);
if (!cpus)
- ret = -ENOMEM;
+ return -ENOMEM;
i = 0;
rcu_read_lock();
@@ -1104,7 +1104,7 @@ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
struct mlx5_eq_table *table = dev->priv.eq_table;
mutex_lock(&table->lock); /* sync with create/destroy_async_eq */
- mlx5_irq_table_destroy(dev);
+ mlx5_irq_table_free_irqs(dev);
mutex_unlock(&table->lock);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 1a042c981713..add6cfa432a5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -342,6 +342,7 @@ struct mlx5_eswitch {
u32 large_group_num;
} params;
struct blocking_notifier_head n_head;
+ bool paired[MLX5_MAX_PORTS];
};
void esw_offloads_disable(struct mlx5_eswitch *esw);
@@ -369,6 +370,8 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs);
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf);
void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw);
void mlx5_eswitch_disable(struct mlx5_eswitch *esw);
+void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw);
+void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw);
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
u16 vport, const u8 *mac);
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
@@ -767,6 +770,8 @@ static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; }
static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf) {}
static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {}
+static inline void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw) {}
+static inline void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) {}
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
static inline
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 69215ffb9999..8d19c20d3447 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -2742,6 +2742,9 @@ static int mlx5_esw_offloads_devcom_event(int event,
mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
break;
+ if (esw->paired[mlx5_get_dev_index(peer_esw->dev)])
+ break;
+
err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
if (err)
goto err_out;
@@ -2753,14 +2756,18 @@ static int mlx5_esw_offloads_devcom_event(int event,
if (err)
goto err_pair;
+ esw->paired[mlx5_get_dev_index(peer_esw->dev)] = true;
+ peer_esw->paired[mlx5_get_dev_index(esw->dev)] = true;
mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
break;
case ESW_OFFLOADS_DEVCOM_UNPAIR:
- if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
+ if (!esw->paired[mlx5_get_dev_index(peer_esw->dev)])
break;
mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
+ esw->paired[mlx5_get_dev_index(peer_esw->dev)] = false;
+ peer_esw->paired[mlx5_get_dev_index(esw->dev)] = false;
mlx5_esw_offloads_unpair(peer_esw);
mlx5_esw_offloads_unpair(esw);
mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
@@ -2779,7 +2786,7 @@ err_out:
return err;
}
-static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
+void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw)
{
struct mlx5_devcom *devcom = esw->dev->priv.devcom;
@@ -2802,7 +2809,7 @@ static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
ESW_OFFLOADS_DEVCOM_PAIR, esw);
}
-static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
+void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
{
struct mlx5_devcom *devcom = esw->dev->priv.devcom;
@@ -3250,8 +3257,6 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
if (err)
goto err_vports;
- esw_offloads_devcom_init(esw);
-
return 0;
err_vports:
@@ -3292,7 +3297,6 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
void esw_offloads_disable(struct mlx5_eswitch *esw)
{
- esw_offloads_devcom_cleanup(esw);
mlx5_eswitch_disable_pf_vf_vports(esw);
esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
esw_set_passing_vport_metadata(esw, false);
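A minimal sketch (not part of the patch) of the idempotence guard the new paired[] flags provide, assuming mlx5_get_dev_index() returns a stable per-device index below MLX5_MAX_PORTS:
/* Duplicate PAIR/UNPAIR events for the same peer become no-ops. */
static bool esw_peer_event_is_noop(struct mlx5_eswitch *esw,
                                   struct mlx5_eswitch *peer_esw,
                                   bool pairing)
{
        bool paired = esw->paired[mlx5_get_dev_index(peer_esw->dev)];

        return pairing ? paired : !paired;
}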
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 144e59480686..ec83e6483d1a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -511,10 +511,11 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
struct mlx5_flow_rule *dst;
void *in_flow_context, *vlan;
void *in_match_value;
+ int reformat_id = 0;
unsigned int inlen;
int dst_cnt_size;
+ u32 *in, action;
void *in_dests;
- u32 *in;
int err;
if (mlx5_set_extended_dest(dev, fte, &extended_dest))
@@ -553,22 +554,42 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(flow_context, in_flow_context, extended_destination,
extended_dest);
- if (extended_dest) {
- u32 action;
- action = fte->action.action &
- ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
- MLX5_SET(flow_context, in_flow_context, action, action);
- } else {
- MLX5_SET(flow_context, in_flow_context, action,
- fte->action.action);
- if (fte->action.pkt_reformat)
- MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
- fte->action.pkt_reformat->id);
+ action = fte->action.action;
+ if (extended_dest)
+ action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+
+ MLX5_SET(flow_context, in_flow_context, action, action);
+
+ if (!extended_dest && fte->action.pkt_reformat) {
+ struct mlx5_pkt_reformat *pkt_reformat = fte->action.pkt_reformat;
+
+ if (pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_SW) {
+ reformat_id = mlx5_fs_dr_action_get_pkt_reformat_id(pkt_reformat);
+ if (reformat_id < 0) {
+ mlx5_core_err(dev,
+ "Unsupported SW-owned pkt_reformat type (%d) in FW-owned table\n",
+ pkt_reformat->reformat_type);
+ err = reformat_id;
+ goto err_out;
+ }
+ } else {
+ reformat_id = fte->action.pkt_reformat->id;
+ }
}
- if (fte->action.modify_hdr)
+
+ MLX5_SET(flow_context, in_flow_context, packet_reformat_id, (u32)reformat_id);
+
+ if (fte->action.modify_hdr) {
+ if (fte->action.modify_hdr->owner == MLX5_FLOW_RESOURCE_OWNER_SW) {
+ mlx5_core_err(dev, "Can't use SW-owned modify_hdr in FW-owned table\n");
+ err = -EOPNOTSUPP;
+ goto err_out;
+ }
+
MLX5_SET(flow_context, in_flow_context, modify_header_id,
fte->action.modify_hdr->id);
+ }
MLX5_SET(flow_context, in_flow_context, encrypt_decrypt_type,
fte->action.crypto.type);
@@ -885,6 +906,8 @@ static int mlx5_cmd_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
pkt_reformat->id = MLX5_GET(alloc_packet_reformat_context_out,
out, packet_reformat_id);
+ pkt_reformat->owner = MLX5_FLOW_RESOURCE_OWNER_FW;
+
kfree(in);
return err;
}
@@ -969,6 +992,7 @@ static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
modify_hdr->id = MLX5_GET(alloc_modify_header_context_out, out, modify_header_id);
+ modify_hdr->owner = MLX5_FLOW_RESOURCE_OWNER_FW;
kfree(in);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index f137a0611b77..b043190e50a8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -54,8 +54,14 @@ struct mlx5_flow_definer {
u32 id;
};
+enum mlx5_flow_resource_owner {
+ MLX5_FLOW_RESOURCE_OWNER_FW,
+ MLX5_FLOW_RESOURCE_OWNER_SW,
+};
+
struct mlx5_modify_hdr {
enum mlx5_flow_namespace_type ns_type;
+ enum mlx5_flow_resource_owner owner;
union {
struct mlx5_fs_dr_action action;
u32 id;
@@ -65,6 +71,7 @@ struct mlx5_modify_hdr {
struct mlx5_pkt_reformat {
enum mlx5_flow_namespace_type ns_type;
int reformat_type; /* from mlx5_ifc */
+ enum mlx5_flow_resource_owner owner;
union {
struct mlx5_fs_dr_action action;
u32 id;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
index adefde3ea941..b7d779d08d83 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
@@ -3,6 +3,7 @@
#include <linux/mlx5/vport.h>
#include "lib/devcom.h"
+#include "mlx5_core.h"
static LIST_HEAD(devcom_list);
@@ -13,7 +14,7 @@ static LIST_HEAD(devcom_list);
struct mlx5_devcom_component {
struct {
- void *data;
+ void __rcu *data;
} device[MLX5_DEVCOM_PORTS_SUPPORTED];
mlx5_devcom_event_handler_t handler;
@@ -77,6 +78,7 @@ struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev)
if (MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_DEVCOM_PORTS_SUPPORTED)
return NULL;
+ mlx5_dev_list_lock();
sguid0 = mlx5_query_nic_system_image_guid(dev);
list_for_each_entry(iter, &devcom_list, list) {
struct mlx5_core_dev *tmp_dev = NULL;
@@ -102,8 +104,10 @@ struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev)
if (!priv) {
priv = mlx5_devcom_list_alloc();
- if (!priv)
- return ERR_PTR(-ENOMEM);
+ if (!priv) {
+ devcom = ERR_PTR(-ENOMEM);
+ goto out;
+ }
idx = 0;
new_priv = true;
@@ -112,13 +116,16 @@ struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev)
priv->devs[idx] = dev;
devcom = mlx5_devcom_alloc(priv, idx);
if (!devcom) {
- kfree(priv);
- return ERR_PTR(-ENOMEM);
+ if (new_priv)
+ kfree(priv);
+ devcom = ERR_PTR(-ENOMEM);
+ goto out;
}
if (new_priv)
list_add(&priv->list, &devcom_list);
-
+out:
+ mlx5_dev_list_unlock();
return devcom;
}
@@ -131,6 +138,7 @@ void mlx5_devcom_unregister_device(struct mlx5_devcom *devcom)
if (IS_ERR_OR_NULL(devcom))
return;
+ mlx5_dev_list_lock();
priv = devcom->priv;
priv->devs[devcom->idx] = NULL;
@@ -141,10 +149,12 @@ void mlx5_devcom_unregister_device(struct mlx5_devcom *devcom)
break;
if (i != MLX5_DEVCOM_PORTS_SUPPORTED)
- return;
+ goto out;
list_del(&priv->list);
kfree(priv);
+out:
+ mlx5_dev_list_unlock();
}
void mlx5_devcom_register_component(struct mlx5_devcom *devcom,
@@ -162,7 +172,7 @@ void mlx5_devcom_register_component(struct mlx5_devcom *devcom,
comp = &devcom->priv->components[id];
down_write(&comp->sem);
comp->handler = handler;
- comp->device[devcom->idx].data = data;
+ rcu_assign_pointer(comp->device[devcom->idx].data, data);
up_write(&comp->sem);
}
@@ -176,8 +186,9 @@ void mlx5_devcom_unregister_component(struct mlx5_devcom *devcom,
comp = &devcom->priv->components[id];
down_write(&comp->sem);
- comp->device[devcom->idx].data = NULL;
+ RCU_INIT_POINTER(comp->device[devcom->idx].data, NULL);
up_write(&comp->sem);
+ synchronize_rcu();
}
int mlx5_devcom_send_event(struct mlx5_devcom *devcom,
@@ -193,12 +204,15 @@ int mlx5_devcom_send_event(struct mlx5_devcom *devcom,
comp = &devcom->priv->components[id];
down_write(&comp->sem);
- for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++)
- if (i != devcom->idx && comp->device[i].data) {
- err = comp->handler(event, comp->device[i].data,
- event_data);
+ for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++) {
+ void *data = rcu_dereference_protected(comp->device[i].data,
+ lockdep_is_held(&comp->sem));
+
+ if (i != devcom->idx && data) {
+ err = comp->handler(event, data, event_data);
break;
}
+ }
up_write(&comp->sem);
return err;
@@ -213,7 +227,7 @@ void mlx5_devcom_set_paired(struct mlx5_devcom *devcom,
comp = &devcom->priv->components[id];
WARN_ON(!rwsem_is_locked(&comp->sem));
- comp->paired = paired;
+ WRITE_ONCE(comp->paired, paired);
}
bool mlx5_devcom_is_paired(struct mlx5_devcom *devcom,
@@ -222,7 +236,7 @@ bool mlx5_devcom_is_paired(struct mlx5_devcom *devcom,
if (IS_ERR_OR_NULL(devcom))
return false;
- return devcom->priv->components[id].paired;
+ return READ_ONCE(devcom->priv->components[id].paired);
}
void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom,
@@ -236,7 +250,7 @@ void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom,
comp = &devcom->priv->components[id];
down_read(&comp->sem);
- if (!comp->paired) {
+ if (!READ_ONCE(comp->paired)) {
up_read(&comp->sem);
return NULL;
}
@@ -245,7 +259,29 @@ void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom,
if (i != devcom->idx)
break;
- return comp->device[i].data;
+ return rcu_dereference_protected(comp->device[i].data, lockdep_is_held(&comp->sem));
+}
+
+void *mlx5_devcom_get_peer_data_rcu(struct mlx5_devcom *devcom, enum mlx5_devcom_components id)
+{
+ struct mlx5_devcom_component *comp;
+ int i;
+
+ if (IS_ERR_OR_NULL(devcom))
+ return NULL;
+
+ for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++)
+ if (i != devcom->idx)
+ break;
+
+ comp = &devcom->priv->components[id];
+ /* This can change concurrently, but the 'data' pointer will remain
+ * valid for the duration of the RCU read-side critical section.
+ */
+ if (!READ_ONCE(comp->paired))
+ return NULL;
+
+ return rcu_dereference(comp->device[i].data);
}
void mlx5_devcom_release_peer_data(struct mlx5_devcom *devcom,
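mlx5_devcom_get_peer_data_rcu() is only safe under rcu_read_lock(); a reader-side sketch, with use_peer() as a hypothetical non-sleeping consumer:
/* The writer NULLs the pointer with RCU_INIT_POINTER() and then calls
 * synchronize_rcu(), so 'peer' stays valid until rcu_read_unlock().
 */
void *peer;

rcu_read_lock();
peer = mlx5_devcom_get_peer_data_rcu(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
if (peer)
        use_peer(peer);        /* hypothetical; must not sleep */
rcu_read_unlock();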
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
index 94313c18bb64..9a496f4722da 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
@@ -41,6 +41,7 @@ bool mlx5_devcom_is_paired(struct mlx5_devcom *devcom,
void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom,
enum mlx5_devcom_components id);
+void *mlx5_devcom_get_peer_data_rcu(struct mlx5_devcom *devcom, enum mlx5_devcom_components id);
void mlx5_devcom_release_peer_data(struct mlx5_devcom *devcom,
enum mlx5_devcom_components id);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 995eb2d5ace0..d6ee016deae1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -923,7 +923,6 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev,
}
mlx5_pci_vsc_init(dev);
- dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev);
return 0;
err_clr_master:
@@ -1049,7 +1048,7 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
dev->dm = mlx5_dm_create(dev);
if (IS_ERR(dev->dm))
- mlx5_core_warn(dev, "Failed to init device memory%d\n", err);
+ mlx5_core_warn(dev, "Failed to init device memory %ld\n", PTR_ERR(dev->dm));
dev->tracer = mlx5_fw_tracer_create(dev);
dev->hv_vhca = mlx5_hv_vhca_create(dev);
@@ -1155,6 +1154,7 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot, u64 timeout
goto err_cmd_cleanup;
}
+ dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev);
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_UP);
mlx5_start_health_poll(dev);
@@ -1802,15 +1802,16 @@ static void remove_one(struct pci_dev *pdev)
struct devlink *devlink = priv_to_devlink(dev);
set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
- /* mlx5_drain_fw_reset() is using devlink APIs. Hence, we must drain
- * fw_reset before unregistering the devlink.
+ /* mlx5_drain_fw_reset() and mlx5_drain_health_wq() are using
+ * devlink notify APIs.
+ * Hence, we must drain them before unregistering the devlink.
*/
mlx5_drain_fw_reset(dev);
+ mlx5_drain_health_wq(dev);
devlink_unregister(devlink);
mlx5_sriov_disable(pdev);
mlx5_thermal_uninit(dev);
mlx5_crdump_disable(dev);
- mlx5_drain_health_wq(dev);
mlx5_uninit_one(dev);
mlx5_pci_close(dev);
mlx5_mdev_uninit(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 1d879374acaa..229520405d4a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -276,18 +276,6 @@ static inline bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev)
return pci_num_vf(dev->pdev) ? true : false;
}
-static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev)
-{
- /* LACP owner conditions:
- * 1) Function is physical.
- * 2) LAG is supported by FW.
- * 3) LAG is managed by driver (currently the only option).
- */
- return MLX5_CAP_GEN(dev, vport_group_manager) &&
- (MLX5_CAP_GEN(dev, num_lag_ports) > 1) &&
- MLX5_CAP_GEN(dev, lag_master);
-}
-
int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev);
static inline int mlx5_rescan_drivers(struct mlx5_core_dev *dev)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
index efd0c299c5c7..aa403a5ea34e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
@@ -15,6 +15,7 @@ int mlx5_irq_table_init(struct mlx5_core_dev *dev);
void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev);
int mlx5_irq_table_create(struct mlx5_core_dev *dev);
void mlx5_irq_table_destroy(struct mlx5_core_dev *dev);
+void mlx5_irq_table_free_irqs(struct mlx5_core_dev *dev);
int mlx5_irq_table_get_num_comp(struct mlx5_irq_table *table);
int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table);
struct mlx5_irq_table *mlx5_irq_table_get(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index 9d735c343a3b..678f0be81375 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -32,6 +32,7 @@
#include <linux/kernel.h>
#include <linux/mlx5/driver.h>
+#include <linux/mlx5/qp.h>
#include "mlx5_core.h"
int mlx5_core_create_mkey(struct mlx5_core_dev *dev, u32 *mkey, u32 *in,
@@ -122,3 +123,23 @@ int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num)
return mlx5_cmd_exec_in(dev, destroy_psv, in);
}
EXPORT_SYMBOL(mlx5_core_destroy_psv);
+
+__be32 mlx5_core_get_terminate_scatter_list_mkey(struct mlx5_core_dev *dev)
+{
+ u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {};
+ u32 mkey;
+
+ if (!MLX5_CAP_GEN(dev, terminate_scatter_list_mkey))
+ return MLX5_TERMINATE_SCATTER_LIST_LKEY;
+
+ MLX5_SET(query_special_contexts_in, in, opcode,
+ MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
+ if (mlx5_cmd_exec_inout(dev, query_special_contexts, in, out))
+ return MLX5_TERMINATE_SCATTER_LIST_LKEY;
+
+ mkey = MLX5_GET(query_special_contexts_out, out,
+ terminate_scatter_list_mkey);
+ return cpu_to_be32(mkey);
+}
+EXPORT_SYMBOL(mlx5_core_get_terminate_scatter_list_mkey);
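Since the helper returns the key already in big-endian form (__be32), callers can assign it directly to a wire-format field; a hedged usage sketch, where dseg is a hypothetical data segment with big-endian members:
/* No further cpu_to_be32() is needed; when the capability or the
 * query is unavailable, the static MLX5_TERMINATE_SCATTER_LIST_LKEY
 * is returned instead.
 */
dseg->lkey = mlx5_core_get_terminate_scatter_list_mkey(mdev);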
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 2245d3b2f393..98412bd5a696 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -32,6 +32,7 @@ struct mlx5_irq {
struct mlx5_irq_pool *pool;
int refcount;
struct msi_map map;
+ u32 pool_index;
};
struct mlx5_irq_table {
@@ -125,14 +126,22 @@ out:
return ret;
}
-static void irq_release(struct mlx5_irq *irq)
+/* mlx5_system_free_irq - Free an IRQ
+ * @irq: IRQ to free
+ *
+ * Free the IRQ and other system resources such as the rmap,
+ * but do not free the mlx5_irq object or drop its mlx5 references.
+ * This split matters for the shutdown flow, where we need to
+ * clean up system resources but keep the mlx5 objects alive;
+ * see mlx5_irq_table_free_irqs().
+ */
+static void mlx5_system_free_irq(struct mlx5_irq *irq)
{
struct mlx5_irq_pool *pool = irq->pool;
#ifdef CONFIG_RFS_ACCEL
struct cpu_rmap *rmap;
#endif
- xa_erase(&pool->irqs, irq->map.index);
/* free_irq requires that affinity_hint and rmap will be cleared before
* calling it. To satisfy this requirement, we call
* irq_cpu_rmap_remove() to remove the notifier
@@ -140,14 +149,22 @@ static void irq_release(struct mlx5_irq *irq)
irq_update_affinity_hint(irq->map.virq, NULL);
#ifdef CONFIG_RFS_ACCEL
rmap = mlx5_eq_table_get_rmap(pool->dev);
- if (rmap && irq->map.index)
+ if (rmap)
irq_cpu_rmap_remove(rmap, irq->map.virq);
#endif
- free_cpumask_var(irq->mask);
free_irq(irq->map.virq, &irq->nh);
if (irq->map.index && pci_msix_can_alloc_dyn(pool->dev->pdev))
pci_msix_free_irq(pool->dev->pdev, irq->map);
+}
+
+static void irq_release(struct mlx5_irq *irq)
+{
+ struct mlx5_irq_pool *pool = irq->pool;
+
+ xa_erase(&pool->irqs, irq->pool_index);
+ mlx5_system_free_irq(irq);
+ free_cpumask_var(irq->mask);
kfree(irq);
}
@@ -231,12 +248,13 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
if (!irq)
return ERR_PTR(-ENOMEM);
if (!i || !pci_msix_can_alloc_dyn(dev->pdev)) {
- /* The vector at index 0 was already allocated.
- * Just get the irq number. If dynamic irq is not supported
- * vectors have also been allocated.
+ /* The vector at index 0 is always statically allocated. If
+ * dynamic irq is not supported, all vectors are statically
+ * allocated. In both cases, just get the irq number and set
+ * the index.
*/
irq->map.virq = pci_irq_vector(dev->pdev, i);
- irq->map.index = 0;
+ irq->map.index = i;
} else {
irq->map = pci_msix_alloc_irq_at(dev->pdev, MSI_ANY_INDEX, af_desc);
if (!irq->map.virq) {
@@ -276,11 +294,11 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
}
irq->pool = pool;
irq->refcount = 1;
- irq->map.index = i;
- err = xa_err(xa_store(&pool->irqs, irq->map.index, irq, GFP_KERNEL));
+ irq->pool_index = i;
+ err = xa_err(xa_store(&pool->irqs, irq->pool_index, irq, GFP_KERNEL));
if (err) {
mlx5_core_err(dev, "Failed to alloc xa entry for irq(%u). err = %d\n",
- irq->map.index, err);
+ irq->pool_index, err);
goto err_xa;
}
return irq;
@@ -563,17 +581,23 @@ void mlx5_irqs_release_vectors(struct mlx5_irq **irqs, int nirqs)
int mlx5_irqs_request_vectors(struct mlx5_core_dev *dev, u16 *cpus, int nirqs,
struct mlx5_irq **irqs, struct cpu_rmap **rmap)
{
+ struct mlx5_irq_table *table = mlx5_irq_table_get(dev);
+ struct mlx5_irq_pool *pool = table->pcif_pool;
struct irq_affinity_desc af_desc;
struct mlx5_irq *irq;
+ int offset = 1;
int i;
- af_desc.is_managed = 1;
+ if (!pool->xa_num_irqs.max)
+ offset = 0;
+
+ af_desc.is_managed = false;
for (i = 0; i < nirqs; i++) {
+ cpumask_clear(&af_desc.mask);
cpumask_set_cpu(cpus[i], &af_desc.mask);
- irq = mlx5_irq_request(dev, i + 1, &af_desc, rmap);
+ irq = mlx5_irq_request(dev, i + offset, &af_desc, rmap);
if (IS_ERR(irq))
break;
- cpumask_clear(&af_desc.mask);
irqs[i] = irq;
}
@@ -691,6 +715,24 @@ static void irq_pools_destroy(struct mlx5_irq_table *table)
irq_pool_free(table->pcif_pool);
}
+static void mlx5_irq_pool_free_irqs(struct mlx5_irq_pool *pool)
+{
+ struct mlx5_irq *irq;
+ unsigned long index;
+
+ xa_for_each(&pool->irqs, index, irq)
+ mlx5_system_free_irq(irq);
+}
+
+static void mlx5_irq_pools_free_irqs(struct mlx5_irq_table *table)
+{
+ if (table->sf_ctrl_pool) {
+ mlx5_irq_pool_free_irqs(table->sf_comp_pool);
+ mlx5_irq_pool_free_irqs(table->sf_ctrl_pool);
+ }
+ mlx5_irq_pool_free_irqs(table->pcif_pool);
+}
+
/* irq_table API */
int mlx5_irq_table_init(struct mlx5_core_dev *dev)
@@ -774,6 +817,17 @@ void mlx5_irq_table_destroy(struct mlx5_core_dev *dev)
pci_free_irq_vectors(dev->pdev);
}
+void mlx5_irq_table_free_irqs(struct mlx5_core_dev *dev)
+{
+ struct mlx5_irq_table *table = dev->priv.irq_table;
+
+ if (mlx5_core_is_sf(dev))
+ return;
+
+ mlx5_irq_pools_free_irqs(table);
+ pci_free_irq_vectors(dev->pdev);
+}
+
int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table)
{
if (table->sf_comp_pool)
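An IRQ now lives in two index spaces: pool_index keys the pool xarray, while map.index names the MSI-X slot, and the two differ once vectors are allocated dynamically. A hypothetical lookup helper makes the distinction explicit:
/* Pool lookups must use pool_index, never map.index: dynamically
 * allocated vectors get an arbitrary slot from pci_msix_alloc_irq_at().
 */
static struct mlx5_irq *irq_pool_find(struct mlx5_irq_pool *pool, u32 pool_index)
{
        return xa_load(&pool->irqs, pool_index);
}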
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
index e2f26d0bc615..0692363cf80e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
@@ -63,6 +63,7 @@ static void mlx5_sf_dev_remove(struct auxiliary_device *adev)
struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
struct devlink *devlink = priv_to_devlink(sf_dev->mdev);
+ mlx5_drain_health_wq(sf_dev->mdev);
devlink_unregister(devlink);
mlx5_uninit_one(sf_dev->mdev);
iounmap(sf_dev->mdev->iseg);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index 0eb9a8d7f282..0f783e7906cb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -1421,9 +1421,13 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
}
case DR_ACTION_TYP_TNL_L3_TO_L2:
{
- u8 hw_actions[DR_ACTION_CACHE_LINE_SIZE] = {};
+ u8 *hw_actions;
int ret;
+ hw_actions = kzalloc(DR_ACTION_CACHE_LINE_SIZE, GFP_KERNEL);
+ if (!hw_actions)
+ return -ENOMEM;
+
ret = mlx5dr_ste_set_action_decap_l3_list(dmn->ste_ctx,
data, data_sz,
hw_actions,
@@ -1431,6 +1435,7 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
&action->rewrite->num_of_actions);
if (ret) {
mlx5dr_dbg(dmn, "Failed creating decap l3 action list\n");
+ kfree(hw_actions);
return ret;
}
@@ -1440,6 +1445,7 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
ret = mlx5dr_ste_alloc_modify_hdr(action);
if (ret) {
mlx5dr_dbg(dmn, "Failed preparing reformat data\n");
+ kfree(hw_actions);
return ret;
}
return 0;
@@ -2129,6 +2135,11 @@ mlx5dr_action_create_aso(struct mlx5dr_domain *dmn, u32 obj_id,
return action;
}
+u32 mlx5dr_action_get_pkt_reformat_id(struct mlx5dr_action *action)
+{
+ return action->reformat->id;
+}
+
int mlx5dr_action_destroy(struct mlx5dr_action *action)
{
if (WARN_ON_ONCE(refcount_read(&action->refcount) > 1))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
index 3835ba3f4dda..1aa525e509f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
@@ -117,6 +117,8 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
caps->gvmi = MLX5_CAP_GEN(mdev, vhca_id);
caps->flex_protocols = MLX5_CAP_GEN(mdev, flex_parser_protocols);
caps->sw_format_ver = MLX5_CAP_GEN(mdev, steering_format_version);
+ caps->roce_caps.fl_rc_qp_when_roce_disabled =
+ MLX5_CAP_GEN(mdev, fl_rc_qp_when_roce_disabled);
if (MLX5_CAP_GEN(mdev, roce)) {
err = dr_cmd_query_nic_vport_roce_en(mdev, 0, &roce_en);
@@ -124,7 +126,7 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
return err;
caps->roce_caps.roce_en = roce_en;
- caps->roce_caps.fl_rc_qp_when_roce_disabled =
+ caps->roce_caps.fl_rc_qp_when_roce_disabled |=
MLX5_CAP_ROCE(mdev, fl_rc_qp_when_roce_disabled);
caps->roce_caps.fl_rc_qp_when_roce_enabled =
MLX5_CAP_ROCE(mdev, fl_rc_qp_when_roce_enabled);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c
index 13e06a6a6b22..d6947fe13d56 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c
@@ -213,6 +213,8 @@ struct mlx5dr_ptrn_mgr *mlx5dr_ptrn_mgr_create(struct mlx5dr_domain *dmn)
}
INIT_LIST_HEAD(&mgr->ptrn_list);
+ mutex_init(&mgr->modify_hdr_mutex);
+
return mgr;
free_mgr:
@@ -237,5 +239,6 @@ void mlx5dr_ptrn_mgr_destroy(struct mlx5dr_ptrn_mgr *mgr)
}
mlx5dr_icm_pool_destroy(mgr->ptrn_icm_pool);
+ mutex_destroy(&mgr->modify_hdr_mutex);
kfree(mgr);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
index 9413aaf51251..e94fbb015efa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
@@ -15,7 +15,8 @@ static u32 dr_ste_crc32_calc(const void *input_data, size_t length)
{
u32 crc = crc32(0, input_data, length);
- return (__force u32)htonl(crc);
+ return (__force u32)((crc >> 24) & 0xff) | ((crc << 8) & 0xff0000) |
+ ((crc >> 8) & 0xff00) | ((crc << 24) & 0xff000000);
}
bool mlx5dr_ste_supp_ttl_cs_recalc(struct mlx5dr_cmd_caps *caps)
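The open-coded shifts above reverse the four CRC bytes unconditionally, unlike htonl(), which is a no-op on big-endian hosts. An equivalent formulation (a sketch, assuming linux/swab.h):
#include <linux/swab.h>

/* Same result as the shift expression: byte-reverse the CRC on every
 * host endianness.
 */
static u32 dr_ste_crc32_swap(u32 crc)
{
        return swab32(crc);
}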
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
index 984653756779..cc215beb7436 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -331,8 +331,16 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
}
if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
- bool is_decap = fte->action.pkt_reformat->reformat_type ==
- MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
+ bool is_decap;
+
+ if (fte->action.pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_FW) {
+ err = -EINVAL;
+ mlx5dr_err(domain, "FW-owned reformat can't be used in SW rule\n");
+ goto free_actions;
+ }
+
+ is_decap = fte->action.pkt_reformat->reformat_type ==
+ MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
if (is_decap)
actions[num_actions++] =
@@ -661,6 +669,7 @@ static int mlx5_cmd_dr_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns
return -EINVAL;
}
+ pkt_reformat->owner = MLX5_FLOW_RESOURCE_OWNER_SW;
pkt_reformat->action.dr_action = action;
return 0;
@@ -691,6 +700,7 @@ static int mlx5_cmd_dr_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
return -EINVAL;
}
+ modify_hdr->owner = MLX5_FLOW_RESOURCE_OWNER_SW;
modify_hdr->action.dr_action = action;
return 0;
@@ -816,6 +826,19 @@ static u32 mlx5_cmd_dr_get_capabilities(struct mlx5_flow_root_namespace *ns,
return steering_caps;
}
+int mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat)
+{
+ switch (pkt_reformat->reformat_type) {
+ case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
+ case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
+ case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
+ case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
+ case MLX5_REFORMAT_TYPE_INSERT_HDR:
+ return mlx5dr_action_get_pkt_reformat_id(pkt_reformat->action.dr_action);
+ }
+ return -EOPNOTSUPP;
+}
+
bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev)
{
return mlx5dr_is_supported(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h
index d168622063d5..99a3b2eff6b8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h
@@ -38,6 +38,8 @@ struct mlx5_fs_dr_table {
bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev);
+int mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat);
+
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_dr_cmds(void);
#else
@@ -47,6 +49,11 @@ static inline const struct mlx5_flow_cmds *mlx5_fs_cmd_get_dr_cmds(void)
return NULL;
}
+static inline int mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat)
+{
+ return 0;
+}
+
static inline bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev)
{
return false;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
index 9afd268a2573..d1c04f43d86d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
@@ -150,6 +150,8 @@ mlx5dr_action_create_dest_match_range(struct mlx5dr_domain *dmn,
int mlx5dr_action_destroy(struct mlx5dr_action *action);
+u32 mlx5dr_action_get_pkt_reformat_id(struct mlx5dr_action *action);
+
int mlx5dr_definer_get(struct mlx5dr_domain *dmn, u16 format_id,
u8 *dw_selectors, u8 *byte_selectors,
u8 *match_mask, u32 *definer_id);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/thermal.c b/drivers/net/ethernet/mellanox/mlx5/core/thermal.c
index e47fa6fb836f..20bb5eb266c1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/thermal.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/thermal.c
@@ -45,7 +45,7 @@ static int mlx5_thermal_get_mtmp_temp(struct mlx5_core_dev *mdev, u32 id, int *p
static int mlx5_thermal_get_temp(struct thermal_zone_device *tzdev,
int *p_temp)
{
- struct mlx5_thermal *thermal = tzdev->devdata;
+ struct mlx5_thermal *thermal = thermal_zone_device_priv(tzdev);
struct mlx5_core_dev *mdev = thermal->mdev;
int err;
@@ -81,12 +81,13 @@ int mlx5_thermal_init(struct mlx5_core_dev *mdev)
return -ENOMEM;
thermal->mdev = mdev;
- thermal->tzdev = thermal_zone_device_register(data,
- MLX5_THERMAL_NUM_TRIPS,
- MLX5_THERMAL_TRIP_MASK,
- thermal,
- &mlx5_thermal_ops,
- NULL, 0, MLX5_THERMAL_POLL_INT_MSEC);
+ thermal->tzdev = thermal_zone_device_register_with_trips(data,
+ NULL,
+ MLX5_THERMAL_NUM_TRIPS,
+ MLX5_THERMAL_TRIP_MASK,
+ thermal,
+ &mlx5_thermal_ops,
+ NULL, 0, MLX5_THERMAL_POLL_INT_MSEC);
if (IS_ERR(thermal->tzdev)) {
dev_err(mdev->device, "Failed to register thermal zone device (%s) %ld\n",
data, PTR_ERR(thermal->tzdev));
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
index afa3b92a6905..0d5a41a2ae01 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
@@ -245,12 +245,6 @@ static bool mlxbf_gige_rx_packet(struct mlxbf_gige *priv, int *rx_pkts)
skb = priv->rx_skb[rx_pi_rem];
- skb_put(skb, datalen);
-
- skb->ip_summed = CHECKSUM_NONE; /* device did not checksum packet */
-
- skb->protocol = eth_type_trans(skb, netdev);
-
/* Alloc another RX SKB for this same index */
rx_skb = mlxbf_gige_alloc_skb(priv, MLXBF_GIGE_DEFAULT_BUF_SZ,
&rx_buf_dma, DMA_FROM_DEVICE);
@@ -259,6 +253,13 @@ static bool mlxbf_gige_rx_packet(struct mlxbf_gige *priv, int *rx_pkts)
priv->rx_skb[rx_pi_rem] = rx_skb;
dma_unmap_single(priv->dev, *rx_wqe_addr,
MLXBF_GIGE_DEFAULT_BUF_SZ, DMA_FROM_DEVICE);
+
+ skb_put(skb, datalen);
+
+ skb->ip_summed = CHECKSUM_NONE; /* device did not checksum packet */
+
+ skb->protocol = eth_type_trans(skb, netdev);
+
*rx_wqe_addr = rx_buf_dma;
} else if (rx_cqe & MLXBF_GIGE_RX_CQE_PKT_STATUS_MAC_ERR) {
priv->stats.rx_mac_errors++;
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index 2b6e046e1d10..ee2698698d71 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -1039,6 +1039,16 @@ static int lan966x_reset_switch(struct lan966x *lan966x)
reset_control_reset(switch_reset);
+ /* Don't reinitialize the switch core if it is already initialized. If
+ * it is initialized twice, some pointers inside the HW queue system
+ * get corrupted; after a while the queue system fills up and no
+ * traffic passes through the switch. The issue is seen when loading
+ * and unloading the driver and sending traffic through the switch.
+ */
+ if (lan_rd(lan966x, SYS_RESET_CFG) & SYS_RESET_CFG_CORE_ENA)
+ return 0;
+
lan_wr(SYS_RESET_CFG_CORE_ENA_SET(0), lan966x, SYS_RESET_CFG);
lan_wr(SYS_RAM_INIT_RAM_INIT_SET(1), lan966x, SYS_RAM_INIT);
ret = readx_poll_timeout(lan966x_ram_init, lan966x,
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 06d6292e09b3..d907727c7b7a 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -1279,8 +1279,6 @@ static void mana_poll_tx_cq(struct mana_cq *cq)
if (comp_read < 1)
return;
- apc->eth_stats.tx_cqes = comp_read;
-
for (i = 0; i < comp_read; i++) {
struct mana_tx_comp_oob *cqe_oob;
@@ -1363,8 +1361,6 @@ static void mana_poll_tx_cq(struct mana_cq *cq)
WARN_ON_ONCE(1);
cq->work_done = pkt_transmitted;
-
- apc->eth_stats.tx_cqes -= pkt_transmitted;
}
static void mana_post_pkt_rxq(struct mana_rxq *rxq)
@@ -1626,15 +1622,11 @@ static void mana_poll_rx_cq(struct mana_cq *cq)
{
struct gdma_comp *comp = cq->gdma_comp_buf;
struct mana_rxq *rxq = cq->rxq;
- struct mana_port_context *apc;
int comp_read, i;
- apc = netdev_priv(rxq->ndev);
-
comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER);
WARN_ON_ONCE(comp_read > CQE_POLLING_BUFFER);
- apc->eth_stats.rx_cqes = comp_read;
rxq->xdp_flush = false;
for (i = 0; i < comp_read; i++) {
@@ -1646,8 +1638,6 @@ static void mana_poll_rx_cq(struct mana_cq *cq)
return;
mana_process_rx_cqe(rxq, cq, &comp[i]);
-
- apc->eth_stats.rx_cqes--;
}
if (rxq->xdp_flush)
diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
index a64c81410dc1..0dc78679f620 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
@@ -13,11 +13,9 @@ static const struct {
} mana_eth_stats[] = {
{"stop_queue", offsetof(struct mana_ethtool_stats, stop_queue)},
{"wake_queue", offsetof(struct mana_ethtool_stats, wake_queue)},
- {"tx_cqes", offsetof(struct mana_ethtool_stats, tx_cqes)},
{"tx_cq_err", offsetof(struct mana_ethtool_stats, tx_cqe_err)},
{"tx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
tx_cqe_unknown_type)},
- {"rx_cqes", offsetof(struct mana_ethtool_stats, rx_cqes)},
{"rx_coalesced_err", offsetof(struct mana_ethtool_stats,
rx_coalesced_err)},
{"rx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
diff --git a/drivers/net/ethernet/netronome/nfp/nic/main.h b/drivers/net/ethernet/netronome/nfp/nic/main.h
index 094374df42b8..38b8b10b03cd 100644
--- a/drivers/net/ethernet/netronome/nfp/nic/main.h
+++ b/drivers/net/ethernet/netronome/nfp/nic/main.h
@@ -8,7 +8,7 @@
#ifdef CONFIG_DCB
/* DCB feature definitions */
-#define NFP_NET_MAX_DSCP 4
+#define NFP_NET_MAX_DSCP 64
#define NFP_NET_MAX_TC IEEE_8021QAZ_MAX_TCS
#define NFP_NET_MAX_PRIO 8
#define NFP_DCB_CFG_STRIDE 256
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 0605d1ee490d..7a549b834e97 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -6138,6 +6138,7 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
return 0;
out_error:
+ nv_mgmt_release_sema(dev);
if (phystate_orig)
writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
out_freering:
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 2edd6bf64a3c..7776d3bdd459 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -1903,7 +1903,7 @@ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats)
{
u32 i;
- if (!cdev) {
+ if (!cdev || cdev->recov_in_prog) {
memset(stats, 0, sizeof(*stats));
return;
}
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index f9931ecb7baa..4d83ceebdc49 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -269,6 +269,10 @@ struct qede_dev {
#define QEDE_ERR_WARN 3
struct qede_dump_info dump_info;
+ struct delayed_work periodic_task;
+ unsigned long stats_coal_ticks;
+ u32 stats_coal_usecs;
+ spinlock_t stats_lock; /* lock for vport stats access */
};
enum QEDE_STATE {
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 374a86b875a3..95820cf1cd6c 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -429,6 +429,8 @@ static void qede_get_ethtool_stats(struct net_device *dev,
}
}
+ spin_lock(&edev->stats_lock);
+
for (i = 0; i < QEDE_NUM_STATS; i++) {
if (qede_is_irrelevant_stat(edev, i))
continue;
@@ -438,6 +440,8 @@ static void qede_get_ethtool_stats(struct net_device *dev,
buf++;
}
+ spin_unlock(&edev->stats_lock);
+
__qede_unlock(edev);
}
@@ -829,6 +833,7 @@ out:
coal->rx_coalesce_usecs = rx_coal;
coal->tx_coalesce_usecs = tx_coal;
+ coal->stats_block_coalesce_usecs = edev->stats_coal_usecs;
return rc;
}
@@ -842,6 +847,19 @@ int qede_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal,
int i, rc = 0;
u16 rxc, txc;
+ if (edev->stats_coal_usecs != coal->stats_block_coalesce_usecs) {
+ edev->stats_coal_usecs = coal->stats_block_coalesce_usecs;
+ if (edev->stats_coal_usecs) {
+ edev->stats_coal_ticks = usecs_to_jiffies(edev->stats_coal_usecs);
+ schedule_delayed_work(&edev->periodic_task, 0);
+
+ DP_INFO(edev, "Configured stats coal ticks=%lu jiffies\n",
+ edev->stats_coal_ticks);
+ } else {
+ cancel_delayed_work_sync(&edev->periodic_task);
+ }
+ }
+
if (!netif_running(dev)) {
DP_INFO(edev, "Interface is down\n");
return -EINVAL;
@@ -2252,7 +2270,8 @@ out:
}
static const struct ethtool_ops qede_ethtool_ops = {
- .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_STATS_BLOCK_USECS,
.get_link_ksettings = qede_get_link_ksettings,
.set_link_ksettings = qede_set_link_ksettings,
.get_drvinfo = qede_get_drvinfo,
@@ -2303,7 +2322,8 @@ static const struct ethtool_ops qede_ethtool_ops = {
};
static const struct ethtool_ops qede_vf_ethtool_ops = {
- .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_STATS_BLOCK_USECS,
.get_link_ksettings = qede_get_link_ksettings,
.get_drvinfo = qede_get_drvinfo,
.get_msglevel = qede_get_msglevel,
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 4c6c685820e3..4b004a728190 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -307,6 +307,8 @@ void qede_fill_by_demand_stats(struct qede_dev *edev)
edev->ops->get_vport_stats(edev->cdev, &stats);
+ spin_lock(&edev->stats_lock);
+
p_common->no_buff_discards = stats.common.no_buff_discards;
p_common->packet_too_big_discard = stats.common.packet_too_big_discard;
p_common->ttl0_discard = stats.common.ttl0_discard;
@@ -404,6 +406,8 @@ void qede_fill_by_demand_stats(struct qede_dev *edev)
p_ah->tx_1519_to_max_byte_packets =
stats.ah.tx_1519_to_max_byte_packets;
}
+
+ spin_unlock(&edev->stats_lock);
}
static void qede_get_stats64(struct net_device *dev,
@@ -412,9 +416,10 @@ static void qede_get_stats64(struct net_device *dev,
struct qede_dev *edev = netdev_priv(dev);
struct qede_stats_common *p_common;
- qede_fill_by_demand_stats(edev);
p_common = &edev->stats.common;
+ spin_lock(&edev->stats_lock);
+
stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
p_common->rx_bcast_pkts;
stats->tx_packets = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
@@ -434,6 +439,8 @@ static void qede_get_stats64(struct net_device *dev,
stats->collisions = edev->stats.bb.tx_total_collisions;
stats->rx_crc_errors = p_common->rx_crc_errors;
stats->rx_frame_errors = p_common->rx_align_errors;
+
+ spin_unlock(&edev->stats_lock);
}
#ifdef CONFIG_QED_SRIOV
@@ -1063,6 +1070,23 @@ static void qede_unlock(struct qede_dev *edev)
rtnl_unlock();
}
+static void qede_periodic_task(struct work_struct *work)
+{
+ struct qede_dev *edev = container_of(work, struct qede_dev,
+ periodic_task.work);
+
+ qede_fill_by_demand_stats(edev);
+ schedule_delayed_work(&edev->periodic_task, edev->stats_coal_ticks);
+}
+
+static void qede_init_periodic_task(struct qede_dev *edev)
+{
+ INIT_DELAYED_WORK(&edev->periodic_task, qede_periodic_task);
+ spin_lock_init(&edev->stats_lock);
+ edev->stats_coal_usecs = USEC_PER_SEC;
+ edev->stats_coal_ticks = usecs_to_jiffies(USEC_PER_SEC);
+}
+
static void qede_sp_task(struct work_struct *work)
{
struct qede_dev *edev = container_of(work, struct qede_dev,
@@ -1082,6 +1106,7 @@ static void qede_sp_task(struct work_struct *work)
*/
if (test_and_clear_bit(QEDE_SP_RECOVERY, &edev->sp_flags)) {
+ cancel_delayed_work_sync(&edev->periodic_task);
#ifdef CONFIG_QED_SRIOV
/* SRIOV must be disabled outside the lock to avoid a deadlock.
* The recovery of the active VFs is currently not supported.
@@ -1272,6 +1297,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
*/
INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
mutex_init(&edev->qede_lock);
+ qede_init_periodic_task(edev);
rc = register_netdev(edev->ndev);
if (rc) {
@@ -1296,6 +1322,11 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
edev->rx_copybreak = QEDE_RX_HDR_SIZE;
qede_log_probe(edev);
+
+ /* retain user config (for example - after recovery) */
+ if (edev->stats_coal_usecs)
+ schedule_delayed_work(&edev->periodic_task, 0);
+
return 0;
err4:
@@ -1364,6 +1395,7 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
unregister_netdev(ndev);
cancel_delayed_work_sync(&edev->sp_task);
+ cancel_delayed_work_sync(&edev->periodic_task);
edev->ops->common->set_power_state(cdev, PCI_D0);
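The periodic statistics task is the usual self-rearming delayed-work pattern; a minimal sketch with illustrative names (my_dev, refresh_stats):
/* The work re-queues itself every 'ticks' jiffies until someone calls
 * cancel_delayed_work_sync(), as the recovery and remove paths do.
 */
struct my_dev {
        struct delayed_work periodic;
        unsigned long ticks;
};

static void refresh_stats(struct my_dev *d);        /* hypothetical */

static void periodic_fn(struct work_struct *work)
{
        struct my_dev *d = container_of(work, struct my_dev, periodic.work);

        refresh_stats(d);
        schedule_delayed_work(&d->periodic, d->ticks);
}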
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index c865a4be05ee..4a1b94e5a8ea 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -582,8 +582,7 @@ qcaspi_spi_thread(void *data)
while (!kthread_should_stop()) {
set_current_state(TASK_INTERRUPTIBLE);
if ((qca->intr_req == qca->intr_svc) &&
- (qca->txr.skb[qca->txr.head] == NULL) &&
- (qca->sync == QCASPI_SYNC_READY))
+ !qca->txr.skb[qca->txr.head])
schedule();
set_current_state(TASK_RUNNING);
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index a7e376e7e689..4b19803a7dd0 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -616,10 +616,10 @@ struct rtl8169_private {
struct work_struct work;
} wk;
- spinlock_t config25_lock;
- spinlock_t mac_ocp_lock;
+ raw_spinlock_t config25_lock;
+ raw_spinlock_t mac_ocp_lock;
- spinlock_t cfg9346_usage_lock;
+ raw_spinlock_t cfg9346_usage_lock;
int cfg9346_usage_count;
unsigned supports_gmii:1;
@@ -671,20 +671,20 @@ static void rtl_lock_config_regs(struct rtl8169_private *tp)
{
unsigned long flags;
- spin_lock_irqsave(&tp->cfg9346_usage_lock, flags);
+ raw_spin_lock_irqsave(&tp->cfg9346_usage_lock, flags);
if (!--tp->cfg9346_usage_count)
RTL_W8(tp, Cfg9346, Cfg9346_Lock);
- spin_unlock_irqrestore(&tp->cfg9346_usage_lock, flags);
+ raw_spin_unlock_irqrestore(&tp->cfg9346_usage_lock, flags);
}
static void rtl_unlock_config_regs(struct rtl8169_private *tp)
{
unsigned long flags;
- spin_lock_irqsave(&tp->cfg9346_usage_lock, flags);
+ raw_spin_lock_irqsave(&tp->cfg9346_usage_lock, flags);
if (!tp->cfg9346_usage_count++)
RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
- spin_unlock_irqrestore(&tp->cfg9346_usage_lock, flags);
+ raw_spin_unlock_irqrestore(&tp->cfg9346_usage_lock, flags);
}
static void rtl_pci_commit(struct rtl8169_private *tp)
@@ -698,10 +698,10 @@ static void rtl_mod_config2(struct rtl8169_private *tp, u8 clear, u8 set)
unsigned long flags;
u8 val;
- spin_lock_irqsave(&tp->config25_lock, flags);
+ raw_spin_lock_irqsave(&tp->config25_lock, flags);
val = RTL_R8(tp, Config2);
RTL_W8(tp, Config2, (val & ~clear) | set);
- spin_unlock_irqrestore(&tp->config25_lock, flags);
+ raw_spin_unlock_irqrestore(&tp->config25_lock, flags);
}
static void rtl_mod_config5(struct rtl8169_private *tp, u8 clear, u8 set)
@@ -709,10 +709,10 @@ static void rtl_mod_config5(struct rtl8169_private *tp, u8 clear, u8 set)
unsigned long flags;
u8 val;
- spin_lock_irqsave(&tp->config25_lock, flags);
+ raw_spin_lock_irqsave(&tp->config25_lock, flags);
val = RTL_R8(tp, Config5);
RTL_W8(tp, Config5, (val & ~clear) | set);
- spin_unlock_irqrestore(&tp->config25_lock, flags);
+ raw_spin_unlock_irqrestore(&tp->config25_lock, flags);
}
static bool rtl_is_8125(struct rtl8169_private *tp)
@@ -899,9 +899,9 @@ static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
{
unsigned long flags;
- spin_lock_irqsave(&tp->mac_ocp_lock, flags);
+ raw_spin_lock_irqsave(&tp->mac_ocp_lock, flags);
__r8168_mac_ocp_write(tp, reg, data);
- spin_unlock_irqrestore(&tp->mac_ocp_lock, flags);
+ raw_spin_unlock_irqrestore(&tp->mac_ocp_lock, flags);
}
static u16 __r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
@@ -919,9 +919,9 @@ static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
unsigned long flags;
u16 val;
- spin_lock_irqsave(&tp->mac_ocp_lock, flags);
+ raw_spin_lock_irqsave(&tp->mac_ocp_lock, flags);
val = __r8168_mac_ocp_read(tp, reg);
- spin_unlock_irqrestore(&tp->mac_ocp_lock, flags);
+ raw_spin_unlock_irqrestore(&tp->mac_ocp_lock, flags);
return val;
}
@@ -932,10 +932,10 @@ static void r8168_mac_ocp_modify(struct rtl8169_private *tp, u32 reg, u16 mask,
unsigned long flags;
u16 data;
- spin_lock_irqsave(&tp->mac_ocp_lock, flags);
+ raw_spin_lock_irqsave(&tp->mac_ocp_lock, flags);
data = __r8168_mac_ocp_read(tp, reg);
__r8168_mac_ocp_write(tp, reg, (data & ~mask) | set);
- spin_unlock_irqrestore(&tp->mac_ocp_lock, flags);
+ raw_spin_unlock_irqrestore(&tp->mac_ocp_lock, flags);
}
/* Work around a hw issue with RTL8168g PHY, the quirk disables
@@ -1420,14 +1420,14 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
r8168_mac_ocp_modify(tp, 0xc0b6, BIT(0), 0);
}
- spin_lock_irqsave(&tp->config25_lock, flags);
+ raw_spin_lock_irqsave(&tp->config25_lock, flags);
for (i = 0; i < tmp; i++) {
options = RTL_R8(tp, cfg[i].reg) & ~cfg[i].mask;
if (wolopts & cfg[i].opt)
options |= cfg[i].mask;
RTL_W8(tp, cfg[i].reg, options);
}
- spin_unlock_irqrestore(&tp->config25_lock, flags);
+ raw_spin_unlock_irqrestore(&tp->config25_lock, flags);
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
@@ -5179,9 +5179,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->eee_adv = -1;
tp->ocp_base = OCP_STD_PHY_BASE;
- spin_lock_init(&tp->cfg9346_usage_lock);
- spin_lock_init(&tp->config25_lock);
- spin_lock_init(&tp->mac_ocp_lock);
+ raw_spin_lock_init(&tp->cfg9346_usage_lock);
+ raw_spin_lock_init(&tp->config25_lock);
+ raw_spin_lock_init(&tp->mac_ocp_lock);
dev->tstats = devm_netdev_alloc_pcpu_stats(&pdev->dev,
struct pcpu_sw_netstats);
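The lock conversion is mechanical: raw_spin_lock_irqsave() has the same shape as spin_lock_irqsave() but stays a spinning lock on PREEMPT_RT, which short register accesses from non-sleepable context require. A sketch with an illustrative lock:
static DEFINE_RAW_SPINLOCK(demo_lock);        /* illustrative */

static void demo_reg_access(void)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&demo_lock, flags);
        /* short, non-sleeping hardware access */
        raw_spin_unlock_irqrestore(&demo_lock, flags);
}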
diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
index 29afaddb598d..fa6d6202b129 100644
--- a/drivers/net/ethernet/renesas/rswitch.c
+++ b/drivers/net/ethernet/renesas/rswitch.c
@@ -347,17 +347,6 @@ out:
return -ENOMEM;
}
-static int rswitch_gwca_ts_queue_alloc(struct rswitch_private *priv)
-{
- struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
-
- gq->ring_size = TS_RING_SIZE;
- gq->ts_ring = dma_alloc_coherent(&priv->pdev->dev,
- sizeof(struct rswitch_ts_desc) *
- (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
- return !gq->ts_ring ? -ENOMEM : 0;
-}
-
static void rswitch_desc_set_dptr(struct rswitch_desc *desc, dma_addr_t addr)
{
desc->dptrl = cpu_to_le32(lower_32_bits(addr));
@@ -533,6 +522,28 @@ static void rswitch_gwca_linkfix_free(struct rswitch_private *priv)
gwca->linkfix_table = NULL;
}
+static int rswitch_gwca_ts_queue_alloc(struct rswitch_private *priv)
+{
+ struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
+ struct rswitch_ts_desc *desc;
+
+ gq->ring_size = TS_RING_SIZE;
+ gq->ts_ring = dma_alloc_coherent(&priv->pdev->dev,
+ sizeof(struct rswitch_ts_desc) *
+ (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
+
+ if (!gq->ts_ring)
+ return -ENOMEM;
+
+ rswitch_gwca_ts_queue_fill(priv, 0, TS_RING_SIZE);
+ desc = &gq->ts_ring[gq->ring_size];
+ desc->desc.die_dt = DT_LINKFIX;
+ rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
+ INIT_LIST_HEAD(&priv->gwca.ts_info_list);
+
+ return 0;
+}
+
static struct rswitch_gwca_queue *rswitch_gwca_get(struct rswitch_private *priv)
{
struct rswitch_gwca_queue *gq;
@@ -1485,7 +1496,7 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - 1) {
netif_stop_subqueue(ndev, 0);
- return ret;
+ return NETDEV_TX_BUSY;
}
if (skb_put_padto(skb, ETH_ZLEN))
@@ -1780,9 +1791,6 @@ static int rswitch_init(struct rswitch_private *priv)
if (err < 0)
goto err_ts_queue_alloc;
- rswitch_gwca_ts_queue_fill(priv, 0, TS_RING_SIZE);
- INIT_LIST_HEAD(&priv->gwca.ts_info_list);
-
for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
err = rswitch_device_alloc(priv, i);
if (err < 0) {
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index d30459dbfe8f..b63e47af6365 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -2950,7 +2950,7 @@ static u32 efx_ef10_extract_event_ts(efx_qword_t *event)
return tstamp;
}
-static void
+static int
efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
struct efx_nic *efx = channel->efx;
@@ -2958,13 +2958,14 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
unsigned int tx_ev_desc_ptr;
unsigned int tx_ev_q_label;
unsigned int tx_ev_type;
+ int work_done;
u64 ts_part;
if (unlikely(READ_ONCE(efx->reset_pending)))
- return;
+ return 0;
if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT)))
- return;
+ return 0;
/* Get the transmit queue */
tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL);
@@ -2973,8 +2974,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
if (!tx_queue->timestamping) {
/* Transmit completion */
tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX);
- efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask);
- return;
+ return efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask);
}
/* Transmit timestamps are only available for 8XXX series. They result
@@ -3000,6 +3000,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
* fields in the event.
*/
tx_ev_type = EFX_QWORD_FIELD(*event, ESF_EZ_TX_SOFT1);
+ work_done = 0;
switch (tx_ev_type) {
case TX_TIMESTAMP_EVENT_TX_EV_COMPLETION:
@@ -3016,6 +3017,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
tx_queue->completed_timestamp_major = ts_part;
efx_xmit_done_single(tx_queue);
+ work_done = 1;
break;
default:
@@ -3026,6 +3028,8 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
EFX_QWORD_VAL(*event));
break;
}
+
+ return work_done;
}
static void
@@ -3081,13 +3085,16 @@ static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel,
}
}
+#define EFX_NAPI_MAX_TX 512
+
static int efx_ef10_ev_process(struct efx_channel *channel, int quota)
{
struct efx_nic *efx = channel->efx;
efx_qword_t event, *p_event;
unsigned int read_ptr;
- int ev_code;
+ int spent_tx = 0;
int spent = 0;
+ int ev_code;
if (quota <= 0)
return spent;
@@ -3126,7 +3133,11 @@ static int efx_ef10_ev_process(struct efx_channel *channel, int quota)
}
break;
case ESE_DZ_EV_CODE_TX_EV:
- efx_ef10_handle_tx_event(channel, &event);
+ spent_tx += efx_ef10_handle_tx_event(channel, &event);
+ if (spent_tx >= EFX_NAPI_MAX_TX) {
+ spent = quota;
+ goto out;
+ }
break;
case ESE_DZ_EV_CODE_DRIVER_EV:
efx_ef10_handle_driver_event(channel, &event);
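Capping TX completions per poll keeps one busy TX queue from eating the entire NAPI budget. The underlying NAPI contract, sketched with hypothetical names: returning the full budget means "more work remains", so the core re-polls instead of re-arming interrupts:
static int demo_process_events(struct napi_struct *napi, int budget);        /* hypothetical */

static int demo_poll(struct napi_struct *napi, int budget)
{
        int spent = demo_process_events(napi, budget);

        if (spent < budget)
                napi_complete_done(napi, spent);
        return spent;
}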
diff --git a/drivers/net/ethernet/sfc/ef100_netdev.c b/drivers/net/ethernet/sfc/ef100_netdev.c
index d916877b5a9a..be395cd8770b 100644
--- a/drivers/net/ethernet/sfc/ef100_netdev.c
+++ b/drivers/net/ethernet/sfc/ef100_netdev.c
@@ -378,7 +378,9 @@ int ef100_probe_netdev(struct efx_probe_data *probe_data)
efx->net_dev = net_dev;
SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev);
- net_dev->features |= efx->type->offload_features;
+ /* enable all supported features except rx-fcs and rx-all */
+ net_dev->features |= efx->type->offload_features &
+ ~(NETIF_F_RXFCS | NETIF_F_RXALL);
net_dev->hw_features |= efx->type->offload_features;
net_dev->hw_enc_features |= efx->type->offload_features;
net_dev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_SG |
diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c
index 4dc643b0d2db..7adde9639c8a 100644
--- a/drivers/net/ethernet/sfc/ef100_nic.c
+++ b/drivers/net/ethernet/sfc/ef100_nic.c
@@ -253,6 +253,8 @@ static void ef100_ev_read_ack(struct efx_channel *channel)
efx_reg(channel->efx, ER_GZ_EVQ_INT_PRIME));
}
+#define EFX_NAPI_MAX_TX 512
+
static int ef100_ev_process(struct efx_channel *channel, int quota)
{
struct efx_nic *efx = channel->efx;
@@ -260,6 +262,7 @@ static int ef100_ev_process(struct efx_channel *channel, int quota)
bool evq_phase, old_evq_phase;
unsigned int read_ptr;
efx_qword_t *p_event;
+ int spent_tx = 0;
int spent = 0;
bool ev_phase;
int ev_type;
@@ -295,7 +298,9 @@ static int ef100_ev_process(struct efx_channel *channel, int quota)
efx_mcdi_process_event(channel, p_event);
break;
case ESE_GZ_EF100_EV_TX_COMPLETION:
- ef100_ev_tx(channel, p_event);
+ spent_tx += ef100_ev_tx(channel, p_event);
+ if (spent_tx >= EFX_NAPI_MAX_TX)
+ spent = quota;
break;
case ESE_GZ_EF100_EV_DRIVER:
netif_info(efx, drv, efx->net_dev,
diff --git a/drivers/net/ethernet/sfc/ef100_tx.c b/drivers/net/ethernet/sfc/ef100_tx.c
index 29ffaf35559d..849e5555bd12 100644
--- a/drivers/net/ethernet/sfc/ef100_tx.c
+++ b/drivers/net/ethernet/sfc/ef100_tx.c
@@ -346,7 +346,7 @@ void ef100_tx_write(struct efx_tx_queue *tx_queue)
ef100_tx_push_buffers(tx_queue);
}
-void ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event)
+int ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event)
{
unsigned int tx_done =
EFX_QWORD_FIELD(*p_event, ESF_GZ_EV_TXCMPL_NUM_DESC);
@@ -357,7 +357,7 @@ void ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event)
unsigned int tx_index = (tx_queue->read_count + tx_done - 1) &
tx_queue->ptr_mask;
- efx_xmit_done(tx_queue, tx_index);
+ return efx_xmit_done(tx_queue, tx_index);
}
/* Add a socket buffer to a TX queue
diff --git a/drivers/net/ethernet/sfc/ef100_tx.h b/drivers/net/ethernet/sfc/ef100_tx.h
index e9e11540fcde..d9a0819c5a72 100644
--- a/drivers/net/ethernet/sfc/ef100_tx.h
+++ b/drivers/net/ethernet/sfc/ef100_tx.h
@@ -20,7 +20,7 @@ void ef100_tx_init(struct efx_tx_queue *tx_queue);
void ef100_tx_write(struct efx_tx_queue *tx_queue);
unsigned int ef100_tx_max_skb_descs(struct efx_nic *efx);
-void ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event);
+int ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event);
netdev_tx_t ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
int __ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
index fcea3ea809d7..41b33a75333c 100644
--- a/drivers/net/ethernet/sfc/efx_channels.c
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -301,6 +301,7 @@ int efx_probe_interrupts(struct efx_nic *efx)
efx->tx_channel_offset = 0;
efx->n_xdp_channels = 0;
efx->xdp_channel_offset = efx->n_channels;
+ efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
rc = pci_enable_msi(efx->pci_dev);
if (rc == 0) {
efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
@@ -322,6 +323,7 @@ int efx_probe_interrupts(struct efx_nic *efx)
efx->tx_channel_offset = efx_separate_tx_channels ? 1 : 0;
efx->n_xdp_channels = 0;
efx->xdp_channel_offset = efx->n_channels;
+ efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
efx->legacy_irq = efx->pci_dev->irq;
}
diff --git a/drivers/net/ethernet/sfc/efx_devlink.c b/drivers/net/ethernet/sfc/efx_devlink.c
index 381b805659d3..ef9971cbb695 100644
--- a/drivers/net/ethernet/sfc/efx_devlink.c
+++ b/drivers/net/ethernet/sfc/efx_devlink.c
@@ -171,9 +171,14 @@ static int efx_devlink_info_nvram_partition(struct efx_nic *efx,
rc = efx_mcdi_nvram_metadata(efx, partition_type, NULL, version, NULL,
0);
+
+ /* If the partition does not exist, that is not an error. */
+ if (rc == -ENOENT)
+ return 0;
+
if (rc) {
- netif_err(efx, drv, efx->net_dev, "mcdi nvram %s: failed\n",
- version_name);
+ netif_err(efx, drv, efx->net_dev, "mcdi nvram %s: failed (rc=%d)\n",
+ version_name, rc);
return rc;
}
@@ -187,36 +192,33 @@ static int efx_devlink_info_nvram_partition(struct efx_nic *efx,
static int efx_devlink_info_stored_versions(struct efx_nic *efx,
struct devlink_info_req *req)
{
- int rc;
-
- rc = efx_devlink_info_nvram_partition(efx, req,
- NVRAM_PARTITION_TYPE_BUNDLE,
- DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID);
- if (rc)
- return rc;
-
- rc = efx_devlink_info_nvram_partition(efx, req,
- NVRAM_PARTITION_TYPE_MC_FIRMWARE,
- DEVLINK_INFO_VERSION_GENERIC_FW_MGMT);
- if (rc)
- return rc;
-
- rc = efx_devlink_info_nvram_partition(efx, req,
- NVRAM_PARTITION_TYPE_SUC_FIRMWARE,
- EFX_DEVLINK_INFO_VERSION_FW_MGMT_SUC);
- if (rc)
- return rc;
-
- rc = efx_devlink_info_nvram_partition(efx, req,
- NVRAM_PARTITION_TYPE_EXPANSION_ROM,
- EFX_DEVLINK_INFO_VERSION_FW_EXPROM);
- if (rc)
- return rc;
+ int err;
- rc = efx_devlink_info_nvram_partition(efx, req,
- NVRAM_PARTITION_TYPE_EXPANSION_UEFI,
- EFX_DEVLINK_INFO_VERSION_FW_UEFI);
- return rc;
+ /* We do not care about the specific error here, only whether one
+ * happened. Each call below reports its specific error via system
+ * messages; if any of them failed, we report that through
+ * extack.
+ */
+ err = efx_devlink_info_nvram_partition(efx, req,
+ NVRAM_PARTITION_TYPE_BUNDLE,
+ DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID);
+
+ err |= efx_devlink_info_nvram_partition(efx, req,
+ NVRAM_PARTITION_TYPE_MC_FIRMWARE,
+ DEVLINK_INFO_VERSION_GENERIC_FW_MGMT);
+
+ err |= efx_devlink_info_nvram_partition(efx, req,
+ NVRAM_PARTITION_TYPE_SUC_FIRMWARE,
+ EFX_DEVLINK_INFO_VERSION_FW_MGMT_SUC);
+
+ err |= efx_devlink_info_nvram_partition(efx, req,
+ NVRAM_PARTITION_TYPE_EXPANSION_ROM,
+ EFX_DEVLINK_INFO_VERSION_FW_EXPROM);
+
+ err |= efx_devlink_info_nvram_partition(efx, req,
+ NVRAM_PARTITION_TYPE_EXPANSION_UEFI,
+ EFX_DEVLINK_INFO_VERSION_FW_UEFI);
+ return err;
}
#define EFX_VER_FLAG(_f) \
@@ -587,27 +589,20 @@ static int efx_devlink_info_get(struct devlink *devlink,
{
struct efx_devlink *devlink_private = devlink_priv(devlink);
struct efx_nic *efx = devlink_private->efx;
- int rc;
+ int err;
- /* Several different MCDI commands are used. We report first error
- * through extack returning at that point. Specific error
- * information via system messages.
+ /* Several different MCDI commands are used. We report through extack
+ * whether any errors happened. Specific error information is given
+ * via system messages inside the calls.
*/
- rc = efx_devlink_info_board_cfg(efx, req);
- if (rc) {
- NL_SET_ERR_MSG_MOD(extack, "Getting board info failed");
- return rc;
- }
- rc = efx_devlink_info_stored_versions(efx, req);
- if (rc) {
- NL_SET_ERR_MSG_MOD(extack, "Getting stored versions failed");
- return rc;
- }
- rc = efx_devlink_info_running_versions(efx, req);
- if (rc) {
- NL_SET_ERR_MSG_MOD(extack, "Getting running versions failed");
- return rc;
- }
+ err = efx_devlink_info_board_cfg(efx, req);
+
+ err |= efx_devlink_info_stored_versions(efx, req);
+
+ err |= efx_devlink_info_running_versions(efx, req);
+
+ if (err)
+ NL_SET_ERR_MSG_MOD(extack, "Errors when getting device info. Check system messages");
return 0;
}
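
The reworked stored-versions path collects status across independent partition queries and reports once. Below is a minimal sketch of that pattern, where probe_a() and probe_b() are hypothetical stand-ins for the efx_devlink_info_nvram_partition() calls; note that OR-ing distinct negative errnos preserves only the fact of failure, which is all the callers above rely on.

int probe_a(void);	/* hypothetical stand-in, logs its own specifics */
int probe_b(void);	/* hypothetical stand-in, logs its own specifics */

static int report_all(void)
{
	int err;

	err = probe_a();
	err |= probe_b();	/* keep going even if probe_a() failed */

	return err;		/* zero iff every probe succeeded */
}
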
diff --git a/drivers/net/ethernet/sfc/siena/efx_channels.c b/drivers/net/ethernet/sfc/siena/efx_channels.c
index 06ed74994e36..1776f7f8a7a9 100644
--- a/drivers/net/ethernet/sfc/siena/efx_channels.c
+++ b/drivers/net/ethernet/sfc/siena/efx_channels.c
@@ -302,6 +302,7 @@ int efx_siena_probe_interrupts(struct efx_nic *efx)
efx->tx_channel_offset = 0;
efx->n_xdp_channels = 0;
efx->xdp_channel_offset = efx->n_channels;
+ efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
rc = pci_enable_msi(efx->pci_dev);
if (rc == 0) {
efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
@@ -323,6 +324,7 @@ int efx_siena_probe_interrupts(struct efx_nic *efx)
efx->tx_channel_offset = efx_siena_separate_tx_channels ? 1 : 0;
efx->n_xdp_channels = 0;
efx->xdp_channel_offset = efx->n_channels;
+ efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
efx->legacy_irq = efx->pci_dev->irq;
}
diff --git a/drivers/net/ethernet/sfc/tc.c b/drivers/net/ethernet/sfc/tc.c
index 0327639a628a..c004443c1d58 100644
--- a/drivers/net/ethernet/sfc/tc.c
+++ b/drivers/net/ethernet/sfc/tc.c
@@ -624,13 +624,12 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx,
if (!found) { /* We don't care. */
netif_dbg(efx, drv, efx->net_dev,
"Ignoring foreign filter that doesn't egdev us\n");
- rc = -EOPNOTSUPP;
- goto release;
+ return -EOPNOTSUPP;
}
rc = efx_mae_match_check_caps(efx, &match.mask, NULL);
if (rc)
- goto release;
+ return rc;
if (efx_tc_match_is_encap(&match.mask)) {
enum efx_encap_type type;
@@ -639,8 +638,7 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx,
if (type == EFX_ENCAP_TYPE_NONE) {
NL_SET_ERR_MSG_MOD(extack,
"Egress encap match on unsupported tunnel device");
- rc = -EOPNOTSUPP;
- goto release;
+ return -EOPNOTSUPP;
}
rc = efx_mae_check_encap_type_supported(efx, type);
@@ -648,25 +646,24 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx,
NL_SET_ERR_MSG_FMT_MOD(extack,
"Firmware reports no support for %s encap match",
efx_tc_encap_type_name(type));
- goto release;
+ return rc;
}
rc = efx_tc_flower_record_encap_match(efx, &match, type,
extack);
if (rc)
- goto release;
+ return rc;
} else {
/* This is not a tunnel decap rule, ignore it */
netif_dbg(efx, drv, efx->net_dev,
"Ignoring foreign filter without encap match\n");
- rc = -EOPNOTSUPP;
- goto release;
+ return -EOPNOTSUPP;
}
rule = kzalloc(sizeof(*rule), GFP_USER);
if (!rule) {
rc = -ENOMEM;
- goto release;
+ goto out_free;
}
INIT_LIST_HEAD(&rule->acts.list);
rule->cookie = tc->cookie;
@@ -678,7 +675,7 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx,
"Ignoring already-offloaded rule (cookie %lx)\n",
tc->cookie);
rc = -EEXIST;
- goto release;
+ goto out_free;
}
act = kzalloc(sizeof(*act), GFP_USER);
@@ -843,6 +840,7 @@ release:
efx_tc_match_action_ht_params);
efx_tc_free_action_set_list(efx, &rule->acts, false);
}
+out_free:
kfree(rule);
if (match.encap)
efx_tc_flower_release_encap_match(efx, match.encap);
@@ -899,8 +897,7 @@ static int efx_tc_flower_replace(struct efx_nic *efx,
return rc;
if (efx_tc_match_is_encap(&match.mask)) {
NL_SET_ERR_MSG_MOD(extack, "Ingress enc_key matches not supported");
- rc = -EOPNOTSUPP;
- goto release;
+ return -EOPNOTSUPP;
}
if (tc->common.chain_index) {
@@ -924,9 +921,9 @@ static int efx_tc_flower_replace(struct efx_nic *efx,
if (old) {
netif_dbg(efx, drv, efx->net_dev,
"Already offloaded rule (cookie %lx)\n", tc->cookie);
- rc = -EEXIST;
NL_SET_ERR_MSG_MOD(extack, "Rule already offloaded");
- goto release;
+ kfree(rule);
+ return -EEXIST;
}
/* Parse actions */
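
The replace_foreign rework above follows the usual staged-unwind convention: return directly while nothing has been allocated, and jump to the label that frees exactly what exists so far (out_free for just the rule, release for the fuller teardown). A minimal sketch of the convention, with a hypothetical setup_two() and arbitrary allocation sizes:

#include <linux/slab.h>

static int setup_two(void **out_a, void **out_b)
{
	void *a, *b;
	int rc;

	a = kzalloc(16, GFP_KERNEL);
	if (!a)
		return -ENOMEM;		/* nothing to unwind yet */

	b = kzalloc(16, GFP_KERNEL);
	if (!b) {
		rc = -ENOMEM;
		goto out_free_a;	/* free exactly what exists */
	}

	*out_a = a;
	*out_b = b;
	return 0;

out_free_a:
	kfree(a);
	return rc;
}
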
diff --git a/drivers/net/ethernet/sfc/tx_common.c b/drivers/net/ethernet/sfc/tx_common.c
index 67e789b96c43..755aa92bf823 100644
--- a/drivers/net/ethernet/sfc/tx_common.c
+++ b/drivers/net/ethernet/sfc/tx_common.c
@@ -249,7 +249,7 @@ void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue)
}
}
-void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
+int efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
unsigned int fill_level, pkts_compl = 0, bytes_compl = 0;
unsigned int efv_pkts_compl = 0;
@@ -279,6 +279,8 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
}
efx_xmit_done_check_empty(tx_queue);
+
+ return pkts_compl + efv_pkts_compl;
}
/* Remove buffers put into a tx_queue for the current packet.
diff --git a/drivers/net/ethernet/sfc/tx_common.h b/drivers/net/ethernet/sfc/tx_common.h
index d87aecbc7bf1..1e9f42938aac 100644
--- a/drivers/net/ethernet/sfc/tx_common.h
+++ b/drivers/net/ethernet/sfc/tx_common.h
@@ -28,7 +28,7 @@ static inline bool efx_tx_buffer_in_use(struct efx_tx_buffer *buffer)
}
void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue);
-void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
+int efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
unsigned int insert_count);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index 16a8c361283b..f07905f00f98 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -644,7 +644,8 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
plat_dat->fix_mac_speed = ethqos_fix_mac_speed;
plat_dat->dump_debug_regs = rgmii_dump;
plat_dat->has_gmac4 = 1;
- plat_dat->dwmac4_addrs = &data->dwmac4_addrs;
+ if (ethqos->has_emac3)
+ plat_dat->dwmac4_addrs = &data->dwmac4_addrs;
plat_dat->pmt = 1;
plat_dat->tso_en = of_property_read_bool(np, "snps,tso");
if (of_device_is_compatible(np, "qcom,qcs404-ethqos"))
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 0fca81507a77..87510951f4e8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3873,7 +3873,6 @@ irq_error:
stmmac_hw_teardown(dev);
init_error:
- free_dma_desc_resources(priv, &priv->dma_conf);
phylink_disconnect_phy(priv->phylink);
init_phy_error:
pm_runtime_put(priv->device);
@@ -3891,6 +3890,9 @@ static int stmmac_open(struct net_device *dev)
return PTR_ERR(dma_conf);
ret = __stmmac_open(dev, dma_conf);
+ if (ret)
+ free_dma_desc_resources(priv, dma_conf);
+
kfree(dma_conf);
return ret;
}
@@ -5633,12 +5635,15 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
stmmac_release(dev);
ret = __stmmac_open(dev, dma_conf);
- kfree(dma_conf);
if (ret) {
+ free_dma_desc_resources(priv, dma_conf);
+ kfree(dma_conf);
netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
return ret;
}
+ kfree(dma_conf);
+
stmmac_set_rx_mode(dev);
}
@@ -7233,8 +7238,7 @@ int stmmac_dvr_probe(struct device *device,
ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM;
ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
- NETDEV_XDP_ACT_XSK_ZEROCOPY |
- NETDEV_XDP_ACT_NDO_XMIT;
+ NETDEV_XDP_ACT_XSK_ZEROCOPY;
ret = stmmac_tc_init(priv, priv);
if (!ret) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c
index 9d4d8c3dad0a..aa6f16d3df64 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c
@@ -117,6 +117,9 @@ int stmmac_xdp_set_prog(struct stmmac_priv *priv, struct bpf_prog *prog,
return -EOPNOTSUPP;
}
+ if (!prog)
+ xdp_features_clear_redirect_target(dev);
+
need_update = !!priv->xdp_prog != !!prog;
if (if_running && need_update)
stmmac_xdp_release(dev);
@@ -131,5 +134,8 @@ int stmmac_xdp_set_prog(struct stmmac_priv *priv, struct bpf_prog *prog,
if (if_running && need_update)
stmmac_xdp_open(dev);
+ if (prog)
+ xdp_features_set_redirect_target(dev, false);
+
return 0;
}
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 4ef05bad4613..d61dfa250feb 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -5077,6 +5077,8 @@ err_out_iounmap:
cas_shutdown(cp);
mutex_unlock(&cp->pm_mutex);
+ vfree(cp->fw_data);
+
pci_iounmap(pdev, cp->regs);
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 11cbcd9e2c72..bebcfd5e6b57 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -2068,7 +2068,7 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
/* Initialize the Serdes PHY for the port */
ret = am65_cpsw_init_serdes_phy(dev, port_np, port);
if (ret)
- return ret;
+ goto of_node_put;
port->slave.mac_only =
of_property_read_bool(port_np, "ti,mac-only");
diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
index f9972b8140f9..a03490ba2e5b 100644
--- a/drivers/net/ieee802154/adf7242.c
+++ b/drivers/net/ieee802154/adf7242.c
@@ -1348,3 +1348,5 @@ module_spi_driver(adf7242_driver);
MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
MODULE_DESCRIPTION("ADF7242 IEEE802.15.4 Transceiver Driver");
MODULE_LICENSE("GPL");
+
+MODULE_FIRMWARE(FIRMWARE);
diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
index 8445c2189d11..31cba9aa7636 100644
--- a/drivers/net/ieee802154/mac802154_hwsim.c
+++ b/drivers/net/ieee802154/mac802154_hwsim.c
@@ -685,7 +685,7 @@ static int hwsim_del_edge_nl(struct sk_buff *msg, struct genl_info *info)
static int hwsim_set_edge_lqi(struct sk_buff *msg, struct genl_info *info)
{
struct nlattr *edge_attrs[MAC802154_HWSIM_EDGE_ATTR_MAX + 1];
- struct hwsim_edge_info *einfo;
+ struct hwsim_edge_info *einfo, *einfo_old;
struct hwsim_phy *phy_v0;
struct hwsim_edge *e;
u32 v0, v1;
@@ -723,8 +723,10 @@ static int hwsim_set_edge_lqi(struct sk_buff *msg, struct genl_info *info)
list_for_each_entry_rcu(e, &phy_v0->edges, list) {
if (e->endpoint->idx == v1) {
einfo->lqi = lqi;
- rcu_assign_pointer(e->info, einfo);
+ einfo_old = rcu_replace_pointer(e->info, einfo,
+ lockdep_is_held(&hwsim_phys_lock));
rcu_read_unlock();
+ kfree_rcu(einfo_old, rcu);
mutex_unlock(&hwsim_phys_lock);
return 0;
}
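
The LQI update previously published the new einfo with rcu_assign_pointer() and leaked the object it replaced; rcu_replace_pointer() returns the old pointer so it can be reclaimed with kfree_rcu() after a grace period. A sketch of the pattern with a hypothetical struct item carrying an embedded rcu_head, as hwsim_edge_info does:

#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct item {
	int value;
	struct rcu_head rcu;
};

static void publish_item(struct item __rcu **slot, struct item *new,
			 struct mutex *lock)
{
	struct item *old;

	lockdep_assert_held(lock);	/* writers serialize on this lock */
	old = rcu_replace_pointer(*slot, new, lockdep_is_held(lock));
	/* readers may still hold the old object; free after a grace period */
	kfree_rcu(old, rcu);
}
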
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index 2ee80ed140b7..afa1d56d9095 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -119,7 +119,7 @@ enum ipa_status_field_id {
};
/* Size in bytes of an IPA packet status structure */
-#define IPA_STATUS_SIZE sizeof(__le32[4])
+#define IPA_STATUS_SIZE sizeof(__le32[8])
/* IPA status structure decoder; looks up field values for a structure */
static u32 ipa_status_extract(struct ipa *ipa, const void *data,
diff --git a/drivers/net/ipvlan/ipvlan_l3s.c b/drivers/net/ipvlan/ipvlan_l3s.c
index 71712ea25403..d5b05e803219 100644
--- a/drivers/net/ipvlan/ipvlan_l3s.c
+++ b/drivers/net/ipvlan/ipvlan_l3s.c
@@ -102,6 +102,10 @@ static unsigned int ipvlan_nf_input(void *priv, struct sk_buff *skb,
skb->dev = addr->master->dev;
skb->skb_iif = skb->dev->ifindex;
+#if IS_ENABLED(CONFIG_IPV6)
+ if (addr->atype == IPVL_IPV6)
+ IP6CB(skb)->iif = skb->dev->ifindex;
+#endif
len = skb->len + ETH_HLEN;
ipvlan_count_rx(addr->master, len, true, false);
out:
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 3427993f94f7..984dfa5d6c11 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -3997,17 +3997,15 @@ static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
return -ENOMEM;
secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats);
- if (!secy->tx_sc.stats) {
- free_percpu(macsec->stats);
+ if (!secy->tx_sc.stats)
return -ENOMEM;
- }
secy->tx_sc.md_dst = metadata_dst_alloc(0, METADATA_MACSEC, GFP_KERNEL);
- if (!secy->tx_sc.md_dst) {
- free_percpu(secy->tx_sc.stats);
- free_percpu(macsec->stats);
+ if (!secy->tx_sc.md_dst)
+ /* The macsec and secy percpu stats will be freed when the net_device
+ * is unregistered, in macsec_free_netdev().
+ */

return -ENOMEM;
- }
if (sci == MACSEC_UNDEF_SCI)
sci = dev_to_sci(dev, MACSEC_PORT_ES);
diff --git a/drivers/net/mdio/mdio-i2c.c b/drivers/net/mdio/mdio-i2c.c
index 1e0c206d0f2e..da2001ea1f99 100644
--- a/drivers/net/mdio/mdio-i2c.c
+++ b/drivers/net/mdio/mdio-i2c.c
@@ -291,7 +291,8 @@ static int i2c_rollball_mii_cmd(struct mii_bus *bus, int bus_addr, u8 cmd,
return i2c_transfer_rollball(i2c, msgs, ARRAY_SIZE(msgs));
}
-static int i2c_mii_read_rollball(struct mii_bus *bus, int phy_id, int reg)
+static int i2c_mii_read_rollball(struct mii_bus *bus, int phy_id, int devad,
+ int reg)
{
u8 buf[4], res[6];
int bus_addr, ret;
@@ -302,7 +303,7 @@ static int i2c_mii_read_rollball(struct mii_bus *bus, int phy_id, int reg)
return 0xffff;
buf[0] = ROLLBALL_DATA_ADDR;
- buf[1] = (reg >> 16) & 0x1f;
+ buf[1] = devad;
buf[2] = (reg >> 8) & 0xff;
buf[3] = reg & 0xff;
@@ -322,8 +323,8 @@ static int i2c_mii_read_rollball(struct mii_bus *bus, int phy_id, int reg)
return val;
}
-static int i2c_mii_write_rollball(struct mii_bus *bus, int phy_id, int reg,
- u16 val)
+static int i2c_mii_write_rollball(struct mii_bus *bus, int phy_id, int devad,
+ int reg, u16 val)
{
int bus_addr, ret;
u8 buf[6];
@@ -333,7 +334,7 @@ static int i2c_mii_write_rollball(struct mii_bus *bus, int phy_id, int reg,
return 0;
buf[0] = ROLLBALL_DATA_ADDR;
- buf[1] = (reg >> 16) & 0x1f;
+ buf[1] = devad;
buf[2] = (reg >> 8) & 0xff;
buf[3] = reg & 0xff;
buf[4] = val >> 8;
@@ -405,8 +406,8 @@ struct mii_bus *mdio_i2c_alloc(struct device *parent, struct i2c_adapter *i2c,
return ERR_PTR(ret);
}
- mii->read = i2c_mii_read_rollball;
- mii->write = i2c_mii_write_rollball;
+ mii->read_c45 = i2c_mii_read_rollball;
+ mii->write_c45 = i2c_mii_write_rollball;
break;
default:
mii->read = i2c_mii_read_default_c22;
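
Before this change the RollBall accessors were registered as plain (C22-style) hooks and recovered the MMD address from the packed register argument — the `(reg >> 16) & 0x1f` removed above; the C45 hooks receive devad explicitly instead. A sketch of the packed encoding the old hooks decoded, with hypothetical helper names:

#include <stdint.h>

/* Packed Clause 45 address as the old C22-style hooks received it:
 * bits 16..20 carry the MMD (device) address, bits 0..15 the register.
 */
static inline uint8_t c45_devad(uint32_t packed)
{
	return (packed >> 16) & 0x1f;
}

static inline uint16_t c45_regnum(uint32_t packed)
{
	return packed & 0xffff;
}
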
diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c
index f19d48c94fe0..72f25e778840 100644
--- a/drivers/net/pcs/pcs-xpcs.c
+++ b/drivers/net/pcs/pcs-xpcs.c
@@ -873,7 +873,7 @@ int xpcs_do_config(struct dw_xpcs *xpcs, phy_interface_t interface,
switch (compat->an_mode) {
case DW_AN_C73:
- if (phylink_autoneg_inband(mode)) {
+ if (test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, advertising)) {
ret = xpcs_config_aneg_c73(xpcs, compat);
if (ret)
return ret;
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index d75f526a20a4..e397e7d642d9 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -44,6 +44,7 @@
#define DP83867_STRAP_STS1 0x006E
#define DP83867_STRAP_STS2 0x006f
#define DP83867_RGMIIDCTL 0x0086
+#define DP83867_DSP_FFE_CFG 0x012c
#define DP83867_RXFCFG 0x0134
#define DP83867_RXFPMD1 0x0136
#define DP83867_RXFPMD2 0x0137
@@ -935,14 +936,33 @@ static int dp83867_phy_reset(struct phy_device *phydev)
{
int err;
- err = phy_write(phydev, DP83867_CTRL, DP83867_SW_RESTART);
+ err = phy_write(phydev, DP83867_CTRL, DP83867_SW_RESET);
if (err < 0)
return err;
usleep_range(10, 20);
- return phy_modify(phydev, MII_DP83867_PHYCTRL,
+ err = phy_modify(phydev, MII_DP83867_PHYCTRL,
DP83867_PHYCR_FORCE_LINK_GOOD, 0);
+ if (err < 0)
+ return err;
+
+ /* Configure the DSP Feedforward Equalizer Configuration register to
+ * improve short cable (< 1 meter) performance. This will not affect
+ * long cable performance.
+ */
+ err = phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_DSP_FFE_CFG,
+ 0x0e81);
+ if (err < 0)
+ return err;
+
+ err = phy_write(phydev, DP83867_CTRL, DP83867_SW_RESTART);
+ if (err < 0)
+ return err;
+
+ usleep_range(10, 20);
+
+ return 0;
}
static void dp83867_link_change_notify(struct phy_device *phydev)
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 389f33a12534..8b3618d3da4a 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -1287,7 +1287,7 @@ EXPORT_SYMBOL_GPL(mdiobus_modify_changed);
* @mask: bit mask of bits to clear
* @set: bit mask of bits to set
*/
-int mdiobus_c45_modify_changed(struct mii_bus *bus, int devad, int addr,
+int mdiobus_c45_modify_changed(struct mii_bus *bus, int addr, int devad,
u32 regnum, u16 mask, u16 set)
{
int err;
diff --git a/drivers/net/phy/mscc/mscc.h b/drivers/net/phy/mscc/mscc.h
index a50235fdf7d9..defe5cc6d4fc 100644
--- a/drivers/net/phy/mscc/mscc.h
+++ b/drivers/net/phy/mscc/mscc.h
@@ -179,6 +179,7 @@ enum rgmii_clock_delay {
#define VSC8502_RGMII_CNTL 20
#define VSC8502_RGMII_RX_DELAY_MASK 0x0070
#define VSC8502_RGMII_TX_DELAY_MASK 0x0007
+#define VSC8502_RGMII_RX_CLK_DISABLE 0x0800
#define MSCC_PHY_WOL_LOWER_MAC_ADDR 21
#define MSCC_PHY_WOL_MID_MAC_ADDR 22
@@ -276,6 +277,7 @@ enum rgmii_clock_delay {
/* Microsemi PHY ID's
* Code assumes lowest nibble is 0
*/
+#define PHY_ID_VSC8501 0x00070530
#define PHY_ID_VSC8502 0x00070630
#define PHY_ID_VSC8504 0x000704c0
#define PHY_ID_VSC8514 0x00070670
diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c
index 62bf99e45af1..28df8a2e4230 100644
--- a/drivers/net/phy/mscc/mscc_main.c
+++ b/drivers/net/phy/mscc/mscc_main.c
@@ -519,16 +519,27 @@ out_unlock:
* * 2.0 ns (which causes the data to be sampled at exactly half way between
* clock transitions at 1000 Mbps) if delays should be enabled
*/
-static int vsc85xx_rgmii_set_skews(struct phy_device *phydev, u32 rgmii_cntl,
- u16 rgmii_rx_delay_mask,
- u16 rgmii_tx_delay_mask)
+static int vsc85xx_update_rgmii_cntl(struct phy_device *phydev, u32 rgmii_cntl,
+ u16 rgmii_rx_delay_mask,
+ u16 rgmii_tx_delay_mask)
{
u16 rgmii_rx_delay_pos = ffs(rgmii_rx_delay_mask) - 1;
u16 rgmii_tx_delay_pos = ffs(rgmii_tx_delay_mask) - 1;
u16 reg_val = 0;
- int rc;
+ u16 mask = 0;
+ int rc = 0;
- mutex_lock(&phydev->lock);
+ /* For traffic to pass, the VSC8502 family needs the RX_CLK disable bit
+ * to be unset for all PHY modes, so do that as part of the paged
+ * register modification.
+ * For some family members (like VSC8530/31/40/41) this bit is reserved
+ * and read-only, and the RX clock is enabled by default.
+ */
+ if (rgmii_cntl == VSC8502_RGMII_CNTL)
+ mask |= VSC8502_RGMII_RX_CLK_DISABLE;
+
+ if (phy_interface_is_rgmii(phydev))
+ mask |= rgmii_rx_delay_mask | rgmii_tx_delay_mask;
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID ||
phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
@@ -537,31 +548,20 @@ static int vsc85xx_rgmii_set_skews(struct phy_device *phydev, u32 rgmii_cntl,
phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
reg_val |= RGMII_CLK_DELAY_2_0_NS << rgmii_tx_delay_pos;
- rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_EXTENDED_2,
- rgmii_cntl,
- rgmii_rx_delay_mask | rgmii_tx_delay_mask,
- reg_val);
-
- mutex_unlock(&phydev->lock);
+ if (mask)
+ rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_EXTENDED_2,
+ rgmii_cntl, mask, reg_val);
return rc;
}
static int vsc85xx_default_config(struct phy_device *phydev)
{
- int rc;
-
phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
- if (phy_interface_mode_is_rgmii(phydev->interface)) {
- rc = vsc85xx_rgmii_set_skews(phydev, VSC8502_RGMII_CNTL,
- VSC8502_RGMII_RX_DELAY_MASK,
- VSC8502_RGMII_TX_DELAY_MASK);
- if (rc)
- return rc;
- }
-
- return 0;
+ return vsc85xx_update_rgmii_cntl(phydev, VSC8502_RGMII_CNTL,
+ VSC8502_RGMII_RX_DELAY_MASK,
+ VSC8502_RGMII_TX_DELAY_MASK);
}
static int vsc85xx_get_tunable(struct phy_device *phydev,
@@ -1758,13 +1758,11 @@ static int vsc8584_config_init(struct phy_device *phydev)
if (ret)
return ret;
- if (phy_interface_is_rgmii(phydev)) {
- ret = vsc85xx_rgmii_set_skews(phydev, VSC8572_RGMII_CNTL,
- VSC8572_RGMII_RX_DELAY_MASK,
- VSC8572_RGMII_TX_DELAY_MASK);
- if (ret)
- return ret;
- }
+ ret = vsc85xx_update_rgmii_cntl(phydev, VSC8572_RGMII_CNTL,
+ VSC8572_RGMII_RX_DELAY_MASK,
+ VSC8572_RGMII_TX_DELAY_MASK);
+ if (ret)
+ return ret;
ret = genphy_soft_reset(phydev);
if (ret)
@@ -2317,6 +2315,30 @@ static int vsc85xx_probe(struct phy_device *phydev)
/* Microsemi VSC85xx PHYs */
static struct phy_driver vsc85xx_driver[] = {
{
+ .phy_id = PHY_ID_VSC8501,
+ .name = "Microsemi GE VSC8501 SyncE",
+ .phy_id_mask = 0xfffffff0,
+ /* PHY_BASIC_FEATURES */
+ .soft_reset = &genphy_soft_reset,
+ .config_init = &vsc85xx_config_init,
+ .config_aneg = &vsc85xx_config_aneg,
+ .read_status = &vsc85xx_read_status,
+ .handle_interrupt = vsc85xx_handle_interrupt,
+ .config_intr = &vsc85xx_config_intr,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
+ .probe = &vsc85xx_probe,
+ .set_wol = &vsc85xx_wol_set,
+ .get_wol = &vsc85xx_wol_get,
+ .get_tunable = &vsc85xx_get_tunable,
+ .set_tunable = &vsc85xx_set_tunable,
+ .read_page = &vsc85xx_phy_read_page,
+ .write_page = &vsc85xx_phy_write_page,
+ .get_sset_count = &vsc85xx_get_sset_count,
+ .get_strings = &vsc85xx_get_strings,
+ .get_stats = &vsc85xx_get_stats,
+},
+{
.phy_id = PHY_ID_VSC8502,
.name = "Microsemi GE VSC8502 SyncE",
.phy_id_mask = 0xfffffff0,
@@ -2656,6 +2678,8 @@ static struct phy_driver vsc85xx_driver[] = {
module_phy_driver(vsc85xx_driver);
static struct mdio_device_id __maybe_unused vsc85xx_tbl[] = {
+ { PHY_ID_VSC8501, 0xfffffff0, },
+ { PHY_ID_VSC8502, 0xfffffff0, },
{ PHY_ID_VSC8504, 0xfffffff0, },
{ PHY_ID_VSC8514, 0xfffffff0, },
{ PHY_ID_VSC8530, 0xfffffff0, },
diff --git a/drivers/net/phy/mxl-gpy.c b/drivers/net/phy/mxl-gpy.c
index 6301a9abfb95..ea1073adc5a1 100644
--- a/drivers/net/phy/mxl-gpy.c
+++ b/drivers/net/phy/mxl-gpy.c
@@ -274,13 +274,6 @@ static int gpy_config_init(struct phy_device *phydev)
return ret < 0 ? ret : 0;
}
-static bool gpy_has_broken_mdint(struct phy_device *phydev)
-{
- /* At least these PHYs are known to have broken interrupt handling */
- return phydev->drv->phy_id == PHY_ID_GPY215B ||
- phydev->drv->phy_id == PHY_ID_GPY215C;
-}
-
static int gpy_probe(struct phy_device *phydev)
{
struct device *dev = &phydev->mdio.dev;
@@ -300,8 +293,7 @@ static int gpy_probe(struct phy_device *phydev)
phydev->priv = priv;
mutex_init(&priv->mbox_lock);
- if (gpy_has_broken_mdint(phydev) &&
- !device_property_present(dev, "maxlinear,use-broken-interrupts"))
+ if (!device_property_present(dev, "maxlinear,use-broken-interrupts"))
phydev->dev_flags |= PHY_F_NO_IRQ;
fw_version = phy_read(phydev, PHY_FWV);
@@ -659,11 +651,9 @@ static irqreturn_t gpy_handle_interrupt(struct phy_device *phydev)
* frame. Therefore, polling is the best we can do and won't do any more
* harm.
* It was observed that this bug happens on link state and link speed
- * changes on a GPY215B and GYP215C independent of the firmware version
- * (which doesn't mean that this list is exhaustive).
+ * changes independent of the firmware version.
*/
- if (gpy_has_broken_mdint(phydev) &&
- (reg & (PHY_IMASK_LSTC | PHY_IMASK_LSPC))) {
+ if (reg & (PHY_IMASK_LSTC | PHY_IMASK_LSPC)) {
reg = gpy_mbox_read(phydev, REG_GPIO0_OUT);
if (reg < 0) {
phy_error(phydev);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 17d0d0555a79..53598210be6c 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -3021,6 +3021,15 @@ static int phy_led_blink_set(struct led_classdev *led_cdev,
return err;
}
+static void phy_leds_unregister(struct phy_device *phydev)
+{
+ struct phy_led *phyled;
+
+ list_for_each_entry(phyled, &phydev->leds, list) {
+ led_classdev_unregister(&phyled->led_cdev);
+ }
+}
+
static int of_phy_led(struct phy_device *phydev,
struct device_node *led)
{
@@ -3054,7 +3063,7 @@ static int of_phy_led(struct phy_device *phydev,
init_data.fwnode = of_fwnode_handle(led);
init_data.devname_mandatory = true;
- err = devm_led_classdev_register_ext(dev, cdev, &init_data);
+ err = led_classdev_register_ext(dev, cdev, &init_data);
if (err)
return err;
@@ -3083,6 +3092,7 @@ static int of_phy_leds(struct phy_device *phydev)
err = of_phy_led(phydev, led);
if (err) {
of_node_put(led);
+ phy_leds_unregister(phydev);
return err;
}
}
@@ -3305,6 +3315,9 @@ static int phy_remove(struct device *dev)
cancel_delayed_work_sync(&phydev->state_queue);
+ if (IS_ENABLED(CONFIG_PHYLIB_LEDS))
+ phy_leds_unregister(phydev);
+
phydev->state = PHY_DOWN;
sfp_bus_del_upstream(phydev->sfp_bus);
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index a4111f1be375..5efdeb59f4b2 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -188,6 +188,7 @@ static int phylink_interface_max_speed(phy_interface_t interface)
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_QUSGMII:
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_GMII:
return SPEED_1000;
@@ -204,7 +205,6 @@ static int phylink_interface_max_speed(phy_interface_t interface)
case PHY_INTERFACE_MODE_10GBASER:
case PHY_INTERFACE_MODE_10GKR:
case PHY_INTERFACE_MODE_USXGMII:
- case PHY_INTERFACE_MODE_QUSGMII:
return SPEED_10000;
case PHY_INTERFACE_MODE_25GBASER:
@@ -2226,6 +2226,12 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
ASSERT_RTNL();
if (pl->phydev) {
+ struct ethtool_link_ksettings phy_kset = *kset;
+
+ linkmode_and(phy_kset.link_modes.advertising,
+ phy_kset.link_modes.advertising,
+ pl->supported);
+
/* We can rely on phylib for this update; we also do not need
* to update the pl->link_config settings:
* - the configuration returned via ksettings_get() will come
@@ -2244,11 +2250,10 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
* the presence of a PHY, this should not be changed as that
* should be determined from the media side advertisement.
*/
- return phy_ethtool_ksettings_set(pl->phydev, kset);
+ return phy_ethtool_ksettings_set(pl->phydev, &phy_kset);
}
config = pl->link_config;
-
/* Mask out unsupported advertisements */
linkmode_and(config.advertising, kset->link_modes.advertising,
pl->supported);
@@ -3294,6 +3299,41 @@ void phylink_decode_usxgmii_word(struct phylink_link_state *state,
EXPORT_SYMBOL_GPL(phylink_decode_usxgmii_word);
/**
+ * phylink_decode_usgmii_word() - decode the USGMII word from a MAC PCS
+ * @state: a pointer to a &struct phylink_link_state.
+ * @lpa: a 16-bit value which stores the USGMII auto-negotiation word
+ *
+ * Helper for a MAC PCS that supports the USGMII protocol and its
+ * auto-negotiation code word. Decodes the USGMII code word and populates the
+ * corresponding fields (speed, duplex) in the phylink_link_state structure.
+ * The layout of this word is the same as the USXGMII word, except that it
+ * only supports speeds up to 1 Gbps.
+ */
+static void phylink_decode_usgmii_word(struct phylink_link_state *state,
+ uint16_t lpa)
+{
+ switch (lpa & MDIO_USXGMII_SPD_MASK) {
+ case MDIO_USXGMII_10:
+ state->speed = SPEED_10;
+ break;
+ case MDIO_USXGMII_100:
+ state->speed = SPEED_100;
+ break;
+ case MDIO_USXGMII_1000:
+ state->speed = SPEED_1000;
+ break;
+ default:
+ state->link = false;
+ return;
+ }
+
+ if (lpa & MDIO_USXGMII_FULL_DUPLEX)
+ state->duplex = DUPLEX_FULL;
+ else
+ state->duplex = DUPLEX_HALF;
+}
+
+/**
* phylink_mii_c22_pcs_decode_state() - Decode MAC PCS state from MII registers
* @state: a pointer to a &struct phylink_link_state.
* @bmsr: The value of the %MII_BMSR register
@@ -3330,9 +3370,11 @@ void phylink_mii_c22_pcs_decode_state(struct phylink_link_state *state,
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_QSGMII:
- case PHY_INTERFACE_MODE_QUSGMII:
phylink_decode_sgmii_word(state, lpa);
break;
+ case PHY_INTERFACE_MODE_QUSGMII:
+ phylink_decode_usgmii_word(state, lpa);
+ break;
default:
state->link = false;
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index d10606f257c4..555b0b1e9a78 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1629,6 +1629,7 @@ static int team_init(struct net_device *dev)
team->dev = dev;
team_set_no_mode(team);
+ team->notifier_ctx = false;
team->pcpu_stats = netdev_alloc_pcpu_stats(struct team_pcpu_stats);
if (!team->pcpu_stats)
@@ -3022,7 +3023,11 @@ static int team_device_event(struct notifier_block *unused,
team_del_slave(port->team->dev, dev);
break;
case NETDEV_FEAT_CHANGE:
- team_compute_features(port->team);
+ if (!port->team->notifier_ctx) {
+ port->team->notifier_ctx = true;
+ team_compute_features(port->team);
+ port->team->notifier_ctx = false;
+ }
break;
case NETDEV_PRECHANGEMTU:
/* Forbid to change mtu of underlaying device */
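
team_compute_features() can itself raise NETDEV_FEAT_CHANGE for the team device, so the notifier above uses notifier_ctx as a reentrancy guard to break the recursion. A generic sketch of the guard with hypothetical names; a plain bool suffices because the notifier path is serialized (under RTNL):

#include <stdbool.h>

struct guard_ctx {
	bool in_handler;
};

void do_work(struct guard_ctx *c);	/* hypothetical; may re-enter on_event() */

static void on_event(struct guard_ctx *c)
{
	if (c->in_handler)
		return;			/* nested invocation: ignore */

	c->in_handler = true;
	do_work(c);
	c->in_handler = false;
}
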
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index d4d0a41a905a..d75456adc62a 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1977,6 +1977,14 @@ napi_busy:
int queue_len;
spin_lock_bh(&queue->lock);
+
+ if (unlikely(tfile->detached)) {
+ spin_unlock_bh(&queue->lock);
+ rcu_read_unlock();
+ err = -EBUSY;
+ goto free_skb;
+ }
+
__skb_queue_tail(queue, skb);
queue_len = skb_queue_len(queue);
spin_unlock(&queue->lock);
@@ -2512,6 +2520,13 @@ build:
if (tfile->napi_enabled) {
queue = &tfile->sk.sk_write_queue;
spin_lock(&queue->lock);
+
+ if (unlikely(tfile->detached)) {
+ spin_unlock(&queue->lock);
+ kfree_skb(skb);
+ return -EBUSY;
+ }
+
__skb_queue_tail(queue, skb);
spin_unlock(&queue->lock);
ret = 1;
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 6ce8f4f0c70e..db05622f1f70 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -181,9 +181,12 @@ static u32 cdc_ncm_check_tx_max(struct usbnet *dev, u32 new_tx)
else
min = ctx->max_datagram_size + ctx->max_ndp_size + sizeof(struct usb_cdc_ncm_nth32);
- max = min_t(u32, CDC_NCM_NTB_MAX_SIZE_TX, le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize));
- if (max == 0)
+ if (le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize) == 0)
max = CDC_NCM_NTB_MAX_SIZE_TX; /* dwNtbOutMaxSize not set */
+ else
+ max = clamp_t(u32, le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize),
+ USB_CDC_NCM_NTB_MIN_OUT_SIZE,
+ CDC_NCM_NTB_MAX_SIZE_TX);
/* some devices set dwNtbOutMaxSize too low for the above default */
min = min(min, max);
@@ -1244,6 +1247,9 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
* further.
*/
if (skb_out == NULL) {
+ /* If even the smallest allocation fails, abort. */
+ if (ctx->tx_curr_size == USB_CDC_NCM_NTB_MIN_OUT_SIZE)
+ goto alloc_failed;
ctx->tx_low_mem_max_cnt = min(ctx->tx_low_mem_max_cnt + 1,
(unsigned)CDC_NCM_LOW_MEM_MAX_CNT);
ctx->tx_low_mem_val = ctx->tx_low_mem_max_cnt;
@@ -1262,13 +1268,8 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
skb_out = alloc_skb(ctx->tx_curr_size, GFP_ATOMIC);
/* No allocation possible so we will abort */
- if (skb_out == NULL) {
- if (skb != NULL) {
- dev_kfree_skb_any(skb);
- dev->net->stats.tx_dropped++;
- }
- goto exit_no_skb;
- }
+ if (!skb_out)
+ goto alloc_failed;
ctx->tx_low_mem_val--;
}
if (ctx->is_ndp16) {
@@ -1461,6 +1462,11 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
return skb_out;
+alloc_failed:
+ if (skb) {
+ dev_kfree_skb_any(skb);
+ dev->net->stats.tx_dropped++;
+ }
exit_no_skb:
/* Start timer, if there is a remaining non-empty skb */
if (ctx->tx_curr_skb != NULL && n > 0)
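
The tx NTB size is now derived by clamping the device-reported dwNtbOutMaxSize between USB_CDC_NCM_NTB_MIN_OUT_SIZE and CDC_NCM_NTB_MAX_SIZE_TX, with zero still meaning "not set". A sketch of the rule using illustrative bounds, not the real constants from the USB CDC NCM headers:

#include <stdint.h>

#define NTB_MIN_OUT	2048u	/* illustrative, not the real constant */
#define NTB_MAX_TX	32768u	/* illustrative, not the real constant */

static uint32_t tx_max_from_device(uint32_t reported)
{
	if (reported == 0)		/* dwNtbOutMaxSize not set */
		return NTB_MAX_TX;
	if (reported < NTB_MIN_OUT)	/* too small: raise to the floor */
		return NTB_MIN_OUT;
	if (reported > NTB_MAX_TX)	/* too large: cap at the ceiling */
		return NTB_MAX_TX;
	return reported;
}
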
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 571e37e67f9c..2e7c7b0cdc54 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1220,7 +1220,9 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x05c6, 0x9080, 8)},
{QMI_FIXED_INTF(0x05c6, 0x9083, 3)},
{QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
+ {QMI_QUIRK_SET_DTR(0x05c6, 0x9091, 2)}, /* Compal RXM-G1 */
{QMI_FIXED_INTF(0x05c6, 0x90b2, 3)}, /* ublox R410M */
+ {QMI_QUIRK_SET_DTR(0x05c6, 0x90db, 2)}, /* Compal RXM-G1 */
{QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
{QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
{QMI_QUIRK_SET_DTR(0x05c6, 0x9625, 4)}, /* YUGA CLM920-NC5 */
@@ -1325,7 +1327,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x2001, 0x7e3d, 4)}, /* D-Link DWM-222 A2 */
{QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */
{QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */
- {QMI_FIXED_INTF(0x2020, 0x2060, 4)}, /* BroadMobi BM818 */
+ {QMI_QUIRK_SET_DTR(0x2020, 0x2060, 4)}, /* BroadMobi BM818 */
{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
{QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
{QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index a12ae26db0e2..486b5849033d 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -205,6 +205,8 @@ struct control_buf {
__virtio16 vid;
__virtio64 offloads;
struct virtio_net_ctrl_rss rss;
+ struct virtio_net_ctrl_coal_tx coal_tx;
+ struct virtio_net_ctrl_coal_rx coal_rx;
};
struct virtnet_info {
@@ -1868,6 +1870,38 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
return received;
}
+static void virtnet_disable_queue_pair(struct virtnet_info *vi, int qp_index)
+{
+ virtnet_napi_tx_disable(&vi->sq[qp_index].napi);
+ napi_disable(&vi->rq[qp_index].napi);
+ xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
+}
+
+static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index)
+{
+ struct net_device *dev = vi->dev;
+ int err;
+
+ err = xdp_rxq_info_reg(&vi->rq[qp_index].xdp_rxq, dev, qp_index,
+ vi->rq[qp_index].napi.napi_id);
+ if (err < 0)
+ return err;
+
+ err = xdp_rxq_info_reg_mem_model(&vi->rq[qp_index].xdp_rxq,
+ MEM_TYPE_PAGE_SHARED, NULL);
+ if (err < 0)
+ goto err_xdp_reg_mem_model;
+
+ virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi);
+ virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi);
+
+ return 0;
+
+err_xdp_reg_mem_model:
+ xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
+ return err;
+}
+
static int virtnet_open(struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
@@ -1881,22 +1915,20 @@ static int virtnet_open(struct net_device *dev)
if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
schedule_delayed_work(&vi->refill, 0);
- err = xdp_rxq_info_reg(&vi->rq[i].xdp_rxq, dev, i, vi->rq[i].napi.napi_id);
+ err = virtnet_enable_queue_pair(vi, i);
if (err < 0)
- return err;
-
- err = xdp_rxq_info_reg_mem_model(&vi->rq[i].xdp_rxq,
- MEM_TYPE_PAGE_SHARED, NULL);
- if (err < 0) {
- xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
- return err;
- }
-
- virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
- virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi);
+ goto err_enable_qp;
}
return 0;
+
+err_enable_qp:
+ disable_delayed_refill(vi);
+ cancel_delayed_work_sync(&vi->refill);
+
+ for (i--; i >= 0; i--)
+ virtnet_disable_queue_pair(vi, i);
+ return err;
}
static int virtnet_poll_tx(struct napi_struct *napi, int budget)
@@ -2305,11 +2337,8 @@ static int virtnet_close(struct net_device *dev)
/* Make sure refill_work doesn't re-enable napi! */
cancel_delayed_work_sync(&vi->refill);
- for (i = 0; i < vi->max_queue_pairs; i++) {
- virtnet_napi_tx_disable(&vi->sq[i].napi);
- napi_disable(&vi->rq[i].napi);
- xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
- }
+ for (i = 0; i < vi->max_queue_pairs; i++)
+ virtnet_disable_queue_pair(vi, i);
return 0;
}
@@ -2907,12 +2936,10 @@ static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
struct ethtool_coalesce *ec)
{
struct scatterlist sgs_tx, sgs_rx;
- struct virtio_net_ctrl_coal_tx coal_tx;
- struct virtio_net_ctrl_coal_rx coal_rx;
- coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
- coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
- sg_init_one(&sgs_tx, &coal_tx, sizeof(coal_tx));
+ vi->ctrl->coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
+ vi->ctrl->coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
+ sg_init_one(&sgs_tx, &vi->ctrl->coal_tx, sizeof(vi->ctrl->coal_tx));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
VIRTIO_NET_CTRL_NOTF_COAL_TX_SET,
@@ -2923,9 +2950,9 @@ static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
vi->tx_usecs = ec->tx_coalesce_usecs;
vi->tx_max_packets = ec->tx_max_coalesced_frames;
- coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
- coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
- sg_init_one(&sgs_rx, &coal_rx, sizeof(coal_rx));
+ vi->ctrl->coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
+ vi->ctrl->coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
+ sg_init_one(&sgs_rx, &vi->ctrl->coal_rx, sizeof(vi->ctrl->coal_rx));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,
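
Moving the coalescing parameters into control_buf keeps the command payload out of the calling stack frame: buffers handed to the control virtqueue go through sg_init_one(), which needs a linearly mapped address, and with CONFIG_VMAP_STACK a local variable lives in vmalloc space. A kernel-context sketch of the shape, where ctrl_buf and do_send() are hypothetical stand-ins for control_buf and virtnet_send_command():

#include <linux/scatterlist.h>
#include <linux/types.h>

struct ctrl_buf {
	__le32 payload;			/* long-lived, kmalloc-backed */
};

int do_send(struct scatterlist *sg);	/* hypothetical transport hook */

static int send_cmd(struct ctrl_buf *ctrl, u32 value)
{
	struct scatterlist sg;

	ctrl->payload = cpu_to_le32(value);	/* never a stack variable */
	sg_init_one(&sg, &ctrl->payload, sizeof(ctrl->payload));
	return do_send(&sg);
}
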
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index d62a904d2e42..56326f38fe8a 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -384,6 +384,9 @@ static int lapbeth_new_device(struct net_device *dev)
ASSERT_RTNL();
+ if (dev->type != ARPHRD_ETHER)
+ return -EINVAL;
+
ndev = alloc_netdev(sizeof(*lapbeth), "lapb%d", NET_NAME_UNKNOWN,
lapbeth_setup);
if (!ndev)
diff --git a/drivers/net/wireless/broadcom/b43/b43.h b/drivers/net/wireless/broadcom/b43/b43.h
index 9fc7c088a539..67b4bac048e5 100644
--- a/drivers/net/wireless/broadcom/b43/b43.h
+++ b/drivers/net/wireless/broadcom/b43/b43.h
@@ -651,7 +651,7 @@ struct b43_iv {
union {
__be16 d16;
__be32 d32;
- } data __packed;
+ } __packed data;
} __packed;
diff --git a/drivers/net/wireless/broadcom/b43legacy/b43legacy.h b/drivers/net/wireless/broadcom/b43legacy/b43legacy.h
index 6b0cec467938..f49365d14619 100644
--- a/drivers/net/wireless/broadcom/b43legacy/b43legacy.h
+++ b/drivers/net/wireless/broadcom/b43legacy/b43legacy.h
@@ -379,7 +379,7 @@ struct b43legacy_iv {
union {
__be16 d16;
__be32 d32;
- } data __packed;
+ } __packed data;
} __packed;
#define B43legacy_PHYMODE(phytype) (1 << (phytype))
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index ff710b0b5071..00679a990e3d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -1039,6 +1039,11 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
struct brcmf_sdio_dev *sdiodev;
struct brcmf_bus *bus_if;
+ if (!id) {
+ dev_err(&func->dev, "Error no sdio_device_id passed for %x:%x\n", func->vendor, func->device);
+ return -ENODEV;
+ }
+
brcmf_dbg(SDIO, "Enter\n");
brcmf_dbg(SDIO, "Class=%x\n", func->class);
brcmf_dbg(SDIO, "sdio vendor ID: 0x%04x\n", func->vendor);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 59f3e9c5e139..80220685f5e4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -2394,6 +2394,9 @@ static void brcmf_pcie_debugfs_create(struct device *dev)
}
#endif
+/* Forward declaration for pci_match_id() call */
+static const struct pci_device_id brcmf_pcie_devid_table[];
+
static int
brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
@@ -2404,6 +2407,14 @@ brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct brcmf_core *core;
struct brcmf_bus *bus;
+ if (!id) {
+ id = pci_match_id(brcmf_pcie_devid_table, pdev);
+ if (!id) {
+ pci_err(pdev, "Error could not find pci_device_id for %x:%x\n", pdev->vendor, pdev->device);
+ return -ENODEV;
+ }
+ }
+
brcmf_dbg(PCIE, "Enter %x:%x\n", pdev->vendor, pdev->device);
ret = -ENOMEM;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
index 246843aeb696..2178675ae1a4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
@@ -1331,6 +1331,9 @@ brcmf_usb_disconnect_cb(struct brcmf_usbdev_info *devinfo)
brcmf_usb_detach(devinfo);
}
+/* Forward declaration for usb_match_id() call */
+static const struct usb_device_id brcmf_usb_devid_table[];
+
static int
brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
@@ -1342,6 +1345,14 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
u32 num_of_eps;
u8 endpoint_num, ep;
+ if (!id) {
+ id = usb_match_id(intf, brcmf_usb_devid_table);
+ if (!id) {
+ dev_err(&intf->dev, "Error could not find matching usb_device_id\n");
+ return -ENODEV;
+ }
+ }
+
brcmf_dbg(USB, "Enter 0x%04x:0x%04x\n", id->idVendor, id->idProduct);
devinfo = kzalloc(sizeof(*devinfo), GFP_ATOMIC);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index 5f4a51310add..cb9181f05501 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -38,7 +38,7 @@ static const struct dmi_system_id dmi_ppag_approved_list[] = {
},
{ .ident = "ASUS",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek COMPUTER INC."),
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
},
},
{}
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index d9faaae01abd..55219974b92b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -1664,14 +1664,10 @@ static __le32 iwl_get_mon_reg(struct iwl_fw_runtime *fwrt, u32 alloc_id,
}
static void *
-iwl_dump_ini_mon_fill_header(struct iwl_fw_runtime *fwrt,
- struct iwl_dump_ini_region_data *reg_data,
+iwl_dump_ini_mon_fill_header(struct iwl_fw_runtime *fwrt, u32 alloc_id,
struct iwl_fw_ini_monitor_dump *data,
const struct iwl_fw_mon_regs *addrs)
{
- struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
- u32 alloc_id = le32_to_cpu(reg->dram_alloc_id);
-
if (!iwl_trans_grab_nic_access(fwrt->trans)) {
IWL_ERR(fwrt, "Failed to get monitor header\n");
return NULL;
@@ -1702,8 +1698,10 @@ iwl_dump_ini_mon_dram_fill_header(struct iwl_fw_runtime *fwrt,
void *data, u32 data_len)
{
struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data;
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ u32 alloc_id = le32_to_cpu(reg->dram_alloc_id);
- return iwl_dump_ini_mon_fill_header(fwrt, reg_data, mon_dump,
+ return iwl_dump_ini_mon_fill_header(fwrt, alloc_id, mon_dump,
&fwrt->trans->cfg->mon_dram_regs);
}
@@ -1713,8 +1711,10 @@ iwl_dump_ini_mon_smem_fill_header(struct iwl_fw_runtime *fwrt,
void *data, u32 data_len)
{
struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data;
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ u32 alloc_id = le32_to_cpu(reg->internal_buffer.alloc_id);
- return iwl_dump_ini_mon_fill_header(fwrt, reg_data, mon_dump,
+ return iwl_dump_ini_mon_fill_header(fwrt, alloc_id, mon_dump,
&fwrt->trans->cfg->mon_smem_regs);
}
@@ -1725,7 +1725,10 @@ iwl_dump_ini_mon_dbgi_fill_header(struct iwl_fw_runtime *fwrt,
{
struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data;
- return iwl_dump_ini_mon_fill_header(fwrt, reg_data, mon_dump,
+ return iwl_dump_ini_mon_fill_header(fwrt,
+ /* no offset calculation later */
+ IWL_FW_INI_ALLOCATION_ID_DBGC1,
+ mon_dump,
&fwrt->trans->cfg->mon_dbgi_regs);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 37aa4676dc94..6d1007f24b4a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -2732,17 +2732,13 @@ static bool iwl_mvm_wait_d3_notif(struct iwl_notif_wait_data *notif_wait,
if (wowlan_info_ver < 2) {
struct iwl_wowlan_info_notif_v1 *notif_v1 = (void *)pkt->data;
- notif = kmemdup(notif_v1,
- offsetofend(struct iwl_wowlan_info_notif,
- received_beacons),
- GFP_ATOMIC);
-
+ notif = kmemdup(notif_v1, sizeof(*notif), GFP_ATOMIC);
if (!notif)
return false;
notif->tid_tear_down = notif_v1->tid_tear_down;
notif->station_id = notif_v1->station_id;
-
+ memset_after(notif, 0, station_id);
} else {
notif = (void *)pkt->data;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
index 3963a0d4ed04..652a603c4500 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
@@ -526,6 +526,11 @@ iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
rcu_read_lock();
sta = rcu_dereference(mvm->fw_id_to_mac_id[mvmvif->deflink.ap_sta_id]);
+ if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
+ rcu_read_unlock();
+ return PTR_ERR_OR_ZERO(sta);
+ }
+
if (sta->mfp && (peer->ftm.trigger_based || peer->ftm.non_trigger_based))
FTM_PUT_FLAG(PMF);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index b35c96cf7ad2..205c09bc9863 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -1091,7 +1091,7 @@ static const struct dmi_system_id dmi_tas_approved_list[] = {
},
{ .ident = "LENOVO",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Lenovo"),
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
},
},
{ .ident = "DELL",
@@ -1727,8 +1727,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
iwl_mvm_tas_init(mvm);
iwl_mvm_leds_sync(mvm);
- if (fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_RFIM_SUPPORT)) {
+ if (iwl_rfi_supported(mvm)) {
if (iwl_mvm_eval_dsm_rfi(mvm) == DSM_VALUE_RFI_ENABLE)
iwl_rfi_send_config_cmd(mvm, NULL);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/link.c b/drivers/net/wireless/intel/iwlwifi/mvm/link.c
index eb828de40a3c..3814915cb1a6 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/link.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/link.c
@@ -123,11 +123,13 @@ int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (mvmvif->link[i]->phy_ctxt)
count++;
- /* FIXME: IWL_MVM_FW_MAX_ACTIVE_LINKS_NUM should be
- * defined per HW
- */
- if (count >= IWL_MVM_FW_MAX_ACTIVE_LINKS_NUM)
- return -EINVAL;
+ if (vif->type == NL80211_IFTYPE_AP) {
+ if (count > mvm->fw->ucode_capa.num_beacons)
+ return -EOPNOTSUPP;
+ /* FIXME: IWL_MVM_FW_MAX_ACTIVE_LINKS_NUM should be defined per HW */
+ } else if (count >= IWL_MVM_FW_MAX_ACTIVE_LINKS_NUM) {
+ return -EOPNOTSUPP;
+ }
}
/* Catch early if driver tries to activate or deactivate a link
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 0f01b62357c6..17f788a5ff6b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -3607,7 +3607,8 @@ static bool iwl_mvm_vif_conf_from_sta(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
- unsigned int i;
+ struct ieee80211_link_sta *link_sta;
+ unsigned int link_id;
/* Beacon interval check - firmware will crash if the beacon
* interval is less than 16. We can't avoid connecting at all,
@@ -3616,14 +3617,11 @@ static bool iwl_mvm_vif_conf_from_sta(struct iwl_mvm *mvm,
* wpa_s will blocklist the AP...
*/
- for_each_set_bit(i, (unsigned long *)&sta->valid_links,
- IEEE80211_MLD_MAX_NUM_LINKS) {
- struct ieee80211_link_sta *link_sta =
- link_sta_dereference_protected(sta, i);
+ for_each_sta_active_link(vif, sta, link_sta, link_id) {
struct ieee80211_bss_conf *link_conf =
- link_conf_dereference_protected(vif, i);
+ link_conf_dereference_protected(vif, link_id);
- if (!link_conf || !link_sta)
+ if (!link_conf)
continue;
if (link_conf->beacon_int < IWL_MVM_MIN_BEACON_INTERVAL_TU) {
@@ -3645,24 +3643,23 @@ static void iwl_mvm_vif_set_he_support(struct ieee80211_hw *hw,
bool is_sta)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- unsigned int i;
+ struct ieee80211_link_sta *link_sta;
+ unsigned int link_id;
- for_each_set_bit(i, (unsigned long *)&sta->valid_links,
- IEEE80211_MLD_MAX_NUM_LINKS) {
- struct ieee80211_link_sta *link_sta =
- link_sta_dereference_protected(sta, i);
+ for_each_sta_active_link(vif, sta, link_sta, link_id) {
struct ieee80211_bss_conf *link_conf =
- link_conf_dereference_protected(vif, i);
+ link_conf_dereference_protected(vif, link_id);
- if (!link_conf || !link_sta || !mvmvif->link[i])
+ if (!link_conf || !mvmvif->link[link_id])
continue;
link_conf->he_support = link_sta->he_cap.has_he;
if (is_sta) {
- mvmvif->link[i]->he_ru_2mhz_block = false;
+ mvmvif->link[link_id]->he_ru_2mhz_block = false;
if (link_sta->he_cap.has_he)
- iwl_mvm_check_he_obss_narrow_bw_ru(hw, vif, i,
+ iwl_mvm_check_he_obss_narrow_bw_ru(hw, vif,
+ link_id,
link_conf);
}
}
@@ -3675,6 +3672,7 @@ iwl_mvm_sta_state_notexist_to_none(struct iwl_mvm *mvm,
struct iwl_mvm_sta_state_ops *callbacks)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct ieee80211_link_sta *link_sta;
unsigned int i;
int ret;
@@ -3699,15 +3697,9 @@ iwl_mvm_sta_state_notexist_to_none(struct iwl_mvm *mvm,
NL80211_TDLS_SETUP);
}
- for (i = 0; i < ARRAY_SIZE(sta->link); i++) {
- struct ieee80211_link_sta *link_sta;
-
- link_sta = link_sta_dereference_protected(sta, i);
- if (!link_sta)
- continue;
-
+ for_each_sta_active_link(vif, sta, link_sta, i)
link_sta->agg.max_rc_amsdu_len = 1;
- }
+
ieee80211_sta_recalc_aggregates(sta);
if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
@@ -3725,7 +3717,8 @@ iwl_mvm_sta_state_auth_to_assoc(struct ieee80211_hw *hw,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
- unsigned int i;
+ struct ieee80211_link_sta *link_sta;
+ unsigned int link_id;
lockdep_assert_held(&mvm->mutex);
@@ -3751,14 +3744,13 @@ iwl_mvm_sta_state_auth_to_assoc(struct ieee80211_hw *hw,
if (!mvm->mld_api_is_used)
goto out;
- for_each_set_bit(i, (unsigned long *)&sta->valid_links,
- IEEE80211_MLD_MAX_NUM_LINKS) {
+ for_each_sta_active_link(vif, sta, link_sta, link_id) {
struct ieee80211_bss_conf *link_conf =
- link_conf_dereference_protected(vif, i);
+ link_conf_dereference_protected(vif, link_id);
if (WARN_ON(!link_conf))
return -EINVAL;
- if (!mvmvif->link[i])
+ if (!mvmvif->link[link_id])
continue;
iwl_mvm_link_changed(mvm, vif, link_conf,
@@ -3889,6 +3881,9 @@ int iwl_mvm_mac_sta_state_common(struct ieee80211_hw *hw,
* from the AP now.
*/
iwl_mvm_reset_cca_40mhz_workaround(mvm, vif);
+
+ /* Also free dup data just in case any assertions below fail */
+ kfree(mvm_sta->dup_data);
}
mutex_lock(&mvm->mutex);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
index fbc2d5ed1006..7fb66c570959 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
@@ -906,11 +906,12 @@ iwl_mvm_mld_change_vif_links(struct ieee80211_hw *hw,
n_active++;
}
- if (vif->type == NL80211_IFTYPE_AP &&
- n_active > mvm->fw->ucode_capa.num_beacons)
- return -EOPNOTSUPP;
- else if (n_active > 1)
+ if (vif->type == NL80211_IFTYPE_AP) {
+ if (n_active > mvm->fw->ucode_capa.num_beacons)
+ return -EOPNOTSUPP;
+ } else if (n_active > 1) {
return -EOPNOTSUPP;
+ }
}
for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
index 0bfdf4462755..85a4ce8449ad 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
@@ -667,15 +667,15 @@ int iwl_mvm_mld_add_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
ret = iwl_mvm_mld_alloc_sta_links(mvm, vif, sta);
if (ret)
return ret;
- }
- spin_lock_init(&mvm_sta->lock);
+ spin_lock_init(&mvm_sta->lock);
- if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
- ret = iwl_mvm_alloc_sta_after_restart(mvm, vif, sta);
- else
ret = iwl_mvm_sta_init(mvm, vif, sta, IWL_MVM_INVALID_STA,
STATION_TYPE_PEER);
+ } else {
+ ret = iwl_mvm_alloc_sta_after_restart(mvm, vif, sta);
+ }
+
if (ret)
goto err;
@@ -728,7 +728,7 @@ int iwl_mvm_mld_update_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
struct ieee80211_link_sta *link_sta;
unsigned int link_id;
- int ret = 0;
+ int ret = -EINVAL;
lockdep_assert_held(&mvm->mutex);
@@ -791,8 +791,6 @@ int iwl_mvm_mld_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
lockdep_assert_held(&mvm->mutex);
- kfree(mvm_sta->dup_data);
-
/* flush its queues here since we are freeing mvm_sta */
for_each_sta_active_link(vif, sta, link_sta, link_id) {
struct iwl_mvm_link_sta *mvm_link_sta =
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 6e7470d3a826..9e5008e0e47f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -2347,6 +2347,7 @@ int iwl_mvm_mld_update_sta_keys(struct iwl_mvm *mvm,
u32 old_sta_mask,
u32 new_sta_mask);
+bool iwl_rfi_supported(struct iwl_mvm *mvm);
int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm,
struct iwl_rfi_lut_entry *rfi_table);
struct iwl_rfi_freq_table_resp_cmd *iwl_rfi_get_freq_table(struct iwl_mvm *mvm);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
index 6d18a1fd649b..fdf60afb0f3f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
@@ -445,6 +445,11 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
struct iwl_mcc_update_resp *mcc_resp = (void *)pkt->data;
n_channels = __le32_to_cpu(mcc_resp->n_channels);
+ if (iwl_rx_packet_payload_len(pkt) !=
+ struct_size(mcc_resp, channels, n_channels)) {
+ resp_cp = ERR_PTR(-EINVAL);
+ goto exit;
+ }
resp_len = sizeof(struct iwl_mcc_update_resp) +
n_channels * sizeof(__le32);
resp_cp = kmemdup(mcc_resp, resp_len, GFP_KERNEL);
@@ -456,6 +461,11 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
struct iwl_mcc_update_resp_v3 *mcc_resp_v3 = (void *)pkt->data;
n_channels = __le32_to_cpu(mcc_resp_v3->n_channels);
+ if (iwl_rx_packet_payload_len(pkt) !=
+ struct_size(mcc_resp_v3, channels, n_channels)) {
+ resp_cp = ERR_PTR(-EINVAL);
+ goto exit;
+ }
resp_len = sizeof(struct iwl_mcc_update_resp) +
n_channels * sizeof(__le32);
resp_cp = kzalloc(resp_len, GFP_KERNEL);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c b/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c
index bb77bc9aa821..2ecd32bed752 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2020 - 2021 Intel Corporation
+ * Copyright (C) 2020 - 2022 Intel Corporation
*/
#include "mvm.h"
@@ -70,6 +70,16 @@ static const struct iwl_rfi_lut_entry iwl_rfi_table[IWL_RFI_LUT_SIZE] = {
PHY_BAND_6, PHY_BAND_6,}},
};
+bool iwl_rfi_supported(struct iwl_mvm *mvm)
+{
+ /* The feature depends on a platform bugfix, so for now
+ * it's always disabled.
+ * Once platform support detection is implemented, check the
+ * FW TLV and platform support instead.
+ */
+ return false;
+}
+
int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm, struct iwl_rfi_lut_entry *rfi_table)
{
int ret;
@@ -81,7 +91,7 @@ int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm, struct iwl_rfi_lut_entry *rfi_t
.len[0] = sizeof(cmd),
};
- if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_RFIM_SUPPORT))
+ if (!iwl_rfi_supported(mvm))
return -EOPNOTSUPP;
lockdep_assert_held(&mvm->mutex);
@@ -113,7 +123,7 @@ struct iwl_rfi_freq_table_resp_cmd *iwl_rfi_get_freq_table(struct iwl_mvm *mvm)
.flags = CMD_WANT_SKB,
};
- if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_RFIM_SUPPORT))
+ if (!iwl_rfi_supported(mvm))
return ERR_PTR(-EOPNOTSUPP);
mutex_lock(&mvm->mutex);
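This change funnels two open-coded fw_has_capa() checks into a single iwl_rfi_supported() predicate, so the gating policy lives in one place when the platform check is added later. A generic sketch of the refactor shape, with invented names standing in for the driver's types:

#include <stdbool.h>
#include <stdio.h>

struct ctx { bool fw_capa; bool platform_ok; };

/* One predicate owns the gating policy; call sites never open-code it. */
static bool feature_supported(const struct ctx *c)
{
	/* Today: unconditionally off, like iwl_rfi_supported().
	 * Later: return c->fw_capa && c->platform_ok; */
	(void)c;
	return false;
}

static int send_config(const struct ctx *c)
{
	if (!feature_supported(c))
		return -95;     /* -EOPNOTSUPP */
	return 0;
}

int main(void)
{
	struct ctx c = { .fw_capa = true, .platform_ok = false };
	printf("%d\n", send_config(&c));        /* -95 until the gate opens */
	return 0;
}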
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index a4c1e3bf4ff1..9a20468345e4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -2691,6 +2691,8 @@ static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta,
return;
lq_sta = mvm_sta;
+
+ spin_lock_bh(&lq_sta->pers.lock);
iwl_mvm_hwrate_to_tx_rate_v1(lq_sta->last_rate_n_flags,
info->band, &info->control.rates[0]);
info->control.rates[0].count = 1;
@@ -2705,6 +2707,7 @@ static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta,
iwl_mvm_hwrate_to_tx_rate_v1(last_ucode_rate, info->band,
&txrc->reported_rate);
}
+ spin_unlock_bh(&lq_sta->pers.lock);
}
static void *rs_drv_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
@@ -3261,11 +3264,11 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
/* If it's locked we are in the middle of the init flow;
* just wait for the next tx status to update the lq_sta data
*/
- if (!spin_trylock(&mvmsta->deflink.lq_sta.rs_drv.pers.lock))
+ if (!spin_trylock_bh(&mvmsta->deflink.lq_sta.rs_drv.pers.lock))
return;
__iwl_mvm_rs_tx_status(mvm, sta, tid, info, ndp);
- spin_unlock(&mvmsta->deflink.lq_sta.rs_drv.pers.lock);
+ spin_unlock_bh(&mvmsta->deflink.lq_sta.rs_drv.pers.lock);
}
#ifdef CONFIG_MAC80211_DEBUGFS
@@ -4114,9 +4117,9 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm,
} else {
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- spin_lock(&mvmsta->deflink.lq_sta.rs_drv.pers.lock);
+ spin_lock_bh(&mvmsta->deflink.lq_sta.rs_drv.pers.lock);
rs_drv_rate_init(mvm, sta, band);
- spin_unlock(&mvmsta->deflink.lq_sta.rs_drv.pers.lock);
+ spin_unlock_bh(&mvmsta->deflink.lq_sta.rs_drv.pers.lock);
}
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index e1d02c260e69..6226e4e54a51 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -691,6 +691,11 @@ void iwl_mvm_reorder_timer_expired(struct timer_list *t)
rcu_read_lock();
sta = rcu_dereference(buf->mvm->fw_id_to_mac_id[sta_id]);
+ if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
+ rcu_read_unlock();
+ goto out;
+ }
+
mvmsta = iwl_mvm_sta_from_mac80211(sta);
/* SN is set to the last expired frame + 1 */
@@ -712,6 +717,8 @@ void iwl_mvm_reorder_timer_expired(struct timer_list *t)
entries[index].e.reorder_time +
1 + RX_REORDER_BUF_TIMEOUT_MQ);
}
+
+out:
spin_unlock(&buf->lock);
}
@@ -2512,7 +2519,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
/* Unblock BCAST / MCAST station */
iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false);
- cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork);
+ cancel_delayed_work(&mvm->cs_tx_unblock_dwork);
}
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 5469d634e289..05a54a69c135 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -281,7 +281,7 @@ static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
* A-MPDU and hence the timer continues to run. Then, the
* timer expires and sta is NULL.
*/
- if (!sta)
+ if (IS_ERR_OR_NULL(sta))
goto unlock;
mvm_sta = iwl_mvm_sta_from_mac80211(sta);
@@ -2089,9 +2089,6 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
- if (iwl_mvm_has_new_rx_api(mvm))
- kfree(mvm_sta->dup_data);
-
ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
if (ret)
return ret;
@@ -3785,6 +3782,9 @@ static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
u8 sta_id = mvmvif->deflink.ap_sta_id;
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
lockdep_is_held(&mvm->mutex));
+ if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
+ return NULL;
+
return sta->addr;
}
@@ -3822,6 +3822,11 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
if (keyconf->cipher == WLAN_CIPHER_SUITE_TKIP) {
addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
+ if (!addr) {
+ IWL_ERR(mvm, "Failed to find mac address\n");
+ return -EINVAL;
+ }
+
/* get phase 1 key from mac80211 */
ieee80211_get_key_rx_seq(keyconf, 0, &seq);
ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 10d7178f1071..00719e130438 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -1875,7 +1875,7 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
- if (WARN_ON_ONCE(!sta || !sta->wme)) {
+ if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta) || !sta->wme)) {
rcu_read_unlock();
return;
}
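Several hunks in this series replace plain NULL checks on fw_id_to_mac_id[] entries with IS_ERR_OR_NULL(), because those slots can also hold an ERR_PTR while a station is being unbound. The kernel encodes small negative errnos in the top of the pointer range; a self-contained re-implementation of that encoding shows why `!sta` alone is insufficient:

#include <stdio.h>

#define MAX_ERRNO	4095

/* Userspace re-implementation of the kernel's err-pointer helpers. */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}
static inline int IS_ERR_OR_NULL(const void *ptr)
{
	return !ptr || IS_ERR(ptr);
}

int main(void)
{
	void *sta = ERR_PTR(-22);	/* slot parked with an errno during unbind */

	/* A plain NULL test lets the bogus pointer through... */
	printf("!sta           -> %d\n", !sta);			/* 0: check passes! */
	/* ...while IS_ERR_OR_NULL() catches both cases. */
	printf("IS_ERR_OR_NULL -> %d\n", IS_ERR_OR_NULL(sta));	/* 1 */
	printf("encoded errno  -> %ld\n", PTR_ERR(sta));	/* -22 */
	return 0;
}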
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index dba112394838..79115eb1c285 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -548,6 +548,8 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_DEV_INFO(0x54F0, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name),
IWL_DEV_INFO(0x7A70, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name),
IWL_DEV_INFO(0x7A70, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name),
+ IWL_DEV_INFO(0x7AF0, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name),
+ IWL_DEV_INFO(0x7AF0, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name),
IWL_DEV_INFO(0x271C, 0x0214, iwl9260_2ac_cfg, iwl9260_1_name),
IWL_DEV_INFO(0x7E40, 0x1691, iwl_cfg_ma_a0_gf4_a0, iwl_ax411_killer_1690s_name),
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
index da1d17b73a25..64002484ccad 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
@@ -914,7 +914,10 @@ void mt7615_mac_sta_poll(struct mt7615_dev *dev)
msta = list_first_entry(&sta_poll_list, struct mt7615_sta,
poll_list);
+
+ spin_lock_bh(&dev->sta_poll_lock);
list_del_init(&msta->poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
addr = mt7615_mac_wtbl_addr(dev, msta->wcid.idx) + 19 * 4;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h
index a5ec0f631385..fabf637bdf7f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h
@@ -173,7 +173,7 @@ enum {
#define MT_TXS5_MPDU_TX_CNT GENMASK(31, 23)
#define MT_TXS6_MPDU_FAIL_CNT GENMASK(31, 23)
-
+#define MT_TXS7_MPDU_RETRY_BYTE GENMASK(22, 0)
#define MT_TXS7_MPDU_RETRY_CNT GENMASK(31, 23)
/* RXD DW0 */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
index ee0fbfcd07d6..d39a3cc5e381 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
@@ -608,7 +608,8 @@ bool mt76_connac2_mac_fill_txs(struct mt76_dev *dev, struct mt76_wcid *wcid,
/* PPDU based reporting */
if (FIELD_GET(MT_TXS0_TXS_FORMAT, txs) > 1) {
stats->tx_bytes +=
- le32_get_bits(txs_data[5], MT_TXS5_MPDU_TX_BYTE);
+ le32_get_bits(txs_data[5], MT_TXS5_MPDU_TX_BYTE) -
+ le32_get_bits(txs_data[7], MT_TXS7_MPDU_RETRY_BYTE);
stats->tx_packets +=
le32_get_bits(txs_data[5], MT_TXS5_MPDU_TX_CNT);
stats->tx_failed +=
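The TXS fix above stops counting retried bytes as goodput: the PPDU report in word 5 includes retransmissions, so the new MT_TXS7_MPDU_RETRY_BYTE field (bits 22:0 of word 7) is subtracted out. A small sketch of the field extraction and the corrected accounting; the sample register values are made up:

#include <stdint.h>
#include <stdio.h>

/* Userspace equivalents of the kernel's GENMASK()/FIELD_GET() for u32. */
#define GENMASK32(h, l)  (((~0u) >> (31 - (h))) & ((~0u) << (l)))
#define FIELD_GET32(mask, val)  (((val) & (mask)) / ((mask) & -(mask)))

#define TXS5_MPDU_TX_BYTE     GENMASK32(22, 0)   /* total bytes, incl. retries */
#define TXS7_MPDU_RETRY_BYTE  GENMASK32(22, 0)   /* bytes spent on retries */

int main(void)
{
	uint32_t txs5 = 15000;   /* pretend hardware reported 15000 bytes */
	uint32_t txs7 = 3000;    /* ...of which 3000 were retransmissions */

	uint32_t tx_bytes = FIELD_GET32(TXS5_MPDU_TX_BYTE, txs5) -
			    FIELD_GET32(TXS7_MPDU_RETRY_BYTE, txs7);

	printf("goodput bytes: %u\n", tx_bytes);   /* 12000 */
	return 0;
}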
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
index 130eb7b4fd91..9b0f6053e0fa 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
@@ -1004,10 +1004,10 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = info->control.vif;
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
u8 band_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
+ struct mt7996_vif *mvif;
u16 tx_count = 15;
u32 val;
bool beacon = !!(changed & (BSS_CHANGED_BEACON |
@@ -1015,7 +1015,8 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
BSS_CHANGED_FILS_DISCOVERY));
- if (vif) {
+ mvif = vif ? (struct mt7996_vif *)vif->drv_priv : NULL;
+ if (mvif) {
omac_idx = mvif->mt76.omac_idx;
wmm_idx = mvif->mt76.wmm_idx;
band_idx = mvif->mt76.band_idx;
@@ -1081,14 +1082,18 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
bool mcast = ieee80211_is_data(hdr->frame_control) &&
is_multicast_ether_addr(hdr->addr1);
- u8 idx = mvif->basic_rates_idx;
+ u8 idx = MT7996_BASIC_RATES_TBL;
- if (mcast && mvif->mcast_rates_idx)
- idx = mvif->mcast_rates_idx;
- else if (beacon && mvif->beacon_rates_idx)
- idx = mvif->beacon_rates_idx;
+ if (mvif) {
+ if (mcast && mvif->mcast_rates_idx)
+ idx = mvif->mcast_rates_idx;
+ else if (beacon && mvif->beacon_rates_idx)
+ idx = mvif->beacon_rates_idx;
+ else
+ idx = mvif->basic_rates_idx;
+ }
- txwi[6] |= FIELD_PREP(MT_TXD6_TX_RATE, idx);
+ txwi[6] |= cpu_to_le32(FIELD_PREP(MT_TXD6_TX_RATE, idx));
txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
}
}
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
index 8eafbf1cee71..808c1c895113 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
@@ -1803,6 +1803,7 @@ struct rtl8xxxu_priv {
u32 rege9c;
u32 regeb4;
u32 regebc;
+ u32 regrcr;
int next_mbox;
int nr_out_eps;
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index fd8c8c6d53d6..831639d73657 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -4171,6 +4171,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
RCR_ACCEPT_MGMT_FRAME | RCR_HTC_LOC_CTRL |
RCR_APPEND_PHYSTAT | RCR_APPEND_ICV | RCR_APPEND_MIC;
rtl8xxxu_write32(priv, REG_RCR, val32);
+ priv->regrcr = val32;
if (fops->init_reg_rxfltmap) {
/* Accept all data frames */
@@ -6501,7 +6502,7 @@ static void rtl8xxxu_configure_filter(struct ieee80211_hw *hw,
unsigned int *total_flags, u64 multicast)
{
struct rtl8xxxu_priv *priv = hw->priv;
- u32 rcr = rtl8xxxu_read32(priv, REG_RCR);
+ u32 rcr = priv->regrcr;
dev_dbg(&priv->udev->dev, "%s: changed_flags %08x, total_flags %08x\n",
__func__, changed_flags, *total_flags);
@@ -6547,6 +6548,7 @@ static void rtl8xxxu_configure_filter(struct ieee80211_hw *hw,
*/
rtl8xxxu_write32(priv, REG_RCR, rcr);
+ priv->regrcr = rcr;
*total_flags &= (FIF_ALLMULTI | FIF_FCSFAIL | FIF_BCN_PRBRESP_PROMISC |
FIF_CONTROL | FIF_OTHER_BSS | FIF_PSPOLL |
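The rtl8xxxu change replaces a read-modify-write of REG_RCR with a driver-side shadow copy: the last value written is cached in priv->regrcr and used as the starting point next time, avoiding a register read over USB, which is slow and on some chips unreliable. A stub-I/O sketch of the shadow-register pattern; the register bit and its name are invented for illustration:

#include <stdint.h>
#include <stdio.h>

struct priv {
	uint32_t regrcr;        /* shadow of the last value written to RCR */
};

/* Stand-in for the slow bus write; a real driver would touch hardware. */
static void hw_write32(uint32_t val)
{
	printf("bus write: 0x%08x\n", val);
}

#define RCR_ACCEPT_MCAST  0x00000002u   /* illustrative bit */

static void configure_filter(struct priv *priv, int allmulti)
{
	uint32_t rcr = priv->regrcr;            /* start from the shadow... */

	if (allmulti)
		rcr |= RCR_ACCEPT_MCAST;
	else
		rcr &= ~RCR_ACCEPT_MCAST;

	hw_write32(rcr);                        /* ...write back... */
	priv->regrcr = rcr;                     /* ...and keep it in sync */
}

int main(void)
{
	struct priv priv = { .regrcr = 0x00000001u };   /* value set at init */

	configure_filter(&priv, 1);     /* bus write: 0x00000003 */
	configure_filter(&priv, 0);     /* bus write: 0x00000001 */
	return 0;
}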
diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
index 7aa6edad0d01..144618bb94c8 100644
--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
@@ -88,15 +88,6 @@ static int rtw_ops_config(struct ieee80211_hw *hw, u32 changed)
}
}
- if (changed & IEEE80211_CONF_CHANGE_PS) {
- if (hw->conf.flags & IEEE80211_CONF_PS) {
- rtwdev->ps_enabled = true;
- } else {
- rtwdev->ps_enabled = false;
- rtw_leave_lps(rtwdev);
- }
- }
-
if (changed & IEEE80211_CONF_CHANGE_CHANNEL)
rtw_set_channel(rtwdev);
@@ -213,6 +204,7 @@ static int rtw_ops_add_interface(struct ieee80211_hw *hw,
config |= PORT_SET_BCN_CTRL;
rtw_vif_port_config(rtwdev, rtwvif, config);
rtw_core_port_switch(rtwdev, vif);
+ rtw_recalc_lps(rtwdev, vif);
mutex_unlock(&rtwdev->mutex);
@@ -244,6 +236,7 @@ static void rtw_ops_remove_interface(struct ieee80211_hw *hw,
config |= PORT_SET_BCN_CTRL;
rtw_vif_port_config(rtwdev, rtwvif, config);
clear_bit(rtwvif->port, rtwdev->hw_port);
+ rtw_recalc_lps(rtwdev, NULL);
mutex_unlock(&rtwdev->mutex);
}
@@ -438,6 +431,9 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_ERP_SLOT)
rtw_conf_tx(rtwdev, rtwvif);
+ if (changed & BSS_CHANGED_PS)
+ rtw_recalc_lps(rtwdev, NULL);
+
rtw_vif_port_config(rtwdev, rtwvif, config);
mutex_unlock(&rtwdev->mutex);
@@ -918,7 +914,7 @@ static void rtw_ops_sta_rc_update(struct ieee80211_hw *hw,
struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
if (changed & IEEE80211_RC_BW_CHANGED)
- rtw_update_sta_info(rtwdev, si, true);
+ ieee80211_queue_work(rtwdev->hw, &si->rc_work);
}
const struct ieee80211_ops rtw_ops = {
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index 5bf6b4581557..9447a3aae3b5 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -271,8 +271,8 @@ static void rtw_watch_dog_work(struct work_struct *work)
* more than two stations associated to the AP, then we can not enter
* lps, because fw does not handle the overlapped beacon interval
*
- * mac80211 should iterate vifs and determine if driver can enter
- * ps by passing IEEE80211_CONF_PS to us, all we need to do is to
+ * rtw_recalc_lps() iterates vifs and determines if the driver can enter
+ * ps by vif->type and vif->cfg.ps; all we need to do here is to
* get that vif and check if device is having traffic more than the
* threshold.
*/
@@ -319,6 +319,17 @@ static u8 rtw_acquire_macid(struct rtw_dev *rtwdev)
return mac_id;
}
+static void rtw_sta_rc_work(struct work_struct *work)
+{
+ struct rtw_sta_info *si = container_of(work, struct rtw_sta_info,
+ rc_work);
+ struct rtw_dev *rtwdev = si->rtwdev;
+
+ mutex_lock(&rtwdev->mutex);
+ rtw_update_sta_info(rtwdev, si, true);
+ mutex_unlock(&rtwdev->mutex);
+}
+
int rtw_sta_add(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
struct ieee80211_vif *vif)
{
@@ -329,12 +340,14 @@ int rtw_sta_add(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
if (si->mac_id >= RTW_MAX_MAC_ID_NUM)
return -ENOSPC;
+ si->rtwdev = rtwdev;
si->sta = sta;
si->vif = vif;
si->init_ra_lv = 1;
ewma_rssi_init(&si->avg_rssi);
for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
rtw_txq_init(rtwdev, sta->txq[i]);
+ INIT_WORK(&si->rc_work, rtw_sta_rc_work);
rtw_update_sta_info(rtwdev, si, true);
rtw_fw_media_status_report(rtwdev, si->mac_id, true);
@@ -353,6 +366,8 @@ void rtw_sta_remove(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
int i;
+ cancel_work_sync(&si->rc_work);
+
rtw_release_macid(rtwdev, si->mac_id);
if (fw_exist)
rtw_fw_media_status_report(rtwdev, si->mac_id, false);
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index a563285e90ed..9e841f6991a9 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -743,6 +743,7 @@ struct rtw_txq {
DECLARE_EWMA(rssi, 10, 16);
struct rtw_sta_info {
+ struct rtw_dev *rtwdev;
struct ieee80211_sta *sta;
struct ieee80211_vif *vif;
@@ -767,6 +768,8 @@ struct rtw_sta_info {
bool use_cfg_mask;
struct cfg80211_bitrate_mask *mask;
+
+ struct work_struct rc_work;
};
enum rtw_bfee_role {
diff --git a/drivers/net/wireless/realtek/rtw88/ps.c b/drivers/net/wireless/realtek/rtw88/ps.c
index 996365575f44..53933fb38a33 100644
--- a/drivers/net/wireless/realtek/rtw88/ps.c
+++ b/drivers/net/wireless/realtek/rtw88/ps.c
@@ -299,3 +299,46 @@ void rtw_leave_lps_deep(struct rtw_dev *rtwdev)
__rtw_leave_lps_deep(rtwdev);
}
+
+struct rtw_vif_recalc_lps_iter_data {
+ struct rtw_dev *rtwdev;
+ struct ieee80211_vif *found_vif;
+ int count;
+};
+
+static void __rtw_vif_recalc_lps(struct rtw_vif_recalc_lps_iter_data *data,
+ struct ieee80211_vif *vif)
+{
+ if (data->count < 0)
+ return;
+
+ if (vif->type != NL80211_IFTYPE_STATION) {
+ data->count = -1;
+ return;
+ }
+
+ data->count++;
+ data->found_vif = vif;
+}
+
+static void rtw_vif_recalc_lps_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ __rtw_vif_recalc_lps(data, vif);
+}
+
+void rtw_recalc_lps(struct rtw_dev *rtwdev, struct ieee80211_vif *new_vif)
+{
+ struct rtw_vif_recalc_lps_iter_data data = { .rtwdev = rtwdev };
+
+ if (new_vif)
+ __rtw_vif_recalc_lps(&data, new_vif);
+ rtw_iterate_vifs(rtwdev, rtw_vif_recalc_lps_iter, &data);
+
+ if (data.count == 1 && data.found_vif->cfg.ps) {
+ rtwdev->ps_enabled = true;
+ } else {
+ rtwdev->ps_enabled = false;
+ rtw_leave_lps(rtwdev);
+ }
+}
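rtw_recalc_lps() (and the rtw89 variant later in this series) enables link power save only when exactly one vif exists, it is a station, and that station has PS enabled; any other interface type, or a second vif, forces PS off. A table-driven sketch of the same decision with stand-in types:

#include <stdbool.h>
#include <stdio.h>

enum vif_type { VIF_STATION, VIF_AP };

struct vif { enum vif_type type; bool ps; };

/* PS is allowed only for a single station vif that asked for it. */
static bool recalc_lps(const struct vif *vifs, int n)
{
	const struct vif *found = NULL;
	int count = 0;

	for (int i = 0; i < n; i++) {
		if (vifs[i].type != VIF_STATION)
			return false;   /* AP/mesh/etc.: never enter LPS */
		count++;
		found = &vifs[i];
	}
	return count == 1 && found->ps;
}

int main(void)
{
	struct vif one_sta[] = { { VIF_STATION, true } };
	struct vif two_sta[] = { { VIF_STATION, true }, { VIF_STATION, true } };
	struct vif sta_ap[]  = { { VIF_STATION, true }, { VIF_AP, false } };

	printf("%d %d %d\n",
	       recalc_lps(one_sta, 1),   /* 1: enter LPS */
	       recalc_lps(two_sta, 2),   /* 0: two vifs */
	       recalc_lps(sta_ap, 2));   /* 0: non-station present */
	return 0;
}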
diff --git a/drivers/net/wireless/realtek/rtw88/ps.h b/drivers/net/wireless/realtek/rtw88/ps.h
index c194386f6db5..5ae83d2526cf 100644
--- a/drivers/net/wireless/realtek/rtw88/ps.h
+++ b/drivers/net/wireless/realtek/rtw88/ps.h
@@ -23,4 +23,6 @@ void rtw_enter_lps(struct rtw_dev *rtwdev, u8 port_id);
void rtw_leave_lps(struct rtw_dev *rtwdev);
void rtw_leave_lps_deep(struct rtw_dev *rtwdev);
enum rtw_lps_deep_mode rtw_get_lps_deep_mode(struct rtw_dev *rtwdev);
+void rtw_recalc_lps(struct rtw_dev *rtwdev, struct ieee80211_vif *new_vif);
+
#endif
diff --git a/drivers/net/wireless/realtek/rtw88/sdio.c b/drivers/net/wireless/realtek/rtw88/sdio.c
index af0459a79899..06fce7c3adda 100644
--- a/drivers/net/wireless/realtek/rtw88/sdio.c
+++ b/drivers/net/wireless/realtek/rtw88/sdio.c
@@ -87,11 +87,6 @@ static void rtw_sdio_writew(struct rtw_dev *rtwdev, u16 val, u32 addr,
u8 buf[2];
int i;
- if (rtw_sdio_use_memcpy_io(rtwdev, addr, 2)) {
- sdio_writew(rtwsdio->sdio_func, val, addr, err_ret);
- return;
- }
-
*(__le16 *)buf = cpu_to_le16(val);
for (i = 0; i < 2; i++) {
@@ -125,9 +120,6 @@ static u16 rtw_sdio_readw(struct rtw_dev *rtwdev, u32 addr, int *err_ret)
u8 buf[2];
int i;
- if (rtw_sdio_use_memcpy_io(rtwdev, addr, 2))
- return sdio_readw(rtwsdio->sdio_func, addr, err_ret);
-
for (i = 0; i < 2; i++) {
buf[i] = sdio_readb(rtwsdio->sdio_func, addr + i, err_ret);
if (*err_ret)
diff --git a/drivers/net/wireless/realtek/rtw88/usb.h b/drivers/net/wireless/realtek/rtw88/usb.h
index 30647f0dd61c..ad1d7955c6a5 100644
--- a/drivers/net/wireless/realtek/rtw88/usb.h
+++ b/drivers/net/wireless/realtek/rtw88/usb.h
@@ -78,7 +78,7 @@ struct rtw_usb {
u8 pipe_interrupt;
u8 pipe_in;
u8 out_ep[RTW_USB_EP_MAX];
- u8 qsel_to_ep[TX_DESC_QSEL_MAX];
+ int qsel_to_ep[TX_DESC_QSEL_MAX];
u8 usb_txagg_num;
struct workqueue_struct *txwq, *rxwq;
diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
index 7fc0a26a4d73..bad864d56bd5 100644
--- a/drivers/net/wireless/realtek/rtw89/core.c
+++ b/drivers/net/wireless/realtek/rtw89/core.c
@@ -2531,9 +2531,6 @@ static void rtw89_vif_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwv
rtwvif->tdls_peer)
return;
- if (rtwdev->total_sta_assoc > 1)
- return;
-
if (rtwvif->offchan)
return;
diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c
index b8019cfc11b2..512de491a064 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.c
+++ b/drivers/net/wireless/realtek/rtw89/mac.c
@@ -1425,6 +1425,8 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
.wde_size4 = {RTW89_WDE_PG_64, 0, 4096,},
/* PCIE 64 */
.wde_size6 = {RTW89_WDE_PG_64, 512, 0,},
+ /* 8852B PCIE SCC */
+ .wde_size7 = {RTW89_WDE_PG_64, 510, 2,},
/* DLFW */
.wde_size9 = {RTW89_WDE_PG_64, 0, 1024,},
/* 8852C DLFW */
@@ -1449,6 +1451,8 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
.wde_qt4 = {0, 0, 0, 0,},
/* PCIE 64 */
.wde_qt6 = {448, 48, 0, 16,},
+ /* 8852B PCIE SCC */
+ .wde_qt7 = {446, 48, 0, 16,},
/* 8852C DLFW */
.wde_qt17 = {0, 0, 0, 0,},
/* 8852C PCIE SCC */
diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h
index a8d9847ef0b4..6ba633ccdd03 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.h
+++ b/drivers/net/wireless/realtek/rtw89/mac.h
@@ -792,6 +792,7 @@ struct rtw89_mac_size_set {
const struct rtw89_dle_size wde_size0;
const struct rtw89_dle_size wde_size4;
const struct rtw89_dle_size wde_size6;
+ const struct rtw89_dle_size wde_size7;
const struct rtw89_dle_size wde_size9;
const struct rtw89_dle_size wde_size18;
const struct rtw89_dle_size wde_size19;
@@ -804,6 +805,7 @@ struct rtw89_mac_size_set {
const struct rtw89_wde_quota wde_qt0;
const struct rtw89_wde_quota wde_qt4;
const struct rtw89_wde_quota wde_qt6;
+ const struct rtw89_wde_quota wde_qt7;
const struct rtw89_wde_quota wde_qt17;
const struct rtw89_wde_quota wde_qt18;
const struct rtw89_ple_quota ple_qt4;
diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c
index ee4588b61b8f..c42e31069035 100644
--- a/drivers/net/wireless/realtek/rtw89/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw89/mac80211.c
@@ -89,15 +89,6 @@ static int rtw89_ops_config(struct ieee80211_hw *hw, u32 changed)
!(hw->conf.flags & IEEE80211_CONF_IDLE))
rtw89_leave_ips(rtwdev);
- if (changed & IEEE80211_CONF_CHANGE_PS) {
- if (hw->conf.flags & IEEE80211_CONF_PS) {
- rtwdev->lps_enabled = true;
- } else {
- rtw89_leave_lps(rtwdev);
- rtwdev->lps_enabled = false;
- }
- }
-
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
rtw89_config_entity_chandef(rtwdev, RTW89_SUB_ENTITY_0,
&hw->conf.chandef);
@@ -168,6 +159,8 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw,
rtw89_core_txq_init(rtwdev, vif->txq);
rtw89_btc_ntfy_role_info(rtwdev, rtwvif, NULL, BTC_ROLE_START);
+
+ rtw89_recalc_lps(rtwdev);
out:
mutex_unlock(&rtwdev->mutex);
@@ -192,6 +185,7 @@ static void rtw89_ops_remove_interface(struct ieee80211_hw *hw,
rtw89_mac_remove_vif(rtwdev, rtwvif);
rtw89_core_release_bit_map(rtwdev->hw_port, rtwvif->port);
list_del_init(&rtwvif->list);
+ rtw89_recalc_lps(rtwdev);
rtw89_enter_ips_by_hwflags(rtwdev);
mutex_unlock(&rtwdev->mutex);
@@ -451,6 +445,9 @@ static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_CQM)
rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, vif, true);
+ if (changed & BSS_CHANGED_PS)
+ rtw89_recalc_lps(rtwdev);
+
mutex_unlock(&rtwdev->mutex);
}
diff --git a/drivers/net/wireless/realtek/rtw89/ps.c b/drivers/net/wireless/realtek/rtw89/ps.c
index fa94335f699a..84201ef19c17 100644
--- a/drivers/net/wireless/realtek/rtw89/ps.c
+++ b/drivers/net/wireless/realtek/rtw89/ps.c
@@ -252,3 +252,29 @@ void rtw89_process_p2p_ps(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif)
rtw89_p2p_disable_all_noa(rtwdev, vif);
rtw89_p2p_update_noa(rtwdev, vif);
}
+
+void rtw89_recalc_lps(struct rtw89_dev *rtwdev)
+{
+ struct ieee80211_vif *vif, *found_vif = NULL;
+ struct rtw89_vif *rtwvif;
+ int count = 0;
+
+ rtw89_for_each_rtwvif(rtwdev, rtwvif) {
+ vif = rtwvif_to_vif(rtwvif);
+
+ if (vif->type != NL80211_IFTYPE_STATION) {
+ count = 0;
+ break;
+ }
+
+ count++;
+ found_vif = vif;
+ }
+
+ if (count == 1 && found_vif->cfg.ps) {
+ rtwdev->lps_enabled = true;
+ } else {
+ rtw89_leave_lps(rtwdev);
+ rtwdev->lps_enabled = false;
+ }
+}
diff --git a/drivers/net/wireless/realtek/rtw89/ps.h b/drivers/net/wireless/realtek/rtw89/ps.h
index 73c008db0426..4c18f49204b2 100644
--- a/drivers/net/wireless/realtek/rtw89/ps.h
+++ b/drivers/net/wireless/realtek/rtw89/ps.h
@@ -15,6 +15,7 @@ void rtw89_enter_ips(struct rtw89_dev *rtwdev);
void rtw89_leave_ips(struct rtw89_dev *rtwdev);
void rtw89_set_coex_ctrl_lps(struct rtw89_dev *rtwdev, bool btc_ctrl);
void rtw89_process_p2p_ps(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif);
+void rtw89_recalc_lps(struct rtw89_dev *rtwdev);
static inline void rtw89_leave_ips_by_hwflags(struct rtw89_dev *rtwdev)
{
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.c b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
index eaa2ea0586bc..6da1b603a9a9 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
@@ -18,25 +18,25 @@
RTW8852B_FW_BASENAME "-" __stringify(RTW8852B_FW_FORMAT_MAX) ".bin"
static const struct rtw89_hfc_ch_cfg rtw8852b_hfc_chcfg_pcie[] = {
- {5, 343, grp_0}, /* ACH 0 */
- {5, 343, grp_0}, /* ACH 1 */
- {5, 343, grp_0}, /* ACH 2 */
- {5, 343, grp_0}, /* ACH 3 */
+ {5, 341, grp_0}, /* ACH 0 */
+ {5, 341, grp_0}, /* ACH 1 */
+ {4, 342, grp_0}, /* ACH 2 */
+ {4, 342, grp_0}, /* ACH 3 */
{0, 0, grp_0}, /* ACH 4 */
{0, 0, grp_0}, /* ACH 5 */
{0, 0, grp_0}, /* ACH 6 */
{0, 0, grp_0}, /* ACH 7 */
- {4, 344, grp_0}, /* B0MGQ */
- {4, 344, grp_0}, /* B0HIQ */
+ {4, 342, grp_0}, /* B0MGQ */
+ {4, 342, grp_0}, /* B0HIQ */
{0, 0, grp_0}, /* B1MGQ */
{0, 0, grp_0}, /* B1HIQ */
{40, 0, 0} /* FWCMDQ */
};
static const struct rtw89_hfc_pub_cfg rtw8852b_hfc_pubcfg_pcie = {
- 448, /* Group 0 */
+ 446, /* Group 0 */
0, /* Group 1 */
- 448, /* Public Max */
+ 446, /* Public Max */
0 /* WP threshold */
};
@@ -49,13 +49,13 @@ static const struct rtw89_hfc_param_ini rtw8852b_hfc_param_ini_pcie[] = {
};
static const struct rtw89_dle_mem rtw8852b_dle_mem_pcie[] = {
- [RTW89_QTA_SCC] = {RTW89_QTA_SCC, &rtw89_mac_size.wde_size6,
- &rtw89_mac_size.ple_size6, &rtw89_mac_size.wde_qt6,
- &rtw89_mac_size.wde_qt6, &rtw89_mac_size.ple_qt18,
+ [RTW89_QTA_SCC] = {RTW89_QTA_SCC, &rtw89_mac_size.wde_size7,
+ &rtw89_mac_size.ple_size6, &rtw89_mac_size.wde_qt7,
+ &rtw89_mac_size.wde_qt7, &rtw89_mac_size.ple_qt18,
&rtw89_mac_size.ple_qt58},
- [RTW89_QTA_WOW] = {RTW89_QTA_WOW, &rtw89_mac_size.wde_size6,
- &rtw89_mac_size.ple_size6, &rtw89_mac_size.wde_qt6,
- &rtw89_mac_size.wde_qt6, &rtw89_mac_size.ple_qt18,
+ [RTW89_QTA_WOW] = {RTW89_QTA_WOW, &rtw89_mac_size.wde_size7,
+ &rtw89_mac_size.ple_size6, &rtw89_mac_size.wde_qt7,
+ &rtw89_mac_size.wde_qt7, &rtw89_mac_size.ple_qt18,
&rtw89_mac_size.ple_qt_52b_wow},
[RTW89_QTA_DLFW] = {RTW89_QTA_DLFW, &rtw89_mac_size.wde_size9,
&rtw89_mac_size.ple_size8, &rtw89_mac_size.wde_qt4,
diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c
index 9a8faaf4c6b6..89c7a1420381 100644
--- a/drivers/net/wireless/virtual/mac80211_hwsim.c
+++ b/drivers/net/wireless/virtual/mac80211_hwsim.c
@@ -5964,10 +5964,11 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
ret = -ENOMEM;
goto out_free;
}
+ param.pmsr_capa = pmsr_capa;
+
ret = parse_pmsr_capa(info->attrs[HWSIM_ATTR_PMSR_SUPPORT], pmsr_capa, info);
if (ret)
goto out_free;
- param.pmsr_capa = pmsr_capa;
}
ret = mac80211_hwsim_new_radio(info, &param);
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.c b/drivers/net/wwan/iosm/iosm_ipc_imem.c
index c066b0040a3f..829515a601b3 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_imem.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem.c
@@ -565,24 +565,32 @@ static void ipc_imem_run_state_worker(struct work_struct *instance)
struct ipc_mux_config mux_cfg;
struct iosm_imem *ipc_imem;
u8 ctrl_chl_idx = 0;
+ int ret;
ipc_imem = container_of(instance, struct iosm_imem, run_state_worker);
if (ipc_imem->phase != IPC_P_RUN) {
dev_err(ipc_imem->dev,
"Modem link down. Exit run state worker.");
- return;
+ goto err_out;
}
if (test_and_clear_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag))
ipc_devlink_deinit(ipc_imem->ipc_devlink);
- if (!ipc_imem_setup_cp_mux_cap_init(ipc_imem, &mux_cfg))
- ipc_imem->mux = ipc_mux_init(&mux_cfg, ipc_imem);
+ ret = ipc_imem_setup_cp_mux_cap_init(ipc_imem, &mux_cfg);
+ if (ret < 0)
+ goto err_out;
+
+ ipc_imem->mux = ipc_mux_init(&mux_cfg, ipc_imem);
+ if (!ipc_imem->mux)
+ goto err_out;
+
+ ret = ipc_imem_wwan_channel_init(ipc_imem, mux_cfg.protocol);
+ if (ret < 0)
+ goto err_ipc_mux_deinit;
- ipc_imem_wwan_channel_init(ipc_imem, mux_cfg.protocol);
- if (ipc_imem->mux)
- ipc_imem->mux->wwan = ipc_imem->wwan;
+ ipc_imem->mux->wwan = ipc_imem->wwan;
while (ctrl_chl_idx < IPC_MEM_MAX_CHANNELS) {
if (!ipc_chnl_cfg_get(&chnl_cfg_port, ctrl_chl_idx)) {
@@ -622,6 +630,13 @@ static void ipc_imem_run_state_worker(struct work_struct *instance)
/* Complete all memory stores after setting bit */
smp_mb__after_atomic();
+
+ return;
+
+err_ipc_mux_deinit:
+ ipc_mux_deinit(ipc_imem->mux);
+err_out:
+ ipc_uevent_send(ipc_imem->dev, UEVENT_CD_READY_LINK_DOWN);
}
static void ipc_imem_handle_irq(struct iosm_imem *ipc_imem, int irq)
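The reworked ipc_imem_run_state_worker() converts the optimistic "init and hope" sequence into the kernel's standard goto-unwind shape: each init step checks its result, and failures jump to labels that undo only what already succeeded, with both paths ending in the link-down uevent. A compact sketch of that control flow (the step functions are placeholders):

#include <stdio.h>

static int step_a(void) { return 0; }   /* pretend: mux capability init */
static int step_b(void) { return 0; }   /* pretend: mux allocation */
static int step_c(void) { return -1; }  /* pretend: channel init fails */
static void undo_b(void) { puts("undo b"); }
static void report_link_down(void) { puts("link down event"); }

static void run_state_worker(void)
{
	if (step_a() < 0)
		goto err_out;           /* nothing to unwind yet */
	if (step_b() < 0)
		goto err_out;
	if (step_c() < 0)
		goto err_undo_b;        /* b succeeded, so undo it */

	puts("worker ran to completion");
	return;

err_undo_b:
	undo_b();
err_out:
	report_link_down();             /* both error paths end here */
}

int main(void)
{
	run_state_worker();     /* prints: undo b / link down event */
	return 0;
}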
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
index 66b90cc4c346..109cf8930488 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
@@ -77,8 +77,8 @@ out:
}
/* Initialize wwan channel */
-void ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
- enum ipc_mux_protocol mux_type)
+int ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
+ enum ipc_mux_protocol mux_type)
{
struct ipc_chnl_cfg chnl_cfg = { 0 };
@@ -87,7 +87,7 @@ void ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
/* If modem version is invalid (0xffffffff), do not initialize WWAN. */
if (ipc_imem->cp_version == -1) {
dev_err(ipc_imem->dev, "invalid CP version");
- return;
+ return -EIO;
}
ipc_chnl_cfg_get(&chnl_cfg, ipc_imem->nr_of_channels);
@@ -104,9 +104,13 @@ void ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
/* WWAN registration. */
ipc_imem->wwan = ipc_wwan_init(ipc_imem, ipc_imem->dev);
- if (!ipc_imem->wwan)
+ if (!ipc_imem->wwan) {
dev_err(ipc_imem->dev,
"failed to register the ipc_wwan interfaces");
+ return -ENOMEM;
+ }
+
+ return 0;
}
/* Map SKB to DMA for transfer */
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h
index f8afb217d9e2..026c5bd0f999 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h
@@ -91,9 +91,11 @@ int ipc_imem_sys_wwan_transmit(struct iosm_imem *ipc_imem, int if_id,
* MUX.
* @ipc_imem: Pointer to iosm_imem struct.
* @mux_type: Type of mux protocol.
+ *
+ * Return: 0 on success or a negative error value on failure
*/
-void ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
- enum ipc_mux_protocol mux_type);
+int ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
+ enum ipc_mux_protocol mux_type);
/**
* ipc_imem_sys_devlink_open - Open a Flash/CD Channel link to CP
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c
index d6b166fc5c0e..bff46f7ca59f 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c
@@ -626,14 +626,12 @@ static void mux_dl_adb_decode(struct iosm_mux *ipc_mux,
if (adth->signature != cpu_to_le32(IOSM_AGGR_MUX_SIG_ADTH))
goto adb_decode_err;
- if (le16_to_cpu(adth->table_length) < (sizeof(struct mux_adth) -
- sizeof(struct mux_adth_dg)))
+ if (le16_to_cpu(adth->table_length) < sizeof(struct mux_adth))
goto adb_decode_err;
/* Calculate the number of datagrams. */
nr_of_dg = (le16_to_cpu(adth->table_length) -
- sizeof(struct mux_adth) +
- sizeof(struct mux_adth_dg)) /
+ sizeof(struct mux_adth)) /
sizeof(struct mux_adth_dg);
/* Is the datagram table empty? */
@@ -649,7 +647,7 @@ static void mux_dl_adb_decode(struct iosm_mux *ipc_mux,
}
/* New aggregated datagram table. */
- dg = &adth->dg;
+ dg = adth->dg;
if (mux_dl_process_dg(ipc_mux, adbh, dg, skb, if_id,
nr_of_dg) < 0)
goto adb_decode_err;
@@ -849,7 +847,7 @@ static void ipc_mux_ul_encode_adth(struct iosm_mux *ipc_mux,
adth->if_id = i;
adth->table_length = cpu_to_le16(adth_dg_size);
adth_dg_size -= offsetof(struct mux_adth, dg);
- memcpy(&adth->dg, ul_adb->dg[i], adth_dg_size);
+ memcpy(adth->dg, ul_adb->dg[i], adth_dg_size);
ul_adb->if_cnt++;
}
@@ -1426,14 +1424,13 @@ static int ipc_mux_get_payload_from_adb(struct iosm_mux *ipc_mux,
if (adth->signature == cpu_to_le32(IOSM_AGGR_MUX_SIG_ADTH)) {
nr_of_dg = (le16_to_cpu(adth->table_length) -
- sizeof(struct mux_adth) +
- sizeof(struct mux_adth_dg)) /
+ sizeof(struct mux_adth)) /
sizeof(struct mux_adth_dg);
if (nr_of_dg <= 0)
return payload_size;
- dg = &adth->dg;
+ dg = adth->dg;
for (i = 0; i < nr_of_dg; i++, dg++) {
if (le32_to_cpu(dg->datagram_index) <
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.h b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.h
index 5d4e3b89542c..f8df88f816c4 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.h
+++ b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.h
@@ -161,7 +161,7 @@ struct mux_adth {
u8 opt_ipv4v6;
__le32 next_table_index;
__le32 reserved2;
- struct mux_adth_dg dg;
+ struct mux_adth_dg dg[];
};
/**
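Turning mux_adth's trailing `struct mux_adth_dg dg` into a flexible array `dg[]` removes the phantom element from sizeof(struct mux_adth), which is why the datagram-count arithmetic above loses its `+ sizeof(struct mux_adth_dg)` correction. A sketch showing both layouts; the structs are trimmed to the fields that matter for the arithmetic:

#include <stdint.h>
#include <stdio.h>

struct dg { uint32_t index; uint32_t length; };

/* Old shape: one embedded element, counted by sizeof(). */
struct adth_old { uint16_t table_length; uint16_t pad; struct dg dg; };

/* New shape: flexible array, sizeof() covers the header only. */
struct adth_new { uint16_t table_length; uint16_t pad; struct dg dg[]; };

int main(void)
{
	uint16_t table_length = 4 + 3 * sizeof(struct dg);  /* header + 3 dgs */

	/* Old math had to add sizeof(struct dg) back to cancel the
	 * embedded element; new math is a plain division. */
	unsigned old_n = (table_length - sizeof(struct adth_old) +
			  sizeof(struct dg)) / sizeof(struct dg);
	unsigned new_n = (table_length - sizeof(struct adth_new)) /
			 sizeof(struct dg);

	printf("old=%u new=%u\n", old_n, new_n);        /* old=3 new=3 */
	return 0;
}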
diff --git a/drivers/net/wwan/t7xx/t7xx_pci.c b/drivers/net/wwan/t7xx/t7xx_pci.c
index 226fc1703e90..91256e005b84 100644
--- a/drivers/net/wwan/t7xx/t7xx_pci.c
+++ b/drivers/net/wwan/t7xx/t7xx_pci.c
@@ -45,6 +45,7 @@
#define T7XX_PCI_IREG_BASE 0
#define T7XX_PCI_EREG_BASE 2
+#define T7XX_INIT_TIMEOUT 20
#define PM_SLEEP_DIS_TIMEOUT_MS 20
#define PM_ACK_TIMEOUT_MS 1500
#define PM_AUTOSUSPEND_MS 20000
@@ -96,6 +97,7 @@ static int t7xx_pci_pm_init(struct t7xx_pci_dev *t7xx_dev)
spin_lock_init(&t7xx_dev->md_pm_lock);
init_completion(&t7xx_dev->sleep_lock_acquire);
init_completion(&t7xx_dev->pm_sr_ack);
+ init_completion(&t7xx_dev->init_done);
atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT);
device_init_wakeup(&pdev->dev, true);
@@ -124,6 +126,7 @@ void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev)
pm_runtime_mark_last_busy(&t7xx_dev->pdev->dev);
pm_runtime_allow(&t7xx_dev->pdev->dev);
pm_runtime_put_noidle(&t7xx_dev->pdev->dev);
+ complete_all(&t7xx_dev->init_done);
}
static int t7xx_pci_pm_reinit(struct t7xx_pci_dev *t7xx_dev)
@@ -529,6 +532,20 @@ static void t7xx_pci_shutdown(struct pci_dev *pdev)
__t7xx_pci_pm_suspend(pdev);
}
+static int t7xx_pci_pm_prepare(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct t7xx_pci_dev *t7xx_dev;
+
+ t7xx_dev = pci_get_drvdata(pdev);
+ if (!wait_for_completion_timeout(&t7xx_dev->init_done, T7XX_INIT_TIMEOUT * HZ)) {
+ dev_warn(dev, "Not ready for system sleep.\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
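t7xx_pci_pm_prepare() blocks entry to system sleep until late init has signalled init_done (or 20 seconds pass), so suspend cannot race with a modem that is still booting; wait_for_completion_timeout() returns 0 on timeout, hence the negated check. Since the kernel completion API has no direct libc equivalent, here is a userspace analogue of the gate built on a condition variable (compile with -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool init_done;

/* Analogue of complete_all(&init_done), called when late init finishes. */
static void signal_init_done(void)
{
	pthread_mutex_lock(&lock);
	init_done = true;
	pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);
}

/* Analogue of the .prepare hook: fail suspend if init never finished. */
static int pm_prepare(int timeout_sec)
{
	struct timespec ts;
	int ret = 0;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += timeout_sec;

	pthread_mutex_lock(&lock);
	while (!init_done && ret == 0)
		ret = pthread_cond_timedwait(&cond, &lock, &ts);
	pthread_mutex_unlock(&lock);

	return init_done ? 0 : -110;    /* -ETIMEDOUT */
}

int main(void)
{
	signal_init_done();                         /* init completed first */
	printf("prepare -> %d\n", pm_prepare(1));   /* prepare -> 0 */
	return 0;
}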
static int t7xx_pci_pm_suspend(struct device *dev)
{
return __t7xx_pci_pm_suspend(to_pci_dev(dev));
@@ -555,6 +572,7 @@ static int t7xx_pci_pm_runtime_resume(struct device *dev)
}
static const struct dev_pm_ops t7xx_pci_pm_ops = {
+ .prepare = t7xx_pci_pm_prepare,
.suspend = t7xx_pci_pm_suspend,
.resume = t7xx_pci_pm_resume,
.resume_noirq = t7xx_pci_pm_resume_noirq,
diff --git a/drivers/net/wwan/t7xx/t7xx_pci.h b/drivers/net/wwan/t7xx/t7xx_pci.h
index 112efa534eac..f08f1ab74469 100644
--- a/drivers/net/wwan/t7xx/t7xx_pci.h
+++ b/drivers/net/wwan/t7xx/t7xx_pci.h
@@ -69,6 +69,7 @@ struct t7xx_pci_dev {
struct t7xx_modem *md;
struct t7xx_ccmni_ctrl *ccmni_ctlb;
bool rgu_pci_irq_en;
+ struct completion init_done;
/* Low Power Items */
struct list_head md_pm_entities;
diff --git a/drivers/nfc/fdp/fdp.c b/drivers/nfc/fdp/fdp.c
index f12f903a9dd1..da3e2dce8e70 100644
--- a/drivers/nfc/fdp/fdp.c
+++ b/drivers/nfc/fdp/fdp.c
@@ -762,3 +762,6 @@ EXPORT_SYMBOL(fdp_nci_remove);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("NFC NCI driver for Intel Fields Peak NFC controller");
MODULE_AUTHOR("Robert Dolca <robert.dolca@intel.com>");
+
+MODULE_FIRMWARE(FDP_OTP_PATCH_NAME);
+MODULE_FIRMWARE(FDP_RAM_PATCH_NAME);
diff --git a/drivers/nfc/nfcsim.c b/drivers/nfc/nfcsim.c
index 44eeb17ae48d..a55381f80cd6 100644
--- a/drivers/nfc/nfcsim.c
+++ b/drivers/nfc/nfcsim.c
@@ -336,10 +336,6 @@ static struct dentry *nfcsim_debugfs_root;
static void nfcsim_debugfs_init(void)
{
nfcsim_debugfs_root = debugfs_create_dir("nfcsim", NULL);
-
- if (!nfcsim_debugfs_root)
- pr_err("Could not create debugfs entry\n");
-
}
static void nfcsim_debugfs_remove(void)
diff --git a/drivers/nubus/nubus.c b/drivers/nubus/nubus.c
index f70ba58dbc55..ab0f32b901c8 100644
--- a/drivers/nubus/nubus.c
+++ b/drivers/nubus/nubus.c
@@ -32,6 +32,13 @@
/* Globals */
+/* The "nubus.populate_procfs" parameter makes slot resources available in
+ * procfs. It's deprecated and disabled by default because procfs is no longer
+ * thought to be suitable for this, and some board ROMs make it too expensive.
+ */
+bool nubus_populate_procfs;
+module_param_named(populate_procfs, nubus_populate_procfs, bool, 0);
+
LIST_HEAD(nubus_func_rsrcs);
/* Meaning of "bytelanes":
@@ -572,9 +579,9 @@ nubus_get_functional_resource(struct nubus_board *board, int slot,
nubus_proc_add_rsrc(dir.procdir, &ent);
break;
default:
- /* Local/Private resources have their own
- function */
- nubus_get_private_resource(fres, dir.procdir, &ent);
+ if (nubus_populate_procfs)
+ nubus_get_private_resource(fres, dir.procdir,
+ &ent);
}
}
diff --git a/drivers/nubus/proc.c b/drivers/nubus/proc.c
index 1fd667852271..e7a347db708c 100644
--- a/drivers/nubus/proc.c
+++ b/drivers/nubus/proc.c
@@ -55,7 +55,7 @@ struct proc_dir_entry *nubus_proc_add_board(struct nubus_board *board)
{
char name[2];
- if (!proc_bus_nubus_dir)
+ if (!proc_bus_nubus_dir || !nubus_populate_procfs)
return NULL;
snprintf(name, sizeof(name), "%x", board->slot);
return proc_mkdir(name, proc_bus_nubus_dir);
@@ -72,9 +72,10 @@ struct proc_dir_entry *nubus_proc_add_rsrc_dir(struct proc_dir_entry *procdir,
char name[9];
int lanes = board->lanes;
- if (!procdir)
+ if (!procdir || !nubus_populate_procfs)
return NULL;
snprintf(name, sizeof(name), "%x", ent->type);
+ remove_proc_subtree(name, procdir);
return proc_mkdir_data(name, 0555, procdir, (void *)lanes);
}
@@ -137,6 +138,18 @@ static int nubus_proc_rsrc_show(struct seq_file *m, void *v)
return 0;
}
+static int nubus_rsrc_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, nubus_proc_rsrc_show, inode);
+}
+
+static const struct proc_ops nubus_rsrc_proc_ops = {
+ .proc_open = nubus_rsrc_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
+};
+
void nubus_proc_add_rsrc_mem(struct proc_dir_entry *procdir,
const struct nubus_dirent *ent,
unsigned int size)
@@ -144,7 +157,7 @@ void nubus_proc_add_rsrc_mem(struct proc_dir_entry *procdir,
char name[9];
struct nubus_proc_pde_data *pded;
- if (!procdir)
+ if (!procdir || !nubus_populate_procfs)
return;
snprintf(name, sizeof(name), "%x", ent->type);
@@ -152,8 +165,9 @@ void nubus_proc_add_rsrc_mem(struct proc_dir_entry *procdir,
pded = nubus_proc_alloc_pde_data(nubus_dirptr(ent), size);
else
pded = NULL;
- proc_create_single_data(name, S_IFREG | 0444, procdir,
- nubus_proc_rsrc_show, pded);
+ remove_proc_subtree(name, procdir);
+ proc_create_data(name, S_IFREG | 0444, procdir,
+ &nubus_rsrc_proc_ops, pded);
}
void nubus_proc_add_rsrc(struct proc_dir_entry *procdir,
@@ -162,13 +176,14 @@ void nubus_proc_add_rsrc(struct proc_dir_entry *procdir,
char name[9];
unsigned char *data = (unsigned char *)ent->data;
- if (!procdir)
+ if (!procdir || !nubus_populate_procfs)
return;
snprintf(name, sizeof(name), "%x", ent->type);
- proc_create_single_data(name, S_IFREG | 0444, procdir,
- nubus_proc_rsrc_show,
- nubus_proc_alloc_pde_data(data, 0));
+ remove_proc_subtree(name, procdir);
+ proc_create_data(name, S_IFREG | 0444, procdir,
+ &nubus_rsrc_proc_ops,
+ nubus_proc_alloc_pde_data(data, 0));
}
/*
diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile
index e27202d22c7d..d3fc5063e4be 100644
--- a/drivers/nvme/host/Makefile
+++ b/drivers/nvme/host/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_NVME_FC) += nvme-fc.o
obj-$(CONFIG_NVME_TCP) += nvme-tcp.o
obj-$(CONFIG_NVME_APPLE) += nvme-apple.o
-nvme-core-y += core.o ioctl.o
+nvme-core-y += core.o ioctl.o sysfs.o
nvme-core-$(CONFIG_NVME_VERBOSE_ERRORS) += constants.o
nvme-core-$(CONFIG_TRACING) += trace.o
nvme-core-$(CONFIG_NVME_MULTIPATH) += multipath.o
diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
index ea16a0aba679..daf5d144a8ea 100644
--- a/drivers/nvme/host/auth.c
+++ b/drivers/nvme/host/auth.c
@@ -30,18 +30,18 @@ struct nvme_dhchap_queue_context {
u32 s2;
u16 transaction;
u8 status;
+ u8 dhgroup_id;
u8 hash_id;
size_t hash_len;
- u8 dhgroup_id;
u8 c1[64];
u8 c2[64];
u8 response[64];
u8 *host_response;
u8 *ctrl_key;
- int ctrl_key_len;
u8 *host_key;
- int host_key_len;
u8 *sess_key;
+ int ctrl_key_len;
+ int host_key_len;
int sess_key_len;
};
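The auth-context reshuffle is a pure layout change: grouping the three `u8 *` pointers together and moving the three ints after them removes the padding holes that alternating pointer/int members create on 64-bit builds. A sketch that makes the saving visible; the structs below are reduced stand-ins for the queue context:

#include <stdint.h>
#include <stdio.h>

/* Interleaved: each 4-byte int before an 8-byte pointer leaves a hole. */
struct keys_interleaved {
	uint8_t *ctrl_key;
	int ctrl_key_len;       /* 4-byte hole follows on LP64 */
	uint8_t *host_key;
	int host_key_len;       /* another hole */
	uint8_t *sess_key;
	int sess_key_len;       /* tail padding */
};

/* Grouped: pointers first, ints packed together at the end. */
struct keys_grouped {
	uint8_t *ctrl_key;
	uint8_t *host_key;
	uint8_t *sess_key;
	int ctrl_key_len;
	int host_key_len;
	int sess_key_len;
};

int main(void)
{
	/* Typically 48 vs 40 bytes on LP64 targets. */
	printf("interleaved=%zu grouped=%zu\n",
	       sizeof(struct keys_interleaved), sizeof(struct keys_grouped));
	return 0;
}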
diff --git a/drivers/nvme/host/constants.c b/drivers/nvme/host/constants.c
index bc523ca02254..5e4f8848dce0 100644
--- a/drivers/nvme/host/constants.c
+++ b/drivers/nvme/host/constants.c
@@ -21,7 +21,7 @@ static const char * const nvme_ops[] = {
[nvme_cmd_resv_release] = "Reservation Release",
[nvme_cmd_zone_mgmt_send] = "Zone Management Send",
[nvme_cmd_zone_mgmt_recv] = "Zone Management Receive",
- [nvme_cmd_zone_append] = "Zone Management Append",
+ [nvme_cmd_zone_append] = "Zone Append",
};
static const char * const nvme_admin_ops[] = {
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index ccb6eb1282f8..fdfcf2781c85 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -237,7 +237,7 @@ int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
}
EXPORT_SYMBOL_GPL(nvme_delete_ctrl);
-static void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
+void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
{
/*
* Keep a reference until nvme_do_delete_ctrl() complete,
@@ -397,7 +397,16 @@ void nvme_complete_rq(struct request *req)
trace_nvme_complete_rq(req);
nvme_cleanup_cmd(req);
- if (ctrl->kas)
+ /*
+ * Completions of long-running commands should not be able to
+ * defer sending of periodic keep alives, since the controller
+ * may have completed processing such commands a long time ago
+ * (arbitrarily close to command submission time).
+ * req->deadline - req->timeout is the command submission time
+ * in jiffies.
+ */
+ if (ctrl->kas &&
+ req->deadline - req->timeout >= ctrl->ka_last_check_time)
ctrl->comp_seen = true;
switch (nvme_decide_disposition(req)) {
@@ -1115,7 +1124,7 @@ u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
}
EXPORT_SYMBOL_NS_GPL(nvme_passthru_start, NVME_TARGET_PASSTHRU);
-void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
+void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects,
struct nvme_command *cmd, int status)
{
if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
@@ -1132,6 +1141,8 @@ void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
nvme_queue_scan(ctrl);
flush_work(&ctrl->scan_work);
}
+ if (ns)
+ return;
switch (cmd->common.opcode) {
case nvme_admin_set_features:
@@ -1161,9 +1172,25 @@ EXPORT_SYMBOL_NS_GPL(nvme_passthru_end, NVME_TARGET_PASSTHRU);
* The host should send Keep Alive commands at half of the Keep Alive Timeout
* accounting for transport roundtrip times [..].
*/
+static unsigned long nvme_keep_alive_work_period(struct nvme_ctrl *ctrl)
+{
+ unsigned long delay = ctrl->kato * HZ / 2;
+
+ /*
+ * When using Traffic Based Keep Alive, we need to run
+ * nvme_keep_alive_work at twice the normal frequency, as one
+ * command completion can postpone sending a keep alive command
+ * by up to twice the delay between runs.
+ */
+ if (ctrl->ctratt & NVME_CTRL_ATTR_TBKAS)
+ delay /= 2;
+ return delay;
+}
+
static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
{
- queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ / 2);
+ queue_delayed_work(nvme_wq, &ctrl->ka_work,
+ nvme_keep_alive_work_period(ctrl));
}
static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
@@ -1172,6 +1199,20 @@ static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
struct nvme_ctrl *ctrl = rq->end_io_data;
unsigned long flags;
bool startka = false;
+ unsigned long rtt = jiffies - (rq->deadline - rq->timeout);
+ unsigned long delay = nvme_keep_alive_work_period(ctrl);
+
+ /*
+ * Subtract off the keepalive RTT so nvme_keep_alive_work runs
+ * at the desired frequency.
+ */
+ if (rtt <= delay) {
+ delay -= rtt;
+ } else {
+ dev_warn(ctrl->device, "long keepalive RTT (%u ms)\n",
+ jiffies_to_msecs(rtt));
+ delay = 0;
+ }
blk_mq_free_request(rq);
@@ -1182,6 +1223,7 @@ static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
return RQ_END_IO_NONE;
}
+ ctrl->ka_last_check_time = jiffies;
ctrl->comp_seen = false;
spin_lock_irqsave(&ctrl->lock, flags);
if (ctrl->state == NVME_CTRL_LIVE ||
@@ -1189,7 +1231,7 @@ static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
startka = true;
spin_unlock_irqrestore(&ctrl->lock, flags);
if (startka)
- nvme_queue_keep_alive_work(ctrl);
+ queue_delayed_work(nvme_wq, &ctrl->ka_work, delay);
return RQ_END_IO_NONE;
}
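Two intervals interact in the keep-alive rework: the base work period is kato/2, halved again to kato/4 when traffic-based keep alive (TBKAS) is active, and the end_io handler then subtracts the observed command round-trip time so reschedules don't drift. A worked example with concrete numbers; kato = 10 s and the 400 ms RTT are purely illustrative:

#include <stdio.h>

/* Mirror of the delay computation, in milliseconds instead of jiffies. */
static unsigned long work_period_ms(unsigned long kato_ms, int tbkas)
{
	unsigned long delay = kato_ms / 2;

	if (tbkas)              /* traffic-based keep alive: double frequency */
		delay /= 2;
	return delay;
}

int main(void)
{
	unsigned long kato_ms = 10000;                      /* KATO = 10 s */
	unsigned long delay = work_period_ms(kato_ms, 1);   /* 2500 ms */
	unsigned long rtt = 400;                            /* measured KA RTT */

	/* end_io: charge the RTT against the next period; clamp at zero. */
	unsigned long next = rtt <= delay ? delay - rtt : 0;

	printf("period=%lums next=%lums\n", delay, next);   /* 2500 / 2100 */
	return 0;
}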
@@ -1200,6 +1242,8 @@ static void nvme_keep_alive_work(struct work_struct *work)
bool comp_seen = ctrl->comp_seen;
struct request *rq;
+ ctrl->ka_last_check_time = jiffies;
+
if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) {
dev_dbg(ctrl->device,
"reschedule traffic based keep-alive timer\n");
@@ -1591,12 +1635,12 @@ static void nvme_ns_release(struct nvme_ns *ns)
nvme_put_ns(ns);
}
-static int nvme_open(struct block_device *bdev, fmode_t mode)
+static int nvme_open(struct gendisk *disk, blk_mode_t mode)
{
- return nvme_ns_open(bdev->bd_disk->private_data);
+ return nvme_ns_open(disk->private_data);
}
-static void nvme_release(struct gendisk *disk, fmode_t mode)
+static void nvme_release(struct gendisk *disk)
{
nvme_ns_release(disk->private_data);
}
@@ -1835,7 +1879,7 @@ static void nvme_update_disk_info(struct gendisk *disk,
struct nvme_ns *ns, struct nvme_id_ns *id)
{
sector_t capacity = nvme_lba_to_sect(ns, le64_to_cpu(id->nsze));
- unsigned short bs = 1 << ns->lba_shift;
+ u32 bs = 1U << ns->lba_shift;
u32 atomic_bs, phys_bs, io_opt = 0;
/*
@@ -2256,7 +2300,7 @@ static int nvme_report_zones(struct gendisk *disk, sector_t sector,
#define nvme_report_zones NULL
#endif /* CONFIG_BLK_DEV_ZONED */
-static const struct block_device_operations nvme_bdev_ops = {
+const struct block_device_operations nvme_bdev_ops = {
.owner = THIS_MODULE,
.ioctl = nvme_ioctl,
.compat_ioctl = blkdev_compat_ptr_ioctl,
@@ -2791,75 +2835,6 @@ static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn)
return NULL;
}
-#define SUBSYS_ATTR_RO(_name, _mode, _show) \
- struct device_attribute subsys_attr_##_name = \
- __ATTR(_name, _mode, _show, NULL)
-
-static ssize_t nvme_subsys_show_nqn(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct nvme_subsystem *subsys =
- container_of(dev, struct nvme_subsystem, dev);
-
- return sysfs_emit(buf, "%s\n", subsys->subnqn);
-}
-static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn);
-
-static ssize_t nvme_subsys_show_type(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct nvme_subsystem *subsys =
- container_of(dev, struct nvme_subsystem, dev);
-
- switch (subsys->subtype) {
- case NVME_NQN_DISC:
- return sysfs_emit(buf, "discovery\n");
- case NVME_NQN_NVME:
- return sysfs_emit(buf, "nvm\n");
- default:
- return sysfs_emit(buf, "reserved\n");
- }
-}
-static SUBSYS_ATTR_RO(subsystype, S_IRUGO, nvme_subsys_show_type);
-
-#define nvme_subsys_show_str_function(field) \
-static ssize_t subsys_##field##_show(struct device *dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- struct nvme_subsystem *subsys = \
- container_of(dev, struct nvme_subsystem, dev); \
- return sysfs_emit(buf, "%.*s\n", \
- (int)sizeof(subsys->field), subsys->field); \
-} \
-static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show);
-
-nvme_subsys_show_str_function(model);
-nvme_subsys_show_str_function(serial);
-nvme_subsys_show_str_function(firmware_rev);
-
-static struct attribute *nvme_subsys_attrs[] = {
- &subsys_attr_model.attr,
- &subsys_attr_serial.attr,
- &subsys_attr_firmware_rev.attr,
- &subsys_attr_subsysnqn.attr,
- &subsys_attr_subsystype.attr,
-#ifdef CONFIG_NVME_MULTIPATH
- &subsys_attr_iopolicy.attr,
-#endif
- NULL,
-};
-
-static const struct attribute_group nvme_subsys_attrs_group = {
- .attrs = nvme_subsys_attrs,
-};
-
-static const struct attribute_group *nvme_subsys_attrs_groups[] = {
- &nvme_subsys_attrs_group,
- NULL,
-};
-
static inline bool nvme_discovery_ctrl(struct nvme_ctrl *ctrl)
{
return ctrl->opts && ctrl->opts->discovery_nqn;
@@ -3064,7 +3039,8 @@ static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl)
ctrl->max_zeroes_sectors = 0;
if (ctrl->subsys->subtype != NVME_NQN_NVME ||
- nvme_ctrl_limited_cns(ctrl))
+ nvme_ctrl_limited_cns(ctrl) ||
+ test_bit(NVME_CTRL_SKIP_ID_CNS_CS, &ctrl->flags))
return 0;
id = kzalloc(sizeof(*id), GFP_KERNEL);
@@ -3086,6 +3062,8 @@ static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl)
ctrl->max_zeroes_sectors = nvme_mps_to_sectors(ctrl, id->wzsl);
free_data:
+ if (ret > 0)
+ set_bit(NVME_CTRL_SKIP_ID_CNS_CS, &ctrl->flags);
kfree(id);
return ret;
}
@@ -3393,583 +3371,6 @@ static const struct file_operations nvme_dev_fops = {
.uring_cmd = nvme_dev_uring_cmd,
};
-static ssize_t nvme_sysfs_reset(struct device *dev,
- struct device_attribute *attr, const char *buf,
- size_t count)
-{
- struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
- int ret;
-
- ret = nvme_reset_ctrl_sync(ctrl);
- if (ret < 0)
- return ret;
- return count;
-}
-static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);
-
-static ssize_t nvme_sysfs_rescan(struct device *dev,
- struct device_attribute *attr, const char *buf,
- size_t count)
-{
- struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
-
- nvme_queue_scan(ctrl);
- return count;
-}
-static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);
-
-static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev)
-{
- struct gendisk *disk = dev_to_disk(dev);
-
- if (disk->fops == &nvme_bdev_ops)
- return nvme_get_ns_from_dev(dev)->head;
- else
- return disk->private_data;
-}
-
-static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct nvme_ns_head *head = dev_to_ns_head(dev);
- struct nvme_ns_ids *ids = &head->ids;
- struct nvme_subsystem *subsys = head->subsys;
- int serial_len = sizeof(subsys->serial);
- int model_len = sizeof(subsys->model);
-
- if (!uuid_is_null(&ids->uuid))
- return sysfs_emit(buf, "uuid.%pU\n", &ids->uuid);
-
- if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
- return sysfs_emit(buf, "eui.%16phN\n", ids->nguid);
-
- if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
- return sysfs_emit(buf, "eui.%8phN\n", ids->eui64);
-
- while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' ||
- subsys->serial[serial_len - 1] == '\0'))
- serial_len--;
- while (model_len > 0 && (subsys->model[model_len - 1] == ' ' ||
- subsys->model[model_len - 1] == '\0'))
- model_len--;
-
- return sysfs_emit(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id,
- serial_len, subsys->serial, model_len, subsys->model,
- head->ns_id);
-}
-static DEVICE_ATTR_RO(wwid);
-
-static ssize_t nguid_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- return sysfs_emit(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid);
-}
-static DEVICE_ATTR_RO(nguid);
-
-static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;
-
- /* For backward compatibility expose the NGUID to userspace if
- * we have no UUID set
- */
- if (uuid_is_null(&ids->uuid)) {
- dev_warn_ratelimited(dev,
- "No UUID available providing old NGUID\n");
- return sysfs_emit(buf, "%pU\n", ids->nguid);
- }
- return sysfs_emit(buf, "%pU\n", &ids->uuid);
-}
-static DEVICE_ATTR_RO(uuid);
-
-static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- return sysfs_emit(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64);
-}
-static DEVICE_ATTR_RO(eui);
-
-static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- return sysfs_emit(buf, "%d\n", dev_to_ns_head(dev)->ns_id);
-}
-static DEVICE_ATTR_RO(nsid);
-
-static struct attribute *nvme_ns_id_attrs[] = {
- &dev_attr_wwid.attr,
- &dev_attr_uuid.attr,
- &dev_attr_nguid.attr,
- &dev_attr_eui.attr,
- &dev_attr_nsid.attr,
-#ifdef CONFIG_NVME_MULTIPATH
- &dev_attr_ana_grpid.attr,
- &dev_attr_ana_state.attr,
-#endif
- NULL,
-};
-
-static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj,
- struct attribute *a, int n)
-{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;
-
- if (a == &dev_attr_uuid.attr) {
- if (uuid_is_null(&ids->uuid) &&
- !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
- return 0;
- }
- if (a == &dev_attr_nguid.attr) {
- if (!memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
- return 0;
- }
- if (a == &dev_attr_eui.attr) {
- if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
- return 0;
- }
-#ifdef CONFIG_NVME_MULTIPATH
- if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) {
- if (dev_to_disk(dev)->fops != &nvme_bdev_ops) /* per-path attr */
- return 0;
- if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl))
- return 0;
- }
-#endif
- return a->mode;
-}
-
-static const struct attribute_group nvme_ns_id_attr_group = {
- .attrs = nvme_ns_id_attrs,
- .is_visible = nvme_ns_id_attrs_are_visible,
-};
-
-const struct attribute_group *nvme_ns_id_attr_groups[] = {
- &nvme_ns_id_attr_group,
- NULL,
-};
-
-#define nvme_show_str_function(field) \
-static ssize_t field##_show(struct device *dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \
- return sysfs_emit(buf, "%.*s\n", \
- (int)sizeof(ctrl->subsys->field), ctrl->subsys->field); \
-} \
-static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
-
-nvme_show_str_function(model);
-nvme_show_str_function(serial);
-nvme_show_str_function(firmware_rev);
-
-#define nvme_show_int_function(field) \
-static ssize_t field##_show(struct device *dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \
- return sysfs_emit(buf, "%d\n", ctrl->field); \
-} \
-static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
-
-nvme_show_int_function(cntlid);
-nvme_show_int_function(numa_node);
-nvme_show_int_function(queue_count);
-nvme_show_int_function(sqsize);
-nvme_show_int_function(kato);
-
-static ssize_t nvme_sysfs_delete(struct device *dev,
- struct device_attribute *attr, const char *buf,
- size_t count)
-{
- struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
-
- if (device_remove_file_self(dev, attr))
- nvme_delete_ctrl_sync(ctrl);
- return count;
-}
-static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete);
-
-static ssize_t nvme_sysfs_show_transport(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
-
- return sysfs_emit(buf, "%s\n", ctrl->ops->name);
-}
-static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);
-
-static ssize_t nvme_sysfs_show_state(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
- static const char *const state_name[] = {
- [NVME_CTRL_NEW] = "new",
- [NVME_CTRL_LIVE] = "live",
- [NVME_CTRL_RESETTING] = "resetting",
- [NVME_CTRL_CONNECTING] = "connecting",
- [NVME_CTRL_DELETING] = "deleting",
- [NVME_CTRL_DELETING_NOIO]= "deleting (no IO)",
- [NVME_CTRL_DEAD] = "dead",
- };
-
- if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) &&
- state_name[ctrl->state])
- return sysfs_emit(buf, "%s\n", state_name[ctrl->state]);
-
- return sysfs_emit(buf, "unknown state\n");
-}
-
-static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL);
-
-static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
-
- return sysfs_emit(buf, "%s\n", ctrl->subsys->subnqn);
-}
-static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);
-
-static ssize_t nvme_sysfs_show_hostnqn(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
-
- return sysfs_emit(buf, "%s\n", ctrl->opts->host->nqn);
-}
-static DEVICE_ATTR(hostnqn, S_IRUGO, nvme_sysfs_show_hostnqn, NULL);
-
-static ssize_t nvme_sysfs_show_hostid(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
-
- return sysfs_emit(buf, "%pU\n", &ctrl->opts->host->id);
-}
-static DEVICE_ATTR(hostid, S_IRUGO, nvme_sysfs_show_hostid, NULL);
-
-static ssize_t nvme_sysfs_show_address(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
-
- return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE);
-}
-static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);
-
-static ssize_t nvme_ctrl_loss_tmo_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
- struct nvmf_ctrl_options *opts = ctrl->opts;
-
- if (ctrl->opts->max_reconnects == -1)
- return sysfs_emit(buf, "off\n");
- return sysfs_emit(buf, "%d\n",
- opts->max_reconnects * opts->reconnect_delay);
-}
-
-static ssize_t nvme_ctrl_loss_tmo_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
- struct nvmf_ctrl_options *opts = ctrl->opts;
- int ctrl_loss_tmo, err;
-
- err = kstrtoint(buf, 10, &ctrl_loss_tmo);
- if (err)
- return -EINVAL;
-
- if (ctrl_loss_tmo < 0)
- opts->max_reconnects = -1;
- else
- opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
- opts->reconnect_delay);
- return count;
-}
-static DEVICE_ATTR(ctrl_loss_tmo, S_IRUGO | S_IWUSR,
- nvme_ctrl_loss_tmo_show, nvme_ctrl_loss_tmo_store);
-
-static ssize_t nvme_ctrl_reconnect_delay_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
-
- if (ctrl->opts->reconnect_delay == -1)
- return sysfs_emit(buf, "off\n");
- return sysfs_emit(buf, "%d\n", ctrl->opts->reconnect_delay);
-}
-
-static ssize_t nvme_ctrl_reconnect_delay_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
- unsigned int v;
- int err;
-
- err = kstrtou32(buf, 10, &v);
- if (err)
- return err;
-
- ctrl->opts->reconnect_delay = v;
- return count;
-}
-static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR,
- nvme_ctrl_reconnect_delay_show, nvme_ctrl_reconnect_delay_store);
-
-static ssize_t nvme_ctrl_fast_io_fail_tmo_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
-
- if (ctrl->opts->fast_io_fail_tmo == -1)
- return sysfs_emit(buf, "off\n");
- return sysfs_emit(buf, "%d\n", ctrl->opts->fast_io_fail_tmo);
-}
-
-static ssize_t nvme_ctrl_fast_io_fail_tmo_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
- struct nvmf_ctrl_options *opts = ctrl->opts;
- int fast_io_fail_tmo, err;
-
- err = kstrtoint(buf, 10, &fast_io_fail_tmo);
- if (err)
- return -EINVAL;
-
- if (fast_io_fail_tmo < 0)
- opts->fast_io_fail_tmo = -1;
- else
- opts->fast_io_fail_tmo = fast_io_fail_tmo;
- return count;
-}
-static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR,
- nvme_ctrl_fast_io_fail_tmo_show, nvme_ctrl_fast_io_fail_tmo_store);
-
-static ssize_t cntrltype_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- static const char * const type[] = {
- [NVME_CTRL_IO] = "io\n",
- [NVME_CTRL_DISC] = "discovery\n",
- [NVME_CTRL_ADMIN] = "admin\n",
- };
- struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
-
- if (ctrl->cntrltype > NVME_CTRL_ADMIN || !type[ctrl->cntrltype])
- return sysfs_emit(buf, "reserved\n");
-
- return sysfs_emit(buf, type[ctrl->cntrltype]);
-}
-static DEVICE_ATTR_RO(cntrltype);
-
-static ssize_t dctype_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- static const char * const type[] = {
- [NVME_DCTYPE_NOT_REPORTED] = "none\n",
- [NVME_DCTYPE_DDC] = "ddc\n",
- [NVME_DCTYPE_CDC] = "cdc\n",
- };
- struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
-
- if (ctrl->dctype > NVME_DCTYPE_CDC || !type[ctrl->dctype])
- return sysfs_emit(buf, "reserved\n");
-
- return sysfs_emit(buf, type[ctrl->dctype]);
-}
-static DEVICE_ATTR_RO(dctype);
-
-#ifdef CONFIG_NVME_AUTH
-static ssize_t nvme_ctrl_dhchap_secret_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
- struct nvmf_ctrl_options *opts = ctrl->opts;
-
- if (!opts->dhchap_secret)
- return sysfs_emit(buf, "none\n");
- return sysfs_emit(buf, "%s\n", opts->dhchap_secret);
-}
-
-static ssize_t nvme_ctrl_dhchap_secret_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
- struct nvmf_ctrl_options *opts = ctrl->opts;
- char *dhchap_secret;
-
- if (!ctrl->opts->dhchap_secret)
- return -EINVAL;
- if (count < 7)
- return -EINVAL;
- if (memcmp(buf, "DHHC-1:", 7))
- return -EINVAL;
-
- dhchap_secret = kzalloc(count + 1, GFP_KERNEL);
- if (!dhchap_secret)
- return -ENOMEM;
- memcpy(dhchap_secret, buf, count);
- nvme_auth_stop(ctrl);
- if (strcmp(dhchap_secret, opts->dhchap_secret)) {
- struct nvme_dhchap_key *key, *host_key;
- int ret;
-
- ret = nvme_auth_generate_key(dhchap_secret, &key);
- if (ret)
- return ret;
- kfree(opts->dhchap_secret);
- opts->dhchap_secret = dhchap_secret;
- host_key = ctrl->host_key;
- mutex_lock(&ctrl->dhchap_auth_mutex);
- ctrl->host_key = key;
- mutex_unlock(&ctrl->dhchap_auth_mutex);
- nvme_auth_free_key(host_key);
- }
- /* Start re-authentication */
- dev_info(ctrl->device, "re-authenticating controller\n");
- queue_work(nvme_wq, &ctrl->dhchap_auth_work);
-
- return count;
-}
-static DEVICE_ATTR(dhchap_secret, S_IRUGO | S_IWUSR,
- nvme_ctrl_dhchap_secret_show, nvme_ctrl_dhchap_secret_store);
-
-static ssize_t nvme_ctrl_dhchap_ctrl_secret_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
- struct nvmf_ctrl_options *opts = ctrl->opts;
-
- if (!opts->dhchap_ctrl_secret)
- return sysfs_emit(buf, "none\n");
- return sysfs_emit(buf, "%s\n", opts->dhchap_ctrl_secret);
-}
-
-static ssize_t nvme_ctrl_dhchap_ctrl_secret_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
- struct nvmf_ctrl_options *opts = ctrl->opts;
- char *dhchap_secret;
-
- if (!ctrl->opts->dhchap_ctrl_secret)
- return -EINVAL;
- if (count < 7)
- return -EINVAL;
- if (memcmp(buf, "DHHC-1:", 7))
- return -EINVAL;
-
- dhchap_secret = kzalloc(count + 1, GFP_KERNEL);
- if (!dhchap_secret)
- return -ENOMEM;
- memcpy(dhchap_secret, buf, count);
- nvme_auth_stop(ctrl);
- if (strcmp(dhchap_secret, opts->dhchap_ctrl_secret)) {
- struct nvme_dhchap_key *key, *ctrl_key;
- int ret;
-
- ret = nvme_auth_generate_key(dhchap_secret, &key);
- if (ret)
- return ret;
- kfree(opts->dhchap_ctrl_secret);
- opts->dhchap_ctrl_secret = dhchap_secret;
- ctrl_key = ctrl->ctrl_key;
- mutex_lock(&ctrl->dhchap_auth_mutex);
- ctrl->ctrl_key = key;
- mutex_unlock(&ctrl->dhchap_auth_mutex);
- nvme_auth_free_key(ctrl_key);
- }
- /* Start re-authentication */
- dev_info(ctrl->device, "re-authenticating controller\n");
- queue_work(nvme_wq, &ctrl->dhchap_auth_work);
-
- return count;
-}
-static DEVICE_ATTR(dhchap_ctrl_secret, S_IRUGO | S_IWUSR,
- nvme_ctrl_dhchap_ctrl_secret_show, nvme_ctrl_dhchap_ctrl_secret_store);
-#endif
-
-static struct attribute *nvme_dev_attrs[] = {
- &dev_attr_reset_controller.attr,
- &dev_attr_rescan_controller.attr,
- &dev_attr_model.attr,
- &dev_attr_serial.attr,
- &dev_attr_firmware_rev.attr,
- &dev_attr_cntlid.attr,
- &dev_attr_delete_controller.attr,
- &dev_attr_transport.attr,
- &dev_attr_subsysnqn.attr,
- &dev_attr_address.attr,
- &dev_attr_state.attr,
- &dev_attr_numa_node.attr,
- &dev_attr_queue_count.attr,
- &dev_attr_sqsize.attr,
- &dev_attr_hostnqn.attr,
- &dev_attr_hostid.attr,
- &dev_attr_ctrl_loss_tmo.attr,
- &dev_attr_reconnect_delay.attr,
- &dev_attr_fast_io_fail_tmo.attr,
- &dev_attr_kato.attr,
- &dev_attr_cntrltype.attr,
- &dev_attr_dctype.attr,
-#ifdef CONFIG_NVME_AUTH
- &dev_attr_dhchap_secret.attr,
- &dev_attr_dhchap_ctrl_secret.attr,
-#endif
- NULL
-};
-
-static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
- struct attribute *a, int n)
-{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
-
- if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl)
- return 0;
- if (a == &dev_attr_address.attr && !ctrl->ops->get_address)
- return 0;
- if (a == &dev_attr_hostnqn.attr && !ctrl->opts)
- return 0;
- if (a == &dev_attr_hostid.attr && !ctrl->opts)
- return 0;
- if (a == &dev_attr_ctrl_loss_tmo.attr && !ctrl->opts)
- return 0;
- if (a == &dev_attr_reconnect_delay.attr && !ctrl->opts)
- return 0;
- if (a == &dev_attr_fast_io_fail_tmo.attr && !ctrl->opts)
- return 0;
-#ifdef CONFIG_NVME_AUTH
- if (a == &dev_attr_dhchap_secret.attr && !ctrl->opts)
- return 0;
- if (a == &dev_attr_dhchap_ctrl_secret.attr && !ctrl->opts)
- return 0;
-#endif
-
- return a->mode;
-}
-
-const struct attribute_group nvme_dev_attrs_group = {
- .attrs = nvme_dev_attrs,
- .is_visible = nvme_dev_attrs_are_visible,
-};
-EXPORT_SYMBOL_GPL(nvme_dev_attrs_group);
-
-static const struct attribute_group *nvme_dev_attr_groups[] = {
- &nvme_dev_attrs_group,
- NULL,
-};
-
static struct nvme_ns_head *nvme_find_ns_head(struct nvme_ctrl *ctrl,
unsigned nsid)
{
@@ -4209,7 +3610,7 @@ static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info)
goto out_put_ns_head;
}
- if (!multipath && !list_empty(&head->list)) {
+ if (!multipath) {
dev_warn(ctrl->device,
"Found shared namespace %d, but multipathing not supported.\n",
info->nsid);
@@ -4310,7 +3711,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
* instance as shared namespaces will show up as multiple block
* devices.
*/
- if (ns->head->disk) {
+ if (nvme_ns_head_multipath(ns->head)) {
sprintf(disk->disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
ctrl->instance, ns->head->instance);
disk->flags |= GENHD_FL_HIDDEN;
@@ -5045,7 +4446,7 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
* that were missed. We identify persistent discovery controllers by
* checking that they started once before, hence are reconnecting back.
*/
- if (test_and_set_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags) &&
+ if (test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags) &&
nvme_discovery_ctrl(ctrl))
nvme_change_uevent(ctrl, "NVME_EVENT=rediscover");
@@ -5056,6 +4457,7 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
}
nvme_change_uevent(ctrl, "NVME_EVENT=connected");
+ set_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags);
}
EXPORT_SYMBOL_GPL(nvme_start_ctrl);
@@ -5195,6 +4597,8 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
return 0;
out_free_cdev:
+ nvme_fault_inject_fini(&ctrl->fault_inject);
+ dev_pm_qos_hide_latency_tolerance(ctrl->device);
cdev_device_del(&ctrl->cdev, ctrl->device);
out_free_name:
nvme_put_ctrl(ctrl);
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 0069ebff85df..8175d49f2909 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -21,35 +21,60 @@ static DEFINE_MUTEX(nvmf_hosts_mutex);
static struct nvmf_host *nvmf_default_host;
-static struct nvmf_host *__nvmf_host_find(const char *hostnqn)
+static struct nvmf_host *nvmf_host_alloc(const char *hostnqn, uuid_t *id)
{
struct nvmf_host *host;
- list_for_each_entry(host, &nvmf_hosts, list) {
- if (!strcmp(host->nqn, hostnqn))
- return host;
- }
+ host = kmalloc(sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return NULL;
- return NULL;
+ kref_init(&host->ref);
+ uuid_copy(&host->id, id);
+ strscpy(host->nqn, hostnqn, NVMF_NQN_SIZE);
+
+ return host;
}
-static struct nvmf_host *nvmf_host_add(const char *hostnqn)
+static struct nvmf_host *nvmf_host_add(const char *hostnqn, uuid_t *id)
{
struct nvmf_host *host;
mutex_lock(&nvmf_hosts_mutex);
- host = __nvmf_host_find(hostnqn);
- if (host) {
- kref_get(&host->ref);
- goto out_unlock;
+
+ /*
+	 * A host is defined by how it is perceived by the target: by the
+	 * (Host NQN, Host ID) pair. Therefore, we don't allow different
+	 * Host NQNs with the same Host ID, nor the same Host NQN with
+	 * different Host IDs. This keeps host identification unambiguous.
+ */
+ list_for_each_entry(host, &nvmf_hosts, list) {
+ bool same_hostnqn = !strcmp(host->nqn, hostnqn);
+ bool same_hostid = uuid_equal(&host->id, id);
+
+ if (same_hostnqn && same_hostid) {
+ kref_get(&host->ref);
+ goto out_unlock;
+ }
+ if (same_hostnqn) {
+ pr_err("found same hostnqn %s but different hostid %pUb\n",
+ hostnqn, id);
+ host = ERR_PTR(-EINVAL);
+ goto out_unlock;
+ }
+ if (same_hostid) {
+ pr_err("found same hostid %pUb but different hostnqn %s\n",
+ id, hostnqn);
+ host = ERR_PTR(-EINVAL);
+ goto out_unlock;
+ }
}
- host = kmalloc(sizeof(*host), GFP_KERNEL);
- if (!host)
+ host = nvmf_host_alloc(hostnqn, id);
+ if (!host) {
+ host = ERR_PTR(-ENOMEM);
goto out_unlock;
-
- kref_init(&host->ref);
- strscpy(host->nqn, hostnqn, NVMF_NQN_SIZE);
+ }
list_add_tail(&host->list, &nvmf_hosts);
out_unlock:
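
In effect, the (Host NQN, Host ID) pair is now the lookup key for the
shared host list. A sketch of the four possible outcomes, with
hypothetical NQNs and UUIDs (uuid_a != uuid_b):

	nvmf_host_add("nqn.2014-08.org.example:host1", &uuid_a); /* new: allocated and added */
	nvmf_host_add("nqn.2014-08.org.example:host1", &uuid_a); /* exact match: kref taken */
	nvmf_host_add("nqn.2014-08.org.example:host1", &uuid_b); /* same NQN, new ID: ERR_PTR(-EINVAL) */
	nvmf_host_add("nqn.2014-08.org.example:host2", &uuid_a); /* same ID, new NQN: ERR_PTR(-EINVAL) */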
@@ -60,16 +85,17 @@ out_unlock:
static struct nvmf_host *nvmf_host_default(void)
{
struct nvmf_host *host;
+ char nqn[NVMF_NQN_SIZE];
+ uuid_t id;
- host = kmalloc(sizeof(*host), GFP_KERNEL);
+ uuid_gen(&id);
+ snprintf(nqn, NVMF_NQN_SIZE,
+ "nqn.2014-08.org.nvmexpress:uuid:%pUb", &id);
+
+ host = nvmf_host_alloc(nqn, &id);
if (!host)
return NULL;
- kref_init(&host->ref);
- uuid_gen(&host->id);
- snprintf(host->nqn, NVMF_NQN_SIZE,
- "nqn.2014-08.org.nvmexpress:uuid:%pUb", &host->id);
-
mutex_lock(&nvmf_hosts_mutex);
list_add_tail(&host->list, &nvmf_hosts);
mutex_unlock(&nvmf_hosts_mutex);
@@ -349,6 +375,45 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
}
}
+static struct nvmf_connect_data *nvmf_connect_data_prep(struct nvme_ctrl *ctrl,
+ u16 cntlid)
+{
+ struct nvmf_connect_data *data;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return NULL;
+
+ uuid_copy(&data->hostid, &ctrl->opts->host->id);
+ data->cntlid = cpu_to_le16(cntlid);
+ strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
+ strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
+
+ return data;
+}
+
+static void nvmf_connect_cmd_prep(struct nvme_ctrl *ctrl, u16 qid,
+ struct nvme_command *cmd)
+{
+ cmd->connect.opcode = nvme_fabrics_command;
+ cmd->connect.fctype = nvme_fabrics_type_connect;
+ cmd->connect.qid = cpu_to_le16(qid);
+
+ if (qid) {
+ cmd->connect.sqsize = cpu_to_le16(ctrl->sqsize);
+ } else {
+ cmd->connect.sqsize = cpu_to_le16(NVME_AQ_DEPTH - 1);
+
+		/*
+		 * The connect command carries the keep-alive timeout in
+		 * milliseconds, while ctrl->kato is in seconds, hence
+		 * the * 1000.
+		 */
+ cmd->connect.kato = cpu_to_le32(ctrl->kato * 1000);
+ }
+
+ if (ctrl->opts->disable_sqflow)
+ cmd->connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW;
+}
+
/**
* nvmf_connect_admin_queue() - NVMe Fabrics Admin Queue "Connect"
* API function.
@@ -377,28 +442,12 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
int ret;
u32 result;
- cmd.connect.opcode = nvme_fabrics_command;
- cmd.connect.fctype = nvme_fabrics_type_connect;
- cmd.connect.qid = 0;
- cmd.connect.sqsize = cpu_to_le16(NVME_AQ_DEPTH - 1);
-
- /*
- * Set keep-alive timeout in seconds granularity (ms * 1000)
- */
- cmd.connect.kato = cpu_to_le32(ctrl->kato * 1000);
-
- if (ctrl->opts->disable_sqflow)
- cmd.connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW;
+ nvmf_connect_cmd_prep(ctrl, 0, &cmd);
- data = kzalloc(sizeof(*data), GFP_KERNEL);
+ data = nvmf_connect_data_prep(ctrl, 0xffff);
if (!data)
return -ENOMEM;
- uuid_copy(&data->hostid, &ctrl->opts->host->id);
- data->cntlid = cpu_to_le16(0xffff);
- strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
- strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
-
ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res,
data, sizeof(*data), NVME_QID_ANY, 1,
BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
@@ -468,23 +517,12 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
int ret;
u32 result;
- cmd.connect.opcode = nvme_fabrics_command;
- cmd.connect.fctype = nvme_fabrics_type_connect;
- cmd.connect.qid = cpu_to_le16(qid);
- cmd.connect.sqsize = cpu_to_le16(ctrl->sqsize);
+ nvmf_connect_cmd_prep(ctrl, qid, &cmd);
- if (ctrl->opts->disable_sqflow)
- cmd.connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW;
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
+ data = nvmf_connect_data_prep(ctrl, ctrl->cntlid);
if (!data)
return -ENOMEM;
- uuid_copy(&data->hostid, &ctrl->opts->host->id);
- data->cntlid = cpu_to_le16(ctrl->cntlid);
- strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
- strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
-
ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
data, sizeof(*data), qid, 1,
BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
@@ -621,6 +659,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
size_t nqnlen = 0;
int ctrl_loss_tmo = NVMF_DEF_CTRL_LOSS_TMO;
uuid_t hostid;
+ char hostnqn[NVMF_NQN_SIZE];
/* Set defaults */
opts->queue_size = NVMF_DEF_QUEUE_SIZE;
@@ -637,7 +676,9 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
if (!options)
return -ENOMEM;
- uuid_gen(&hostid);
+ /* use default host if not given by user space */
+ uuid_copy(&hostid, &nvmf_default_host->id);
+ strscpy(hostnqn, nvmf_default_host->nqn, NVMF_NQN_SIZE);
while ((p = strsep(&o, ",\n")) != NULL) {
if (!*p)
@@ -783,12 +824,8 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
ret = -EINVAL;
goto out;
}
- opts->host = nvmf_host_add(p);
+ strscpy(hostnqn, p, NVMF_NQN_SIZE);
kfree(p);
- if (!opts->host) {
- ret = -ENOMEM;
- goto out;
- }
break;
case NVMF_OPT_RECONNECT_DELAY:
if (match_int(args, &token)) {
@@ -945,18 +982,94 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
opts->fast_io_fail_tmo, ctrl_loss_tmo);
}
- if (!opts->host) {
- kref_get(&nvmf_default_host->ref);
- opts->host = nvmf_default_host;
+ opts->host = nvmf_host_add(hostnqn, &hostid);
+ if (IS_ERR(opts->host)) {
+ ret = PTR_ERR(opts->host);
+ opts->host = NULL;
+ goto out;
}
- uuid_copy(&opts->host->id, &hostid);
-
out:
kfree(options);
return ret;
}
+void nvmf_set_io_queues(struct nvmf_ctrl_options *opts, u32 nr_io_queues,
+ u32 io_queues[HCTX_MAX_TYPES])
+{
+ if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) {
+ /*
+ * separate read/write queues
+ * hand out dedicated default queues only after we have
+ * sufficient read queues.
+ */
+ io_queues[HCTX_TYPE_READ] = opts->nr_io_queues;
+ nr_io_queues -= io_queues[HCTX_TYPE_READ];
+ io_queues[HCTX_TYPE_DEFAULT] =
+ min(opts->nr_write_queues, nr_io_queues);
+ nr_io_queues -= io_queues[HCTX_TYPE_DEFAULT];
+ } else {
+ /*
+ * shared read/write queues
+ * either no write queues were requested, or we don't have
+ * sufficient queue count to have dedicated default queues.
+ */
+ io_queues[HCTX_TYPE_DEFAULT] =
+ min(opts->nr_io_queues, nr_io_queues);
+ nr_io_queues -= io_queues[HCTX_TYPE_DEFAULT];
+ }
+
+ if (opts->nr_poll_queues && nr_io_queues) {
+ /* map dedicated poll queues only if we have queues left */
+ io_queues[HCTX_TYPE_POLL] =
+ min(opts->nr_poll_queues, nr_io_queues);
+ }
+}
+EXPORT_SYMBOL_GPL(nvmf_set_io_queues);
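+
+/*
+ * Worked example (hypothetical counts): the controller grants
+ * nr_io_queues = 8 and opts has nr_io_queues = 4, nr_write_queues = 2,
+ * nr_poll_queues = 2:
+ *
+ *   HCTX_TYPE_READ    = 4             (remaining: 8 - 4 = 4)
+ *   HCTX_TYPE_DEFAULT = min(2, 4) = 2 (remaining: 4 - 2 = 2)
+ *   HCTX_TYPE_POLL    = min(2, 2) = 2
+ */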
+
+void nvmf_map_queues(struct blk_mq_tag_set *set, struct nvme_ctrl *ctrl,
+ u32 io_queues[HCTX_MAX_TYPES])
+{
+ struct nvmf_ctrl_options *opts = ctrl->opts;
+
+ if (opts->nr_write_queues && io_queues[HCTX_TYPE_READ]) {
+ /* separate read/write queues */
+ set->map[HCTX_TYPE_DEFAULT].nr_queues =
+ io_queues[HCTX_TYPE_DEFAULT];
+ set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
+ set->map[HCTX_TYPE_READ].nr_queues =
+ io_queues[HCTX_TYPE_READ];
+ set->map[HCTX_TYPE_READ].queue_offset =
+ io_queues[HCTX_TYPE_DEFAULT];
+ } else {
+ /* shared read/write queues */
+ set->map[HCTX_TYPE_DEFAULT].nr_queues =
+ io_queues[HCTX_TYPE_DEFAULT];
+ set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
+ set->map[HCTX_TYPE_READ].nr_queues =
+ io_queues[HCTX_TYPE_DEFAULT];
+ set->map[HCTX_TYPE_READ].queue_offset = 0;
+ }
+
+ blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
+ blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
+ if (opts->nr_poll_queues && io_queues[HCTX_TYPE_POLL]) {
+ /* map dedicated poll queues only if we have queues left */
+ set->map[HCTX_TYPE_POLL].nr_queues = io_queues[HCTX_TYPE_POLL];
+ set->map[HCTX_TYPE_POLL].queue_offset =
+ io_queues[HCTX_TYPE_DEFAULT] +
+ io_queues[HCTX_TYPE_READ];
+ blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
+ }
+
+ dev_info(ctrl->device,
+ "mapped %d/%d/%d default/read/poll queues.\n",
+ io_queues[HCTX_TYPE_DEFAULT],
+ io_queues[HCTX_TYPE_READ],
+ io_queues[HCTX_TYPE_POLL]);
+}
+EXPORT_SYMBOL_GPL(nvmf_map_queues);
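+
+/*
+ * Continuing the example above, the resulting blk-mq maps are laid out
+ * contiguously:
+ *
+ *   HCTX_TYPE_DEFAULT: nr_queues = 2, queue_offset = 0     (queues 0-1)
+ *   HCTX_TYPE_READ:    nr_queues = 4, queue_offset = 2     (queues 2-5)
+ *   HCTX_TYPE_POLL:    nr_queues = 2, queue_offset = 2 + 4 (queues 6-7)
+ */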
+
static int nvmf_check_required_opts(struct nvmf_ctrl_options *opts,
unsigned int required_opts)
{
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index dcac3df8a5f7..82e7a27ffbde 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -77,6 +77,9 @@ enum {
* with the parsing opts enum.
* @mask: Used by the fabrics library to parse through sysfs options
* on adding a NVMe controller.
+ * @max_reconnects: maximum number of allowed reconnect attempts before removing
+ * the controller, (-1) means reconnect forever, zero means remove
+ * immediately;
* @transport: Holds the fabric transport "technology name" (for a lack of
* better description) that will be used by an NVMe controller
* being added.
@@ -96,9 +99,6 @@ enum {
* @discovery_nqn: indicates if the subsysnqn is the well-known discovery NQN.
* @kato: Keep-alive timeout.
* @host: Virtual NVMe host, contains the NQN and Host ID.
- * @max_reconnects: maximum number of allowed reconnect attempts before removing
- * the controller, (-1) means reconnect forever, zero means remove
- * immediately;
* @dhchap_secret: DH-HMAC-CHAP secret
* @dhchap_ctrl_secret: DH-HMAC-CHAP controller secret for bi-directional
* authentication
@@ -112,6 +112,7 @@ enum {
*/
struct nvmf_ctrl_options {
unsigned mask;
+ int max_reconnects;
char *transport;
char *subsysnqn;
char *traddr;
@@ -125,7 +126,6 @@ struct nvmf_ctrl_options {
bool duplicate_connect;
unsigned int kato;
struct nvmf_host *host;
- int max_reconnects;
char *dhchap_secret;
char *dhchap_ctrl_secret;
bool disable_sqflow;
@@ -181,7 +181,7 @@ nvmf_ctlr_matches_baseopts(struct nvme_ctrl *ctrl,
ctrl->state == NVME_CTRL_DEAD ||
strcmp(opts->subsysnqn, ctrl->opts->subsysnqn) ||
strcmp(opts->host->nqn, ctrl->opts->host->nqn) ||
- memcmp(&opts->host->id, &ctrl->opts->host->id, sizeof(uuid_t)))
+ !uuid_equal(&opts->host->id, &ctrl->opts->host->id))
return false;
return true;
@@ -203,6 +203,13 @@ static inline void nvmf_complete_timed_out_request(struct request *rq)
}
}
+static inline unsigned int nvmf_nr_io_queues(struct nvmf_ctrl_options *opts)
+{
+ return min(opts->nr_io_queues, num_online_cpus()) +
+ min(opts->nr_write_queues, num_online_cpus()) +
+ min(opts->nr_poll_queues, num_online_cpus());
+}
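+
+/*
+ * e.g. with 4 online CPUs and opts { nr_io_queues = 8, nr_write_queues = 4,
+ * nr_poll_queues = 2 }: min(8, 4) + min(4, 4) + min(2, 4) = 10 queues are
+ * requested from the controller.
+ */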
+
int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val);
int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val);
int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val);
@@ -215,5 +222,9 @@ int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
bool nvmf_ip_options_match(struct nvme_ctrl *ctrl,
struct nvmf_ctrl_options *opts);
+void nvmf_set_io_queues(struct nvmf_ctrl_options *opts, u32 nr_io_queues,
+ u32 io_queues[HCTX_MAX_TYPES]);
+void nvmf_map_queues(struct blk_mq_tag_set *set, struct nvme_ctrl *ctrl,
+ u32 io_queues[HCTX_MAX_TYPES]);
#endif /* _NVME_FABRICS_H */
diff --git a/drivers/nvme/host/hwmon.c b/drivers/nvme/host/hwmon.c
index 9e6e56c20ec9..316f3e4ca7cc 100644
--- a/drivers/nvme/host/hwmon.c
+++ b/drivers/nvme/host/hwmon.c
@@ -163,7 +163,9 @@ static umode_t nvme_hwmon_is_visible(const void *_data,
case hwmon_temp_max:
case hwmon_temp_min:
if ((!channel && data->ctrl->wctemp) ||
- (channel && data->log->temp_sensor[channel - 1])) {
+ (channel && data->log->temp_sensor[channel - 1] &&
+ !(data->ctrl->quirks &
+ NVME_QUIRK_NO_SECONDARY_TEMP_THRESH))) {
if (data->ctrl->quirks &
NVME_QUIRK_NO_TEMP_THRESH_CHANGE)
return 0444;
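
Condensed, the visibility rule for the per-sensor temperature threshold
attributes is now:

	/*
	 * channel == 0 (Composite): visible iff ctrl->wctemp is set
	 * channel  > 0:             visible iff log->temp_sensor[channel - 1]
	 *                           is set and the controller does not have
	 *                           NVME_QUIRK_NO_SECONDARY_TEMP_THRESH
	 *
	 * read-only (0444) when NVME_QUIRK_NO_TEMP_THRESH_CHANGE is set,
	 * writable otherwise.
	 */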
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 81c5c9e38477..2130ad65b58c 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -14,7 +14,7 @@ enum {
};
static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
- unsigned int flags, fmode_t mode)
+ unsigned int flags, bool open_for_write)
{
u32 effects;
@@ -80,7 +80,7 @@ static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
* writing.
*/
if (nvme_is_write(c) || (effects & NVME_CMD_EFFECTS_LBCC))
- return mode & FMODE_WRITE;
+ return open_for_write;
return true;
}
@@ -254,7 +254,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
blk_mq_free_request(req);
if (effects)
- nvme_passthru_end(ctrl, effects, cmd, ret);
+ nvme_passthru_end(ctrl, ns, effects, cmd, ret);
return ret;
}
@@ -337,7 +337,7 @@ static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl,
static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
struct nvme_passthru_cmd __user *ucmd, unsigned int flags,
- fmode_t mode)
+ bool open_for_write)
{
struct nvme_passthru_cmd cmd;
struct nvme_command c;
@@ -365,7 +365,7 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
c.common.cdw14 = cpu_to_le32(cmd.cdw14);
c.common.cdw15 = cpu_to_le32(cmd.cdw15);
- if (!nvme_cmd_allowed(ns, &c, 0, mode))
+ if (!nvme_cmd_allowed(ns, &c, 0, open_for_write))
return -EACCES;
if (cmd.timeout_ms)
@@ -385,7 +385,7 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
struct nvme_passthru_cmd64 __user *ucmd, unsigned int flags,
- fmode_t mode)
+ bool open_for_write)
{
struct nvme_passthru_cmd64 cmd;
struct nvme_command c;
@@ -412,7 +412,7 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
c.common.cdw14 = cpu_to_le32(cmd.cdw14);
c.common.cdw15 = cpu_to_le32(cmd.cdw15);
- if (!nvme_cmd_allowed(ns, &c, flags, mode))
+ if (!nvme_cmd_allowed(ns, &c, flags, open_for_write))
return -EACCES;
if (cmd.timeout_ms)
@@ -521,7 +521,7 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
if (cookie != NULL && blk_rq_is_poll(req))
nvme_uring_task_cb(ioucmd, IO_URING_F_UNLOCKED);
else
- io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_cb);
+ io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb);
return RQ_END_IO_FREE;
}
@@ -543,7 +543,7 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io_meta(struct request *req,
if (cookie != NULL && blk_rq_is_poll(req))
nvme_uring_task_meta_cb(ioucmd, IO_URING_F_UNLOCKED);
else
- io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_meta_cb);
+ io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_meta_cb);
return RQ_END_IO_NONE;
}
@@ -583,7 +583,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
c.common.cdw14 = cpu_to_le32(READ_ONCE(cmd->cdw14));
c.common.cdw15 = cpu_to_le32(READ_ONCE(cmd->cdw15));
- if (!nvme_cmd_allowed(ns, &c, 0, ioucmd->file->f_mode))
+ if (!nvme_cmd_allowed(ns, &c, 0, ioucmd->file->f_mode & FMODE_WRITE))
return -EACCES;
d.metadata = READ_ONCE(cmd->metadata);
@@ -649,13 +649,13 @@ static bool is_ctrl_ioctl(unsigned int cmd)
}
static int nvme_ctrl_ioctl(struct nvme_ctrl *ctrl, unsigned int cmd,
- void __user *argp, fmode_t mode)
+ void __user *argp, bool open_for_write)
{
switch (cmd) {
case NVME_IOCTL_ADMIN_CMD:
- return nvme_user_cmd(ctrl, NULL, argp, 0, mode);
+ return nvme_user_cmd(ctrl, NULL, argp, 0, open_for_write);
case NVME_IOCTL_ADMIN64_CMD:
- return nvme_user_cmd64(ctrl, NULL, argp, 0, mode);
+ return nvme_user_cmd64(ctrl, NULL, argp, 0, open_for_write);
default:
return sed_ioctl(ctrl->opal_dev, cmd, argp);
}
@@ -680,14 +680,14 @@ struct nvme_user_io32 {
#endif /* COMPAT_FOR_U64_ALIGNMENT */
static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned int cmd,
- void __user *argp, unsigned int flags, fmode_t mode)
+ void __user *argp, unsigned int flags, bool open_for_write)
{
switch (cmd) {
case NVME_IOCTL_ID:
force_successful_syscall_return();
return ns->head->ns_id;
case NVME_IOCTL_IO_CMD:
- return nvme_user_cmd(ns->ctrl, ns, argp, flags, mode);
+ return nvme_user_cmd(ns->ctrl, ns, argp, flags, open_for_write);
/*
* struct nvme_user_io can have different padding on some 32-bit ABIs.
* Just accept the compat version as all fields that are used are the
@@ -702,16 +702,18 @@ static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned int cmd,
flags |= NVME_IOCTL_VEC;
fallthrough;
case NVME_IOCTL_IO64_CMD:
- return nvme_user_cmd64(ns->ctrl, ns, argp, flags, mode);
+ return nvme_user_cmd64(ns->ctrl, ns, argp, flags,
+ open_for_write);
default:
return -ENOTTY;
}
}
-int nvme_ioctl(struct block_device *bdev, fmode_t mode,
+int nvme_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long arg)
{
struct nvme_ns *ns = bdev->bd_disk->private_data;
+ bool open_for_write = mode & BLK_OPEN_WRITE;
void __user *argp = (void __user *)arg;
unsigned int flags = 0;
@@ -719,19 +721,20 @@ int nvme_ioctl(struct block_device *bdev, fmode_t mode,
flags |= NVME_IOCTL_PARTITION;
if (is_ctrl_ioctl(cmd))
- return nvme_ctrl_ioctl(ns->ctrl, cmd, argp, mode);
- return nvme_ns_ioctl(ns, cmd, argp, flags, mode);
+ return nvme_ctrl_ioctl(ns->ctrl, cmd, argp, open_for_write);
+ return nvme_ns_ioctl(ns, cmd, argp, flags, open_for_write);
}
long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct nvme_ns *ns =
container_of(file_inode(file)->i_cdev, struct nvme_ns, cdev);
+ bool open_for_write = file->f_mode & FMODE_WRITE;
void __user *argp = (void __user *)arg;
if (is_ctrl_ioctl(cmd))
- return nvme_ctrl_ioctl(ns->ctrl, cmd, argp, file->f_mode);
- return nvme_ns_ioctl(ns, cmd, argp, 0, file->f_mode);
+ return nvme_ctrl_ioctl(ns->ctrl, cmd, argp, open_for_write);
+ return nvme_ns_ioctl(ns, cmd, argp, 0, open_for_write);
}
static int nvme_uring_cmd_checks(unsigned int issue_flags)
@@ -800,7 +803,7 @@ int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
#ifdef CONFIG_NVME_MULTIPATH
static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
void __user *argp, struct nvme_ns_head *head, int srcu_idx,
- fmode_t mode)
+ bool open_for_write)
__releases(&head->srcu)
{
struct nvme_ctrl *ctrl = ns->ctrl;
@@ -808,16 +811,17 @@ static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
nvme_get_ctrl(ns->ctrl);
srcu_read_unlock(&head->srcu, srcu_idx);
- ret = nvme_ctrl_ioctl(ns->ctrl, cmd, argp, mode);
+ ret = nvme_ctrl_ioctl(ns->ctrl, cmd, argp, open_for_write);
nvme_put_ctrl(ctrl);
return ret;
}
-int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode,
+int nvme_ns_head_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long arg)
{
struct nvme_ns_head *head = bdev->bd_disk->private_data;
+ bool open_for_write = mode & BLK_OPEN_WRITE;
void __user *argp = (void __user *)arg;
struct nvme_ns *ns;
int srcu_idx, ret = -EWOULDBLOCK;
@@ -838,9 +842,9 @@ int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode,
*/
if (is_ctrl_ioctl(cmd))
return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx,
- mode);
+ open_for_write);
- ret = nvme_ns_ioctl(ns, cmd, argp, flags, mode);
+ ret = nvme_ns_ioctl(ns, cmd, argp, flags, open_for_write);
out_unlock:
srcu_read_unlock(&head->srcu, srcu_idx);
return ret;
@@ -849,6 +853,7 @@ out_unlock:
long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
+ bool open_for_write = file->f_mode & FMODE_WRITE;
struct cdev *cdev = file_inode(file)->i_cdev;
struct nvme_ns_head *head =
container_of(cdev, struct nvme_ns_head, cdev);
@@ -863,9 +868,9 @@ long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
if (is_ctrl_ioctl(cmd))
return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx,
- file->f_mode);
+ open_for_write);
- ret = nvme_ns_ioctl(ns, cmd, argp, 0, file->f_mode);
+ ret = nvme_ns_ioctl(ns, cmd, argp, 0, open_for_write);
out_unlock:
srcu_read_unlock(&head->srcu, srcu_idx);
return ret;
@@ -940,7 +945,7 @@ int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
}
static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp,
- fmode_t mode)
+ bool open_for_write)
{
struct nvme_ns *ns;
int ret;
@@ -964,7 +969,7 @@ static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp,
kref_get(&ns->kref);
up_read(&ctrl->namespaces_rwsem);
- ret = nvme_user_cmd(ctrl, ns, argp, 0, mode);
+ ret = nvme_user_cmd(ctrl, ns, argp, 0, open_for_write);
nvme_put_ns(ns);
return ret;
@@ -976,16 +981,17 @@ out_unlock:
long nvme_dev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
+ bool open_for_write = file->f_mode & FMODE_WRITE;
struct nvme_ctrl *ctrl = file->private_data;
void __user *argp = (void __user *)arg;
switch (cmd) {
case NVME_IOCTL_ADMIN_CMD:
- return nvme_user_cmd(ctrl, NULL, argp, 0, file->f_mode);
+ return nvme_user_cmd(ctrl, NULL, argp, 0, open_for_write);
case NVME_IOCTL_ADMIN64_CMD:
- return nvme_user_cmd64(ctrl, NULL, argp, 0, file->f_mode);
+ return nvme_user_cmd64(ctrl, NULL, argp, 0, open_for_write);
case NVME_IOCTL_IO_CMD:
- return nvme_dev_user_cmd(ctrl, argp, file->f_mode);
+ return nvme_dev_user_cmd(ctrl, argp, open_for_write);
case NVME_IOCTL_RESET:
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
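
Throughout these ioctl paths the opener's mode is reduced to a single
boolean before dispatch, so nvme_cmd_allowed() no longer deals with
fmode_t at all. A minimal sketch of the two derivations (hypothetical
helper names; the flag tests are as in the patch):

	static bool bdev_opened_for_write(blk_mode_t mode)
	{
		return mode & BLK_OPEN_WRITE;
	}

	static bool file_opened_for_write(struct file *file)
	{
		return file->f_mode & FMODE_WRITE;
	}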
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 9171452e2f6d..98001eebd275 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -402,14 +402,14 @@ static void nvme_ns_head_submit_bio(struct bio *bio)
srcu_read_unlock(&head->srcu, srcu_idx);
}
-static int nvme_ns_head_open(struct block_device *bdev, fmode_t mode)
+static int nvme_ns_head_open(struct gendisk *disk, blk_mode_t mode)
{
- if (!nvme_tryget_ns_head(bdev->bd_disk->private_data))
+ if (!nvme_tryget_ns_head(disk->private_data))
return -ENXIO;
return 0;
}
-static void nvme_ns_head_release(struct gendisk *disk, fmode_t mode)
+static void nvme_ns_head_release(struct gendisk *disk)
{
nvme_put_ns_head(disk->private_data);
}
@@ -884,7 +884,6 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
if (!head->disk)
return;
- blk_mark_disk_dead(head->disk);
/* make sure all pending bios are cleaned up */
kblockd_schedule_work(&head->requeue_work);
flush_work(&head->requeue_work);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index bf46f122e9e1..9a98c14c552a 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -149,6 +149,11 @@ enum nvme_quirks {
* Reports garbage in the namespace identifiers (eui64, nguid, uuid).
*/
NVME_QUIRK_BOGUS_NID = (1 << 18),
+
+ /*
+ * No temperature thresholds for channels other than 0 (Composite).
+ */
+ NVME_QUIRK_NO_SECONDARY_TEMP_THRESH = (1 << 19),
};
/*
@@ -242,12 +247,13 @@ enum nvme_ctrl_flags {
NVME_CTRL_ADMIN_Q_STOPPED = 1,
NVME_CTRL_STARTED_ONCE = 2,
NVME_CTRL_STOPPED = 3,
+ NVME_CTRL_SKIP_ID_CNS_CS = 4,
};
struct nvme_ctrl {
bool comp_seen;
- enum nvme_ctrl_state state;
bool identified;
+ enum nvme_ctrl_state state;
spinlock_t lock;
struct mutex scan_lock;
const struct nvme_ctrl_ops *ops;
@@ -279,8 +285,8 @@ struct nvme_ctrl {
char name[12];
u16 cntlid;
- u32 ctrl_config;
u16 mtfa;
+ u32 ctrl_config;
u32 queue_count;
u64 cap;
@@ -323,6 +329,7 @@ struct nvme_ctrl {
struct delayed_work ka_work;
struct delayed_work failfast_work;
struct nvme_command ka_cmd;
+ unsigned long ka_last_check_time;
struct work_struct fw_act_work;
unsigned long events;
@@ -353,10 +360,10 @@ struct nvme_ctrl {
bool apst_enabled;
/* PCIe only: */
+ u16 hmmaxd;
u32 hmpre;
u32 hmmin;
u32 hmminds;
- u16 hmmaxd;
/* Fabrics only */
u32 ioccsz;
@@ -836,10 +843,10 @@ void nvme_put_ns_head(struct nvme_ns_head *head);
int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
const struct file_operations *fops, struct module *owner);
void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device);
-int nvme_ioctl(struct block_device *bdev, fmode_t mode,
+int nvme_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long arg);
long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
-int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode,
+int nvme_ns_head_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long arg);
long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
unsigned long arg);
@@ -860,7 +867,11 @@ extern const struct attribute_group *nvme_ns_id_attr_groups[];
extern const struct pr_ops nvme_pr_ops;
extern const struct block_device_operations nvme_ns_head_ops;
extern const struct attribute_group nvme_dev_attrs_group;
+extern const struct attribute_group *nvme_subsys_attrs_groups[];
+extern const struct attribute_group *nvme_dev_attr_groups[];
+extern const struct block_device_operations nvme_bdev_ops;
+void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl);
struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
#ifdef CONFIG_NVME_MULTIPATH
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
@@ -1072,7 +1083,7 @@ u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
u8 opcode);
u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode);
int nvme_execute_rq(struct request *rq, bool at_head);
-void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
+void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects,
struct nvme_command *cmd, int status);
struct nvme_ctrl *nvme_ctrl_from_file(struct file *file);
struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid);
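
The member moves in struct nvme_ctrl pair same-sized fields, presumably to
close alignment holes. A self-contained illustration of the effect
(hypothetical struct, typical alignment rules):

	#include <stdint.h>
	#include <stdio.h>

	struct loose { uint16_t cntlid; uint32_t ctrl_config; uint16_t mtfa; };
	struct tight { uint16_t cntlid; uint16_t mtfa; uint32_t ctrl_config; };

	int main(void)
	{
		/* prints "12 8" on common ABIs: a 2-byte hole and 2 bytes of
		 * tail padding disappear after the reorder */
		printf("%zu %zu\n", sizeof(struct loose), sizeof(struct tight));
		return 0;
	}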
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 7f25c0fe3a0b..48c60f7fda0b 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -420,10 +420,9 @@ static int nvme_pci_init_request(struct blk_mq_tag_set *set,
struct request *req, unsigned int hctx_idx,
unsigned int numa_node)
{
- struct nvme_dev *dev = to_nvme_dev(set->driver_data);
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- nvme_req(req)->ctrl = &dev->ctrl;
+ nvme_req(req)->ctrl = set->driver_data;
nvme_req(req)->cmd = &iod->cmd;
return 0;
}
@@ -2956,7 +2955,7 @@ static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev,
* over a single page.
*/
dev->ctrl.max_hw_sectors = min_t(u32,
- NVME_MAX_KB_SZ << 1, dma_max_mapping_size(&pdev->dev) >> 9);
+ NVME_MAX_KB_SZ << 1, dma_opt_mapping_size(&pdev->dev) >> 9);
dev->ctrl.max_segments = NVME_MAX_SEGS;
/*
@@ -3402,6 +3401,8 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
{ PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
+ { PCI_DEVICE(0x2646, 0x5013), /* Kingston KC3000, Kingston FURY Renegade */
+ .driver_data = NVME_QUIRK_NO_SECONDARY_TEMP_THRESH, },
{ PCI_DEVICE(0x2646, 0x5018), /* KINGSTON OM8SFP4xxxxP OS21012 NVMe SSD */
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_DEVICE(0x2646, 0x5016), /* KINGSTON OM3PGP4xxxxP OS21011 NVMe SSD */
@@ -3422,6 +3423,8 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1e4B, 0x1202), /* MAXIO MAP1202 */
.driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1e4B, 0x1602), /* MAXIO MAP1602 */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1cc1, 0x5350), /* ADATA XPG GAMMIX S50 */
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1dbe, 0x5236), /* ADATA XPG GAMMIX S70 */
@@ -3441,6 +3444,10 @@ static const struct pci_device_id nvme_id_table[] = {
NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_DEVICE(0x10ec, 0x5763), /* TEAMGROUP T-FORCE CARDEA ZERO Z330 SSD */
.driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1e4b, 0x1602), /* HS-SSD-FUTURE 2048G */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x10ec, 0x5765), /* TEAMGROUP MP33 2TB SSD */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065),
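
dma_opt_mapping_size() reports the largest mapping the DMA layer can
service without degrading IOVA allocation performance, so the transfer cap
may now come out lower than dma_max_mapping_size() would have allowed. A
worked example with a hypothetical 128 KiB optimal mapping size:

	u32 cap = min_t(u32, NVME_MAX_KB_SZ << 1, (128 << 10) >> 9);
	/* (128 << 10) >> 9 == 256 sectors, i.e. at most 128 KiB per command */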
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 0eb79696fb73..d433b2ec07a6 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -501,7 +501,7 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
}
ibdev = queue->device->dev;
- /* +1 for ib_stop_cq */
+ /* +1 for ib_drain_qp */
queue->cq_size = cq_factor * queue->queue_size + 1;
ret = nvme_rdma_create_cq(ibdev, queue);
@@ -713,18 +713,10 @@ out_stop_queues:
static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
{
struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
- struct ib_device *ibdev = ctrl->device->dev;
- unsigned int nr_io_queues, nr_default_queues;
- unsigned int nr_read_queues, nr_poll_queues;
+ unsigned int nr_io_queues;
int i, ret;
- nr_read_queues = min_t(unsigned int, ibdev->num_comp_vectors,
- min(opts->nr_io_queues, num_online_cpus()));
- nr_default_queues = min_t(unsigned int, ibdev->num_comp_vectors,
- min(opts->nr_write_queues, num_online_cpus()));
- nr_poll_queues = min(opts->nr_poll_queues, num_online_cpus());
- nr_io_queues = nr_read_queues + nr_default_queues + nr_poll_queues;
-
+ nr_io_queues = nvmf_nr_io_queues(opts);
ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
if (ret)
return ret;
@@ -739,34 +731,7 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
dev_info(ctrl->ctrl.device,
"creating %d I/O queues.\n", nr_io_queues);
- if (opts->nr_write_queues && nr_read_queues < nr_io_queues) {
- /*
- * separate read/write queues
- * hand out dedicated default queues only after we have
- * sufficient read queues.
- */
- ctrl->io_queues[HCTX_TYPE_READ] = nr_read_queues;
- nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
- ctrl->io_queues[HCTX_TYPE_DEFAULT] =
- min(nr_default_queues, nr_io_queues);
- nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
- } else {
- /*
- * shared read/write queues
- * either no write queues were requested, or we don't have
- * sufficient queue count to have dedicated default queues.
- */
- ctrl->io_queues[HCTX_TYPE_DEFAULT] =
- min(nr_read_queues, nr_io_queues);
- nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
- }
-
- if (opts->nr_poll_queues && nr_io_queues) {
- /* map dedicated poll queues only if we have queues left */
- ctrl->io_queues[HCTX_TYPE_POLL] =
- min(nr_poll_queues, nr_io_queues);
- }
-
+ nvmf_set_io_queues(opts, nr_io_queues, ctrl->io_queues);
for (i = 1; i < ctrl->ctrl.queue_count; i++) {
ret = nvme_rdma_alloc_queue(ctrl, i,
ctrl->ctrl.sqsize + 1);
@@ -2138,44 +2103,8 @@ static void nvme_rdma_complete_rq(struct request *rq)
static void nvme_rdma_map_queues(struct blk_mq_tag_set *set)
{
struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(set->driver_data);
- struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
- if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
- /* separate read/write queues */
- set->map[HCTX_TYPE_DEFAULT].nr_queues =
- ctrl->io_queues[HCTX_TYPE_DEFAULT];
- set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
- set->map[HCTX_TYPE_READ].nr_queues =
- ctrl->io_queues[HCTX_TYPE_READ];
- set->map[HCTX_TYPE_READ].queue_offset =
- ctrl->io_queues[HCTX_TYPE_DEFAULT];
- } else {
- /* shared read/write queues */
- set->map[HCTX_TYPE_DEFAULT].nr_queues =
- ctrl->io_queues[HCTX_TYPE_DEFAULT];
- set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
- set->map[HCTX_TYPE_READ].nr_queues =
- ctrl->io_queues[HCTX_TYPE_DEFAULT];
- set->map[HCTX_TYPE_READ].queue_offset = 0;
- }
- blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
- blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
-
- if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
- /* map dedicated poll queues only if we have queues left */
- set->map[HCTX_TYPE_POLL].nr_queues =
- ctrl->io_queues[HCTX_TYPE_POLL];
- set->map[HCTX_TYPE_POLL].queue_offset =
- ctrl->io_queues[HCTX_TYPE_DEFAULT] +
- ctrl->io_queues[HCTX_TYPE_READ];
- blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
- }
-
- dev_info(ctrl->ctrl.device,
- "mapped %d/%d/%d default/read/poll queues.\n",
- ctrl->io_queues[HCTX_TYPE_DEFAULT],
- ctrl->io_queues[HCTX_TYPE_READ],
- ctrl->io_queues[HCTX_TYPE_POLL]);
+ nvmf_map_queues(set, &ctrl->ctrl, ctrl->io_queues);
}
static const struct blk_mq_ops nvme_rdma_mq_ops = {
diff --git a/drivers/nvme/host/sysfs.c b/drivers/nvme/host/sysfs.c
new file mode 100644
index 000000000000..45e91811f905
--- /dev/null
+++ b/drivers/nvme/host/sysfs.c
@@ -0,0 +1,668 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sysfs interface for the NVMe core driver.
+ *
+ * Copyright (c) 2011-2014, Intel Corporation.
+ */
+
+#include <linux/nvme-auth.h>
+
+#include "nvme.h"
+#include "fabrics.h"
+
+static ssize_t nvme_sysfs_reset(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ int ret;
+
+ ret = nvme_reset_ctrl_sync(ctrl);
+ if (ret < 0)
+ return ret;
+ return count;
+}
+static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);
+
+static ssize_t nvme_sysfs_rescan(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ nvme_queue_scan(ctrl);
+ return count;
+}
+static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);
+
+static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev)
+{
+ struct gendisk *disk = dev_to_disk(dev);
+
+ if (disk->fops == &nvme_bdev_ops)
+ return nvme_get_ns_from_dev(dev)->head;
+ else
+ return disk->private_data;
+}
+
+static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_ns_head *head = dev_to_ns_head(dev);
+ struct nvme_ns_ids *ids = &head->ids;
+ struct nvme_subsystem *subsys = head->subsys;
+ int serial_len = sizeof(subsys->serial);
+ int model_len = sizeof(subsys->model);
+
+ if (!uuid_is_null(&ids->uuid))
+ return sysfs_emit(buf, "uuid.%pU\n", &ids->uuid);
+
+ if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
+ return sysfs_emit(buf, "eui.%16phN\n", ids->nguid);
+
+ if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
+ return sysfs_emit(buf, "eui.%8phN\n", ids->eui64);
+
+ while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' ||
+ subsys->serial[serial_len - 1] == '\0'))
+ serial_len--;
+ while (model_len > 0 && (subsys->model[model_len - 1] == ' ' ||
+ subsys->model[model_len - 1] == '\0'))
+ model_len--;
+
+ return sysfs_emit(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id,
+ serial_len, subsys->serial, model_len, subsys->model,
+ head->ns_id);
+}
+static DEVICE_ATTR_RO(wwid);
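+
+/*
+ * Resulting formats, in order of preference: "uuid.<UUID>" when a UUID is
+ * set, "eui.<16 hex bytes>" for an NGUID, "eui.<8 hex bytes>" for an
+ * EUI-64, and otherwise the legacy "nvme.<vid>-<serial>-<model>-<nsid>"
+ * fallback built from the trimmed subsystem serial and model strings.
+ */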
+
+static ssize_t nguid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid);
+}
+static DEVICE_ATTR_RO(nguid);
+
+static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;
+
+	/*
+	 * For backward compatibility expose the NGUID to userspace if
+	 * we have no UUID set.
+	 */
+ if (uuid_is_null(&ids->uuid)) {
+ dev_warn_ratelimited(dev,
+			"No UUID available, providing old NGUID\n");
+ return sysfs_emit(buf, "%pU\n", ids->nguid);
+ }
+ return sysfs_emit(buf, "%pU\n", &ids->uuid);
+}
+static DEVICE_ATTR_RO(uuid);
+
+static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64);
+}
+static DEVICE_ATTR_RO(eui);
+
+static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%d\n", dev_to_ns_head(dev)->ns_id);
+}
+static DEVICE_ATTR_RO(nsid);
+
+static struct attribute *nvme_ns_id_attrs[] = {
+ &dev_attr_wwid.attr,
+ &dev_attr_uuid.attr,
+ &dev_attr_nguid.attr,
+ &dev_attr_eui.attr,
+ &dev_attr_nsid.attr,
+#ifdef CONFIG_NVME_MULTIPATH
+ &dev_attr_ana_grpid.attr,
+ &dev_attr_ana_state.attr,
+#endif
+ NULL,
+};
+
+static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;
+
+ if (a == &dev_attr_uuid.attr) {
+ if (uuid_is_null(&ids->uuid) &&
+ !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
+ return 0;
+ }
+ if (a == &dev_attr_nguid.attr) {
+ if (!memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
+ return 0;
+ }
+ if (a == &dev_attr_eui.attr) {
+ if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
+ return 0;
+ }
+#ifdef CONFIG_NVME_MULTIPATH
+ if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) {
+ if (dev_to_disk(dev)->fops != &nvme_bdev_ops) /* per-path attr */
+ return 0;
+ if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl))
+ return 0;
+ }
+#endif
+ return a->mode;
+}
+
+static const struct attribute_group nvme_ns_id_attr_group = {
+ .attrs = nvme_ns_id_attrs,
+ .is_visible = nvme_ns_id_attrs_are_visible,
+};
+
+const struct attribute_group *nvme_ns_id_attr_groups[] = {
+ &nvme_ns_id_attr_group,
+ NULL,
+};
+
+#define nvme_show_str_function(field) \
+static ssize_t field##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \
+ return sysfs_emit(buf, "%.*s\n", \
+ (int)sizeof(ctrl->subsys->field), ctrl->subsys->field); \
+} \
+static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
+
+nvme_show_str_function(model);
+nvme_show_str_function(serial);
+nvme_show_str_function(firmware_rev);
+
+#define nvme_show_int_function(field) \
+static ssize_t field##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \
+ return sysfs_emit(buf, "%d\n", ctrl->field); \
+} \
+static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
+
+nvme_show_int_function(cntlid);
+nvme_show_int_function(numa_node);
+nvme_show_int_function(queue_count);
+nvme_show_int_function(sqsize);
+nvme_show_int_function(kato);
+
+static ssize_t nvme_sysfs_delete(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
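+	/* refuse deletion until the controller has completed its first start */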
+ if (!test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags))
+ return -EBUSY;
+
+ if (device_remove_file_self(dev, attr))
+ nvme_delete_ctrl_sync(ctrl);
+ return count;
+}
+static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete);
+
+static ssize_t nvme_sysfs_show_transport(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%s\n", ctrl->ops->name);
+}
+static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);
+
+static ssize_t nvme_sysfs_show_state(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ static const char *const state_name[] = {
+ [NVME_CTRL_NEW] = "new",
+ [NVME_CTRL_LIVE] = "live",
+ [NVME_CTRL_RESETTING] = "resetting",
+ [NVME_CTRL_CONNECTING] = "connecting",
+ [NVME_CTRL_DELETING] = "deleting",
+		[NVME_CTRL_DELETING_NOIO] = "deleting (no IO)",
+ [NVME_CTRL_DEAD] = "dead",
+ };
+
+ if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) &&
+ state_name[ctrl->state])
+ return sysfs_emit(buf, "%s\n", state_name[ctrl->state]);
+
+ return sysfs_emit(buf, "unknown state\n");
+}
+
+static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL);
+
+static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%s\n", ctrl->subsys->subnqn);
+}
+static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);
+
+static ssize_t nvme_sysfs_show_hostnqn(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%s\n", ctrl->opts->host->nqn);
+}
+static DEVICE_ATTR(hostnqn, S_IRUGO, nvme_sysfs_show_hostnqn, NULL);
+
+static ssize_t nvme_sysfs_show_hostid(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%pU\n", &ctrl->opts->host->id);
+}
+static DEVICE_ATTR(hostid, S_IRUGO, nvme_sysfs_show_hostid, NULL);
+
+static ssize_t nvme_sysfs_show_address(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE);
+}
+static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);
+
+static ssize_t nvme_ctrl_loss_tmo_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ struct nvmf_ctrl_options *opts = ctrl->opts;
+
+ if (ctrl->opts->max_reconnects == -1)
+ return sysfs_emit(buf, "off\n");
+ return sysfs_emit(buf, "%d\n",
+ opts->max_reconnects * opts->reconnect_delay);
+}
+
+static ssize_t nvme_ctrl_loss_tmo_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ struct nvmf_ctrl_options *opts = ctrl->opts;
+ int ctrl_loss_tmo, err;
+
+ err = kstrtoint(buf, 10, &ctrl_loss_tmo);
+ if (err)
+ return -EINVAL;
+
+ if (ctrl_loss_tmo < 0)
+ opts->max_reconnects = -1;
+ else
+ opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
+ opts->reconnect_delay);
+ return count;
+}
+static DEVICE_ATTR(ctrl_loss_tmo, S_IRUGO | S_IWUSR,
+ nvme_ctrl_loss_tmo_show, nvme_ctrl_loss_tmo_store);
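+
+/*
+ * Example: with reconnect_delay = 10 s, writing 600 stores
+ * max_reconnects = DIV_ROUND_UP(600, 10) = 60 and reads back as 600; a
+ * non-multiple such as 605 rounds up to 61 attempts and reads back as
+ * 610. Any negative value disables the timeout and reads back as "off".
+ */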
+
+static ssize_t nvme_ctrl_reconnect_delay_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ if (ctrl->opts->reconnect_delay == -1)
+ return sysfs_emit(buf, "off\n");
+ return sysfs_emit(buf, "%d\n", ctrl->opts->reconnect_delay);
+}
+
+static ssize_t nvme_ctrl_reconnect_delay_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ unsigned int v;
+ int err;
+
+ err = kstrtou32(buf, 10, &v);
+ if (err)
+ return err;
+
+ ctrl->opts->reconnect_delay = v;
+ return count;
+}
+static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR,
+ nvme_ctrl_reconnect_delay_show, nvme_ctrl_reconnect_delay_store);
+
+static ssize_t nvme_ctrl_fast_io_fail_tmo_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ if (ctrl->opts->fast_io_fail_tmo == -1)
+ return sysfs_emit(buf, "off\n");
+ return sysfs_emit(buf, "%d\n", ctrl->opts->fast_io_fail_tmo);
+}
+
+static ssize_t nvme_ctrl_fast_io_fail_tmo_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ struct nvmf_ctrl_options *opts = ctrl->opts;
+ int fast_io_fail_tmo, err;
+
+ err = kstrtoint(buf, 10, &fast_io_fail_tmo);
+ if (err)
+ return -EINVAL;
+
+ if (fast_io_fail_tmo < 0)
+ opts->fast_io_fail_tmo = -1;
+ else
+ opts->fast_io_fail_tmo = fast_io_fail_tmo;
+ return count;
+}
+static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR,
+ nvme_ctrl_fast_io_fail_tmo_show, nvme_ctrl_fast_io_fail_tmo_store);
+
+static ssize_t cntrltype_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ static const char * const type[] = {
+ [NVME_CTRL_IO] = "io\n",
+ [NVME_CTRL_DISC] = "discovery\n",
+ [NVME_CTRL_ADMIN] = "admin\n",
+ };
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ if (ctrl->cntrltype > NVME_CTRL_ADMIN || !type[ctrl->cntrltype])
+ return sysfs_emit(buf, "reserved\n");
+
+ return sysfs_emit(buf, type[ctrl->cntrltype]);
+}
+static DEVICE_ATTR_RO(cntrltype);
+
+static ssize_t dctype_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ static const char * const type[] = {
+ [NVME_DCTYPE_NOT_REPORTED] = "none\n",
+ [NVME_DCTYPE_DDC] = "ddc\n",
+ [NVME_DCTYPE_CDC] = "cdc\n",
+ };
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ if (ctrl->dctype > NVME_DCTYPE_CDC || !type[ctrl->dctype])
+ return sysfs_emit(buf, "reserved\n");
+
+ return sysfs_emit(buf, type[ctrl->dctype]);
+}
+static DEVICE_ATTR_RO(dctype);
+
+#ifdef CONFIG_NVME_AUTH
+static ssize_t nvme_ctrl_dhchap_secret_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ struct nvmf_ctrl_options *opts = ctrl->opts;
+
+ if (!opts->dhchap_secret)
+ return sysfs_emit(buf, "none\n");
+ return sysfs_emit(buf, "%s\n", opts->dhchap_secret);
+}
+
+static ssize_t nvme_ctrl_dhchap_secret_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ struct nvmf_ctrl_options *opts = ctrl->opts;
+ char *dhchap_secret;
+
+ if (!ctrl->opts->dhchap_secret)
+ return -EINVAL;
+ if (count < 7)
+ return -EINVAL;
+ if (memcmp(buf, "DHHC-1:", 7))
+ return -EINVAL;
+
+ dhchap_secret = kzalloc(count + 1, GFP_KERNEL);
+ if (!dhchap_secret)
+ return -ENOMEM;
+ memcpy(dhchap_secret, buf, count);
+ nvme_auth_stop(ctrl);
+ if (strcmp(dhchap_secret, opts->dhchap_secret)) {
+ struct nvme_dhchap_key *key, *host_key;
+ int ret;
+
+ ret = nvme_auth_generate_key(dhchap_secret, &key);
+ if (ret) {
+ kfree(dhchap_secret);
+ return ret;
+ }
+ kfree(opts->dhchap_secret);
+ opts->dhchap_secret = dhchap_secret;
+ host_key = ctrl->host_key;
+ mutex_lock(&ctrl->dhchap_auth_mutex);
+ ctrl->host_key = key;
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+ nvme_auth_free_key(host_key);
+ } else
+ kfree(dhchap_secret);
+ /* Start re-authentication */
+ dev_info(ctrl->device, "re-authenticating controller\n");
+ queue_work(nvme_wq, &ctrl->dhchap_auth_work);
+
+ return count;
+}
+
+static DEVICE_ATTR(dhchap_secret, S_IRUGO | S_IWUSR,
+ nvme_ctrl_dhchap_secret_show, nvme_ctrl_dhchap_secret_store);
+
+static ssize_t nvme_ctrl_dhchap_ctrl_secret_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ struct nvmf_ctrl_options *opts = ctrl->opts;
+
+ if (!opts->dhchap_ctrl_secret)
+ return sysfs_emit(buf, "none\n");
+ return sysfs_emit(buf, "%s\n", opts->dhchap_ctrl_secret);
+}
+
+static ssize_t nvme_ctrl_dhchap_ctrl_secret_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ struct nvmf_ctrl_options *opts = ctrl->opts;
+ char *dhchap_secret;
+
+ if (!ctrl->opts->dhchap_ctrl_secret)
+ return -EINVAL;
+ if (count < 7)
+ return -EINVAL;
+ if (memcmp(buf, "DHHC-1:", 7))
+ return -EINVAL;
+
+ dhchap_secret = kzalloc(count + 1, GFP_KERNEL);
+ if (!dhchap_secret)
+ return -ENOMEM;
+ memcpy(dhchap_secret, buf, count);
+ nvme_auth_stop(ctrl);
+ if (strcmp(dhchap_secret, opts->dhchap_ctrl_secret)) {
+ struct nvme_dhchap_key *key, *ctrl_key;
+ int ret;
+
+ ret = nvme_auth_generate_key(dhchap_secret, &key);
+ if (ret) {
+ kfree(dhchap_secret);
+ return ret;
+ }
+ kfree(opts->dhchap_ctrl_secret);
+ opts->dhchap_ctrl_secret = dhchap_secret;
+ ctrl_key = ctrl->ctrl_key;
+ mutex_lock(&ctrl->dhchap_auth_mutex);
+ ctrl->ctrl_key = key;
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+ nvme_auth_free_key(ctrl_key);
+ } else
+ kfree(dhchap_secret);
+ /* Start re-authentication */
+ dev_info(ctrl->device, "re-authenticating controller\n");
+ queue_work(nvme_wq, &ctrl->dhchap_auth_work);
+
+ return count;
+}
+
+static DEVICE_ATTR(dhchap_ctrl_secret, S_IRUGO | S_IWUSR,
+ nvme_ctrl_dhchap_ctrl_secret_show, nvme_ctrl_dhchap_ctrl_secret_store);
+#endif
+
+static struct attribute *nvme_dev_attrs[] = {
+ &dev_attr_reset_controller.attr,
+ &dev_attr_rescan_controller.attr,
+ &dev_attr_model.attr,
+ &dev_attr_serial.attr,
+ &dev_attr_firmware_rev.attr,
+ &dev_attr_cntlid.attr,
+ &dev_attr_delete_controller.attr,
+ &dev_attr_transport.attr,
+ &dev_attr_subsysnqn.attr,
+ &dev_attr_address.attr,
+ &dev_attr_state.attr,
+ &dev_attr_numa_node.attr,
+ &dev_attr_queue_count.attr,
+ &dev_attr_sqsize.attr,
+ &dev_attr_hostnqn.attr,
+ &dev_attr_hostid.attr,
+ &dev_attr_ctrl_loss_tmo.attr,
+ &dev_attr_reconnect_delay.attr,
+ &dev_attr_fast_io_fail_tmo.attr,
+ &dev_attr_kato.attr,
+ &dev_attr_cntrltype.attr,
+ &dev_attr_dctype.attr,
+#ifdef CONFIG_NVME_AUTH
+ &dev_attr_dhchap_secret.attr,
+ &dev_attr_dhchap_ctrl_secret.attr,
+#endif
+ NULL
+};
+
+static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl)
+ return 0;
+ if (a == &dev_attr_address.attr && !ctrl->ops->get_address)
+ return 0;
+ if (a == &dev_attr_hostnqn.attr && !ctrl->opts)
+ return 0;
+ if (a == &dev_attr_hostid.attr && !ctrl->opts)
+ return 0;
+ if (a == &dev_attr_ctrl_loss_tmo.attr && !ctrl->opts)
+ return 0;
+ if (a == &dev_attr_reconnect_delay.attr && !ctrl->opts)
+ return 0;
+ if (a == &dev_attr_fast_io_fail_tmo.attr && !ctrl->opts)
+ return 0;
+#ifdef CONFIG_NVME_AUTH
+ if (a == &dev_attr_dhchap_secret.attr && !ctrl->opts)
+ return 0;
+ if (a == &dev_attr_dhchap_ctrl_secret.attr && !ctrl->opts)
+ return 0;
+#endif
+
+ return a->mode;
+}
+
+const struct attribute_group nvme_dev_attrs_group = {
+ .attrs = nvme_dev_attrs,
+ .is_visible = nvme_dev_attrs_are_visible,
+};
+EXPORT_SYMBOL_GPL(nvme_dev_attrs_group);
+
+const struct attribute_group *nvme_dev_attr_groups[] = {
+ &nvme_dev_attrs_group,
+ NULL,
+};
+
+#define SUBSYS_ATTR_RO(_name, _mode, _show) \
+ struct device_attribute subsys_attr_##_name = \
+ __ATTR(_name, _mode, _show, NULL)
+
+static ssize_t nvme_subsys_show_nqn(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_subsystem *subsys =
+ container_of(dev, struct nvme_subsystem, dev);
+
+ return sysfs_emit(buf, "%s\n", subsys->subnqn);
+}
+static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn);
+
+static ssize_t nvme_subsys_show_type(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_subsystem *subsys =
+ container_of(dev, struct nvme_subsystem, dev);
+
+ switch (subsys->subtype) {
+ case NVME_NQN_DISC:
+ return sysfs_emit(buf, "discovery\n");
+ case NVME_NQN_NVME:
+ return sysfs_emit(buf, "nvm\n");
+ default:
+ return sysfs_emit(buf, "reserved\n");
+ }
+}
+static SUBSYS_ATTR_RO(subsystype, S_IRUGO, nvme_subsys_show_type);
+
+#define nvme_subsys_show_str_function(field) \
+static ssize_t subsys_##field##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct nvme_subsystem *subsys = \
+ container_of(dev, struct nvme_subsystem, dev); \
+ return sysfs_emit(buf, "%.*s\n", \
+ (int)sizeof(subsys->field), subsys->field); \
+} \
+static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show);
+
+nvme_subsys_show_str_function(model);
+nvme_subsys_show_str_function(serial);
+nvme_subsys_show_str_function(firmware_rev);
+
+static struct attribute *nvme_subsys_attrs[] = {
+ &subsys_attr_model.attr,
+ &subsys_attr_serial.attr,
+ &subsys_attr_firmware_rev.attr,
+ &subsys_attr_subsysnqn.attr,
+ &subsys_attr_subsystype.attr,
+#ifdef CONFIG_NVME_MULTIPATH
+ &subsys_attr_iopolicy.attr,
+#endif
+ NULL,
+};
+
+static const struct attribute_group nvme_subsys_attrs_group = {
+ .attrs = nvme_subsys_attrs,
+};
+
+const struct attribute_group *nvme_subsys_attrs_groups[] = {
+ &nvme_subsys_attrs_group,
+ NULL,
+};
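
For readers less familiar with the sysfs machinery this patch relies on, the show/store plus is_visible pattern used throughout the hunk above generalizes as in the following minimal sketch; every demo_* name is invented for illustration and is not part of the patch:

/*
 * Hypothetical sketch of the DEVICE_ATTR + is_visible pattern; the
 * demo_* names are invented for illustration only.
 */
#include <linux/device.h>
#include <linux/kstrtox.h>
#include <linux/sysfs.h>

static int demo_value;

static ssize_t demo_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	/* sysfs_emit() bounds output to the single PAGE_SIZE buffer */
	return sysfs_emit(buf, "%d\n", demo_value);
}

static ssize_t demo_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	int err = kstrtoint(buf, 10, &demo_value);

	return err ? err : count;
}
static DEVICE_ATTR(demo, S_IRUGO | S_IWUSR, demo_show, demo_store);

static struct attribute *demo_attrs[] = {
	&dev_attr_demo.attr,
	NULL
};

static umode_t demo_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	/* return 0 to hide the file; a->mode keeps the declared mode */
	return a->mode;
}

static const struct attribute_group demo_attr_group = {
	.attrs = demo_attrs,
	.is_visible = demo_attrs_are_visible,
};
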
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index bf0230442d57..260b3554d821 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1802,58 +1802,12 @@ out_free_queues:
return ret;
}
-static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl)
-{
- unsigned int nr_io_queues;
-
- nr_io_queues = min(ctrl->opts->nr_io_queues, num_online_cpus());
- nr_io_queues += min(ctrl->opts->nr_write_queues, num_online_cpus());
- nr_io_queues += min(ctrl->opts->nr_poll_queues, num_online_cpus());
-
- return nr_io_queues;
-}
-
-static void nvme_tcp_set_io_queues(struct nvme_ctrl *nctrl,
- unsigned int nr_io_queues)
-{
- struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
- struct nvmf_ctrl_options *opts = nctrl->opts;
-
- if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) {
- /*
- * separate read/write queues
- * hand out dedicated default queues only after we have
- * sufficient read queues.
- */
- ctrl->io_queues[HCTX_TYPE_READ] = opts->nr_io_queues;
- nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
- ctrl->io_queues[HCTX_TYPE_DEFAULT] =
- min(opts->nr_write_queues, nr_io_queues);
- nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
- } else {
- /*
- * shared read/write queues
- * either no write queues were requested, or we don't have
- * sufficient queue count to have dedicated default queues.
- */
- ctrl->io_queues[HCTX_TYPE_DEFAULT] =
- min(opts->nr_io_queues, nr_io_queues);
- nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
- }
-
- if (opts->nr_poll_queues && nr_io_queues) {
- /* map dedicated poll queues only if we have queues left */
- ctrl->io_queues[HCTX_TYPE_POLL] =
- min(opts->nr_poll_queues, nr_io_queues);
- }
-}
-
static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
{
unsigned int nr_io_queues;
int ret;
- nr_io_queues = nvme_tcp_nr_io_queues(ctrl);
+ nr_io_queues = nvmf_nr_io_queues(ctrl->opts);
ret = nvme_set_queue_count(ctrl, &nr_io_queues);
if (ret)
return ret;
@@ -1868,8 +1822,8 @@ static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
dev_info(ctrl->device,
"creating %d I/O queues.\n", nr_io_queues);
- nvme_tcp_set_io_queues(ctrl, nr_io_queues);
-
+ nvmf_set_io_queues(ctrl->opts, nr_io_queues,
+ to_tcp_ctrl(ctrl)->io_queues);
return __nvme_tcp_alloc_io_queues(ctrl);
}
@@ -2449,44 +2403,8 @@ static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
static void nvme_tcp_map_queues(struct blk_mq_tag_set *set)
{
struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(set->driver_data);
- struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
-
- if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
- /* separate read/write queues */
- set->map[HCTX_TYPE_DEFAULT].nr_queues =
- ctrl->io_queues[HCTX_TYPE_DEFAULT];
- set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
- set->map[HCTX_TYPE_READ].nr_queues =
- ctrl->io_queues[HCTX_TYPE_READ];
- set->map[HCTX_TYPE_READ].queue_offset =
- ctrl->io_queues[HCTX_TYPE_DEFAULT];
- } else {
- /* shared read/write queues */
- set->map[HCTX_TYPE_DEFAULT].nr_queues =
- ctrl->io_queues[HCTX_TYPE_DEFAULT];
- set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
- set->map[HCTX_TYPE_READ].nr_queues =
- ctrl->io_queues[HCTX_TYPE_DEFAULT];
- set->map[HCTX_TYPE_READ].queue_offset = 0;
- }
- blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
- blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
-
- if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
- /* map dedicated poll queues only if we have queues left */
- set->map[HCTX_TYPE_POLL].nr_queues =
- ctrl->io_queues[HCTX_TYPE_POLL];
- set->map[HCTX_TYPE_POLL].queue_offset =
- ctrl->io_queues[HCTX_TYPE_DEFAULT] +
- ctrl->io_queues[HCTX_TYPE_READ];
- blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
- }
-
- dev_info(ctrl->ctrl.device,
- "mapped %d/%d/%d default/read/poll queues.\n",
- ctrl->io_queues[HCTX_TYPE_DEFAULT],
- ctrl->io_queues[HCTX_TYPE_READ],
- ctrl->io_queues[HCTX_TYPE_POLL]);
+
+ nvmf_map_queues(set, &ctrl->ctrl, ctrl->io_queues);
}
static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
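
The queue-accounting code deleted above is not lost: it moves into shared fabrics helpers (nvmf_nr_io_queues(), nvmf_set_io_queues(), nvmf_map_queues()) so TCP and RDMA no longer duplicate it. As a rough, user-space illustration of what the split computes — a sketch of the removed logic, not of the new helpers, which this diff does not show:

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

enum { DEFAULT, READ, POLL };

/* Mirrors the deleted nvme_tcp_set_io_queues() split, renamed for clarity. */
static void demo_split(unsigned int total, unsigned int want_io,
		       unsigned int want_write, unsigned int want_poll,
		       unsigned int q[3])
{
	if (want_write && want_io < total) {
		/* dedicated read queues first, then default (write) queues */
		q[READ] = want_io;
		total -= q[READ];
		q[DEFAULT] = MIN(want_write, total);
		total -= q[DEFAULT];
	} else {
		/* shared read/write queues */
		q[DEFAULT] = MIN(want_io, total);
		total -= q[DEFAULT];
	}
	if (want_poll && total)
		q[POLL] = MIN(want_poll, total);
}

int main(void)
{
	unsigned int q[3] = { 0 };

	/* e.g. 6 hardware queues; 2 read, 2 write, 2 poll requested */
	demo_split(6, 2, 2, 2, q);
	printf("default=%u read=%u poll=%u\n", q[DEFAULT], q[READ], q[POLL]);
	return 0;
}
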
diff --git a/drivers/nvme/target/fabrics-cmd-auth.c b/drivers/nvme/target/fabrics-cmd-auth.c
index 7970a7640e58..586458f765f1 100644
--- a/drivers/nvme/target/fabrics-cmd-auth.c
+++ b/drivers/nvme/target/fabrics-cmd-auth.c
@@ -295,13 +295,11 @@ void nvmet_execute_auth_send(struct nvmet_req *req)
status = 0;
}
goto done_kfree;
- break;
case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2:
req->sq->authenticated = true;
pr_debug("%s: ctrl %d qid %d ctrl authenticated\n",
__func__, ctrl->cntlid, req->sq->qid);
goto done_kfree;
- break;
case NVME_AUTH_DHCHAP_MESSAGE_FAILURE2:
status = nvmet_auth_failure2(d);
if (status) {
@@ -312,7 +310,6 @@ void nvmet_execute_auth_send(struct nvmet_req *req)
status = 0;
}
goto done_kfree;
- break;
default:
req->sq->dhchap_status =
NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
@@ -320,7 +317,6 @@ void nvmet_execute_auth_send(struct nvmet_req *req)
NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;
req->sq->authenticated = false;
goto done_kfree;
- break;
}
done_failure1:
req->sq->dhchap_status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
@@ -483,15 +479,6 @@ void nvmet_execute_auth_receive(struct nvmet_req *req)
status = NVME_SC_INTERNAL;
break;
}
- if (status) {
- req->sq->dhchap_status = status;
- nvmet_auth_failure1(req, d, al);
- pr_warn("ctrl %d qid %d: challenge status (%x)\n",
- ctrl->cntlid, req->sq->qid,
- req->sq->dhchap_status);
- status = 0;
- break;
- }
req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_REPLY;
break;
case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1:
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index e940a7d37a9d..c65a73433c05 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -645,8 +645,6 @@ fcloop_fcp_recv_work(struct work_struct *work)
}
if (ret)
fcloop_call_host_done(fcpreq, tfcp_req, ret);
-
- return;
}
static void
@@ -1168,7 +1166,8 @@ __wait_localport_unreg(struct fcloop_lport *lport)
ret = nvme_fc_unregister_localport(lport->localport);
- wait_for_completion(&lport->unreg_done);
+ if (!ret)
+ wait_for_completion(&lport->unreg_done);
kfree(lport);
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index c2d6cea0236b..2733e0158585 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -51,7 +51,7 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
{
if (ns->bdev) {
- blkdev_put(ns->bdev, FMODE_WRITE | FMODE_READ);
+ blkdev_put(ns->bdev, NULL);
ns->bdev = NULL;
}
}
@@ -85,7 +85,7 @@ int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
return -ENOTBLK;
ns->bdev = blkdev_get_by_path(ns->device_path,
- FMODE_READ | FMODE_WRITE, NULL);
+ BLK_OPEN_READ | BLK_OPEN_WRITE, NULL, NULL);
if (IS_ERR(ns->bdev)) {
ret = PTR_ERR(ns->bdev);
if (ret != -ENOTBLK) {
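
For context, this hunk tracks the block-layer API change in this cycle: blkdev_get_by_path() now takes BLK_OPEN_* mode flags plus a holder and a holder-ops pointer, and blkdev_put() takes the holder instead of the mode. A minimal sketch of the new pairing, assuming the signatures visible in this hunk and no holder:

#include <linux/blkdev.h>

/* Sketch of the updated open/put pairing; no holder is used (NULL). */
static struct block_device *demo_open(const char *path)
{
	return blkdev_get_by_path(path, BLK_OPEN_READ | BLK_OPEN_WRITE,
				  NULL, NULL);
}

static void demo_close(struct block_device *bdev)
{
	blkdev_put(bdev, NULL);
}
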
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index dc60a22646f7..6cf723bc664e 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -109,8 +109,8 @@ struct nvmet_sq {
u32 sqhd;
bool sqhd_disabled;
#ifdef CONFIG_NVME_TARGET_AUTH
- struct delayed_work auth_expired_work;
bool authenticated;
+ struct delayed_work auth_expired_work;
u16 dhchap_tid;
u16 dhchap_status;
int dhchap_step;
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 511c980d538d..71a9c1cc57f5 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -243,7 +243,7 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
blk_mq_free_request(rq);
if (effects)
- nvme_passthru_end(ctrl, effects, req->cmd, status);
+ nvme_passthru_end(ctrl, ns, effects, req->cmd, status);
}
static enum rq_end_io_ret nvmet_passthru_req_done(struct request *rq,
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index 2e01960f1aeb..7feb643f1370 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -811,6 +811,7 @@ static int init_overlay_changeset(struct overlay_changeset *ovcs)
if (!fragment->target) {
pr_err("symbols in overlay, but not in live tree\n");
ret = -EINVAL;
+ of_node_put(node);
goto err_out;
}
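
The one-line fix above plugs a device-node refcount leak on an early error return. The general rule it restores, as a hedged sketch with hypothetical demo_* names:

#include <linux/errno.h>
#include <linux/of.h>

/*
 * Sketch only: a node obtained with an elevated refcount must be
 * balanced by of_node_put() on every exit path, including early
 * error returns out of an iterator.
 */
static int demo_walk(struct device_node *parent)
{
	struct device_node *node;

	for_each_child_of_node(parent, node) {
		if (!of_property_present(node, "status")) {
			of_node_put(node);	/* balance before bailing out */
			return -EINVAL;
		}
	}
	return 0;
}
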
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index bc32662c6bb7..2d93d0c4f10d 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -489,7 +489,10 @@ struct hv_pcibus_device {
struct fwnode_handle *fwnode;
/* Protocol version negotiated with the host */
enum pci_protocol_version_t protocol_version;
+
+ struct mutex state_lock;
enum hv_pcibus_state state;
+
struct hv_device *hdev;
resource_size_t low_mmio_space;
resource_size_t high_mmio_space;
@@ -545,19 +548,10 @@ struct hv_dr_state {
struct hv_pcidev_description func[];
};
-enum hv_pcichild_state {
- hv_pcichild_init = 0,
- hv_pcichild_requirements,
- hv_pcichild_resourced,
- hv_pcichild_ejecting,
- hv_pcichild_maximum
-};
-
struct hv_pci_dev {
/* List protected by pci_rescan_remove_lock */
struct list_head list_entry;
refcount_t refs;
- enum hv_pcichild_state state;
struct pci_slot *pci_slot;
struct hv_pcidev_description desc;
bool reported_missing;
@@ -635,6 +629,11 @@ static void hv_arch_irq_unmask(struct irq_data *data)
pbus = pdev->bus;
hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
int_desc = data->chip_data;
+ if (!int_desc) {
+ dev_warn(&hbus->hdev->device, "%s() cannot unmask irq %u\n",
+ __func__, data->irq);
+ return;
+ }
local_irq_save(flags);
@@ -2004,12 +2003,6 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
hv_pci_onchannelcallback(hbus);
spin_unlock_irqrestore(&channel->sched_lock, flags);
- if (hpdev->state == hv_pcichild_ejecting) {
- dev_err_once(&hbus->hdev->device,
- "the device is being ejected\n");
- goto enable_tasklet;
- }
-
udelay(100);
}
@@ -2615,6 +2608,8 @@ static void pci_devices_present_work(struct work_struct *work)
if (!dr)
return;
+ mutex_lock(&hbus->state_lock);
+
/* First, mark all existing children as reported missing. */
spin_lock_irqsave(&hbus->device_list_lock, flags);
list_for_each_entry(hpdev, &hbus->children, list_entry) {
@@ -2696,6 +2691,8 @@ static void pci_devices_present_work(struct work_struct *work)
break;
}
+ mutex_unlock(&hbus->state_lock);
+
kfree(dr);
}
@@ -2844,7 +2841,7 @@ static void hv_eject_device_work(struct work_struct *work)
hpdev = container_of(work, struct hv_pci_dev, wrk);
hbus = hpdev->hbus;
- WARN_ON(hpdev->state != hv_pcichild_ejecting);
+ mutex_lock(&hbus->state_lock);
/*
* Ejection can come before or after the PCI bus has been set up, so
@@ -2882,6 +2879,8 @@ static void hv_eject_device_work(struct work_struct *work)
put_pcichild(hpdev);
put_pcichild(hpdev);
/* hpdev has been freed. Do not use it any more. */
+
+ mutex_unlock(&hbus->state_lock);
}
/**
@@ -2902,7 +2901,6 @@ static void hv_pci_eject_device(struct hv_pci_dev *hpdev)
return;
}
- hpdev->state = hv_pcichild_ejecting;
get_pcichild(hpdev);
INIT_WORK(&hpdev->wrk, hv_eject_device_work);
queue_work(hbus->wq, &hpdev->wrk);
@@ -3331,8 +3329,10 @@ static int hv_pci_enter_d0(struct hv_device *hdev)
struct pci_bus_d0_entry *d0_entry;
struct hv_pci_compl comp_pkt;
struct pci_packet *pkt;
+ bool retry = true;
int ret;
+enter_d0_retry:
/*
* Tell the host that the bus is ready to use, and moved into the
* powered-on state. This includes telling the host which region
@@ -3359,6 +3359,38 @@ static int hv_pci_enter_d0(struct hv_device *hdev)
if (ret)
goto exit;
+ /*
+ * In certain cases (kdump), the PCI device of interest was not
+ * cleanly shut down and its resources are still held on the host
+ * side, so the host could return an invalid device status.
+ * We need to explicitly ask the host to release the resources
+ * and try to enter D0 again.
+ */
+ if (comp_pkt.completion_status < 0 && retry) {
+ retry = false;
+
+ dev_err(&hdev->device, "Retrying D0 Entry\n");
+
+ /*
+ * hv_pci_bus_exit() calls hv_send_resources_released()
+ * to free up the resources of its child devices.
+ * In the kdump kernel we need to set
+ * wslot_res_allocated to 255 so it scans all child
+ * devices to release resources allocated in the
+ * normal kernel before the panic happened.
+ */
+ hbus->wslot_res_allocated = 255;
+
+ ret = hv_pci_bus_exit(hdev, true);
+
+ if (ret == 0) {
+ kfree(pkt);
+ goto enter_d0_retry;
+ }
+ dev_err(&hdev->device,
+ "Retrying D0 failed with ret %d\n", ret);
+ }
+
if (comp_pkt.completion_status < 0) {
dev_err(&hdev->device,
"PCI Pass-through VSP failed D0 Entry with status %x\n",
@@ -3401,6 +3433,24 @@ static int hv_pci_query_relations(struct hv_device *hdev)
if (!ret)
ret = wait_for_response(hdev, &comp);
+ /*
+ * In the case of fast device addition/removal, it's possible that
+ * vmbus_sendpacket() or wait_for_response() returns -ENODEV but we
+ * already got a PCI_BUS_RELATIONS* message from the host and the
+ * channel callback already scheduled a work to hbus->wq, which can be
+ * running pci_devices_present_work() -> survey_child_resources() ->
+ * complete(&hbus->survey_event), even after hv_pci_query_relations()
+ * exits and the stack variable 'comp' is no longer valid; as a result,
+ * a hang or a page fault may happen when the complete() calls
+ * raw_spin_lock_irqsave(). Flush hbus->wq before we exit from
+ * hv_pci_query_relations() to avoid the issues. Note: if 'ret' is
+ * -ENODEV, there can't be any more work item scheduled to hbus->wq
+ * after the flush_workqueue(): see vmbus_onoffer_rescind() ->
+ * vmbus_reset_channel_cb(), vmbus_rescind_cleanup() ->
+ * channel->rescind = true.
+ */
+ flush_workqueue(hbus->wq);
+
return ret;
}
@@ -3586,7 +3636,6 @@ static int hv_pci_probe(struct hv_device *hdev,
struct hv_pcibus_device *hbus;
u16 dom_req, dom;
char *name;
- bool enter_d0_retry = true;
int ret;
bridge = devm_pci_alloc_host_bridge(&hdev->device, 0);
@@ -3598,6 +3647,7 @@ static int hv_pci_probe(struct hv_device *hdev,
return -ENOMEM;
hbus->bridge = bridge;
+ mutex_init(&hbus->state_lock);
hbus->state = hv_pcibus_init;
hbus->wslot_res_allocated = -1;
@@ -3703,49 +3753,15 @@ static int hv_pci_probe(struct hv_device *hdev,
if (ret)
goto free_fwnode;
-retry:
ret = hv_pci_query_relations(hdev);
if (ret)
goto free_irq_domain;
- ret = hv_pci_enter_d0(hdev);
- /*
- * In certain case (Kdump) the pci device of interest was
- * not cleanly shut down and resource is still held on host
- * side, the host could return invalid device status.
- * We need to explicitly request host to release the resource
- * and try to enter D0 again.
- * Since the hv_pci_bus_exit() call releases structures
- * of all its child devices, we need to start the retry from
- * hv_pci_query_relations() call, requesting host to send
- * the synchronous child device relations message before this
- * information is needed in hv_send_resources_allocated()
- * call later.
- */
- if (ret == -EPROTO && enter_d0_retry) {
- enter_d0_retry = false;
-
- dev_err(&hdev->device, "Retrying D0 Entry\n");
-
- /*
- * Hv_pci_bus_exit() calls hv_send_resources_released()
- * to free up resources of its child devices.
- * In the kdump kernel we need to set the
- * wslot_res_allocated to 255 so it scans all child
- * devices to release resources allocated in the
- * normal kernel before panic happened.
- */
- hbus->wslot_res_allocated = 255;
- ret = hv_pci_bus_exit(hdev, true);
-
- if (ret == 0)
- goto retry;
+ mutex_lock(&hbus->state_lock);
- dev_err(&hdev->device,
- "Retrying D0 failed with ret %d\n", ret);
- }
+ ret = hv_pci_enter_d0(hdev);
if (ret)
- goto free_irq_domain;
+ goto release_state_lock;
ret = hv_pci_allocate_bridge_windows(hbus);
if (ret)
@@ -3763,12 +3779,15 @@ retry:
if (ret)
goto free_windows;
+ mutex_unlock(&hbus->state_lock);
return 0;
free_windows:
hv_pci_free_bridge_windows(hbus);
exit_d0:
(void) hv_pci_bus_exit(hdev, true);
+release_state_lock:
+ mutex_unlock(&hbus->state_lock);
free_irq_domain:
irq_domain_remove(hbus->irq_domain);
free_fwnode:
@@ -4018,20 +4037,26 @@ static int hv_pci_resume(struct hv_device *hdev)
if (ret)
goto out;
+ mutex_lock(&hbus->state_lock);
+
ret = hv_pci_enter_d0(hdev);
if (ret)
- goto out;
+ goto release_state_lock;
ret = hv_send_resources_allocated(hdev);
if (ret)
- goto out;
+ goto release_state_lock;
prepopulate_bars(hbus);
hv_pci_restore_msi_state(hbus);
hbus->state = hv_pcibus_installed;
+ mutex_unlock(&hbus->state_lock);
return 0;
+
+release_state_lock:
+ mutex_unlock(&hbus->state_lock);
out:
vmbus_close(hdev->channel);
return ret;
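
The retry logic relocated into hv_pci_enter_d0() above follows a one-shot-retry shape: a boolean armed exactly once, a backward goto, and a host-side resource release in between. Stripped to its skeleton (stand-in functions, not the driver's code):

#include <stdbool.h>

/* Stand-in helpers; the real driver talks to the VSP channel here. */
static int demo_send_d0_entry(void)           { return -1; }
static void demo_release_host_resources(void) { }

static int demo_enter_d0(void)
{
	bool retry = true;
	int ret;

again:
	ret = demo_send_d0_entry();
	if (ret < 0 && retry) {
		retry = false;	/* arm the retry exactly once */
		demo_release_host_resources();
		goto again;
	}
	return ret;
}
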
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index f4e2a88729fd..c525867760bf 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -6003,8 +6003,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56c1, aspm_l1_acceptable_latency
#ifdef CONFIG_PCIE_DPC
/*
- * Intel Tiger Lake and Alder Lake BIOS has a bug that clears the DPC
- * RP PIO Log Size of the integrated Thunderbolt PCIe Root Ports.
+ * The BIOS on Intel Ice Lake, Tiger Lake and Alder Lake systems has a
+ * bug that clears the DPC RP PIO Log Size of the integrated
+ * Thunderbolt PCIe Root Ports.
*/
static void dpc_log_size(struct pci_dev *dev)
{
@@ -6027,6 +6028,10 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x461f, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x462f, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x463f, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x466e, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8a1d, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8a1f, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8a21, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8a23, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a23, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a25, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a27, dpc_log_size);
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index 711f82400086..4c07d71cab0b 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -127,6 +127,14 @@ config FSL_IMX8_DDR_PMU
can give information about memory throughput and other related
events.
+config FSL_IMX9_DDR_PMU
+ tristate "Freescale i.MX9 DDR perf monitor"
+ depends on ARCH_MXC
+ help
+ Provides support for the DDR performance monitor in i.MX9, which
+ can give information about memory throughput and other related
+ events.
+
config QCOM_L2_PMU
bool "Qualcomm Technologies L2-cache PMU"
depends on ARCH_QCOM && ARM64 && ACPI
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
index dabc859540ce..5cfe8954c250 100644
--- a/drivers/perf/Makefile
+++ b/drivers/perf/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_ARM_PMU_ACPI) += arm_pmu_acpi.o
obj-$(CONFIG_ARM_PMUV3) += arm_pmuv3.o
obj-$(CONFIG_ARM_SMMU_V3_PMU) += arm_smmuv3_pmu.o
obj-$(CONFIG_FSL_IMX8_DDR_PMU) += fsl_imx8_ddr_perf.o
+obj-$(CONFIG_FSL_IMX9_DDR_PMU) += fsl_imx9_ddr_perf.o
obj-$(CONFIG_HISI_PMU) += hisilicon/
obj-$(CONFIG_QCOM_L2_PMU) += qcom_l2_pmu.o
obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o
diff --git a/drivers/perf/apple_m1_cpu_pmu.c b/drivers/perf/apple_m1_cpu_pmu.c
index 8574c6e58c83..cd2de44b61b9 100644
--- a/drivers/perf/apple_m1_cpu_pmu.c
+++ b/drivers/perf/apple_m1_cpu_pmu.c
@@ -493,6 +493,17 @@ static int m1_pmu_map_event(struct perf_event *event)
return armpmu_map_event(event, &m1_pmu_perf_map, NULL, M1_PMU_CFG_EVENT);
}
+static int m2_pmu_map_event(struct perf_event *event)
+{
+ /*
+ * Same deal as the above, except that M2 has 64bit counters.
+ * Which, as far as we're concerned, actually means 63 bits.
+ * Yes, this is getting awkward.
+ */
+ event->hw.flags |= ARMPMU_EVT_63BIT;
+ return armpmu_map_event(event, &m1_pmu_perf_map, NULL, M1_PMU_CFG_EVENT);
+}
+
static void m1_pmu_reset(void *info)
{
int i;
@@ -525,7 +536,7 @@ static int m1_pmu_set_event_filter(struct hw_perf_event *event,
return 0;
}
-static int m1_pmu_init(struct arm_pmu *cpu_pmu)
+static int m1_pmu_init(struct arm_pmu *cpu_pmu, u32 flags)
{
cpu_pmu->handle_irq = m1_pmu_handle_irq;
cpu_pmu->enable = m1_pmu_enable_event;
@@ -536,7 +547,14 @@ static int m1_pmu_init(struct arm_pmu *cpu_pmu)
cpu_pmu->clear_event_idx = m1_pmu_clear_event_idx;
cpu_pmu->start = m1_pmu_start;
cpu_pmu->stop = m1_pmu_stop;
- cpu_pmu->map_event = m1_pmu_map_event;
+
+ if (flags & ARMPMU_EVT_47BIT)
+ cpu_pmu->map_event = m1_pmu_map_event;
+ else if (flags & ARMPMU_EVT_63BIT)
+ cpu_pmu->map_event = m2_pmu_map_event;
+ else
+ return WARN_ON(-EINVAL);
+
cpu_pmu->reset = m1_pmu_reset;
cpu_pmu->set_event_filter = m1_pmu_set_event_filter;
@@ -550,25 +568,25 @@ static int m1_pmu_init(struct arm_pmu *cpu_pmu)
static int m1_pmu_ice_init(struct arm_pmu *cpu_pmu)
{
cpu_pmu->name = "apple_icestorm_pmu";
- return m1_pmu_init(cpu_pmu);
+ return m1_pmu_init(cpu_pmu, ARMPMU_EVT_47BIT);
}
static int m1_pmu_fire_init(struct arm_pmu *cpu_pmu)
{
cpu_pmu->name = "apple_firestorm_pmu";
- return m1_pmu_init(cpu_pmu);
+ return m1_pmu_init(cpu_pmu, ARMPMU_EVT_47BIT);
}
static int m2_pmu_avalanche_init(struct arm_pmu *cpu_pmu)
{
cpu_pmu->name = "apple_avalanche_pmu";
- return m1_pmu_init(cpu_pmu);
+ return m1_pmu_init(cpu_pmu, ARMPMU_EVT_63BIT);
}
static int m2_pmu_blizzard_init(struct arm_pmu *cpu_pmu)
{
cpu_pmu->name = "apple_blizzard_pmu";
- return m1_pmu_init(cpu_pmu);
+ return m1_pmu_init(cpu_pmu, ARMPMU_EVT_63BIT);
}
static const struct of_device_id m1_pmu_of_device_ids[] = {
diff --git a/drivers/perf/arm-cci.c b/drivers/perf/arm-cci.c
index 03b1309875ae..998259f1d973 100644
--- a/drivers/perf/arm-cci.c
+++ b/drivers/perf/arm-cci.c
@@ -645,7 +645,7 @@ static void cci_pmu_sync_counters(struct cci_pmu *cci_pmu)
struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events;
DECLARE_BITMAP(mask, HW_CNTRS_MAX);
- bitmap_zero(mask, cci_pmu->num_cntrs);
+ bitmap_zero(mask, HW_CNTRS_MAX);
for_each_set_bit(i, cci_pmu->hw_events.used_mask, cci_pmu->num_cntrs) {
struct perf_event *event = cci_hw->events[i];
@@ -656,7 +656,7 @@ static void cci_pmu_sync_counters(struct cci_pmu *cci_pmu)
if (event->hw.state & PERF_HES_STOPPED)
continue;
if (event->hw.state & PERF_HES_ARCH) {
- set_bit(i, mask);
+ __set_bit(i, mask);
event->hw.state &= ~PERF_HES_ARCH;
}
}
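
Two details in this small fix are worth spelling out: the local bitmap must be zeroed to its full declared size (HW_CNTRS_MAX), otherwise the tail bits keep stack garbage whenever num_cntrs is smaller, and a bitmap that never leaves the stack can use the cheaper non-atomic __set_bit(). A sketch of the corrected shape, with DEMO_MAX standing in for HW_CNTRS_MAX:

#include <linux/bitmap.h>

#define DEMO_MAX 64

static void demo_collect(const unsigned long *used, unsigned int num_used,
			 unsigned long *out)
{
	unsigned int i;
	DECLARE_BITMAP(mask, DEMO_MAX);

	/* zero the whole declared bitmap, not just num_used bits */
	bitmap_zero(mask, DEMO_MAX);

	for_each_set_bit(i, used, num_used)
		__set_bit(i, mask);	/* non-atomic: mask is local */

	bitmap_copy(out, mask, DEMO_MAX);
}
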
diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
index 47d359f72957..b8c15878bc86 100644
--- a/drivers/perf/arm-cmn.c
+++ b/drivers/perf/arm-cmn.c
@@ -44,8 +44,11 @@
#define CMN_MAX_DTMS (CMN_MAX_XPS + (CMN_MAX_DIMENSION - 1) * 4)
/* The CFG node has various info besides the discovery tree */
-#define CMN_CFGM_PERIPH_ID_2 0x0010
-#define CMN_CFGM_PID2_REVISION GENMASK(7, 4)
+#define CMN_CFGM_PERIPH_ID_01 0x0008
+#define CMN_CFGM_PID0_PART_0 GENMASK_ULL(7, 0)
+#define CMN_CFGM_PID1_PART_1 GENMASK_ULL(35, 32)
+#define CMN_CFGM_PERIPH_ID_23 0x0010
+#define CMN_CFGM_PID2_REVISION GENMASK_ULL(7, 4)
#define CMN_CFGM_INFO_GLOBAL 0x900
#define CMN_INFO_MULTIPLE_DTM_EN BIT_ULL(63)
@@ -186,6 +189,7 @@
#define CMN_WP_DOWN 2
+/* Internal values for encoding event support */
enum cmn_model {
CMN600 = 1,
CMN650 = 2,
@@ -197,26 +201,34 @@ enum cmn_model {
CMN_650ON = CMN650 | CMN700,
};
+/* Actual part numbers and revision IDs defined by the hardware */
+enum cmn_part {
+ PART_CMN600 = 0x434,
+ PART_CMN650 = 0x436,
+ PART_CMN700 = 0x43c,
+ PART_CI700 = 0x43a,
+};
+
/* CMN-600 r0px shouldn't exist in silicon, thankfully */
enum cmn_revision {
- CMN600_R1P0,
- CMN600_R1P1,
- CMN600_R1P2,
- CMN600_R1P3,
- CMN600_R2P0,
- CMN600_R3P0,
- CMN600_R3P1,
- CMN650_R0P0 = 0,
- CMN650_R1P0,
- CMN650_R1P1,
- CMN650_R2P0,
- CMN650_R1P2,
- CMN700_R0P0 = 0,
- CMN700_R1P0,
- CMN700_R2P0,
- CI700_R0P0 = 0,
- CI700_R1P0,
- CI700_R2P0,
+ REV_CMN600_R1P0,
+ REV_CMN600_R1P1,
+ REV_CMN600_R1P2,
+ REV_CMN600_R1P3,
+ REV_CMN600_R2P0,
+ REV_CMN600_R3P0,
+ REV_CMN600_R3P1,
+ REV_CMN650_R0P0 = 0,
+ REV_CMN650_R1P0,
+ REV_CMN650_R1P1,
+ REV_CMN650_R2P0,
+ REV_CMN650_R1P2,
+ REV_CMN700_R0P0 = 0,
+ REV_CMN700_R1P0,
+ REV_CMN700_R2P0,
+ REV_CI700_R0P0 = 0,
+ REV_CI700_R1P0,
+ REV_CI700_R2P0,
};
enum cmn_node_type {
@@ -306,7 +318,7 @@ struct arm_cmn {
unsigned int state;
enum cmn_revision rev;
- enum cmn_model model;
+ enum cmn_part part;
u8 mesh_x;
u8 mesh_y;
u16 num_xps;
@@ -394,19 +406,35 @@ static struct arm_cmn_node *arm_cmn_node(const struct arm_cmn *cmn,
return NULL;
}
+static enum cmn_model arm_cmn_model(const struct arm_cmn *cmn)
+{
+ switch (cmn->part) {
+ case PART_CMN600:
+ return CMN600;
+ case PART_CMN650:
+ return CMN650;
+ case PART_CMN700:
+ return CMN700;
+ case PART_CI700:
+ return CI700;
+ default:
+ return 0;
+ }
+}
+
static u32 arm_cmn_device_connect_info(const struct arm_cmn *cmn,
const struct arm_cmn_node *xp, int port)
{
int offset = CMN_MXP__CONNECT_INFO(port);
if (port >= 2) {
- if (cmn->model & (CMN600 | CMN650))
+ if (cmn->part == PART_CMN600 || cmn->part == PART_CMN650)
return 0;
/*
* CI-700 may have extra ports, but still has the
* mesh_port_connect_info registers in the way.
*/
- if (cmn->model == CI700)
+ if (cmn->part == PART_CI700)
offset += CI700_CONNECT_INFO_P2_5_OFFSET;
}
@@ -640,7 +668,7 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
eattr = container_of(attr, typeof(*eattr), attr.attr);
- if (!(eattr->model & cmn->model))
+ if (!(eattr->model & arm_cmn_model(cmn)))
return 0;
type = eattr->type;
@@ -658,7 +686,7 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
if ((intf & 4) && !(cmn->ports_used & BIT(intf & 3)))
return 0;
- if (chan == 4 && cmn->model == CMN600)
+ if (chan == 4 && cmn->part == PART_CMN600)
return 0;
if ((chan == 5 && cmn->rsp_vc_num < 2) ||
@@ -669,19 +697,19 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
}
/* Revision-specific differences */
- if (cmn->model == CMN600) {
- if (cmn->rev < CMN600_R1P3) {
+ if (cmn->part == PART_CMN600) {
+ if (cmn->rev < REV_CMN600_R1P3) {
if (type == CMN_TYPE_CXRA && eventid > 0x10)
return 0;
}
- if (cmn->rev < CMN600_R1P2) {
+ if (cmn->rev < REV_CMN600_R1P2) {
if (type == CMN_TYPE_HNF && eventid == 0x1b)
return 0;
if (type == CMN_TYPE_CXRA || type == CMN_TYPE_CXHA)
return 0;
}
- } else if (cmn->model == CMN650) {
- if (cmn->rev < CMN650_R2P0 || cmn->rev == CMN650_R1P2) {
+ } else if (cmn->part == PART_CMN650) {
+ if (cmn->rev < REV_CMN650_R2P0 || cmn->rev == REV_CMN650_R1P2) {
if (type == CMN_TYPE_HNF && eventid > 0x22)
return 0;
if (type == CMN_TYPE_SBSX && eventid == 0x17)
@@ -689,8 +717,8 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
if (type == CMN_TYPE_RNI && eventid > 0x10)
return 0;
}
- } else if (cmn->model == CMN700) {
- if (cmn->rev < CMN700_R2P0) {
+ } else if (cmn->part == PART_CMN700) {
+ if (cmn->rev < REV_CMN700_R2P0) {
if (type == CMN_TYPE_HNF && eventid > 0x2c)
return 0;
if (type == CMN_TYPE_CCHA && eventid > 0x74)
@@ -698,7 +726,7 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
if (type == CMN_TYPE_CCLA && eventid > 0x27)
return 0;
}
- if (cmn->rev < CMN700_R1P0) {
+ if (cmn->rev < REV_CMN700_R1P0) {
if (type == CMN_TYPE_HNF && eventid > 0x2b)
return 0;
}
@@ -1171,19 +1199,31 @@ static ssize_t arm_cmn_cpumask_show(struct device *dev,
static struct device_attribute arm_cmn_cpumask_attr =
__ATTR(cpumask, 0444, arm_cmn_cpumask_show, NULL);
-static struct attribute *arm_cmn_cpumask_attrs[] = {
+static ssize_t arm_cmn_identifier_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct arm_cmn *cmn = to_cmn(dev_get_drvdata(dev));
+
+ return sysfs_emit(buf, "%03x%02x\n", cmn->part, cmn->rev);
+}
+
+static struct device_attribute arm_cmn_identifier_attr =
+ __ATTR(identifier, 0444, arm_cmn_identifier_show, NULL);
+
+static struct attribute *arm_cmn_other_attrs[] = {
&arm_cmn_cpumask_attr.attr,
+ &arm_cmn_identifier_attr.attr,
NULL,
};
-static const struct attribute_group arm_cmn_cpumask_attr_group = {
- .attrs = arm_cmn_cpumask_attrs,
+static const struct attribute_group arm_cmn_other_attrs_group = {
+ .attrs = arm_cmn_other_attrs,
};
static const struct attribute_group *arm_cmn_attr_groups[] = {
&arm_cmn_event_attrs_group,
&arm_cmn_format_attrs_group,
- &arm_cmn_cpumask_attr_group,
+ &arm_cmn_other_attrs_group,
NULL
};
@@ -1200,7 +1240,7 @@ static u32 arm_cmn_wp_config(struct perf_event *event)
u32 grp = CMN_EVENT_WP_GRP(event);
u32 exc = CMN_EVENT_WP_EXCLUSIVE(event);
u32 combine = CMN_EVENT_WP_COMBINE(event);
- bool is_cmn600 = to_cmn(event->pmu)->model == CMN600;
+ bool is_cmn600 = to_cmn(event->pmu)->part == PART_CMN600;
config = FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_DEV_SEL, dev) |
FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_CHN_SEL, chn) |
@@ -1520,14 +1560,14 @@ done:
return ret;
}
-static enum cmn_filter_select arm_cmn_filter_sel(enum cmn_model model,
+static enum cmn_filter_select arm_cmn_filter_sel(const struct arm_cmn *cmn,
enum cmn_node_type type,
unsigned int eventid)
{
struct arm_cmn_event_attr *e;
- int i;
+ enum cmn_model model = arm_cmn_model(cmn);
- for (i = 0; i < ARRAY_SIZE(arm_cmn_event_attrs) - 1; i++) {
+ for (int i = 0; i < ARRAY_SIZE(arm_cmn_event_attrs) - 1; i++) {
e = container_of(arm_cmn_event_attrs[i], typeof(*e), attr.attr);
if (e->model & model && e->type == type && e->eventid == eventid)
return e->fsel;
@@ -1570,12 +1610,12 @@ static int arm_cmn_event_init(struct perf_event *event)
/* ...but the DTM may depend on which port we're watching */
if (cmn->multi_dtm)
hw->dtm_offset = CMN_EVENT_WP_DEV_SEL(event) / 2;
- } else if (type == CMN_TYPE_XP && cmn->model == CMN700) {
+ } else if (type == CMN_TYPE_XP && cmn->part == PART_CMN700) {
hw->wide_sel = true;
}
/* This is sufficiently annoying to recalculate, so cache it */
- hw->filter_sel = arm_cmn_filter_sel(cmn->model, type, eventid);
+ hw->filter_sel = arm_cmn_filter_sel(cmn, type, eventid);
bynodeid = CMN_EVENT_BYNODEID(event);
nodeid = CMN_EVENT_NODEID(event);
@@ -1899,9 +1939,10 @@ static int arm_cmn_init_dtc(struct arm_cmn *cmn, struct arm_cmn_node *dn, int id
if (dtc->irq < 0)
return dtc->irq;
- writel_relaxed(0, dtc->base + CMN_DT_PMCR);
+ writel_relaxed(CMN_DT_DTC_CTL_DT_EN, dtc->base + CMN_DT_DTC_CTL);
+ writel_relaxed(CMN_DT_PMCR_PMU_EN | CMN_DT_PMCR_OVFL_INTR_EN, dtc->base + CMN_DT_PMCR);
+ writeq_relaxed(0, dtc->base + CMN_DT_PMCCNTR);
writel_relaxed(0x1ff, dtc->base + CMN_DT_PMOVSR_CLR);
- writel_relaxed(CMN_DT_PMCR_OVFL_INTR_EN, dtc->base + CMN_DT_PMCR);
return 0;
}
@@ -1961,7 +2002,7 @@ static int arm_cmn_init_dtcs(struct arm_cmn *cmn)
dn->type = CMN_TYPE_CCLA;
}
- writel_relaxed(CMN_DT_DTC_CTL_DT_EN, cmn->dtc[0].base + CMN_DT_DTC_CTL);
+ arm_cmn_set_state(cmn, CMN_STATE_DISABLED);
return 0;
}
@@ -2006,6 +2047,7 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
void __iomem *cfg_region;
struct arm_cmn_node cfg, *dn;
struct arm_cmn_dtm *dtm;
+ enum cmn_part part;
u16 child_count, child_poff;
u32 xp_offset[CMN_MAX_XPS];
u64 reg;
@@ -2017,7 +2059,19 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
return -ENODEV;
cfg_region = cmn->base + rgn_offset;
- reg = readl_relaxed(cfg_region + CMN_CFGM_PERIPH_ID_2);
+
+ reg = readq_relaxed(cfg_region + CMN_CFGM_PERIPH_ID_01);
+ part = FIELD_GET(CMN_CFGM_PID0_PART_0, reg);
+ part |= FIELD_GET(CMN_CFGM_PID1_PART_1, reg) << 8;
+ if (cmn->part && cmn->part != part)
+ dev_warn(cmn->dev,
+ "Firmware binding mismatch: expected part number 0x%x, found 0x%x\n",
+ cmn->part, part);
+ cmn->part = part;
+ if (!arm_cmn_model(cmn))
+ dev_warn(cmn->dev, "Unknown part number: 0x%x\n", part);
+
+ reg = readl_relaxed(cfg_region + CMN_CFGM_PERIPH_ID_23);
cmn->rev = FIELD_GET(CMN_CFGM_PID2_REVISION, reg);
reg = readq_relaxed(cfg_region + CMN_CFGM_INFO_GLOBAL);
@@ -2081,7 +2135,7 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
if (xp->id == (1 << 3))
cmn->mesh_x = xp->logid;
- if (cmn->model == CMN600)
+ if (cmn->part == PART_CMN600)
xp->dtc = 0xf;
else
xp->dtc = 1 << readl_relaxed(xp_region + CMN_DTM_UNIT_INFO);
@@ -2201,7 +2255,7 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
if (cmn->num_xps == 1)
dev_warn(cmn->dev, "1x1 config not fully supported, translate XP events manually\n");
- dev_dbg(cmn->dev, "model %d, periph_id_2 revision %d\n", cmn->model, cmn->rev);
+ dev_dbg(cmn->dev, "periph_id part 0x%03x revision %d\n", cmn->part, cmn->rev);
reg = cmn->ports_used;
dev_dbg(cmn->dev, "mesh %dx%d, ID width %d, ports %6pbl%s\n",
cmn->mesh_x, cmn->mesh_y, arm_cmn_xyidbits(cmn), &reg,
@@ -2256,17 +2310,17 @@ static int arm_cmn_probe(struct platform_device *pdev)
return -ENOMEM;
cmn->dev = &pdev->dev;
- cmn->model = (unsigned long)device_get_match_data(cmn->dev);
+ cmn->part = (unsigned long)device_get_match_data(cmn->dev);
platform_set_drvdata(pdev, cmn);
- if (cmn->model == CMN600 && has_acpi_companion(cmn->dev)) {
+ if (cmn->part == PART_CMN600 && has_acpi_companion(cmn->dev)) {
rootnode = arm_cmn600_acpi_probe(pdev, cmn);
} else {
rootnode = 0;
cmn->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(cmn->base))
return PTR_ERR(cmn->base);
- if (cmn->model == CMN600)
+ if (cmn->part == PART_CMN600)
rootnode = arm_cmn600_of_probe(pdev->dev.of_node);
}
if (rootnode < 0)
@@ -2335,10 +2389,10 @@ static int arm_cmn_remove(struct platform_device *pdev)
#ifdef CONFIG_OF
static const struct of_device_id arm_cmn_of_match[] = {
- { .compatible = "arm,cmn-600", .data = (void *)CMN600 },
- { .compatible = "arm,cmn-650", .data = (void *)CMN650 },
- { .compatible = "arm,cmn-700", .data = (void *)CMN700 },
- { .compatible = "arm,ci-700", .data = (void *)CI700 },
+ { .compatible = "arm,cmn-600", .data = (void *)PART_CMN600 },
+ { .compatible = "arm,cmn-650" },
+ { .compatible = "arm,cmn-700" },
+ { .compatible = "arm,ci-700" },
{}
};
MODULE_DEVICE_TABLE(of, arm_cmn_of_match);
@@ -2346,9 +2400,9 @@ MODULE_DEVICE_TABLE(of, arm_cmn_of_match);
#ifdef CONFIG_ACPI
static const struct acpi_device_id arm_cmn_acpi_match[] = {
- { "ARMHC600", CMN600 },
- { "ARMHC650", CMN650 },
- { "ARMHC700", CMN700 },
+ { "ARMHC600", PART_CMN600 },
+ { "ARMHC650" },
+ { "ARMHC700" },
{}
};
MODULE_DEVICE_TABLE(acpi, arm_cmn_acpi_match);
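
The new discovery code assembles the part number from the PERIPH_ID_0/1 registers: bits [7:0] of the 64-bit read hold the low byte and bits [35:32] hold the high nibble, so 0x34 and 0x4 combine to 0x434 (PART_CMN600). A standalone sketch of the decode, with DEMO_* masks mirroring the CMN_CFGM_* definitions in the hunk above:

#include <linux/bitfield.h>
#include <linux/types.h>

#define DEMO_PID0_PART_0	GENMASK_ULL(7, 0)
#define DEMO_PID1_PART_1	GENMASK_ULL(35, 32)

static u16 demo_decode_part(u64 periph_id_01)
{
	u16 part = FIELD_GET(DEMO_PID0_PART_0, periph_id_01);

	part |= FIELD_GET(DEMO_PID1_PART_1, periph_id_01) << 8;
	return part;	/* e.g. 0x434 for CMN-600, 0x43c for CMN-700 */
}
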
diff --git a/drivers/perf/arm_cspmu/Kconfig b/drivers/perf/arm_cspmu/Kconfig
index 0b316fe69a45..25d25ded0983 100644
--- a/drivers/perf/arm_cspmu/Kconfig
+++ b/drivers/perf/arm_cspmu/Kconfig
@@ -4,8 +4,7 @@
config ARM_CORESIGHT_PMU_ARCH_SYSTEM_PMU
tristate "ARM Coresight Architecture PMU"
- depends on ARM64 && ACPI
- depends on ACPI_APMT || COMPILE_TEST
+ depends on ARM64 || COMPILE_TEST
help
Provides support for performance monitoring unit (PMU) devices
based on ARM CoreSight PMU architecture. Note that this PMU
diff --git a/drivers/perf/arm_cspmu/arm_cspmu.c b/drivers/perf/arm_cspmu/arm_cspmu.c
index a3f1c410b417..e2b7827c4563 100644
--- a/drivers/perf/arm_cspmu/arm_cspmu.c
+++ b/drivers/perf/arm_cspmu/arm_cspmu.c
@@ -28,7 +28,6 @@
#include <linux/module.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
-#include <acpi/processor.h>
#include "arm_cspmu.h"
#include "nvidia_cspmu.h"
@@ -101,10 +100,6 @@
#define ARM_CSPMU_ACTIVE_CPU_MASK 0x0
#define ARM_CSPMU_ASSOCIATED_CPU_MASK 0x1
-/* Check if field f in flags is set with value v */
-#define CHECK_APMT_FLAG(flags, f, v) \
- ((flags & (ACPI_APMT_FLAGS_ ## f)) == (ACPI_APMT_FLAGS_ ## f ## _ ## v))
-
/* Check and use default if implementer doesn't provide attribute callback */
#define CHECK_DEFAULT_IMPL_OPS(ops, callback) \
do { \
@@ -122,6 +117,11 @@
static unsigned long arm_cspmu_cpuhp_state;
+static struct acpi_apmt_node *arm_cspmu_apmt_node(struct device *dev)
+{
+ return *(struct acpi_apmt_node **)dev_get_platdata(dev);
+}
+
/*
* In CoreSight PMU architecture, all of the MMIO registers are 32-bit except
* counter register. The counter register can be implemented as 32-bit or 64-bit
@@ -156,12 +156,6 @@ static u64 read_reg64_hilohi(const void __iomem *addr, u32 max_poll_count)
return val;
}
-/* Check if PMU supports 64-bit single copy atomic. */
-static inline bool supports_64bit_atomics(const struct arm_cspmu *cspmu)
-{
- return CHECK_APMT_FLAG(cspmu->apmt_node->flags, ATOMIC, SUPP);
-}
-
/* Check if cycle counter is supported. */
static inline bool supports_cycle_counter(const struct arm_cspmu *cspmu)
{
@@ -189,10 +183,10 @@ static inline bool use_64b_counter_reg(const struct arm_cspmu *cspmu)
ssize_t arm_cspmu_sysfs_event_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct dev_ext_attribute *eattr =
- container_of(attr, struct dev_ext_attribute, attr);
- return sysfs_emit(buf, "event=0x%llx\n",
- (unsigned long long)eattr->var);
+ struct perf_pmu_events_attr *pmu_attr;
+
+ pmu_attr = container_of(attr, typeof(*pmu_attr), attr);
+ return sysfs_emit(buf, "event=0x%llx\n", pmu_attr->id);
}
EXPORT_SYMBOL_GPL(arm_cspmu_sysfs_event_show);
@@ -320,7 +314,7 @@ static const char *arm_cspmu_get_name(const struct arm_cspmu *cspmu)
static atomic_t pmu_idx[ACPI_APMT_NODE_TYPE_COUNT] = { 0 };
dev = cspmu->dev;
- apmt_node = cspmu->apmt_node;
+ apmt_node = arm_cspmu_apmt_node(dev);
pmu_type = apmt_node->type;
if (pmu_type >= ACPI_APMT_NODE_TYPE_COUNT) {
@@ -397,8 +391,8 @@ static const struct impl_match impl_match[] = {
static int arm_cspmu_init_impl_ops(struct arm_cspmu *cspmu)
{
int ret;
- struct acpi_apmt_node *apmt_node = cspmu->apmt_node;
struct arm_cspmu_impl_ops *impl_ops = &cspmu->impl.ops;
+ struct acpi_apmt_node *apmt_node = arm_cspmu_apmt_node(cspmu->dev);
const struct impl_match *match = impl_match;
/*
@@ -720,7 +714,7 @@ static u64 arm_cspmu_read_counter(struct perf_event *event)
offset = counter_offset(sizeof(u64), event->hw.idx);
counter_addr = cspmu->base1 + offset;
- return supports_64bit_atomics(cspmu) ?
+ return cspmu->has_atomic_dword ?
readq(counter_addr) :
read_reg64_hilohi(counter_addr, HILOHI_MAX_POLL);
}
@@ -911,24 +905,18 @@ static struct arm_cspmu *arm_cspmu_alloc(struct platform_device *pdev)
{
struct acpi_apmt_node *apmt_node;
struct arm_cspmu *cspmu;
- struct device *dev;
-
- dev = &pdev->dev;
- apmt_node = *(struct acpi_apmt_node **)dev_get_platdata(dev);
- if (!apmt_node) {
- dev_err(dev, "failed to get APMT node\n");
- return NULL;
- }
+ struct device *dev = &pdev->dev;
cspmu = devm_kzalloc(dev, sizeof(*cspmu), GFP_KERNEL);
if (!cspmu)
return NULL;
cspmu->dev = dev;
- cspmu->apmt_node = apmt_node;
-
platform_set_drvdata(pdev, cspmu);
+ apmt_node = arm_cspmu_apmt_node(dev);
+ cspmu->has_atomic_dword = apmt_node->flags & ACPI_APMT_FLAGS_ATOMIC;
+
return cspmu;
}
@@ -936,11 +924,9 @@ static int arm_cspmu_init_mmio(struct arm_cspmu *cspmu)
{
struct device *dev;
struct platform_device *pdev;
- struct acpi_apmt_node *apmt_node;
dev = cspmu->dev;
pdev = to_platform_device(dev);
- apmt_node = cspmu->apmt_node;
/* Base address for page 0. */
cspmu->base0 = devm_platform_ioremap_resource(pdev, 0);
@@ -951,7 +937,7 @@ static int arm_cspmu_init_mmio(struct arm_cspmu *cspmu)
/* Base address for page 1 if supported. Otherwise point to page 0. */
cspmu->base1 = cspmu->base0;
- if (CHECK_APMT_FLAG(apmt_node->flags, DUAL_PAGE, SUPP)) {
+ if (platform_get_resource(pdev, IORESOURCE_MEM, 1)) {
cspmu->base1 = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(cspmu->base1)) {
dev_err(dev, "ioremap failed for page-1 resource\n");
@@ -1048,19 +1034,14 @@ static int arm_cspmu_request_irq(struct arm_cspmu *cspmu)
int irq, ret;
struct device *dev;
struct platform_device *pdev;
- struct acpi_apmt_node *apmt_node;
dev = cspmu->dev;
pdev = to_platform_device(dev);
- apmt_node = cspmu->apmt_node;
/* Skip IRQ request if the PMU does not support overflow interrupt. */
- if (apmt_node->ovflw_irq == 0)
- return 0;
-
- irq = platform_get_irq(pdev, 0);
+ irq = platform_get_irq_optional(pdev, 0);
if (irq < 0)
- return irq;
+ return irq == -ENXIO ? 0 : irq;
ret = devm_request_irq(dev, irq, arm_cspmu_handle_irq,
IRQF_NOBALANCING | IRQF_NO_THREAD, dev_name(dev),
@@ -1075,6 +1056,9 @@ static int arm_cspmu_request_irq(struct arm_cspmu *cspmu)
return 0;
}
+#if defined(CONFIG_ACPI) && defined(CONFIG_ARM64)
+#include <acpi/processor.h>
+
static inline int arm_cspmu_find_cpu_container(int cpu, u32 container_uid)
{
u32 acpi_uid;
@@ -1099,15 +1083,13 @@ static inline int arm_cspmu_find_cpu_container(int cpu, u32 container_uid)
return -ENODEV;
}
-static int arm_cspmu_get_cpus(struct arm_cspmu *cspmu)
+static int arm_cspmu_acpi_get_cpus(struct arm_cspmu *cspmu)
{
- struct device *dev;
struct acpi_apmt_node *apmt_node;
int affinity_flag;
int cpu;
- dev = cspmu->pmu.dev;
- apmt_node = cspmu->apmt_node;
+ apmt_node = arm_cspmu_apmt_node(cspmu->dev);
affinity_flag = apmt_node->flags & ACPI_APMT_FLAGS_AFFINITY;
if (affinity_flag == ACPI_APMT_FLAGS_AFFINITY_PROC) {
@@ -1129,12 +1111,23 @@ static int arm_cspmu_get_cpus(struct arm_cspmu *cspmu)
}
if (cpumask_empty(&cspmu->associated_cpus)) {
- dev_dbg(dev, "No cpu associated with the PMU\n");
+ dev_dbg(cspmu->dev, "No cpu associated with the PMU\n");
return -ENODEV;
}
return 0;
}
+#else
+static int arm_cspmu_acpi_get_cpus(struct arm_cspmu *cspmu)
+{
+ return -ENODEV;
+}
+#endif
+
+static int arm_cspmu_get_cpus(struct arm_cspmu *cspmu)
+{
+ return arm_cspmu_acpi_get_cpus(cspmu);
+}
static int arm_cspmu_register_pmu(struct arm_cspmu *cspmu)
{
@@ -1220,6 +1213,12 @@ static int arm_cspmu_device_remove(struct platform_device *pdev)
return 0;
}
+static const struct platform_device_id arm_cspmu_id[] = {
+ {DRVNAME, 0},
+ { },
+};
+MODULE_DEVICE_TABLE(platform, arm_cspmu_id);
+
static struct platform_driver arm_cspmu_driver = {
.driver = {
.name = DRVNAME,
@@ -1227,12 +1226,14 @@ static struct platform_driver arm_cspmu_driver = {
},
.probe = arm_cspmu_device_probe,
.remove = arm_cspmu_device_remove,
+ .id_table = arm_cspmu_id,
};
static void arm_cspmu_set_active_cpu(int cpu, struct arm_cspmu *cspmu)
{
cpumask_set_cpu(cpu, &cspmu->active_cpu);
- WARN_ON(irq_set_affinity(cspmu->irq, &cspmu->active_cpu));
+ if (cspmu->irq)
+ WARN_ON(irq_set_affinity(cspmu->irq, &cspmu->active_cpu));
}
static int arm_cspmu_cpu_online(unsigned int cpu, struct hlist_node *node)
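
The IRQ setup change above swaps an APMT-specific flag check for the generic optional-IRQ idiom: platform_get_irq_optional() returns -ENXIO when no interrupt is described, and that case is treated as "no overflow IRQ" rather than an error. The idiom in isolation (a sketch, not the driver code):

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static int demo_request_irq(struct platform_device *pdev,
			    irq_handler_t handler, void *data)
{
	int irq = platform_get_irq_optional(pdev, 0);

	if (irq < 0)
		return irq == -ENXIO ? 0 : irq;	/* absent IRQ is fine */

	return devm_request_irq(&pdev->dev, irq, handler,
				IRQF_NOBALANCING | IRQF_NO_THREAD,
				dev_name(&pdev->dev), data);
}
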
diff --git a/drivers/perf/arm_cspmu/arm_cspmu.h b/drivers/perf/arm_cspmu/arm_cspmu.h
index 51323b175a4a..83df53d1c132 100644
--- a/drivers/perf/arm_cspmu/arm_cspmu.h
+++ b/drivers/perf/arm_cspmu/arm_cspmu.h
@@ -8,7 +8,6 @@
#ifndef __ARM_CSPMU_H__
#define __ARM_CSPMU_H__
-#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/cpumask.h>
#include <linux/device.h>
@@ -118,16 +117,16 @@ struct arm_cspmu_impl {
struct arm_cspmu {
struct pmu pmu;
struct device *dev;
- struct acpi_apmt_node *apmt_node;
const char *name;
const char *identifier;
void __iomem *base0;
void __iomem *base1;
- int irq;
cpumask_t associated_cpus;
cpumask_t active_cpu;
struct hlist_node cpuhp_node;
+ int irq;
+ bool has_atomic_dword;
u32 pmcfgr;
u32 num_logical_ctrs;
u32 num_set_clr_reg;
diff --git a/drivers/perf/arm_dmc620_pmu.c b/drivers/perf/arm_dmc620_pmu.c
index 5de06f9a4dd3..9d0f01c4455a 100644
--- a/drivers/perf/arm_dmc620_pmu.c
+++ b/drivers/perf/arm_dmc620_pmu.c
@@ -227,9 +227,31 @@ static const struct attribute_group dmc620_pmu_format_attr_group = {
.attrs = dmc620_pmu_formats_attrs,
};
+static ssize_t dmc620_pmu_cpumask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(dev_get_drvdata(dev));
+
+ return cpumap_print_to_pagebuf(true, buf,
+ cpumask_of(dmc620_pmu->irq->cpu));
+}
+
+static struct device_attribute dmc620_pmu_cpumask_attr =
+ __ATTR(cpumask, 0444, dmc620_pmu_cpumask_show, NULL);
+
+static struct attribute *dmc620_pmu_cpumask_attrs[] = {
+ &dmc620_pmu_cpumask_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group dmc620_pmu_cpumask_attr_group = {
+ .attrs = dmc620_pmu_cpumask_attrs,
+};
+
static const struct attribute_group *dmc620_pmu_attr_groups[] = {
&dmc620_pmu_events_attr_group,
&dmc620_pmu_format_attr_group,
+ &dmc620_pmu_cpumask_attr_group,
NULL,
};
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 15bd1e34a88e..277e29fbd504 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -109,6 +109,8 @@ static inline u64 arm_pmu_event_max_period(struct perf_event *event)
{
if (event->hw.flags & ARMPMU_EVT_64BIT)
return GENMASK_ULL(63, 0);
+ else if (event->hw.flags & ARMPMU_EVT_63BIT)
+ return GENMASK_ULL(62, 0);
else if (event->hw.flags & ARMPMU_EVT_47BIT)
return GENMASK_ULL(46, 0);
else
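
For reference, GENMASK_ULL(62, 0) in the new branch equals 2^63 - 1, the largest value a 63-bit up-counter can hold, just as GENMASK_ULL(46, 0) is 2^47 - 1 for the 47-bit case. The same arithmetic in plain C:

#include <stdint.h>

/* max period of an n-bit up-counter: the n low bits all set */
static uint64_t demo_max_period(unsigned int bits)
{
	return bits >= 64 ? UINT64_MAX : (UINT64_C(1) << bits) - 1;
	/* demo_max_period(63) == 0x7fffffffffffffff == GENMASK_ULL(62, 0) */
}
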
diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c
index c98e4039386d..93b7edb5f1e7 100644
--- a/drivers/perf/arm_pmuv3.c
+++ b/drivers/perf/arm_pmuv3.c
@@ -677,9 +677,25 @@ static inline u32 armv8pmu_getreset_flags(void)
return value;
}
+static void update_pmuserenr(u64 val)
+{
+ lockdep_assert_irqs_disabled();
+
+ /*
+ * The current PMUSERENR_EL0 value might be the value for the guest.
+ * If that's the case, have KVM keep tracking of the register value
+ * for the host EL0 so that KVM can restore it before returning to
+ * the host EL0. Otherwise, update the register now.
+ */
+ if (kvm_set_pmuserenr(val))
+ return;
+
+ write_pmuserenr(val);
+}
+
static void armv8pmu_disable_user_access(void)
{
- write_pmuserenr(0);
+ update_pmuserenr(0);
}
static void armv8pmu_enable_user_access(struct arm_pmu *cpu_pmu)
@@ -695,8 +711,7 @@ static void armv8pmu_enable_user_access(struct arm_pmu *cpu_pmu)
armv8pmu_write_evcntr(i, 0);
}
- write_pmuserenr(0);
- write_pmuserenr(ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_CR);
+ update_pmuserenr(ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_CR);
}
static void armv8pmu_enable_event(struct perf_event *event)
diff --git a/drivers/perf/fsl_imx9_ddr_perf.c b/drivers/perf/fsl_imx9_ddr_perf.c
new file mode 100644
index 000000000000..71d5b07e3aff
--- /dev/null
+++ b/drivers/perf/fsl_imx9_ddr_perf.c
@@ -0,0 +1,711 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright 2023 NXP
+
+#include <linux/bitfield.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/perf_event.h>
+
+/* Performance monitor configuration */
+#define PMCFG1 0x00
+#define PMCFG1_RD_TRANS_FILT_EN BIT(31)
+#define PMCFG1_WR_TRANS_FILT_EN BIT(30)
+#define PMCFG1_RD_BT_FILT_EN BIT(29)
+#define PMCFG1_ID_MASK GENMASK(17, 0)
+
+#define PMCFG2 0x04
+#define PMCFG2_ID GENMASK(17, 0)
+
+/* Global control register affects all counters and takes priority over local control registers */
+#define PMGC0 0x40
+/* Global control register bits */
+#define PMGC0_FAC BIT(31)
+#define PMGC0_PMIE BIT(30)
+#define PMGC0_FCECE BIT(29)
+
+/*
+ * The 64-bit counter0 is dedicated exclusively to counting cycles.
+ * The 32-bit counters monitor counter-specific events in addition to
+ * counting the shared reference events.
+ */
+#define PMLCA(n) (0x40 + 0x10 + (0x10 * (n)))
+#define PMLCB(n) (0x40 + 0x14 + (0x10 * (n)))
+#define PMC(n) (0x40 + 0x18 + (0x10 * (n)))
+/* Local control register bits */
+#define PMLCA_FC BIT(31)
+#define PMLCA_CE BIT(26)
+#define PMLCA_EVENT GENMASK(22, 16)
+
+#define NUM_COUNTERS 11
+#define CYCLES_COUNTER 0
+
+#define to_ddr_pmu(p) container_of(p, struct ddr_pmu, pmu)
+
+#define DDR_PERF_DEV_NAME "imx9_ddr"
+#define DDR_CPUHP_CB_NAME DDR_PERF_DEV_NAME "_perf_pmu"
+
+static DEFINE_IDA(ddr_ida);
+
+struct imx_ddr_devtype_data {
+ const char *identifier; /* system PMU identifier for userspace */
+};
+
+struct ddr_pmu {
+ struct pmu pmu;
+ void __iomem *base;
+ unsigned int cpu;
+ struct hlist_node node;
+ struct device *dev;
+ struct perf_event *events[NUM_COUNTERS];
+ int active_events;
+ enum cpuhp_state cpuhp_state;
+ const struct imx_ddr_devtype_data *devtype_data;
+ int irq;
+ int id;
+};
+
+static const struct imx_ddr_devtype_data imx93_devtype_data = {
+ .identifier = "imx93",
+};
+
+static const struct of_device_id imx_ddr_pmu_dt_ids[] = {
+ {.compatible = "fsl,imx93-ddr-pmu", .data = &imx93_devtype_data},
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx_ddr_pmu_dt_ids);
+
+static ssize_t ddr_perf_identifier_show(struct device *dev,
+ struct device_attribute *attr,
+ char *page)
+{
+ struct ddr_pmu *pmu = dev_get_drvdata(dev);
+
+ return sysfs_emit(page, "%s\n", pmu->devtype_data->identifier);
+}
+
+static struct device_attribute ddr_perf_identifier_attr =
+ __ATTR(identifier, 0444, ddr_perf_identifier_show, NULL);
+
+static struct attribute *ddr_perf_identifier_attrs[] = {
+ &ddr_perf_identifier_attr.attr,
+ NULL,
+};
+
+static struct attribute_group ddr_perf_identifier_attr_group = {
+ .attrs = ddr_perf_identifier_attrs,
+};
+
+static ssize_t ddr_perf_cpumask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ddr_pmu *pmu = dev_get_drvdata(dev);
+
+ return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu));
+}
+
+static struct device_attribute ddr_perf_cpumask_attr =
+ __ATTR(cpumask, 0444, ddr_perf_cpumask_show, NULL);
+
+static struct attribute *ddr_perf_cpumask_attrs[] = {
+ &ddr_perf_cpumask_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ddr_perf_cpumask_attr_group = {
+ .attrs = ddr_perf_cpumask_attrs,
+};
+
+static ssize_t ddr_pmu_event_show(struct device *dev,
+ struct device_attribute *attr, char *page)
+{
+ struct perf_pmu_events_attr *pmu_attr;
+
+ pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
+ return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
+}
+
+#define IMX9_DDR_PMU_EVENT_ATTR(_name, _id) \
+ (&((struct perf_pmu_events_attr[]) { \
+ { .attr = __ATTR(_name, 0444, ddr_pmu_event_show, NULL),\
+ .id = _id, } \
+ })[0].attr.attr)
+
+static struct attribute *ddr_perf_events_attrs[] = {
+ /* counter0 cycles event */
+ IMX9_DDR_PMU_EVENT_ATTR(cycles, 0),
+
+ /* reference events for all normal counters, need assert DEBUG19[21] bit */
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_ddrc1_rmw_for_ecc, 12),
+ IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_rreorder, 13),
+ IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_wreorder, 14),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_0, 15),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_1, 16),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_2, 17),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_3, 18),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_4, 19),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_5, 22),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_6, 23),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_7, 24),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_8, 25),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_9, 26),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_10, 27),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_11, 28),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_12, 31),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_13, 59),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_15, 61),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_29, 63),
+
+ /* counter1 specific events */
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_0, 64),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_1, 65),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_2, 66),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_3, 67),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_4, 68),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_5, 69),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_6, 70),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_7, 71),
+
+ /* counter2 specific events */
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_0, 64),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_1, 65),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_2, 66),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_3, 67),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_4, 68),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_5, 69),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_6, 70),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_7, 71),
+ IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_empty, 72),
+ IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pm_rd_trans_filt, 73),
+
+ /* counter3 specific events */
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_0, 64),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_1, 65),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_2, 66),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_3, 67),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_4, 68),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_5, 69),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_6, 70),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_7, 71),
+ IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_full, 72),
+ IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pm_wr_trans_filt, 73),
+
+ /* counter4 specific events */
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_0, 64),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_1, 65),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_2, 66),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_3, 67),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_4, 68),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_5, 69),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_6, 70),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_7, 71),
+ IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_rdq2_rmw, 72),
+ IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pm_rd_beat_filt, 73),
+
+ /* counter5 specific events */
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_0, 64),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_1, 65),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_2, 66),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_3, 67),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_4, 68),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_5, 69),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_6, 70),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_7, 71),
+ IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_rdq1, 72),
+
+ /* counter6 specific events */
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_end_0, 64),
+ IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_rdq2, 72),
+
+ /* counter7 specific events */
+ IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_1_2_full, 64),
+ IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_wrq0, 65),
+
+ /* counter8 specific events */
+ IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_bias_switched, 64),
+ IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_1_4_full, 65),
+
+ /* counter9 specific events */
+ IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_wrq1, 65),
+ IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_3_4_full, 66),
+
+ /* counter10 specific events */
+ IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_misc_mrk, 65),
+ IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_rdq0, 66),
+ NULL,
+};
+
+static const struct attribute_group ddr_perf_events_attr_group = {
+ .name = "events",
+ .attrs = ddr_perf_events_attrs,
+};
+
+PMU_FORMAT_ATTR(event, "config:0-7");
+PMU_FORMAT_ATTR(counter, "config:8-15");
+PMU_FORMAT_ATTR(axi_id, "config1:0-17");
+PMU_FORMAT_ATTR(axi_mask, "config2:0-17");
+
+static struct attribute *ddr_perf_format_attrs[] = {
+ &format_attr_event.attr,
+ &format_attr_counter.attr,
+ &format_attr_axi_id.attr,
+ &format_attr_axi_mask.attr,
+ NULL,
+};
+
+static const struct attribute_group ddr_perf_format_attr_group = {
+ .name = "format",
+ .attrs = ddr_perf_format_attrs,
+};
+
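+/*
+ * Illustrative encoding (not part of the driver): counting the filtered
+ * read transactions (eddrtq_pm_rd_trans_filt, event 73 on counter 2)
+ * maps to config = (2 << 8) | 73, with config1 holding the AXI ID and
+ * config2 the AXI ID mask, per the format fields above.
+ */
+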
+static const struct attribute_group *attr_groups[] = {
+ &ddr_perf_identifier_attr_group,
+ &ddr_perf_cpumask_attr_group,
+ &ddr_perf_events_attr_group,
+ &ddr_perf_format_attr_group,
+ NULL,
+};
+
+static void ddr_perf_clear_counter(struct ddr_pmu *pmu, int counter)
+{
+ if (counter == CYCLES_COUNTER) {
+ writel(0, pmu->base + PMC(counter) + 0x4);
+ writel(0, pmu->base + PMC(counter));
+ } else {
+ writel(0, pmu->base + PMC(counter));
+ }
+}
+
+static u64 ddr_perf_read_counter(struct ddr_pmu *pmu, int counter)
+{
+ u32 val_lower, val_upper;
+ u64 val;
+
+ if (counter != CYCLES_COUNTER) {
+ val = readl_relaxed(pmu->base + PMC(counter));
+ goto out;
+ }
+
+ /*
+ * Special handling for reading the 64-bit cycle counter: it spans two
+ * 32-bit registers, so re-read the upper word until it is stable to
+ * avoid a torn value when the lower word rolls over between reads.
+ */
+ do {
+ val_upper = readl_relaxed(pmu->base + PMC(counter) + 0x4);
+ val_lower = readl_relaxed(pmu->base + PMC(counter));
+ } while (val_upper != readl_relaxed(pmu->base + PMC(counter) + 0x4));
+
+ val = val_upper;
+ val = (val << 32);
+ val |= val_lower;
+out:
+ return val;
+}
+
+static void ddr_perf_counter_global_config(struct ddr_pmu *pmu, bool enable)
+{
+ u32 ctrl;
+
+ ctrl = readl_relaxed(pmu->base + PMGC0);
+
+ if (enable) {
+ /*
+ * The performance monitor must be reset before event counting
+ * sequences. The performance monitor can be reset by first freezing
+ * one or more counters and then clearing the freeze condition to
+ * allow the counters to count according to the settings in the
+ * performance monitor registers. Counters can be frozen individually
+ * by setting PMLCAn[FC] bits, or simultaneously by setting PMGC0[FAC].
+ * Simply clearing these freeze bits will then allow the performance
+ * monitor to begin counting based on the register settings.
+ */
+ ctrl |= PMGC0_FAC;
+ writel(ctrl, pmu->base + PMGC0);
+
+ /*
+ * Clear the freeze-all bit, enable the PMU interrupt, and enable
+ * freezing of the counters when an overflow condition occurs.
+ */
+ ctrl &= ~PMGC0_FAC;
+ ctrl |= PMGC0_PMIE | PMGC0_FCECE;
+ writel(ctrl, pmu->base + PMGC0);
+ } else {
+ ctrl |= PMGC0_FAC;
+ ctrl &= ~(PMGC0_PMIE | PMGC0_FCECE);
+ writel(ctrl, pmu->base + PMGC0);
+ }
+}
+
+static void ddr_perf_counter_local_config(struct ddr_pmu *pmu, int config,
+ int counter, bool enable)
+{
+ u32 ctrl_a;
+
+ ctrl_a = readl_relaxed(pmu->base + PMLCA(counter));
+
+ if (enable) {
+ ctrl_a |= PMLCA_FC;
+ writel(ctrl_a, pmu->base + PMLCA(counter));
+
+ ddr_perf_clear_counter(pmu, counter);
+
+ /* Unfreeze the counter, enable its condition, and program the event. */
+ ctrl_a &= ~PMLCA_FC;
+ ctrl_a |= PMLCA_CE;
+ ctrl_a &= ~FIELD_PREP(PMLCA_EVENT, 0x7F);
+ ctrl_a |= FIELD_PREP(PMLCA_EVENT, (config & 0x000000FF));
+ writel(ctrl_a, pmu->base + PMLCA(counter));
+ } else {
+ /* Freeze counter. */
+ ctrl_a |= PMLCA_FC;
+ writel(ctrl_a, pmu->base + PMLCA(counter));
+ }
+}
+
+static void ddr_perf_monitor_config(struct ddr_pmu *pmu, int cfg, int cfg1, int cfg2)
+{
+ u32 pmcfg1, pmcfg2;
+ int event, counter;
+
+ event = cfg & 0x000000FF;
+ counter = (cfg & 0x0000FF00) >> 8;
+
+ pmcfg1 = readl_relaxed(pmu->base + PMCFG1);
+
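+ /*
+ * Event 73 is the filtered event on counters 2, 3 and 4 (read
+ * transaction, write transaction and read beat respectively), so the
+ * matching filter-enable bit in PMCFG1 is set only for that event.
+ */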
+ if (counter == 2 && event == 73)
+ pmcfg1 |= PMCFG1_RD_TRANS_FILT_EN;
+ else if (counter == 2 && event != 73)
+ pmcfg1 &= ~PMCFG1_RD_TRANS_FILT_EN;
+
+ if (counter == 3 && event == 73)
+ pmcfg1 |= PMCFG1_WR_TRANS_FILT_EN;
+ else if (counter == 3 && event != 73)
+ pmcfg1 &= ~PMCFG1_WR_TRANS_FILT_EN;
+
+ if (counter == 4 && event == 73)
+ pmcfg1 |= PMCFG1_RD_BT_FILT_EN;
+ else if (counter == 4 && event != 73)
+ pmcfg1 &= ~PMCFG1_RD_BT_FILT_EN;
+
+ pmcfg1 &= ~FIELD_PREP(PMCFG1_ID_MASK, 0x3FFFF);
+ pmcfg1 |= FIELD_PREP(PMCFG1_ID_MASK, cfg2);
+ writel(pmcfg1, pmu->base + PMCFG1);
+
+ pmcfg2 = readl_relaxed(pmu->base + PMCFG2);
+ pmcfg2 &= ~FIELD_PREP(PMCFG2_ID, 0x3FFFF);
+ pmcfg2 |= FIELD_PREP(PMCFG2_ID, cfg1);
+ writel(pmcfg2, pmu->base + PMCFG2);
+}
+
+static void ddr_perf_event_update(struct perf_event *event)
+{
+ struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int counter = hwc->idx;
+ u64 new_raw_count;
+
+ new_raw_count = ddr_perf_read_counter(pmu, counter);
+ local64_add(new_raw_count, &event->count);
+
+ /* clear the counter after each read so the next delta starts at zero */
+ ddr_perf_clear_counter(pmu, counter);
+}
+
+static int ddr_perf_event_init(struct perf_event *event)
+{
+ struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ struct perf_event *sibling;
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
+ return -EOPNOTSUPP;
+
+ if (event->cpu < 0) {
+ dev_warn(pmu->dev, "Can't provide per-task data!\n");
+ return -EOPNOTSUPP;
+ }
+
+ /*
+ * We must NOT create groups containing mixed PMUs, although software
+ * events are acceptable (for example, to allow the group to be read
+ * periodically when an hrtimer, i.e. a cpu-clock leader, triggers).
+ */
+ if (event->group_leader->pmu != event->pmu &&
+ !is_software_event(event->group_leader))
+ return -EINVAL;
+
+ for_each_sibling_event(sibling, event->group_leader) {
+ if (sibling->pmu != event->pmu &&
+ !is_software_event(sibling))
+ return -EINVAL;
+ }
+
+ event->cpu = pmu->cpu;
+ hwc->idx = -1;
+
+ return 0;
+}
+
+static void ddr_perf_event_start(struct perf_event *event, int flags)
+{
+ struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int counter = hwc->idx;
+
+ local64_set(&hwc->prev_count, 0);
+
+ ddr_perf_counter_local_config(pmu, event->attr.config, counter, true);
+ hwc->state = 0;
+}
+
+static int ddr_perf_event_add(struct perf_event *event, int flags)
+{
+ struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int cfg = event->attr.config;
+ int cfg1 = event->attr.config1;
+ int cfg2 = event->attr.config2;
+ int counter;
+
+ counter = (cfg & 0x0000FF00) >> 8;
+
+ pmu->events[counter] = event;
+ pmu->active_events++;
+ hwc->idx = counter;
+ hwc->state |= PERF_HES_STOPPED;
+
+ if (flags & PERF_EF_START)
+ ddr_perf_event_start(event, flags);
+
+ /* configure the read transaction, write transaction and read beat filters */
+ ddr_perf_monitor_config(pmu, cfg, cfg1, cfg2);
+
+ return 0;
+}
+
+static void ddr_perf_event_stop(struct perf_event *event, int flags)
+{
+ struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int counter = hwc->idx;
+
+ ddr_perf_counter_local_config(pmu, event->attr.config, counter, false);
+ ddr_perf_event_update(event);
+
+ hwc->state |= PERF_HES_STOPPED;
+}
+
+static void ddr_perf_event_del(struct perf_event *event, int flags)
+{
+ struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
+ ddr_perf_event_stop(event, PERF_EF_UPDATE);
+
+ pmu->active_events--;
+ hwc->idx = -1;
+}
+
+static void ddr_perf_pmu_enable(struct pmu *pmu)
+{
+ struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu);
+
+ ddr_perf_counter_global_config(ddr_pmu, true);
+}
+
+static void ddr_perf_pmu_disable(struct pmu *pmu)
+{
+ struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu);
+
+ ddr_perf_counter_global_config(ddr_pmu, false);
+}
+
+static void ddr_perf_init(struct ddr_pmu *pmu, void __iomem *base,
+ struct device *dev)
+{
+ *pmu = (struct ddr_pmu) {
+ .pmu = (struct pmu) {
+ .module = THIS_MODULE,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
+ .task_ctx_nr = perf_invalid_context,
+ .attr_groups = attr_groups,
+ .event_init = ddr_perf_event_init,
+ .add = ddr_perf_event_add,
+ .del = ddr_perf_event_del,
+ .start = ddr_perf_event_start,
+ .stop = ddr_perf_event_stop,
+ .read = ddr_perf_event_update,
+ .pmu_enable = ddr_perf_pmu_enable,
+ .pmu_disable = ddr_perf_pmu_disable,
+ },
+ .base = base,
+ .dev = dev,
+ };
+}
+
+static irqreturn_t ddr_perf_irq_handler(int irq, void *p)
+{
+ struct ddr_pmu *pmu = (struct ddr_pmu *)p;
+ struct perf_event *event;
+ int i;
+
+ /*
+ * Counters can generate an interrupt on an overflow when the MSB of a
+ * counter changes from 0 to 1. For the interrupt to be signalled, the
+ * following conditions must be satisfied:
+ * PMGC0[PMIE] = 1, PMGC0[FCECE] = 1, PMLCAn[CE] = 1
+ * When an interrupt is signalled, PMGC0[FAC] is set by hardware and
+ * all of the registers are frozen.
+ * Software can clear the interrupt condition by resetting the
+ * performance monitor and clearing the most significant bit of the
+ * counter that generated the overflow.
+ */
+ for (i = 0; i < NUM_COUNTERS; i++) {
+ if (!pmu->events[i])
+ continue;
+
+ event = pmu->events[i];
+
+ ddr_perf_event_update(event);
+ }
+
+ ddr_perf_counter_global_config(pmu, true);
+
+ return IRQ_HANDLED;
+}
+
+static int ddr_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct ddr_pmu *pmu = hlist_entry_safe(node, struct ddr_pmu, node);
+ int target;
+
+ if (cpu != pmu->cpu)
+ return 0;
+
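+ /* Migrate the perf context and the IRQ affinity to any other online CPU. */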
+ target = cpumask_any_but(cpu_online_mask, cpu);
+ if (target >= nr_cpu_ids)
+ return 0;
+
+ perf_pmu_migrate_context(&pmu->pmu, cpu, target);
+ pmu->cpu = target;
+
+ WARN_ON(irq_set_affinity(pmu->irq, cpumask_of(pmu->cpu)));
+
+ return 0;
+}
+
+static int ddr_perf_probe(struct platform_device *pdev)
+{
+ struct ddr_pmu *pmu;
+ void __iomem *base;
+ int ret, irq;
+ char *name;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL);
+ if (!pmu)
+ return -ENOMEM;
+
+ ddr_perf_init(pmu, base, &pdev->dev);
+
+ pmu->devtype_data = of_device_get_match_data(&pdev->dev);
+
+ platform_set_drvdata(pdev, pmu);
+
+ pmu->id = ida_simple_get(&ddr_ida, 0, 0, GFP_KERNEL);
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, DDR_PERF_DEV_NAME "%d", pmu->id);
+ if (!name) {
+ ret = -ENOMEM;
+ goto format_string_err;
+ }
+
+ pmu->cpu = raw_smp_processor_id();
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, DDR_CPUHP_CB_NAME,
+ NULL, ddr_perf_offline_cpu);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to add callbacks for multi state\n");
+ goto cpuhp_state_err;
+ }
+ pmu->cpuhp_state = ret;
+
+ /* Register the pmu instance for cpu hotplug */
+ ret = cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node);
+ if (ret) {
+ dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
+ goto cpuhp_instance_err;
+ }
+
+ /* Request irq */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto ddr_perf_err;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, ddr_perf_irq_handler,
+ IRQF_NOBALANCING | IRQF_NO_THREAD,
+ DDR_CPUHP_CB_NAME, pmu);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Request irq failed: %d", ret);
+ goto ddr_perf_err;
+ }
+
+ pmu->irq = irq;
+ ret = irq_set_affinity(pmu->irq, cpumask_of(pmu->cpu));
+ if (ret) {
+ dev_err(pmu->dev, "Failed to set interrupt affinity\n");
+ goto ddr_perf_err;
+ }
+
+ ret = perf_pmu_register(&pmu->pmu, name, -1);
+ if (ret)
+ goto ddr_perf_err;
+
+ return 0;
+
+ddr_perf_err:
+ cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
+cpuhp_instance_err:
+ cpuhp_remove_multi_state(pmu->cpuhp_state);
+cpuhp_state_err:
+format_string_err:
+ ida_simple_remove(&ddr_ida, pmu->id);
+ dev_warn(&pdev->dev, "i.MX9 DDR Perf PMU failed (%d), disabled\n", ret);
+ return ret;
+}
+
+static int ddr_perf_remove(struct platform_device *pdev)
+{
+ struct ddr_pmu *pmu = platform_get_drvdata(pdev);
+
+ cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
+ cpuhp_remove_multi_state(pmu->cpuhp_state);
+
+ perf_pmu_unregister(&pmu->pmu);
+
+ ida_simple_remove(&ddr_ida, pmu->id);
+
+ return 0;
+}
+
+static struct platform_driver imx_ddr_pmu_driver = {
+ .driver = {
+ .name = "imx9-ddr-pmu",
+ .of_match_table = imx_ddr_pmu_dt_ids,
+ .suppress_bind_attrs = true,
+ },
+ .probe = ddr_perf_probe,
+ .remove = ddr_perf_remove,
+};
+module_platform_driver(imx_ddr_pmu_driver);
+
+MODULE_AUTHOR("Xu Yang <xu.yang_2@nxp.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("DDRC PerfMon for i.MX9 SoCs");
diff --git a/drivers/perf/hisilicon/Makefile b/drivers/perf/hisilicon/Makefile
index 4d2c9abe3372..48dcc8381ea7 100644
--- a/drivers/perf/hisilicon/Makefile
+++ b/drivers/perf/hisilicon/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_HISI_PMU) += hisi_uncore_pmu.o hisi_uncore_l3c_pmu.o \
hisi_uncore_hha_pmu.o hisi_uncore_ddrc_pmu.o hisi_uncore_sllc_pmu.o \
- hisi_uncore_pa_pmu.o hisi_uncore_cpa_pmu.o
+ hisi_uncore_pa_pmu.o hisi_uncore_cpa_pmu.o hisi_uncore_uc_pmu.o
obj-$(CONFIG_HISI_PCIE_PMU) += hisi_pcie_pmu.o
obj-$(CONFIG_HNS3_PMU) += hns3_pmu.o
diff --git a/drivers/perf/hisilicon/hisi_pcie_pmu.c b/drivers/perf/hisilicon/hisi_pcie_pmu.c
index 6fee0b6e163b..e10fc7cb9493 100644
--- a/drivers/perf/hisilicon/hisi_pcie_pmu.c
+++ b/drivers/perf/hisilicon/hisi_pcie_pmu.c
@@ -683,7 +683,7 @@ static int hisi_pcie_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
pcie_pmu->on_cpu = -1;
/* Choose a new CPU from all online cpus. */
- target = cpumask_first(cpu_online_mask);
+ target = cpumask_any_but(cpu_online_mask, cpu);
if (target >= nr_cpu_ids) {
pci_err(pcie_pmu->pdev, "There is no CPU to set\n");
return 0;
diff --git a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
index 71b6687d6696..d941e746b424 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
@@ -22,9 +22,15 @@
#define PA_TT_CTRL 0x1c08
#define PA_TGTID_CTRL 0x1c14
#define PA_SRCID_CTRL 0x1c18
+
+/* H32 PA interrupt registers */
#define PA_INT_MASK 0x1c70
#define PA_INT_STATUS 0x1c78
#define PA_INT_CLEAR 0x1c7c
+
+#define H60PA_INT_STATUS 0x1c70
+#define H60PA_INT_MASK 0x1c74
+
#define PA_EVENT_TYPE0 0x1c80
#define PA_PMU_VERSION 0x1cf0
#define PA_EVENT_CNT0_L 0x1d00
@@ -46,6 +52,12 @@ HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid_cmd, config1, 32, 22);
HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid_msk, config1, 43, 33);
HISI_PMU_EVENT_ATTR_EXTRACTOR(tracetag_en, config1, 44, 44);
+struct hisi_pa_pmu_int_regs {
+ u32 mask_offset;
+ u32 clear_offset;
+ u32 status_offset;
+};
+
static void hisi_pa_pmu_enable_tracetag(struct perf_event *event)
{
struct hisi_pmu *pa_pmu = to_hisi_pmu(event->pmu);
@@ -219,40 +231,40 @@ static void hisi_pa_pmu_disable_counter(struct hisi_pmu *pa_pmu,
static void hisi_pa_pmu_enable_counter_int(struct hisi_pmu *pa_pmu,
struct hw_perf_event *hwc)
{
+ struct hisi_pa_pmu_int_regs *regs = pa_pmu->dev_info->private;
u32 val;
/* Write 0 to enable interrupt */
- val = readl(pa_pmu->base + PA_INT_MASK);
+ val = readl(pa_pmu->base + regs->mask_offset);
val &= ~(1 << hwc->idx);
- writel(val, pa_pmu->base + PA_INT_MASK);
+ writel(val, pa_pmu->base + regs->mask_offset);
}
static void hisi_pa_pmu_disable_counter_int(struct hisi_pmu *pa_pmu,
struct hw_perf_event *hwc)
{
+ struct hisi_pa_pmu_int_regs *regs = pa_pmu->dev_info->private;
u32 val;
/* Write 1 to mask interrupt */
- val = readl(pa_pmu->base + PA_INT_MASK);
+ val = readl(pa_pmu->base + regs->mask_offset);
val |= 1 << hwc->idx;
- writel(val, pa_pmu->base + PA_INT_MASK);
+ writel(val, pa_pmu->base + regs->mask_offset);
}
static u32 hisi_pa_pmu_get_int_status(struct hisi_pmu *pa_pmu)
{
- return readl(pa_pmu->base + PA_INT_STATUS);
+ struct hisi_pa_pmu_int_regs *regs = pa_pmu->dev_info->private;
+
+ return readl(pa_pmu->base + regs->status_offset);
}
static void hisi_pa_pmu_clear_int_status(struct hisi_pmu *pa_pmu, int idx)
{
- writel(1 << idx, pa_pmu->base + PA_INT_CLEAR);
-}
+ struct hisi_pa_pmu_int_regs *regs = pa_pmu->dev_info->private;
-static const struct acpi_device_id hisi_pa_pmu_acpi_match[] = {
- { "HISI0273", },
- {}
-};
-MODULE_DEVICE_TABLE(acpi, hisi_pa_pmu_acpi_match);
+ writel(1 << idx, pa_pmu->base + regs->clear_offset);
+}
static int hisi_pa_pmu_init_data(struct platform_device *pdev,
struct hisi_pmu *pa_pmu)
@@ -276,6 +288,10 @@ static int hisi_pa_pmu_init_data(struct platform_device *pdev,
pa_pmu->ccl_id = -1;
pa_pmu->sccl_id = -1;
+ pa_pmu->dev_info = device_get_match_data(&pdev->dev);
+ if (!pa_pmu->dev_info)
+ return -ENODEV;
+
pa_pmu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pa_pmu->base)) {
dev_err(&pdev->dev, "ioremap failed for pa_pmu resource.\n");
@@ -314,6 +330,32 @@ static const struct attribute_group hisi_pa_pmu_v2_events_group = {
.attrs = hisi_pa_pmu_v2_events_attr,
};
+static struct attribute *hisi_pa_pmu_v3_events_attr[] = {
+ HISI_PMU_EVENT_ATTR(tx_req, 0x0),
+ HISI_PMU_EVENT_ATTR(tx_dat, 0x1),
+ HISI_PMU_EVENT_ATTR(tx_snp, 0x2),
+ HISI_PMU_EVENT_ATTR(rx_req, 0x7),
+ HISI_PMU_EVENT_ATTR(rx_dat, 0x8),
+ HISI_PMU_EVENT_ATTR(rx_snp, 0x9),
+ NULL
+};
+
+static const struct attribute_group hisi_pa_pmu_v3_events_group = {
+ .name = "events",
+ .attrs = hisi_pa_pmu_v3_events_attr,
+};
+
+static struct attribute *hisi_h60pa_pmu_events_attr[] = {
+ HISI_PMU_EVENT_ATTR(rx_flit, 0x50),
+ HISI_PMU_EVENT_ATTR(tx_flit, 0x65),
+ NULL
+};
+
+static const struct attribute_group hisi_h60pa_pmu_events_group = {
+ .name = "events",
+ .attrs = hisi_h60pa_pmu_events_attr,
+};
+
static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
static struct attribute *hisi_pa_pmu_cpumask_attrs[] = {
@@ -337,6 +379,12 @@ static const struct attribute_group hisi_pa_pmu_identifier_group = {
.attrs = hisi_pa_pmu_identifier_attrs,
};
+static struct hisi_pa_pmu_int_regs hisi_pa_pmu_regs = {
+ .mask_offset = PA_INT_MASK,
+ .clear_offset = PA_INT_CLEAR,
+ .status_offset = PA_INT_STATUS,
+};
+
static const struct attribute_group *hisi_pa_pmu_v2_attr_groups[] = {
&hisi_pa_pmu_v2_format_group,
&hisi_pa_pmu_v2_events_group,
@@ -345,6 +393,46 @@ static const struct attribute_group *hisi_pa_pmu_v2_attr_groups[] = {
NULL
};
+static const struct hisi_pmu_dev_info hisi_h32pa_v2 = {
+ .name = "pa",
+ .attr_groups = hisi_pa_pmu_v2_attr_groups,
+ .private = &hisi_pa_pmu_regs,
+};
+
+static const struct attribute_group *hisi_pa_pmu_v3_attr_groups[] = {
+ &hisi_pa_pmu_v2_format_group,
+ &hisi_pa_pmu_v3_events_group,
+ &hisi_pa_pmu_cpumask_attr_group,
+ &hisi_pa_pmu_identifier_group,
+ NULL
+};
+
+static const struct hisi_pmu_dev_info hisi_h32pa_v3 = {
+ .name = "pa",
+ .attr_groups = hisi_pa_pmu_v3_attr_groups,
+ .private = &hisi_pa_pmu_regs,
+};
+
+static struct hisi_pa_pmu_int_regs hisi_h60pa_pmu_regs = {
+ .mask_offset = H60PA_INT_MASK,
+ .clear_offset = H60PA_INT_STATUS, /* Clear on write */
+ .status_offset = H60PA_INT_STATUS,
+};
+
+static const struct attribute_group *hisi_h60pa_pmu_attr_groups[] = {
+ &hisi_pa_pmu_v2_format_group,
+ &hisi_h60pa_pmu_events_group,
+ &hisi_pa_pmu_cpumask_attr_group,
+ &hisi_pa_pmu_identifier_group,
+ NULL
+};
+
+static const struct hisi_pmu_dev_info hisi_h60pa = {
+ .name = "h60pa",
+ .attr_groups = hisi_h60pa_pmu_attr_groups,
+ .private = &hisi_h60pa_pmu_regs,
+};
+
static const struct hisi_uncore_ops hisi_uncore_pa_ops = {
.write_evtype = hisi_pa_pmu_write_evtype,
.get_event_idx = hisi_uncore_pmu_get_event_idx,
@@ -375,7 +463,7 @@ static int hisi_pa_pmu_dev_probe(struct platform_device *pdev,
if (ret)
return ret;
- pa_pmu->pmu_events.attr_groups = hisi_pa_pmu_v2_attr_groups;
+ pa_pmu->pmu_events.attr_groups = pa_pmu->dev_info->attr_groups;
pa_pmu->num_counters = PA_NR_COUNTERS;
pa_pmu->ops = &hisi_uncore_pa_ops;
pa_pmu->check_event = 0xB0;
@@ -400,8 +488,9 @@ static int hisi_pa_pmu_probe(struct platform_device *pdev)
if (ret)
return ret;
- name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sicl%u_pa%u",
- pa_pmu->sicl_id, pa_pmu->index_id);
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sicl%d_%s%u",
+ pa_pmu->sicl_id, pa_pmu->dev_info->name,
+ pa_pmu->index_id);
if (!name)
return -ENOMEM;
@@ -435,6 +524,14 @@ static int hisi_pa_pmu_remove(struct platform_device *pdev)
return 0;
}
+static const struct acpi_device_id hisi_pa_pmu_acpi_match[] = {
+ { "HISI0273", (kernel_ulong_t)&hisi_h32pa_v2 },
+ { "HISI0275", (kernel_ulong_t)&hisi_h32pa_v3 },
+ { "HISI0274", (kernel_ulong_t)&hisi_h60pa },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, hisi_pa_pmu_acpi_match);
+
static struct platform_driver hisi_pa_pmu_driver = {
.driver = {
.name = "hisi_pa_pmu",
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c
index 2823f381930d..04031450d5fe 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c
@@ -20,7 +20,6 @@
#include "hisi_uncore_pmu.h"
-#define HISI_GET_EVENTID(ev) (ev->hw.config_base & 0xff)
#define HISI_MAX_PERIOD(nr) (GENMASK_ULL((nr) - 1, 0))
/*
@@ -226,6 +225,9 @@ int hisi_uncore_pmu_event_init(struct perf_event *event)
hwc->idx = -1;
hwc->config_base = event->attr.config;
+ if (hisi_pmu->ops->check_filter && hisi_pmu->ops->check_filter(event))
+ return -EINVAL;
+
/* Enforce to use the same CPU for all events in this PMU */
event->cpu = hisi_pmu->on_cpu;
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.h b/drivers/perf/hisilicon/hisi_uncore_pmu.h
index 07890a8e96ca..92402aa69d70 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pmu.h
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.h
@@ -43,9 +43,15 @@
return FIELD_GET(GENMASK_ULL(hi, lo), event->attr.config); \
}
+#define HISI_GET_EVENTID(ev) (ev->hw.config_base & 0xff)
+
+#define HISI_PMU_EVTYPE_BITS 8
+#define HISI_PMU_EVTYPE_SHIFT(idx) ((idx) % 4 * HISI_PMU_EVTYPE_BITS)
+
struct hisi_pmu;
struct hisi_uncore_ops {
+ int (*check_filter)(struct perf_event *event);
void (*write_evtype)(struct hisi_pmu *, int, u32);
int (*get_event_idx)(struct perf_event *);
u64 (*read_counter)(struct hisi_pmu *, struct hw_perf_event *);
@@ -62,6 +68,13 @@ struct hisi_uncore_ops {
void (*disable_filter)(struct perf_event *event);
};
+/* Describes the HISI PMU chip features information */
+struct hisi_pmu_dev_info {
+ const char *name;
+ const struct attribute_group **attr_groups;
+ void *private;
+};
+
struct hisi_pmu_hwevents {
struct perf_event *hw_events[HISI_MAX_COUNTERS];
DECLARE_BITMAP(used_mask, HISI_MAX_COUNTERS);
@@ -72,6 +85,7 @@ struct hisi_pmu_hwevents {
struct hisi_pmu {
struct pmu pmu;
const struct hisi_uncore_ops *ops;
+ const struct hisi_pmu_dev_info *dev_info;
struct hisi_pmu_hwevents pmu_events;
/* associated_cpus: All CPUs associated with the PMU */
cpumask_t associated_cpus;
diff --git a/drivers/perf/hisilicon/hisi_uncore_uc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_uc_pmu.c
new file mode 100644
index 000000000000..63da05e5831c
--- /dev/null
+++ b/drivers/perf/hisilicon/hisi_uncore_uc_pmu.c
@@ -0,0 +1,578 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * HiSilicon SoC UC (unified cache) uncore Hardware event counters support
+ *
+ * Copyright (C) 2023 HiSilicon Limited
+ *
+ * This code is based on the uncore PMUs like hisi_uncore_l3c_pmu.
+ */
+#include <linux/cpuhotplug.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/list.h>
+#include <linux/mod_devicetable.h>
+#include <linux/property.h>
+
+#include "hisi_uncore_pmu.h"
+
+/* Dynamic CPU hotplug state used by UC PMU */
+static enum cpuhp_state hisi_uc_pmu_online;
+
+/* UC register definition */
+#define HISI_UC_INT_MASK_REG 0x0800
+#define HISI_UC_INT_STS_REG 0x0808
+#define HISI_UC_INT_CLEAR_REG 0x080c
+#define HISI_UC_TRACETAG_CTRL_REG 0x1b2c
+#define HISI_UC_TRACETAG_REQ_MSK GENMASK(9, 7)
+#define HISI_UC_TRACETAG_MARK_EN BIT(0)
+#define HISI_UC_TRACETAG_REQ_EN (HISI_UC_TRACETAG_MARK_EN | BIT(2))
+#define HISI_UC_TRACETAG_SRCID_EN BIT(3)
+#define HISI_UC_SRCID_CTRL_REG 0x1b40
+#define HISI_UC_SRCID_MSK GENMASK(14, 1)
+#define HISI_UC_EVENT_CTRL_REG 0x1c00
+#define HISI_UC_EVENT_TRACETAG_EN BIT(29)
+#define HISI_UC_EVENT_URING_MSK GENMASK(28, 27)
+#define HISI_UC_EVENT_GLB_EN BIT(26)
+#define HISI_UC_VERSION_REG 0x1cf0
+#define HISI_UC_EVTYPE_REGn(n) (0x1d00 + (n) * 4)
+#define HISI_UC_EVTYPE_MASK GENMASK(7, 0)
+#define HISI_UC_CNTR_REGn(n) (0x1e00 + (n) * 8)
+
+#define HISI_UC_NR_COUNTERS 0x8
+#define HISI_UC_V2_NR_EVENTS 0xFF
+#define HISI_UC_CNTR_REG_BITS 64
+
+#define HISI_UC_RD_REQ_TRACETAG 0x4
+#define HISI_UC_URING_EVENT_MIN 0x47
+#define HISI_UC_URING_EVENT_MAX 0x59
+
+HISI_PMU_EVENT_ATTR_EXTRACTOR(rd_req_en, config1, 0, 0);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(uring_channel, config1, 5, 4);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid, config1, 19, 6);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid_en, config1, 20, 20);
+
+static int hisi_uc_pmu_check_filter(struct perf_event *event)
+{
+ struct hisi_pmu *uc_pmu = to_hisi_pmu(event->pmu);
+
+ if (hisi_get_srcid_en(event) && !hisi_get_rd_req_en(event)) {
+ dev_err(uc_pmu->dev,
+ "rcid_en depends on rd_req_en being enabled!\n");
+ return -EINVAL;
+ }
+
+ if (!hisi_get_uring_channel(event))
+ return 0;
+
+ if ((HISI_GET_EVENTID(event) < HISI_UC_URING_EVENT_MIN) ||
+ (HISI_GET_EVENTID(event) > HISI_UC_URING_EVENT_MAX))
+ dev_warn(uc_pmu->dev,
+ "Only events: [%#x ~ %#x] support channel filtering!",
+ HISI_UC_URING_EVENT_MIN, HISI_UC_URING_EVENT_MAX);
+
+ return 0;
+}
+
+static void hisi_uc_pmu_config_req_tracetag(struct perf_event *event)
+{
+ struct hisi_pmu *uc_pmu = to_hisi_pmu(event->pmu);
+ u32 val;
+
+ if (!hisi_get_rd_req_en(event))
+ return;
+
+ val = readl(uc_pmu->base + HISI_UC_TRACETAG_CTRL_REG);
+
+ /* The request-type has been configured */
+ if (FIELD_GET(HISI_UC_TRACETAG_REQ_MSK, val) == HISI_UC_RD_REQ_TRACETAG)
+ return;
+
+ /* Set the request type for the tracetag; only read requests are supported. */
+ val &= ~HISI_UC_TRACETAG_REQ_MSK;
+ val |= FIELD_PREP(HISI_UC_TRACETAG_REQ_MSK, HISI_UC_RD_REQ_TRACETAG);
+ val |= HISI_UC_TRACETAG_REQ_EN;
+ writel(val, uc_pmu->base + HISI_UC_TRACETAG_CTRL_REG);
+}
+
+static void hisi_uc_pmu_clear_req_tracetag(struct perf_event *event)
+{
+ struct hisi_pmu *uc_pmu = to_hisi_pmu(event->pmu);
+ u32 val;
+
+ if (!hisi_get_rd_req_en(event))
+ return;
+
+ val = readl(uc_pmu->base + HISI_UC_TRACETAG_CTRL_REG);
+
+ /* Do nothing; the request-type tracetag has already been cleared */
+ if (FIELD_GET(HISI_UC_TRACETAG_REQ_MSK, val) == 0)
+ return;
+
+ /* Clear request-type */
+ val &= ~HISI_UC_TRACETAG_REQ_MSK;
+ val &= ~HISI_UC_TRACETAG_REQ_EN;
+ writel(val, uc_pmu->base + HISI_UC_TRACETAG_CTRL_REG);
+}
+
+static void hisi_uc_pmu_config_srcid_tracetag(struct perf_event *event)
+{
+ struct hisi_pmu *uc_pmu = to_hisi_pmu(event->pmu);
+ u32 val;
+
+ if (!hisi_get_srcid_en(event))
+ return;
+
+ val = readl(uc_pmu->base + HISI_UC_TRACETAG_CTRL_REG);
+
+ /* Do nothing; the source ID has already been configured */
+ if (FIELD_GET(HISI_UC_TRACETAG_SRCID_EN, val))
+ return;
+
+ /* Enable source id tracetag */
+ val |= HISI_UC_TRACETAG_SRCID_EN;
+ writel(val, uc_pmu->base + HISI_UC_TRACETAG_CTRL_REG);
+
+ val = readl(uc_pmu->base + HISI_UC_SRCID_CTRL_REG);
+ val &= ~HISI_UC_SRCID_MSK;
+ val |= FIELD_PREP(HISI_UC_SRCID_MSK, hisi_get_srcid(event));
+ writel(val, uc_pmu->base + HISI_UC_SRCID_CTRL_REG);
+
+ /* Depend on request-type tracetag enabled */
+ hisi_uc_pmu_config_req_tracetag(event);
+}
+
+static void hisi_uc_pmu_clear_srcid_tracetag(struct perf_event *event)
+{
+ struct hisi_pmu *uc_pmu = to_hisi_pmu(event->pmu);
+ u32 val;
+
+ if (!hisi_get_srcid_en(event))
+ return;
+
+ val = readl(uc_pmu->base + HISI_UC_TRACETAG_CTRL_REG);
+
+ /* Do nothing; the source ID has already been cleared */
+ if (FIELD_GET(HISI_UC_TRACETAG_SRCID_EN, val) == 0)
+ return;
+
+ hisi_uc_pmu_clear_req_tracetag(event);
+
+ /* Disable source id tracetag */
+ val &= ~HISI_UC_TRACETAG_SRCID_EN;
+ writel(val, uc_pmu->base + HISI_UC_TRACETAG_CTRL_REG);
+
+ val = readl(uc_pmu->base + HISI_UC_SRCID_CTRL_REG);
+ val &= ~HISI_UC_SRCID_MSK;
+ writel(val, uc_pmu->base + HISI_UC_SRCID_CTRL_REG);
+}
+
+static void hisi_uc_pmu_config_uring_channel(struct perf_event *event)
+{
+ struct hisi_pmu *uc_pmu = to_hisi_pmu(event->pmu);
+ u32 uring_channel = hisi_get_uring_channel(event);
+ u32 val;
+
+ /* Do nothing if unset or explicitly set to zero (the default) */
+ if (uring_channel == 0)
+ return;
+
+ val = readl(uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
+
+ /* Do nothing; the uring_channel has already been configured */
+ if (uring_channel == FIELD_GET(HISI_UC_EVENT_URING_MSK, val))
+ return;
+
+ val &= ~HISI_UC_EVENT_URING_MSK;
+ val |= FIELD_PREP(HISI_UC_EVENT_URING_MSK, uring_channel);
+ writel(val, uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
+}
+
+static void hisi_uc_pmu_clear_uring_channel(struct perf_event *event)
+{
+ struct hisi_pmu *uc_pmu = to_hisi_pmu(event->pmu);
+ u32 val;
+
+ /* Do nothing if unset or explicitly set to zero (the default) */
+ if (hisi_get_uring_channel(event) == 0)
+ return;
+
+ val = readl(uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
+
+ /* Do nothing; the uring_channel has already been cleared */
+ if (FIELD_GET(HISI_UC_EVENT_URING_MSK, val) == 0)
+ return;
+
+ val &= ~HISI_UC_EVENT_URING_MSK;
+ writel(val, uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
+}
+
+static void hisi_uc_pmu_enable_filter(struct perf_event *event)
+{
+ if (event->attr.config1 == 0)
+ return;
+
+ hisi_uc_pmu_config_uring_channel(event);
+ hisi_uc_pmu_config_req_tracetag(event);
+ hisi_uc_pmu_config_srcid_tracetag(event);
+}
+
+static void hisi_uc_pmu_disable_filter(struct perf_event *event)
+{
+ if (event->attr.config1 == 0)
+ return;
+
+ hisi_uc_pmu_clear_srcid_tracetag(event);
+ hisi_uc_pmu_clear_req_tracetag(event);
+ hisi_uc_pmu_clear_uring_channel(event);
+}
+
+static void hisi_uc_pmu_write_evtype(struct hisi_pmu *uc_pmu, int idx, u32 type)
+{
+ u32 val;
+
+ /*
+ * Select the appropriate event select register.
+ * There are two 32-bit event select registers for the
+ * 8 hardware counters; each event code is 8 bits wide.
+ */
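+ /*
+ * For example, idx 5 is programmed via HISI_UC_EVTYPE_REGn(1) at
+ * bits [15:8], since HISI_PMU_EVTYPE_SHIFT(5) == 8.
+ */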
+ val = readl(uc_pmu->base + HISI_UC_EVTYPE_REGn(idx / 4));
+ val &= ~(HISI_UC_EVTYPE_MASK << HISI_PMU_EVTYPE_SHIFT(idx));
+ val |= (type << HISI_PMU_EVTYPE_SHIFT(idx));
+ writel(val, uc_pmu->base + HISI_UC_EVTYPE_REGn(idx / 4));
+}
+
+static void hisi_uc_pmu_start_counters(struct hisi_pmu *uc_pmu)
+{
+ u32 val;
+
+ val = readl(uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
+ val |= HISI_UC_EVENT_GLB_EN;
+ writel(val, uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
+}
+
+static void hisi_uc_pmu_stop_counters(struct hisi_pmu *uc_pmu)
+{
+ u32 val;
+
+ val = readl(uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
+ val &= ~HISI_UC_EVENT_GLB_EN;
+ writel(val, uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
+}
+
+static void hisi_uc_pmu_enable_counter(struct hisi_pmu *uc_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ /* Enable counter index */
+ val = readl(uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
+ val |= (1 << hwc->idx);
+ writel(val, uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
+}
+
+static void hisi_uc_pmu_disable_counter(struct hisi_pmu *uc_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ /* Clear counter index */
+ val = readl(uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
+ val &= ~(1 << hwc->idx);
+ writel(val, uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
+}
+
+static u64 hisi_uc_pmu_read_counter(struct hisi_pmu *uc_pmu,
+ struct hw_perf_event *hwc)
+{
+ return readq(uc_pmu->base + HISI_UC_CNTR_REGn(hwc->idx));
+}
+
+static void hisi_uc_pmu_write_counter(struct hisi_pmu *uc_pmu,
+ struct hw_perf_event *hwc, u64 val)
+{
+ writeq(val, uc_pmu->base + HISI_UC_CNTR_REGn(hwc->idx));
+}
+
+static void hisi_uc_pmu_enable_counter_int(struct hisi_pmu *uc_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ val = readl(uc_pmu->base + HISI_UC_INT_MASK_REG);
+ val &= ~(1 << hwc->idx);
+ writel(val, uc_pmu->base + HISI_UC_INT_MASK_REG);
+}
+
+static void hisi_uc_pmu_disable_counter_int(struct hisi_pmu *uc_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ val = readl(uc_pmu->base + HISI_UC_INT_MASK_REG);
+ val |= (1 << hwc->idx);
+ writel(val, uc_pmu->base + HISI_UC_INT_MASK_REG);
+}
+
+static u32 hisi_uc_pmu_get_int_status(struct hisi_pmu *uc_pmu)
+{
+ return readl(uc_pmu->base + HISI_UC_INT_STS_REG);
+}
+
+static void hisi_uc_pmu_clear_int_status(struct hisi_pmu *uc_pmu, int idx)
+{
+ writel(1 << idx, uc_pmu->base + HISI_UC_INT_CLEAR_REG);
+}
+
+static int hisi_uc_pmu_init_data(struct platform_device *pdev,
+ struct hisi_pmu *uc_pmu)
+{
+ /*
+ * Use the SCCL (Super CPU Cluster) ID and CCL (CPU Cluster) ID to
+ * identify the topology of the UC PMU devices in the chip: each SCCL
+ * contains several CCLs, and each CCL has 4 UC PMUs.
+ */
+ if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
+ &uc_pmu->sccl_id)) {
+ dev_err(&pdev->dev, "Can not read uc sccl-id!\n");
+ return -EINVAL;
+ }
+
+ if (device_property_read_u32(&pdev->dev, "hisilicon,ccl-id",
+ &uc_pmu->ccl_id)) {
+ dev_err(&pdev->dev, "Can not read uc ccl-id!\n");
+ return -EINVAL;
+ }
+
+ if (device_property_read_u32(&pdev->dev, "hisilicon,sub-id",
+ &uc_pmu->sub_id)) {
+ dev_err(&pdev->dev, "Can not read sub-id!\n");
+ return -EINVAL;
+ }
+
+ uc_pmu->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(uc_pmu->base)) {
+ dev_err(&pdev->dev, "ioremap failed for uc_pmu resource\n");
+ return PTR_ERR(uc_pmu->base);
+ }
+
+ uc_pmu->identifier = readl(uc_pmu->base + HISI_UC_VERSION_REG);
+
+ return 0;
+}
+
+static struct attribute *hisi_uc_pmu_format_attr[] = {
+ HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
+ HISI_PMU_FORMAT_ATTR(rd_req_en, "config1:0-0"),
+ HISI_PMU_FORMAT_ATTR(uring_channel, "config1:4-5"),
+ HISI_PMU_FORMAT_ATTR(srcid, "config1:6-19"),
+ HISI_PMU_FORMAT_ATTR(srcid_en, "config1:20-20"),
+ NULL
+};
+
+static const struct attribute_group hisi_uc_pmu_format_group = {
+ .name = "format",
+ .attrs = hisi_uc_pmu_format_attr,
+};
+
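+/*
+ * Illustrative encoding (not part of the driver): counting cpu_rd
+ * (event 0x10) restricted to a single source ID maps to config = 0x10
+ * and config1 = (1 << 20) | (srcid << 6) | 1, i.e. srcid_en and
+ * rd_req_en both set, since srcid_en depends on rd_req_en.
+ */
+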
+static struct attribute *hisi_uc_pmu_events_attr[] = {
+ HISI_PMU_EVENT_ATTR(sq_time, 0x00),
+ HISI_PMU_EVENT_ATTR(pq_time, 0x01),
+ HISI_PMU_EVENT_ATTR(hbm_time, 0x02),
+ HISI_PMU_EVENT_ATTR(iq_comp_time_cring, 0x03),
+ HISI_PMU_EVENT_ATTR(iq_comp_time_uring, 0x05),
+ HISI_PMU_EVENT_ATTR(cpu_rd, 0x10),
+ HISI_PMU_EVENT_ATTR(cpu_rd64, 0x17),
+ HISI_PMU_EVENT_ATTR(cpu_rs64, 0x19),
+ HISI_PMU_EVENT_ATTR(cpu_mru, 0x1a),
+ HISI_PMU_EVENT_ATTR(cycles, 0x9c),
+ HISI_PMU_EVENT_ATTR(spipe_hit, 0xb3),
+ HISI_PMU_EVENT_ATTR(hpipe_hit, 0xdb),
+ HISI_PMU_EVENT_ATTR(cring_rxdat_cnt, 0xfa),
+ HISI_PMU_EVENT_ATTR(cring_txdat_cnt, 0xfb),
+ HISI_PMU_EVENT_ATTR(uring_rxdat_cnt, 0xfc),
+ HISI_PMU_EVENT_ATTR(uring_txdat_cnt, 0xfd),
+ NULL
+};
+
+static const struct attribute_group hisi_uc_pmu_events_group = {
+ .name = "events",
+ .attrs = hisi_uc_pmu_events_attr,
+};
+
+static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
+
+static struct attribute *hisi_uc_pmu_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL,
+};
+
+static const struct attribute_group hisi_uc_pmu_cpumask_attr_group = {
+ .attrs = hisi_uc_pmu_cpumask_attrs,
+};
+
+static struct device_attribute hisi_uc_pmu_identifier_attr =
+ __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
+
+static struct attribute *hisi_uc_pmu_identifier_attrs[] = {
+ &hisi_uc_pmu_identifier_attr.attr,
+ NULL
+};
+
+static const struct attribute_group hisi_uc_pmu_identifier_group = {
+ .attrs = hisi_uc_pmu_identifier_attrs,
+};
+
+static const struct attribute_group *hisi_uc_pmu_attr_groups[] = {
+ &hisi_uc_pmu_format_group,
+ &hisi_uc_pmu_events_group,
+ &hisi_uc_pmu_cpumask_attr_group,
+ &hisi_uc_pmu_identifier_group,
+ NULL
+};
+
+static const struct hisi_uncore_ops hisi_uncore_uc_pmu_ops = {
+ .check_filter = hisi_uc_pmu_check_filter,
+ .write_evtype = hisi_uc_pmu_write_evtype,
+ .get_event_idx = hisi_uncore_pmu_get_event_idx,
+ .start_counters = hisi_uc_pmu_start_counters,
+ .stop_counters = hisi_uc_pmu_stop_counters,
+ .enable_counter = hisi_uc_pmu_enable_counter,
+ .disable_counter = hisi_uc_pmu_disable_counter,
+ .enable_counter_int = hisi_uc_pmu_enable_counter_int,
+ .disable_counter_int = hisi_uc_pmu_disable_counter_int,
+ .write_counter = hisi_uc_pmu_write_counter,
+ .read_counter = hisi_uc_pmu_read_counter,
+ .get_int_status = hisi_uc_pmu_get_int_status,
+ .clear_int_status = hisi_uc_pmu_clear_int_status,
+ .enable_filter = hisi_uc_pmu_enable_filter,
+ .disable_filter = hisi_uc_pmu_disable_filter,
+};
+
+static int hisi_uc_pmu_dev_probe(struct platform_device *pdev,
+ struct hisi_pmu *uc_pmu)
+{
+ int ret;
+
+ ret = hisi_uc_pmu_init_data(pdev, uc_pmu);
+ if (ret)
+ return ret;
+
+ ret = hisi_uncore_pmu_init_irq(uc_pmu, pdev);
+ if (ret)
+ return ret;
+
+ uc_pmu->pmu_events.attr_groups = hisi_uc_pmu_attr_groups;
+ uc_pmu->check_event = HISI_UC_EVTYPE_MASK;
+ uc_pmu->ops = &hisi_uncore_uc_pmu_ops;
+ uc_pmu->counter_bits = HISI_UC_CNTR_REG_BITS;
+ uc_pmu->num_counters = HISI_UC_NR_COUNTERS;
+ uc_pmu->dev = &pdev->dev;
+ uc_pmu->on_cpu = -1;
+
+ return 0;
+}
+
+static void hisi_uc_pmu_remove_cpuhp_instance(void *hotplug_node)
+{
+ cpuhp_state_remove_instance_nocalls(hisi_uc_pmu_online, hotplug_node);
+}
+
+static void hisi_uc_pmu_unregister_pmu(void *pmu)
+{
+ perf_pmu_unregister(pmu);
+}
+
+static int hisi_uc_pmu_probe(struct platform_device *pdev)
+{
+ struct hisi_pmu *uc_pmu;
+ char *name;
+ int ret;
+
+ uc_pmu = devm_kzalloc(&pdev->dev, sizeof(*uc_pmu), GFP_KERNEL);
+ if (!uc_pmu)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, uc_pmu);
+
+ ret = hisi_uc_pmu_dev_probe(pdev, uc_pmu);
+ if (ret)
+ return ret;
+
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%d_uc%d_%u",
+ uc_pmu->sccl_id, uc_pmu->ccl_id, uc_pmu->sub_id);
+ if (!name)
+ return -ENOMEM;
+
+ ret = cpuhp_state_add_instance(hisi_uc_pmu_online, &uc_pmu->node);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Error registering hotplug\n");
+
+ ret = devm_add_action_or_reset(&pdev->dev,
+ hisi_uc_pmu_remove_cpuhp_instance,
+ &uc_pmu->node);
+ if (ret)
+ return ret;
+
+ hisi_pmu_init(uc_pmu, THIS_MODULE);
+
+ ret = perf_pmu_register(&uc_pmu->pmu, name, -1);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(&pdev->dev,
+ hisi_uc_pmu_unregister_pmu,
+ &uc_pmu->pmu);
+}
+
+static const struct acpi_device_id hisi_uc_pmu_acpi_match[] = {
+ { "HISI0291", },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, hisi_uc_pmu_acpi_match);
+
+static struct platform_driver hisi_uc_pmu_driver = {
+ .driver = {
+ .name = "hisi_uc_pmu",
+ .acpi_match_table = hisi_uc_pmu_acpi_match,
+ /*
+ * We have not worked out a safe bind/unbind process;
+ * forcefully unbinding during sampling will lead to a
+ * kernel panic, so this is not supported yet.
+ */
+ .suppress_bind_attrs = true,
+ },
+ .probe = hisi_uc_pmu_probe,
+};
+
+static int __init hisi_uc_pmu_module_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ "perf/hisi/uc:online",
+ hisi_uncore_pmu_online_cpu,
+ hisi_uncore_pmu_offline_cpu);
+ if (ret < 0) {
+ pr_err("UC PMU: Error setup hotplug, ret = %d\n", ret);
+ return ret;
+ }
+ hisi_uc_pmu_online = ret;
+
+ ret = platform_driver_register(&hisi_uc_pmu_driver);
+ if (ret)
+ cpuhp_remove_multi_state(hisi_uc_pmu_online);
+
+ return ret;
+}
+module_init(hisi_uc_pmu_module_init);
+
+static void __exit hisi_uc_pmu_module_exit(void)
+{
+ platform_driver_unregister(&hisi_uc_pmu_driver);
+ cpuhp_remove_multi_state(hisi_uc_pmu_online);
+}
+module_exit(hisi_uc_pmu_module_exit);
+
+MODULE_DESCRIPTION("HiSilicon SoC UC uncore PMU driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Junhao He <hejunhao3@huawei.com>");
diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c
index aaca6db7d8f6..3f9a98c17a89 100644
--- a/drivers/perf/qcom_l2_pmu.c
+++ b/drivers/perf/qcom_l2_pmu.c
@@ -857,7 +857,6 @@ static int l2_cache_pmu_probe_cluster(struct device *dev, void *data)
return -ENOMEM;
INIT_LIST_HEAD(&cluster->next);
- list_add(&cluster->next, &l2cache_pmu->clusters);
cluster->cluster_id = fw_cluster_id;
irq = platform_get_irq(sdev, 0);
@@ -883,6 +882,7 @@ static int l2_cache_pmu_probe_cluster(struct device *dev, void *data)
spin_lock_init(&cluster->pmu_lock);
+ list_add(&cluster->next, &l2cache_pmu->clusters);
l2cache_pmu->num_pmus++;
return 0;
diff --git a/drivers/phy/amlogic/phy-meson-g12a-mipi-dphy-analog.c b/drivers/phy/amlogic/phy-meson-g12a-mipi-dphy-analog.c
index c14089fa7db4..cabdddbbabfd 100644
--- a/drivers/phy/amlogic/phy-meson-g12a-mipi-dphy-analog.c
+++ b/drivers/phy/amlogic/phy-meson-g12a-mipi-dphy-analog.c
@@ -70,7 +70,7 @@ static int phy_g12a_mipi_dphy_analog_power_on(struct phy *phy)
HHI_MIPI_CNTL1_BANDGAP);
regmap_write(priv->regmap, HHI_MIPI_CNTL2,
- FIELD_PREP(HHI_MIPI_CNTL2_DIF_TX_CTL0, 0x459) |
+ FIELD_PREP(HHI_MIPI_CNTL2_DIF_TX_CTL0, 0x45a) |
FIELD_PREP(HHI_MIPI_CNTL2_DIF_TX_CTL1, 0x2680));
reg = DSI_LANE_CLK;
diff --git a/drivers/phy/mediatek/phy-mtk-hdmi-mt8195.c b/drivers/phy/mediatek/phy-mtk-hdmi-mt8195.c
index caa953780bee..8aa7251de4a9 100644
--- a/drivers/phy/mediatek/phy-mtk-hdmi-mt8195.c
+++ b/drivers/phy/mediatek/phy-mtk-hdmi-mt8195.c
@@ -237,11 +237,11 @@ static int mtk_hdmi_pll_calc(struct mtk_hdmi_phy *hdmi_phy, struct clk_hw *hw,
*/
if (tmds_clk < 54 * MEGA)
txposdiv = 8;
- else if (tmds_clk >= 54 * MEGA && tmds_clk < 148.35 * MEGA)
+ else if (tmds_clk >= 54 * MEGA && (tmds_clk * 100) < 14835 * MEGA)
txposdiv = 4;
- else if (tmds_clk >= 148.35 * MEGA && tmds_clk < 296.7 * MEGA)
+ else if ((tmds_clk * 100) >= 14835 * MEGA && (tmds_clk * 10) < 2967 * MEGA)
txposdiv = 2;
- else if (tmds_clk >= 296.7 * MEGA && tmds_clk <= 594 * MEGA)
+ else if ((tmds_clk * 10) >= 2967 * MEGA && tmds_clk <= 594 * MEGA)
txposdiv = 1;
else
return -EINVAL;
@@ -324,12 +324,12 @@ static int mtk_hdmi_pll_drv_setting(struct clk_hw *hw)
clk_channel_bias = 0x34; /* 20mA */
impedance_en = 0xf;
impedance = 0x36; /* 100ohm */
- } else if (pixel_clk >= 74.175 * MEGA && pixel_clk <= 300 * MEGA) {
+ } else if (((u64)pixel_clk * 1000) >= 74175 * MEGA && pixel_clk <= 300 * MEGA) {
data_channel_bias = 0x34; /* 20mA */
clk_channel_bias = 0x2c; /* 16mA */
impedance_en = 0xf;
impedance = 0x36; /* 100ohm */
- } else if (pixel_clk >= 27 * MEGA && pixel_clk < 74.175 * MEGA) {
+ } else if (pixel_clk >= 27 * MEGA && ((u64)pixel_clk * 1000) < 74175 * MEGA) {
data_channel_bias = 0x14; /* 10mA */
clk_channel_bias = 0x14; /* 10mA */
impedance_en = 0x0;
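
The hunks above replace floating-point MHz thresholds, which kernel code must avoid, with equivalent comparisons scaled to integers. A minimal user-space sketch of the same trick (threshold values taken from the hunks, the rest illustrative):

	#include <stdio.h>

	#define MEGA 1000000ULL

	int main(void)
	{
		unsigned long long tmds_clk = 148350000; /* 148.35 MHz */

		/*
		 * Compare against 148.35 MHz and 296.7 MHz without floating
		 * point by scaling both sides of each comparison by 100 and
		 * 10 respectively.
		 */
		if (tmds_clk * 100 >= 14835 * MEGA && tmds_clk * 10 < 2967 * MEGA)
			printf("txposdiv = 2\n");
		return 0;
	}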
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
index 6850e04c329b..87b17e5877ab 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
@@ -2472,7 +2472,7 @@ static int qmp_combo_com_init(struct qmp_combo *qmp)
ret = regulator_bulk_enable(cfg->num_vregs, qmp->vregs);
if (ret) {
dev_err(qmp->dev, "failed to enable regulators, err=%d\n", ret);
- goto err_unlock;
+ goto err_decrement_count;
}
ret = reset_control_bulk_assert(cfg->num_resets, qmp->resets);
@@ -2522,7 +2522,8 @@ err_assert_reset:
reset_control_bulk_assert(cfg->num_resets, qmp->resets);
err_disable_regulators:
regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
-err_unlock:
+err_decrement_count:
+ qmp->init_count--;
mutex_unlock(&qmp->phy_mutex);
return ret;
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c
index 09824be088c9..0c603bc06e09 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c
@@ -379,7 +379,7 @@ static int qmp_pcie_msm8996_com_init(struct qmp_phy *qphy)
ret = regulator_bulk_enable(cfg->num_vregs, qmp->vregs);
if (ret) {
dev_err(qmp->dev, "failed to enable regulators, err=%d\n", ret);
- goto err_unlock;
+ goto err_decrement_count;
}
ret = reset_control_bulk_assert(cfg->num_resets, qmp->resets);
@@ -409,7 +409,8 @@ err_assert_reset:
reset_control_bulk_assert(cfg->num_resets, qmp->resets);
err_disable_regulators:
regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
-err_unlock:
+err_decrement_count:
+ qmp->init_count--;
mutex_unlock(&qmp->phy_mutex);
return ret;
diff --git a/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c b/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c
index a59063596214..6c237f3cc66d 100644
--- a/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c
+++ b/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c
@@ -115,11 +115,11 @@ struct phy_override_seq {
*
* @cfg_ahb_clk: AHB2PHY interface clock
* @ref_clk: phy reference clock
- * @iface_clk: phy interface clock
* @phy_reset: phy reset control
* @vregs: regulator supplies bulk data
* @phy_initialized: if PHY has been initialized correctly
* @mode: contains the current mode the PHY is in
+ * @update_seq_cfg: tuning parameters for phy init
*/
struct qcom_snps_hsphy {
struct phy *phy;
diff --git a/drivers/pinctrl/meson/pinctrl-meson-axg.c b/drivers/pinctrl/meson/pinctrl-meson-axg.c
index 7bfecdfba177..d249a035c2b9 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-axg.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-axg.c
@@ -400,6 +400,7 @@ static struct meson_pmx_group meson_axg_periphs_groups[] = {
GPIO_GROUP(GPIOA_15),
GPIO_GROUP(GPIOA_16),
GPIO_GROUP(GPIOA_17),
+ GPIO_GROUP(GPIOA_18),
GPIO_GROUP(GPIOA_19),
GPIO_GROUP(GPIOA_20),
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index f279b360c20d..43d3530bab48 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -30,6 +30,7 @@
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinmux.h>
+#include <linux/suspend.h>
#include "core.h"
#include "pinctrl-utils.h"
@@ -636,9 +637,8 @@ static bool do_amd_gpio_irq_handler(int irq, void *dev_id)
regval = readl(regs + i);
if (regval & PIN_IRQ_PENDING)
- dev_dbg(&gpio_dev->pdev->dev,
- "GPIO %d is active: 0x%x",
- irqnr + i, regval);
+ pm_pr_dbg("GPIO %d is active: 0x%x",
+ irqnr + i, regval);
/* caused wake on resume context for shared IRQ */
if (irq < 0 && (regval & BIT(WAKE_STS_OFF)))
diff --git a/drivers/platform/chrome/cros_ec_i2c.c b/drivers/platform/chrome/cros_ec_i2c.c
index dbe698f33128..e29c51cbfd71 100644
--- a/drivers/platform/chrome/cros_ec_i2c.c
+++ b/drivers/platform/chrome/cros_ec_i2c.c
@@ -372,7 +372,7 @@ static struct i2c_driver cros_ec_driver = {
.of_match_table = of_match_ptr(cros_ec_i2c_of_match),
.pm = &cros_ec_i2c_pm_ops,
},
- .probe_new = cros_ec_i2c_probe,
+ .probe = cros_ec_i2c_probe,
.remove = cros_ec_i2c_remove,
.id_table = cros_ec_i2c_id,
};
diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c
index 68bba0fcafab..500a61b093e4 100644
--- a/drivers/platform/chrome/cros_ec_lpc.c
+++ b/drivers/platform/chrome/cros_ec_lpc.c
@@ -16,6 +16,7 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/interrupt.h>
+#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
@@ -315,6 +316,7 @@ static int cros_ec_lpc_readmem(struct cros_ec_device *ec, unsigned int offset,
static void cros_ec_lpc_acpi_notify(acpi_handle device, u32 value, void *data)
{
+ static const char *env[] = { "ERROR=PANIC", NULL };
struct cros_ec_device *ec_dev = data;
bool ec_has_more_events;
int ret;
@@ -324,6 +326,7 @@ static void cros_ec_lpc_acpi_notify(acpi_handle device, u32 value, void *data)
if (value == ACPI_NOTIFY_CROS_EC_PANIC) {
dev_emerg(ec_dev->dev, "CrOS EC Panic Reported. Shutdown is imminent!");
blocking_notifier_call_chain(&ec_dev->panic_notifier, 0, ec_dev);
+ kobject_uevent_env(&ec_dev->dev->kobj, KOBJ_CHANGE, (char **)env);
/* Begin orderly shutdown. Force shutdown after 1 second. */
hw_protection_shutdown("CrOS EC Panic", 1000);
/* Do not query for other events after a panic is reported */
@@ -543,23 +546,25 @@ static const struct dmi_system_id cros_ec_lpc_dmi_table[] __initconst = {
MODULE_DEVICE_TABLE(dmi, cros_ec_lpc_dmi_table);
#ifdef CONFIG_PM_SLEEP
-static int cros_ec_lpc_suspend(struct device *dev)
+static int cros_ec_lpc_prepare(struct device *dev)
{
struct cros_ec_device *ec_dev = dev_get_drvdata(dev);
return cros_ec_suspend(ec_dev);
}
-static int cros_ec_lpc_resume(struct device *dev)
+static void cros_ec_lpc_complete(struct device *dev)
{
struct cros_ec_device *ec_dev = dev_get_drvdata(dev);
-
- return cros_ec_resume(ec_dev);
+ cros_ec_resume(ec_dev);
}
#endif
static const struct dev_pm_ops cros_ec_lpc_pm_ops = {
- SET_LATE_SYSTEM_SLEEP_PM_OPS(cros_ec_lpc_suspend, cros_ec_lpc_resume)
+#ifdef CONFIG_PM_SLEEP
+ .prepare = cros_ec_lpc_prepare,
+ .complete = cros_ec_lpc_complete
+#endif
};
static struct platform_driver cros_ec_lpc_driver = {
diff --git a/drivers/platform/chrome/cros_ec_spi.c b/drivers/platform/chrome/cros_ec_spi.c
index 21143dba8970..3e88cc92e819 100644
--- a/drivers/platform/chrome/cros_ec_spi.c
+++ b/drivers/platform/chrome/cros_ec_spi.c
@@ -104,13 +104,7 @@ static void debug_packet(struct device *dev, const char *name, u8 *ptr,
int len)
{
#ifdef DEBUG
- int i;
-
- dev_dbg(dev, "%s: ", name);
- for (i = 0; i < len; i++)
- pr_cont(" %02x", ptr[i]);
-
- pr_cont("\n");
+ dev_dbg(dev, "%s: %*ph\n", name, len, ptr);
#endif
}
diff --git a/drivers/platform/chrome/cros_hps_i2c.c b/drivers/platform/chrome/cros_hps_i2c.c
index 62ccb1acb5de..b31313080332 100644
--- a/drivers/platform/chrome/cros_hps_i2c.c
+++ b/drivers/platform/chrome/cros_hps_i2c.c
@@ -143,7 +143,7 @@ MODULE_DEVICE_TABLE(acpi, hps_acpi_id);
#endif /* CONFIG_ACPI */
static struct i2c_driver hps_i2c_driver = {
- .probe_new = hps_i2c_probe,
+ .probe = hps_i2c_probe,
.remove = hps_i2c_remove,
.id_table = hps_i2c_id,
.driver = {
diff --git a/drivers/platform/chrome/cros_typec_switch.c b/drivers/platform/chrome/cros_typec_switch.c
index 752720483753..0eefdcf14d63 100644
--- a/drivers/platform/chrome/cros_typec_switch.c
+++ b/drivers/platform/chrome/cros_typec_switch.c
@@ -51,13 +51,18 @@ static int cros_typec_cmd_mux_set(struct cros_typec_switch_data *sdata, int port
static int cros_typec_get_mux_state(unsigned long mode, struct typec_altmode *alt)
{
int ret = -EOPNOTSUPP;
+ u8 pin_assign;
- if (mode == TYPEC_STATE_SAFE)
+ if (mode == TYPEC_STATE_SAFE) {
ret = USB_PD_MUX_SAFE_MODE;
- else if (mode == TYPEC_STATE_USB)
+ } else if (mode == TYPEC_STATE_USB) {
ret = USB_PD_MUX_USB_ENABLED;
- else if (alt && alt->svid == USB_TYPEC_DP_SID)
+ } else if (alt && alt->svid == USB_TYPEC_DP_SID) {
ret = USB_PD_MUX_DP_ENABLED;
+ pin_assign = mode - TYPEC_STATE_MODAL;
+ if (pin_assign & DP_PIN_ASSIGN_D)
+ ret |= USB_PD_MUX_USB_ENABLED;
+ }
return ret;
}
diff --git a/drivers/platform/mellanox/mlxbf-pmc.c b/drivers/platform/mellanox/mlxbf-pmc.c
index c2c9b0d3244c..be967d797c28 100644
--- a/drivers/platform/mellanox/mlxbf-pmc.c
+++ b/drivers/platform/mellanox/mlxbf-pmc.c
@@ -1348,9 +1348,8 @@ static int mlxbf_pmc_map_counters(struct device *dev)
for (i = 0; i < pmc->total_blocks; ++i) {
if (strstr(pmc->block_name[i], "tile")) {
- ret = sscanf(pmc->block_name[i], "tile%d", &tile_num);
- if (ret < 0)
- return ret;
+ if (sscanf(pmc->block_name[i], "tile%d", &tile_num) != 1)
+ return -EINVAL;
if (tile_num >= pmc->tile_count)
continue;
diff --git a/drivers/platform/surface/aggregator/controller.c b/drivers/platform/surface/aggregator/controller.c
index 535581c0471c..7fc602e01487 100644
--- a/drivers/platform/surface/aggregator/controller.c
+++ b/drivers/platform/surface/aggregator/controller.c
@@ -825,7 +825,7 @@ static int ssam_cplt_init(struct ssam_cplt *cplt, struct device *dev)
cplt->dev = dev;
- cplt->wq = create_workqueue(SSAM_CPLT_WQ_NAME);
+ cplt->wq = alloc_workqueue(SSAM_CPLT_WQ_NAME, WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
if (!cplt->wq)
return -ENOMEM;
diff --git a/drivers/platform/surface/surface_aggregator_tabletsw.c b/drivers/platform/surface/surface_aggregator_tabletsw.c
index 8f52b62d1c19..c0a1a5869246 100644
--- a/drivers/platform/surface/surface_aggregator_tabletsw.c
+++ b/drivers/platform/surface/surface_aggregator_tabletsw.c
@@ -210,6 +210,7 @@ enum ssam_kip_cover_state {
SSAM_KIP_COVER_STATE_LAPTOP = 0x03,
SSAM_KIP_COVER_STATE_FOLDED_CANVAS = 0x04,
SSAM_KIP_COVER_STATE_FOLDED_BACK = 0x05,
+ SSAM_KIP_COVER_STATE_BOOK = 0x06,
};
static const char *ssam_kip_cover_state_name(struct ssam_tablet_sw *sw,
@@ -231,6 +232,9 @@ static const char *ssam_kip_cover_state_name(struct ssam_tablet_sw *sw,
case SSAM_KIP_COVER_STATE_FOLDED_BACK:
return "folded-back";
+ case SSAM_KIP_COVER_STATE_BOOK:
+ return "book";
+
default:
dev_warn(&sw->sdev->dev, "unknown KIP cover state: %u\n", state->state);
return "<unknown>";
@@ -244,6 +248,7 @@ static bool ssam_kip_cover_state_is_tablet_mode(struct ssam_tablet_sw *sw,
case SSAM_KIP_COVER_STATE_DISCONNECTED:
case SSAM_KIP_COVER_STATE_FOLDED_CANVAS:
case SSAM_KIP_COVER_STATE_FOLDED_BACK:
+ case SSAM_KIP_COVER_STATE_BOOK:
return true;
case SSAM_KIP_COVER_STATE_CLOSED:
@@ -335,6 +340,7 @@ enum ssam_pos_state_cover {
SSAM_POS_COVER_LAPTOP = 0x03,
SSAM_POS_COVER_FOLDED_CANVAS = 0x04,
SSAM_POS_COVER_FOLDED_BACK = 0x05,
+ SSAM_POS_COVER_BOOK = 0x06,
};
enum ssam_pos_state_sls {
@@ -367,6 +373,9 @@ static const char *ssam_pos_state_name_cover(struct ssam_tablet_sw *sw, u32 stat
case SSAM_POS_COVER_FOLDED_BACK:
return "folded-back";
+ case SSAM_POS_COVER_BOOK:
+ return "book";
+
default:
dev_warn(&sw->sdev->dev, "unknown device posture for type-cover: %u\n", state);
return "<unknown>";
@@ -416,6 +425,7 @@ static bool ssam_pos_state_is_tablet_mode_cover(struct ssam_tablet_sw *sw, u32 s
case SSAM_POS_COVER_DISCONNECTED:
case SSAM_POS_COVER_FOLDED_CANVAS:
case SSAM_POS_COVER_FOLDED_BACK:
+ case SSAM_POS_COVER_BOOK:
return true;
case SSAM_POS_COVER_CLOSED:
diff --git a/drivers/platform/x86/amd/pmc.c b/drivers/platform/x86/amd/pmc.c
index 427905714f79..1304cd6f13f6 100644
--- a/drivers/platform/x86/amd/pmc.c
+++ b/drivers/platform/x86/amd/pmc.c
@@ -543,7 +543,7 @@ static int amd_pmc_idlemask_read(struct amd_pmc_dev *pdev, struct device *dev,
}
if (dev)
- dev_dbg(pdev->dev, "SMU idlemask s0i3: 0x%x\n", val);
+ pm_pr_dbg("SMU idlemask s0i3: 0x%x\n", val);
if (s)
seq_printf(s, "SMU idlemask : 0x%x\n", val);
@@ -769,7 +769,7 @@ static int amd_pmc_verify_czn_rtc(struct amd_pmc_dev *pdev, u32 *arg)
*arg |= (duration << 16);
rc = rtc_alarm_irq_enable(rtc_device, 0);
- dev_dbg(pdev->dev, "wakeup timer programmed for %lld seconds\n", duration);
+ pm_pr_dbg("wakeup timer programmed for %lld seconds\n", duration);
return rc;
}
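
Both conversions above swap dev_dbg() for pm_pr_dbg(), which routes the message through the suspend/resume debug facility: it is emitted whenever PM debug messages are enabled (e.g. via /sys/power/pm_debug_messages) and lands in the same trace as the rest of the s2idle path, without enabling dynamic debug for the whole driver.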
diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c
index d5bb775dadcf..7780705917b7 100644
--- a/drivers/platform/x86/amd/pmf/core.c
+++ b/drivers/platform/x86/amd/pmf/core.c
@@ -245,24 +245,29 @@ static const struct pci_device_id pmf_pci_ids[] = {
{ }
};
-int amd_pmf_init_metrics_table(struct amd_pmf_dev *dev)
+static void amd_pmf_set_dram_addr(struct amd_pmf_dev *dev)
{
u64 phys_addr;
u32 hi, low;
- INIT_DELAYED_WORK(&dev->work_buffer, amd_pmf_get_metrics);
+ phys_addr = virt_to_phys(dev->buf);
+ hi = phys_addr >> 32;
+ low = phys_addr & GENMASK(31, 0);
+
+ amd_pmf_send_cmd(dev, SET_DRAM_ADDR_HIGH, 0, hi, NULL);
+ amd_pmf_send_cmd(dev, SET_DRAM_ADDR_LOW, 0, low, NULL);
+}
+int amd_pmf_init_metrics_table(struct amd_pmf_dev *dev)
+{
/* Get Metrics Table Address */
dev->buf = kzalloc(sizeof(dev->m_table), GFP_KERNEL);
if (!dev->buf)
return -ENOMEM;
- phys_addr = virt_to_phys(dev->buf);
- hi = phys_addr >> 32;
- low = phys_addr & GENMASK(31, 0);
+ INIT_DELAYED_WORK(&dev->work_buffer, amd_pmf_get_metrics);
- amd_pmf_send_cmd(dev, SET_DRAM_ADDR_HIGH, 0, hi, NULL);
- amd_pmf_send_cmd(dev, SET_DRAM_ADDR_LOW, 0, low, NULL);
+ amd_pmf_set_dram_addr(dev);
/*
* Start collecting the metrics data after a small delay
@@ -273,6 +278,18 @@ int amd_pmf_init_metrics_table(struct amd_pmf_dev *dev)
return 0;
}
+static int amd_pmf_resume_handler(struct device *dev)
+{
+ struct amd_pmf_dev *pdev = dev_get_drvdata(dev);
+
+ if (pdev->buf)
+ amd_pmf_set_dram_addr(pdev);
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(amd_pmf_pm, NULL, amd_pmf_resume_handler);
+
static void amd_pmf_init_features(struct amd_pmf_dev *dev)
{
int ret;
@@ -280,6 +297,8 @@ static void amd_pmf_init_features(struct amd_pmf_dev *dev)
/* Enable Static Slider */
if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
amd_pmf_init_sps(dev);
+ dev->pwr_src_notifier.notifier_call = amd_pmf_pwr_src_notify_call;
+ power_supply_reg_notifier(&dev->pwr_src_notifier);
dev_dbg(dev->dev, "SPS enabled and Platform Profiles registered\n");
}
@@ -298,8 +317,10 @@ static void amd_pmf_init_features(struct amd_pmf_dev *dev)
static void amd_pmf_deinit_features(struct amd_pmf_dev *dev)
{
- if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR))
+ if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
+ power_supply_unreg_notifier(&dev->pwr_src_notifier);
amd_pmf_deinit_sps(dev);
+ }
if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) {
amd_pmf_deinit_auto_mode(dev);
@@ -382,9 +403,6 @@ static int amd_pmf_probe(struct platform_device *pdev)
apmf_install_handler(dev);
amd_pmf_dbgfs_register(dev);
- dev->pwr_src_notifier.notifier_call = amd_pmf_pwr_src_notify_call;
- power_supply_reg_notifier(&dev->pwr_src_notifier);
-
dev_info(dev->dev, "registered PMF device successfully\n");
return 0;
@@ -394,7 +412,6 @@ static void amd_pmf_remove(struct platform_device *pdev)
{
struct amd_pmf_dev *dev = platform_get_drvdata(pdev);
- power_supply_unreg_notifier(&dev->pwr_src_notifier);
amd_pmf_deinit_features(dev);
apmf_acpi_deinit(dev);
amd_pmf_dbgfs_unregister(dev);
@@ -413,6 +430,7 @@ static struct platform_driver amd_pmf_driver = {
.name = "amd-pmf",
.acpi_match_table = amd_pmf_acpi_ids,
.dev_groups = amd_pmf_driver_groups,
+ .pm = pm_sleep_ptr(&amd_pmf_pm),
},
.probe = amd_pmf_probe,
.remove_new = amd_pmf_remove,
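
The reshuffle above factors the DRAM-address programming into amd_pmf_set_dram_addr() so a resume handler can replay it, since the previously programmed metrics-table address cannot be assumed to survive suspend; the power-supply notifier likewise moves into amd_pmf_init_features()/amd_pmf_deinit_features() so it is only registered when the static-slider feature that uses it is enabled. For reference, a minimal sketch of the same DEFINE_SIMPLE_DEV_PM_OPS() + pm_sleep_ptr() pattern (driver names illustrative); both compile away when CONFIG_PM_SLEEP is off:

#include <linux/pm.h>
#include <linux/platform_device.h>

static int example_resume(struct device *dev)
{
	/* re-program volatile hardware state lost across suspend */
	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(example_pm, NULL, example_resume);

static struct platform_driver example_driver = {
	.driver = {
		.name	= "example",
		.pm	= pm_sleep_ptr(&example_pm),
	},
};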
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index e2c9a68d12df..fdf7da06af30 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -555,6 +555,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
{ KE_KEY, 0x71, { KEY_F13 } }, /* General-purpose button */
{ KE_IGNORE, 0x79, }, /* Charger type detection notification */
{ KE_KEY, 0x7a, { KEY_ALS_TOGGLE } }, /* Ambient Light Sensor Toggle */
+ { KE_IGNORE, 0x7B, }, /* Charger connect/disconnect notification */
{ KE_KEY, 0x7c, { KEY_MICMUTE } },
{ KE_KEY, 0x7D, { KEY_BLUETOOTH } }, /* Bluetooth Enable */
{ KE_KEY, 0x7E, { KEY_BLUETOOTH } }, /* Bluetooth Disable */
@@ -584,6 +585,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
{ KE_KEY, 0xAE, { KEY_FN_F5 } }, /* Fn+F5 fan mode on 2020+ */
{ KE_KEY, 0xB3, { KEY_PROG4 } }, /* AURA */
{ KE_KEY, 0xB5, { KEY_CALC } },
+ { KE_IGNORE, 0xC0, }, /* External display connect/disconnect notification */
{ KE_KEY, 0xC4, { KEY_KBDILLUMUP } },
{ KE_KEY, 0xC5, { KEY_KBDILLUMDOWN } },
{ KE_IGNORE, 0xC6, }, /* Ambient Light Sensor notification */
diff --git a/drivers/platform/x86/intel/ifs/load.c b/drivers/platform/x86/intel/ifs/load.c
index 61dffb4c8a1d..e6ae8265f3a3 100644
--- a/drivers/platform/x86/intel/ifs/load.c
+++ b/drivers/platform/x86/intel/ifs/load.c
@@ -208,7 +208,7 @@ static int scan_chunks_sanity_check(struct device *dev)
continue;
reinit_completion(&ifs_done);
local_work.dev = dev;
- INIT_WORK(&local_work.w, copy_hashes_authenticate_chunks);
+ INIT_WORK_ONSTACK(&local_work.w, copy_hashes_authenticate_chunks);
schedule_work_on(cpu, &local_work.w);
wait_for_completion(&ifs_done);
if (ifsd->loading_error) {
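
Context for the one-liner above: local_work lives on the stack, and with CONFIG_DEBUG_OBJECTS_WORK a plain INIT_WORK() on a stack address trips the debug-objects checks. INIT_WORK_ONSTACK() declares the object as on-stack; the matching teardown is destroy_work_on_stack() once the work is known idle. A hedged sketch of the full idiom (helper name illustrative; the driver above waits on a completion instead of flush_work()):

#include <linux/workqueue.h>

static void run_on_cpu_and_wait(int cpu, work_func_t fn,
				struct work_struct *onstack_work)
{
	INIT_WORK_ONSTACK(onstack_work, fn);
	schedule_work_on(cpu, onstack_work);
	flush_work(onstack_work);		/* wait for fn to finish */
	destroy_work_on_stack(onstack_work);	/* debugobjects bookkeeping */
}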
diff --git a/drivers/platform/x86/intel/int3472/clk_and_regulator.c b/drivers/platform/x86/intel/int3472/clk_and_regulator.c
index 1086c3d83494..399f0623ca1b 100644
--- a/drivers/platform/x86/intel/int3472/clk_and_regulator.c
+++ b/drivers/platform/x86/intel/int3472/clk_and_regulator.c
@@ -101,9 +101,11 @@ int skl_int3472_register_clock(struct int3472_discrete_device *int3472,
int3472->clock.ena_gpio = acpi_get_and_request_gpiod(path, agpio->pin_table[0],
"int3472,clk-enable");
- if (IS_ERR(int3472->clock.ena_gpio))
- return dev_err_probe(int3472->dev, PTR_ERR(int3472->clock.ena_gpio),
- "getting clk-enable GPIO\n");
+ if (IS_ERR(int3472->clock.ena_gpio)) {
+ ret = PTR_ERR(int3472->clock.ena_gpio);
+ int3472->clock.ena_gpio = NULL;
+ return dev_err_probe(int3472->dev, ret, "getting clk-enable GPIO\n");
+ }
if (polarity == GPIO_ACTIVE_LOW)
gpiod_toggle_active_low(int3472->clock.ena_gpio);
@@ -199,8 +201,9 @@ int skl_int3472_register_regulator(struct int3472_discrete_device *int3472,
int3472->regulator.gpio = acpi_get_and_request_gpiod(path, agpio->pin_table[0],
"int3472,regulator");
if (IS_ERR(int3472->regulator.gpio)) {
- dev_err(int3472->dev, "Failed to get regulator GPIO line\n");
- return PTR_ERR(int3472->regulator.gpio);
+ ret = PTR_ERR(int3472->regulator.gpio);
+ int3472->regulator.gpio = NULL;
+ return dev_err_probe(int3472->dev, ret, "getting regulator GPIO\n");
}
/* Ensure the pin is in output mode and non-active state */
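
Both hunks above fix the same hazard: on failure, acpi_get_and_request_gpiod() leaves an ERR_PTR() value in the cached field, and the driver's cleanup path would later pass that to gpiod_put() and crash. Clearing the field to NULL before returning keeps cleanup safe, and dev_err_probe() adds uniform -EPROBE_DEFER handling. A generic sketch of the idiom (names illustrative):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int example_get_gpio(struct device *dev, struct gpio_desc **slot)
{
	struct gpio_desc *gpio = gpiod_get(dev, "example", GPIOD_OUT_LOW);

	if (IS_ERR(gpio)) {
		*slot = NULL;	/* never leave ERR_PTR for cleanup to put() */
		return dev_err_probe(dev, PTR_ERR(gpio), "getting GPIO\n");
	}

	*slot = gpio;
	return 0;
}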
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
index e0572a29212e..02fe360a59c7 100644
--- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
@@ -304,14 +304,13 @@ struct isst_if_pkg_info {
static struct isst_if_cpu_info *isst_cpu_info;
static struct isst_if_pkg_info *isst_pkg_info;
-#define ISST_MAX_PCI_DOMAINS 8
-
static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn)
{
struct pci_dev *matched_pci_dev = NULL;
struct pci_dev *pci_dev = NULL;
+ struct pci_dev *_pci_dev = NULL;
int no_matches = 0, pkg_id;
- int i, bus_number;
+ int bus_number;
if (bus_no < 0 || bus_no >= ISST_MAX_BUS_NUMBER || cpu < 0 ||
cpu >= nr_cpu_ids || cpu >= num_possible_cpus())
@@ -323,12 +322,11 @@ static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn
if (bus_number < 0)
return NULL;
- for (i = 0; i < ISST_MAX_PCI_DOMAINS; ++i) {
- struct pci_dev *_pci_dev;
+ for_each_pci_dev(_pci_dev) {
int node;
- _pci_dev = pci_get_domain_bus_and_slot(i, bus_number, PCI_DEVFN(dev, fn));
- if (!_pci_dev)
+ if (_pci_dev->bus->number != bus_number ||
+ _pci_dev->devfn != PCI_DEVFN(dev, fn))
continue;
++no_matches;
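
The rewrite above removes the arbitrary ISST_MAX_PCI_DOMAINS bound: instead of probing eight hard-coded domains with pci_get_domain_bus_and_slot(), for_each_pci_dev() walks every PCI device in the system and matches bus number and devfn, so platforms with more domains keep working. The iterator holds a reference on the current device; returning from inside the loop hands that reference to the caller, while any other early break must pci_dev_put() it. A hedged sketch:

#include <linux/pci.h>

static struct pci_dev *find_by_bus_devfn(u8 bus, unsigned int devfn)
{
	struct pci_dev *pdev = NULL;

	for_each_pci_dev(pdev) {
		if (pdev->bus->number == bus && pdev->devfn == devfn)
			return pdev;	/* reference stays with the caller */
	}

	return NULL;
}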
diff --git a/drivers/power/supply/ab8500_btemp.c b/drivers/power/supply/ab8500_btemp.c
index 307ee6f71042..6f83e99d2eb7 100644
--- a/drivers/power/supply/ab8500_btemp.c
+++ b/drivers/power/supply/ab8500_btemp.c
@@ -624,10 +624,8 @@ static int ab8500_btemp_get_ext_psy_data(struct device *dev, void *data)
*/
static void ab8500_btemp_external_power_changed(struct power_supply *psy)
{
- struct ab8500_btemp *di = power_supply_get_drvdata(psy);
-
- class_for_each_device(power_supply_class, NULL,
- di->btemp_psy, ab8500_btemp_get_ext_psy_data);
+ class_for_each_device(power_supply_class, NULL, psy,
+ ab8500_btemp_get_ext_psy_data);
}
/* ab8500 btemp driver interrupts and their respective isr */
diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c
index 41a7bff9ac37..53560fbb6dcd 100644
--- a/drivers/power/supply/ab8500_fg.c
+++ b/drivers/power/supply/ab8500_fg.c
@@ -2407,10 +2407,8 @@ out:
*/
static void ab8500_fg_external_power_changed(struct power_supply *psy)
{
- struct ab8500_fg *di = power_supply_get_drvdata(psy);
-
- class_for_each_device(power_supply_class, NULL,
- di->fg_psy, ab8500_fg_get_ext_psy_data);
+ class_for_each_device(power_supply_class, NULL, psy,
+ ab8500_fg_get_ext_psy_data);
}
/**
diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c
index 05f413178462..3be6f3b10ea4 100644
--- a/drivers/power/supply/axp288_fuel_gauge.c
+++ b/drivers/power/supply/axp288_fuel_gauge.c
@@ -507,7 +507,7 @@ static void fuel_gauge_external_power_changed(struct power_supply *psy)
mutex_lock(&info->lock);
info->valid = 0; /* Force updating of the cached registers */
mutex_unlock(&info->lock);
- power_supply_changed(info->bat);
+ power_supply_changed(psy);
}
static struct power_supply_desc fuel_gauge_desc = {
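
The three power-supply fixes above (ab8500_btemp, ab8500_fg, axp288) are the same bug: external_power_changed() looked its own power_supply up via drvdata, but the core can invoke the callback while the supply is still registering, before that private pointer is valid. The callback already receives the power_supply that changed, so using the argument is both simpler and race-free:

#include <linux/power_supply.h>

static void example_external_power_changed(struct power_supply *psy)
{
	/* use the psy the core hands in, not power_supply_get_drvdata() */
	power_supply_changed(psy);
}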
diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c
index de67b985f0a9..dc33f00fcc06 100644
--- a/drivers/power/supply/bq24190_charger.c
+++ b/drivers/power/supply/bq24190_charger.c
@@ -1262,6 +1262,7 @@ static void bq24190_input_current_limit_work(struct work_struct *work)
bq24190_charger_set_property(bdi->charger,
POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
&val);
+ power_supply_changed(bdi->charger);
}
/* Sync the input-current-limit with our parent supply (if we have one) */
diff --git a/drivers/power/supply/bq25890_charger.c b/drivers/power/supply/bq25890_charger.c
index 22cde35eb144..f8636cf86505 100644
--- a/drivers/power/supply/bq25890_charger.c
+++ b/drivers/power/supply/bq25890_charger.c
@@ -750,7 +750,7 @@ static void bq25890_charger_external_power_changed(struct power_supply *psy)
if (bq->chip_version != BQ25892)
return;
- ret = power_supply_get_property_from_supplier(bq->charger,
+ ret = power_supply_get_property_from_supplier(psy,
POWER_SUPPLY_PROP_USB_TYPE,
&val);
if (ret)
@@ -775,6 +775,7 @@ static void bq25890_charger_external_power_changed(struct power_supply *psy)
}
bq25890_field_write(bq, F_IINLIM, input_current_limit);
+ power_supply_changed(psy);
}
static int bq25890_get_chip_state(struct bq25890_device *bq,
@@ -1106,6 +1107,8 @@ static void bq25890_pump_express_work(struct work_struct *data)
dev_info(bq->dev, "Hi-voltage charging requested, input voltage is %d mV\n",
voltage);
+ power_supply_changed(bq->charger);
+
return;
error_print:
bq25890_field_write(bq, F_PUMPX_EN, 0);
diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
index 5ff6f44fd47b..4296600e8912 100644
--- a/drivers/power/supply/bq27xxx_battery.c
+++ b/drivers/power/supply/bq27xxx_battery.c
@@ -1083,10 +1083,8 @@ static int poll_interval_param_set(const char *val, const struct kernel_param *k
return ret;
mutex_lock(&bq27xxx_list_lock);
- list_for_each_entry(di, &bq27xxx_battery_devices, list) {
- cancel_delayed_work_sync(&di->work);
- schedule_delayed_work(&di->work, 0);
- }
+ list_for_each_entry(di, &bq27xxx_battery_devices, list)
+ mod_delayed_work(system_wq, &di->work, 0);
mutex_unlock(&bq27xxx_list_lock);
return ret;
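
On the conversion above: cancel_delayed_work_sync() followed by schedule_delayed_work() is a heavyweight way to say "run now", and the blocking cancel can deadlock if ever reached from the work item's own context. mod_delayed_work() re-arms the timer atomically whether or not the work is currently queued, and is safe to call from any context. Sketch:

#include <linux/workqueue.h>

static void poke_poll(struct delayed_work *dwork)
{
	/* run (or re-run) the poll immediately on the system workqueue */
	mod_delayed_work(system_wq, dwork, 0);
}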
@@ -1761,60 +1759,6 @@ static int bq27xxx_battery_read_health(struct bq27xxx_device_info *di)
return POWER_SUPPLY_HEALTH_GOOD;
}
-void bq27xxx_battery_update(struct bq27xxx_device_info *di)
-{
- struct bq27xxx_reg_cache cache = {0, };
- bool has_singe_flag = di->opts & BQ27XXX_O_ZERO;
-
- cache.flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, has_singe_flag);
- if ((cache.flags & 0xff) == 0xff)
- cache.flags = -1; /* read error */
- if (cache.flags >= 0) {
- cache.temperature = bq27xxx_battery_read_temperature(di);
- if (di->regs[BQ27XXX_REG_TTE] != INVALID_REG_ADDR)
- cache.time_to_empty = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTE);
- if (di->regs[BQ27XXX_REG_TTECP] != INVALID_REG_ADDR)
- cache.time_to_empty_avg = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTECP);
- if (di->regs[BQ27XXX_REG_TTF] != INVALID_REG_ADDR)
- cache.time_to_full = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTF);
-
- cache.charge_full = bq27xxx_battery_read_fcc(di);
- cache.capacity = bq27xxx_battery_read_soc(di);
- if (di->regs[BQ27XXX_REG_AE] != INVALID_REG_ADDR)
- cache.energy = bq27xxx_battery_read_energy(di);
- di->cache.flags = cache.flags;
- cache.health = bq27xxx_battery_read_health(di);
- if (di->regs[BQ27XXX_REG_CYCT] != INVALID_REG_ADDR)
- cache.cycle_count = bq27xxx_battery_read_cyct(di);
-
- /* We only have to read charge design full once */
- if (di->charge_design_full <= 0)
- di->charge_design_full = bq27xxx_battery_read_dcap(di);
- }
-
- if ((di->cache.capacity != cache.capacity) ||
- (di->cache.flags != cache.flags))
- power_supply_changed(di->bat);
-
- if (memcmp(&di->cache, &cache, sizeof(cache)) != 0)
- di->cache = cache;
-
- di->last_update = jiffies;
-}
-EXPORT_SYMBOL_GPL(bq27xxx_battery_update);
-
-static void bq27xxx_battery_poll(struct work_struct *work)
-{
- struct bq27xxx_device_info *di =
- container_of(work, struct bq27xxx_device_info,
- work.work);
-
- bq27xxx_battery_update(di);
-
- if (poll_interval > 0)
- schedule_delayed_work(&di->work, poll_interval * HZ);
-}
-
static bool bq27xxx_battery_is_full(struct bq27xxx_device_info *di, int flags)
{
if (di->opts & BQ27XXX_O_ZERO)
@@ -1833,7 +1777,8 @@ static bool bq27xxx_battery_is_full(struct bq27xxx_device_info *di, int flags)
static int bq27xxx_battery_current_and_status(
struct bq27xxx_device_info *di,
union power_supply_propval *val_curr,
- union power_supply_propval *val_status)
+ union power_supply_propval *val_status,
+ struct bq27xxx_reg_cache *cache)
{
bool single_flags = (di->opts & BQ27XXX_O_ZERO);
int curr;
@@ -1845,10 +1790,14 @@ static int bq27xxx_battery_current_and_status(
return curr;
}
- flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, single_flags);
- if (flags < 0) {
- dev_err(di->dev, "error reading flags\n");
- return flags;
+ if (cache) {
+ flags = cache->flags;
+ } else {
+ flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, single_flags);
+ if (flags < 0) {
+ dev_err(di->dev, "error reading flags\n");
+ return flags;
+ }
}
if (di->opts & BQ27XXX_O_ZERO) {
@@ -1883,6 +1832,78 @@ static int bq27xxx_battery_current_and_status(
return 0;
}
+static void bq27xxx_battery_update_unlocked(struct bq27xxx_device_info *di)
+{
+ union power_supply_propval status = di->last_status;
+ struct bq27xxx_reg_cache cache = {0, };
+ bool has_singe_flag = di->opts & BQ27XXX_O_ZERO;
+
+ cache.flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, has_singe_flag);
+ if ((cache.flags & 0xff) == 0xff)
+ cache.flags = -1; /* read error */
+ if (cache.flags >= 0) {
+ cache.temperature = bq27xxx_battery_read_temperature(di);
+ if (di->regs[BQ27XXX_REG_TTE] != INVALID_REG_ADDR)
+ cache.time_to_empty = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTE);
+ if (di->regs[BQ27XXX_REG_TTECP] != INVALID_REG_ADDR)
+ cache.time_to_empty_avg = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTECP);
+ if (di->regs[BQ27XXX_REG_TTF] != INVALID_REG_ADDR)
+ cache.time_to_full = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTF);
+
+ cache.charge_full = bq27xxx_battery_read_fcc(di);
+ cache.capacity = bq27xxx_battery_read_soc(di);
+ if (di->regs[BQ27XXX_REG_AE] != INVALID_REG_ADDR)
+ cache.energy = bq27xxx_battery_read_energy(di);
+ di->cache.flags = cache.flags;
+ cache.health = bq27xxx_battery_read_health(di);
+ if (di->regs[BQ27XXX_REG_CYCT] != INVALID_REG_ADDR)
+ cache.cycle_count = bq27xxx_battery_read_cyct(di);
+
+ /*
+ * On gauges with signed current reporting, the current must be
+ * checked to detect charging <-> discharging status changes.
+ */
+ if (!(di->opts & BQ27XXX_O_ZERO))
+ bq27xxx_battery_current_and_status(di, NULL, &status, &cache);
+
+ /* We only have to read charge design full once */
+ if (di->charge_design_full <= 0)
+ di->charge_design_full = bq27xxx_battery_read_dcap(di);
+ }
+
+ if ((di->cache.capacity != cache.capacity) ||
+ (di->cache.flags != cache.flags) ||
+ (di->last_status.intval != status.intval)) {
+ di->last_status.intval = status.intval;
+ power_supply_changed(di->bat);
+ }
+
+ if (memcmp(&di->cache, &cache, sizeof(cache)) != 0)
+ di->cache = cache;
+
+ di->last_update = jiffies;
+
+ if (!di->removed && poll_interval > 0)
+ mod_delayed_work(system_wq, &di->work, poll_interval * HZ);
+}
+
+void bq27xxx_battery_update(struct bq27xxx_device_info *di)
+{
+ mutex_lock(&di->lock);
+ bq27xxx_battery_update_unlocked(di);
+ mutex_unlock(&di->lock);
+}
+EXPORT_SYMBOL_GPL(bq27xxx_battery_update);
+
+static void bq27xxx_battery_poll(struct work_struct *work)
+{
+ struct bq27xxx_device_info *di =
+ container_of(work, struct bq27xxx_device_info,
+ work.work);
+
+ bq27xxx_battery_update(di);
+}
+
/*
* Get the average power in µW
* Return < 0 if something fails.
@@ -1985,10 +2006,8 @@ static int bq27xxx_battery_get_property(struct power_supply *psy,
struct bq27xxx_device_info *di = power_supply_get_drvdata(psy);
mutex_lock(&di->lock);
- if (time_is_before_jiffies(di->last_update + 5 * HZ)) {
- cancel_delayed_work_sync(&di->work);
- bq27xxx_battery_poll(&di->work.work);
- }
+ if (time_is_before_jiffies(di->last_update + 5 * HZ))
+ bq27xxx_battery_update_unlocked(di);
mutex_unlock(&di->lock);
if (psp != POWER_SUPPLY_PROP_PRESENT && di->cache.flags < 0)
@@ -1996,7 +2015,7 @@ static int bq27xxx_battery_get_property(struct power_supply *psy,
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
- ret = bq27xxx_battery_current_and_status(di, NULL, val);
+ ret = bq27xxx_battery_current_and_status(di, NULL, val, NULL);
break;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
ret = bq27xxx_battery_voltage(di, val);
@@ -2005,7 +2024,7 @@ static int bq27xxx_battery_get_property(struct power_supply *psy,
val->intval = di->cache.flags < 0 ? 0 : 1;
break;
case POWER_SUPPLY_PROP_CURRENT_NOW:
- ret = bq27xxx_battery_current_and_status(di, val, NULL);
+ ret = bq27xxx_battery_current_and_status(di, val, NULL, NULL);
break;
case POWER_SUPPLY_PROP_CAPACITY:
ret = bq27xxx_simple_value(di->cache.capacity, val);
@@ -2078,8 +2097,8 @@ static void bq27xxx_external_power_changed(struct power_supply *psy)
{
struct bq27xxx_device_info *di = power_supply_get_drvdata(psy);
- cancel_delayed_work_sync(&di->work);
- schedule_delayed_work(&di->work, 0);
+ /* After charger plug in/out wait 0.5s for things to stabilize */
+ mod_delayed_work(system_wq, &di->work, HZ / 2);
}
int bq27xxx_battery_setup(struct bq27xxx_device_info *di)
@@ -2127,22 +2146,18 @@ EXPORT_SYMBOL_GPL(bq27xxx_battery_setup);
void bq27xxx_battery_teardown(struct bq27xxx_device_info *di)
{
- /*
- * power_supply_unregister call bq27xxx_battery_get_property which
- * call bq27xxx_battery_poll.
- * Make sure that bq27xxx_battery_poll will not call
- * schedule_delayed_work again after unregister (which cause OOPS).
- */
- poll_interval = 0;
-
- cancel_delayed_work_sync(&di->work);
-
- power_supply_unregister(di->bat);
-
mutex_lock(&bq27xxx_list_lock);
list_del(&di->list);
mutex_unlock(&bq27xxx_list_lock);
+ /* Set removed to avoid bq27xxx_battery_update() re-queuing the work */
+ mutex_lock(&di->lock);
+ di->removed = true;
+ mutex_unlock(&di->lock);
+
+ cancel_delayed_work_sync(&di->work);
+
+ power_supply_unregister(di->bat);
mutex_destroy(&di->lock);
}
EXPORT_SYMBOL_GPL(bq27xxx_battery_teardown);
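
The teardown above also fixes an unregister race: bq27xxx_battery_update() now re-arms its own delayed work, so a bare cancel_delayed_work_sync() could be undone by an update running concurrently. Setting a removed flag under the same lock the update path holds, then cancelling, guarantees nothing re-queues before power_supply_unregister(). The pattern reduced to its essentials (struct name illustrative):

#include <linux/mutex.h>
#include <linux/workqueue.h>

struct poller {
	struct mutex lock;
	struct delayed_work work;
	bool removed;
};

static void poller_teardown(struct poller *p)
{
	mutex_lock(&p->lock);
	p->removed = true;	/* update path tests this before re-arming */
	mutex_unlock(&p->lock);

	/* no new queueing can happen now; flush anything in flight */
	cancel_delayed_work_sync(&p->work);
}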
diff --git a/drivers/power/supply/bq27xxx_battery_i2c.c b/drivers/power/supply/bq27xxx_battery_i2c.c
index f8768997333b..6d3c74876339 100644
--- a/drivers/power/supply/bq27xxx_battery_i2c.c
+++ b/drivers/power/supply/bq27xxx_battery_i2c.c
@@ -179,7 +179,7 @@ static int bq27xxx_battery_i2c_probe(struct i2c_client *client)
i2c_set_clientdata(client, di);
if (client->irq) {
- ret = devm_request_threaded_irq(&client->dev, client->irq,
+ ret = request_threaded_irq(client->irq,
NULL, bq27xxx_battery_irq_handler_thread,
IRQF_ONESHOT,
di->name, di);
@@ -209,6 +209,7 @@ static void bq27xxx_battery_i2c_remove(struct i2c_client *client)
{
struct bq27xxx_device_info *di = i2c_get_clientdata(client);
+ free_irq(client->irq, di);
bq27xxx_battery_teardown(di);
mutex_lock(&battery_mutex);
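
The i2c hunks above deliberately step back from devm for the IRQ: devm resources are released only after remove() returns, so a devm-managed IRQ could still fire and touch driver state after bq27xxx_battery_teardown() had destroyed it. Requesting the IRQ manually and freeing it first in remove() restores the required ordering. The shape of the fix (chip type and teardown helper illustrative):

#include <linux/i2c.h>
#include <linux/interrupt.h>

static void example_remove(struct i2c_client *client)
{
	struct example_chip *chip = i2c_get_clientdata(client);

	free_irq(client->irq, chip);	/* quiesce the IRQ first */
	example_teardown(chip);		/* now safe to tear down state */
}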
diff --git a/drivers/power/supply/mt6360_charger.c b/drivers/power/supply/mt6360_charger.c
index 92e48e3a4853..1305cba61edd 100644
--- a/drivers/power/supply/mt6360_charger.c
+++ b/drivers/power/supply/mt6360_charger.c
@@ -796,7 +796,9 @@ static int mt6360_charger_probe(struct platform_device *pdev)
mci->vinovp = 6500000;
mutex_init(&mci->chgdet_lock);
platform_set_drvdata(pdev, mci);
- devm_work_autocancel(&pdev->dev, &mci->chrdet_work, mt6360_chrdet_work);
+ ret = devm_work_autocancel(&pdev->dev, &mci->chrdet_work, mt6360_chrdet_work);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Failed to set delayed work\n");
ret = device_property_read_u32(&pdev->dev, "richtek,vinovp-microvolt", &mci->vinovp);
if (ret)
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index ab986dbace16..3791aec69ddc 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -348,6 +348,10 @@ static int __power_supply_is_system_supplied(struct device *dev, void *data)
struct power_supply *psy = dev_get_drvdata(dev);
unsigned int *count = data;
+ if (!psy->desc->get_property(psy, POWER_SUPPLY_PROP_SCOPE, &ret))
+ if (ret.intval == POWER_SUPPLY_SCOPE_DEVICE)
+ return 0;
+
(*count)++;
if (psy->desc->type != POWER_SUPPLY_TYPE_BATTERY)
if (!psy->desc->get_property(psy, POWER_SUPPLY_PROP_ONLINE,
@@ -366,8 +370,8 @@ int power_supply_is_system_supplied(void)
__power_supply_is_system_supplied);
/*
- * If no power class device was found at all, most probably we are
- * running on a desktop system, so assume we are on mains power.
+ * If no system scope power class device was found at all, most probably we
+ * are running on a desktop system, so assume we are on mains power.
*/
if (count == 0)
return 1;
@@ -573,7 +577,7 @@ int power_supply_get_battery_info(struct power_supply *psy,
struct power_supply_battery_info *info;
struct device_node *battery_np = NULL;
struct fwnode_reference_args args;
- struct fwnode_handle *fwnode;
+ struct fwnode_handle *fwnode = NULL;
const char *value;
int err, len, index;
const __be32 *list;
@@ -585,7 +589,7 @@ int power_supply_get_battery_info(struct power_supply *psy,
return -ENODEV;
fwnode = fwnode_handle_get(of_fwnode_handle(battery_np));
- } else {
+ } else if (psy->dev.parent) {
err = fwnode_property_get_reference_args(
dev_fwnode(psy->dev.parent),
"monitored-battery", NULL, 0, 0, &args);
@@ -595,6 +599,9 @@ int power_supply_get_battery_info(struct power_supply *psy,
fwnode = args.fwnode;
}
+ if (!fwnode)
+ return -ENOENT;
+
err = fwnode_property_read_string(fwnode, "compatible", &value);
if (err)
goto out_put_node;
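
The power_supply_get_battery_info() changes above close two holes: the reference-args branch assumed psy->dev.parent was non-NULL, and once that branch is guarded, fwnode can legitimately end up unset, so it is initialized to NULL and checked, returning -ENOENT when nothing describes the battery. A condensed sketch of the fallback lookup (helper name illustrative):

#include <linux/property.h>

static int example_find_battery_fwnode(struct device *parent,
				       struct fwnode_handle **out)
{
	struct fwnode_reference_args args;
	struct fwnode_handle *fwnode = NULL;

	/* only consult the parent's "monitored-battery" if one exists */
	if (parent && !fwnode_property_get_reference_args(dev_fwnode(parent),
							  "monitored-battery",
							  NULL, 0, 0, &args))
		fwnode = args.fwnode;

	if (!fwnode)
		return -ENOENT;	/* no DT node and no parent reference */

	*out = fwnode;
	return 0;
}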
diff --git a/drivers/power/supply/power_supply_leds.c b/drivers/power/supply/power_supply_leds.c
index 702bf83f6e6d..0674483279d7 100644
--- a/drivers/power/supply/power_supply_leds.c
+++ b/drivers/power/supply/power_supply_leds.c
@@ -35,8 +35,9 @@ static void power_supply_update_bat_leds(struct power_supply *psy)
led_trigger_event(psy->charging_full_trig, LED_FULL);
led_trigger_event(psy->charging_trig, LED_OFF);
led_trigger_event(psy->full_trig, LED_FULL);
- led_trigger_event(psy->charging_blink_full_solid_trig,
- LED_FULL);
+ /* Going from blink to LED on requires a LED_OFF event to stop blink */
+ led_trigger_event(psy->charging_blink_full_solid_trig, LED_OFF);
+ led_trigger_event(psy->charging_blink_full_solid_trig, LED_FULL);
break;
case POWER_SUPPLY_STATUS_CHARGING:
led_trigger_event(psy->charging_full_trig, LED_FULL);
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index ba3b125cd66e..06e5b6b0e255 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -286,7 +286,8 @@ static ssize_t power_supply_show_property(struct device *dev,
if (ret < 0) {
if (ret == -ENODATA)
- dev_dbg(dev, "driver has no data for `%s' property\n",
+ dev_dbg_ratelimited(dev,
+ "driver has no data for `%s' property\n",
attr->attr.name);
else if (ret != -ENODEV && ret != -EAGAIN)
dev_err_ratelimited(dev,
diff --git a/drivers/power/supply/rt9467-charger.c b/drivers/power/supply/rt9467-charger.c
index 73f744a3155d..ea33693b6977 100644
--- a/drivers/power/supply/rt9467-charger.c
+++ b/drivers/power/supply/rt9467-charger.c
@@ -1023,7 +1023,7 @@ static int rt9467_request_interrupt(struct rt9467_chg_data *data)
for (i = 0; i < num_chg_irqs; i++) {
virq = regmap_irq_get_virq(data->irq_chip_data, chg_irqs[i].hwirq);
if (virq <= 0)
- return dev_err_probe(dev, virq, "Failed to get (%s) irq\n",
+ return dev_err_probe(dev, -EINVAL, "Failed to get (%s) irq\n",
chg_irqs[i].name);
ret = devm_request_threaded_irq(dev, virq, NULL, chg_irqs[i].handler,
diff --git a/drivers/power/supply/sbs-charger.c b/drivers/power/supply/sbs-charger.c
index 75ebcbf0a788..a14e89ac0369 100644
--- a/drivers/power/supply/sbs-charger.c
+++ b/drivers/power/supply/sbs-charger.c
@@ -24,7 +24,7 @@
#define SBS_CHARGER_REG_STATUS 0x13
#define SBS_CHARGER_REG_ALARM_WARNING 0x16
-#define SBS_CHARGER_STATUS_CHARGE_INHIBITED BIT(1)
+#define SBS_CHARGER_STATUS_CHARGE_INHIBITED BIT(0)
#define SBS_CHARGER_STATUS_RES_COLD BIT(9)
#define SBS_CHARGER_STATUS_RES_HOT BIT(10)
#define SBS_CHARGER_STATUS_BATTERY_PRESENT BIT(14)
diff --git a/drivers/power/supply/sc27xx_fuel_gauge.c b/drivers/power/supply/sc27xx_fuel_gauge.c
index 632977f84b95..bd23c4d9fed4 100644
--- a/drivers/power/supply/sc27xx_fuel_gauge.c
+++ b/drivers/power/supply/sc27xx_fuel_gauge.c
@@ -733,13 +733,6 @@ static int sc27xx_fgu_set_property(struct power_supply *psy,
return ret;
}
-static void sc27xx_fgu_external_power_changed(struct power_supply *psy)
-{
- struct sc27xx_fgu_data *data = power_supply_get_drvdata(psy);
-
- power_supply_changed(data->battery);
-}
-
static int sc27xx_fgu_property_is_writeable(struct power_supply *psy,
enum power_supply_property psp)
{
@@ -774,7 +767,7 @@ static const struct power_supply_desc sc27xx_fgu_desc = {
.num_properties = ARRAY_SIZE(sc27xx_fgu_props),
.get_property = sc27xx_fgu_get_property,
.set_property = sc27xx_fgu_set_property,
- .external_power_changed = sc27xx_fgu_external_power_changed,
+ .external_power_changed = power_supply_changed,
.property_is_writeable = sc27xx_fgu_property_is_writeable,
.no_thermal = true,
};
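
The sc27xx change works because the wrapper added nothing: external_power_changed() and power_supply_changed() share the signature void (*)(struct power_supply *), and the wrapper only forwarded a drvdata-derived copy of the same pointer. The core helper can therefore be installed as the callback directly, e.g.:

#include <linux/power_supply.h>

static const struct power_supply_desc example_fgu_desc = {
	.name			= "example-fgu",
	.type			= POWER_SUPPLY_TYPE_BATTERY,
	/* signatures match, so no wrapper is needed */
	.external_power_changed	= power_supply_changed,
};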
diff --git a/drivers/powercap/Kconfig b/drivers/powercap/Kconfig
index 90d33cd1b670..69ef8d081c98 100644
--- a/drivers/powercap/Kconfig
+++ b/drivers/powercap/Kconfig
@@ -18,10 +18,12 @@ if POWERCAP
# Client driver configurations go here.
config INTEL_RAPL_CORE
tristate
+ depends on PCI
+ select IOSF_MBI
config INTEL_RAPL
tristate "Intel RAPL Support via MSR Interface"
- depends on X86 && IOSF_MBI
+ depends on X86 && PCI
select INTEL_RAPL_CORE
help
This enables support for the Intel Running Average Power Limit (RAPL)
@@ -33,6 +35,20 @@ config INTEL_RAPL
controller, CPU core (Power Plane 0), graphics uncore (Power Plane
1), etc.
+config INTEL_RAPL_TPMI
+ tristate "Intel RAPL Support via TPMI Interface"
+ depends on X86
+ depends on INTEL_TPMI
+ select INTEL_RAPL_CORE
+ help
+ This enables support for the Intel Running Average Power Limit (RAPL)
+ technology via the TPMI interface, which allows power limits to be enforced
+ and monitored.
+
+ In RAPL, the platform-level settings are divided into domains for
+ fine-grained control. These domains include the processor package, DRAM
+ controller, platform, etc.
+
config IDLE_INJECT
bool "Idle injection framework"
depends on CPU_IDLE
diff --git a/drivers/powercap/Makefile b/drivers/powercap/Makefile
index 4474201b4aa7..5ab0dce565b9 100644
--- a/drivers/powercap/Makefile
+++ b/drivers/powercap/Makefile
@@ -5,5 +5,6 @@ obj-$(CONFIG_DTPM_DEVFREQ) += dtpm_devfreq.o
obj-$(CONFIG_POWERCAP) += powercap_sys.o
obj-$(CONFIG_INTEL_RAPL_CORE) += intel_rapl_common.o
obj-$(CONFIG_INTEL_RAPL) += intel_rapl_msr.o
+obj-$(CONFIG_INTEL_RAPL_TPMI) += intel_rapl_tpmi.o
obj-$(CONFIG_IDLE_INJECT) += idle_inject.o
obj-$(CONFIG_ARM_SCMI_POWERCAP) += arm_scmi_powercap.o
diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
index 8970c7b80884..4e646e5e48f6 100644
--- a/drivers/powercap/intel_rapl_common.c
+++ b/drivers/powercap/intel_rapl_common.c
@@ -75,6 +75,15 @@
#define PSYS_TIME_WINDOW1_MASK (0x7FULL<<19)
#define PSYS_TIME_WINDOW2_MASK (0x7FULL<<51)
+/* bitmasks for RAPL TPMI, used by primitive access functions */
+#define TPMI_POWER_LIMIT_MASK 0x3FFFF
+#define TPMI_POWER_LIMIT_ENABLE BIT_ULL(62)
+#define TPMI_TIME_WINDOW_MASK (0x7FULL<<18)
+#define TPMI_INFO_SPEC_MASK 0x3FFFF
+#define TPMI_INFO_MIN_MASK (0x3FFFFULL << 18)
+#define TPMI_INFO_MAX_MASK (0x3FFFFULL << 36)
+#define TPMI_INFO_MAX_TIME_WIN_MASK (0x7FULL << 54)
+
/* Non HW constants */
#define RAPL_PRIMITIVE_DERIVED BIT(1) /* not from raw data */
#define RAPL_PRIMITIVE_DUMMY BIT(2)
@@ -94,26 +103,120 @@ enum unit_type {
#define DOMAIN_STATE_INACTIVE BIT(0)
#define DOMAIN_STATE_POWER_LIMIT_SET BIT(1)
-#define DOMAIN_STATE_BIOS_LOCKED BIT(2)
-static const char pl1_name[] = "long_term";
-static const char pl2_name[] = "short_term";
-static const char pl4_name[] = "peak_power";
+static const char *pl_names[NR_POWER_LIMITS] = {
+ [POWER_LIMIT1] = "long_term",
+ [POWER_LIMIT2] = "short_term",
+ [POWER_LIMIT4] = "peak_power",
+};
+
+enum pl_prims {
+ PL_ENABLE,
+ PL_CLAMP,
+ PL_LIMIT,
+ PL_TIME_WINDOW,
+ PL_MAX_POWER,
+ PL_LOCK,
+};
+
+static bool is_pl_valid(struct rapl_domain *rd, int pl)
+{
+ if (pl < POWER_LIMIT1 || pl > POWER_LIMIT4)
+ return false;
+ return rd->rpl[pl].name ? true : false;
+}
+
+static int get_pl_lock_prim(struct rapl_domain *rd, int pl)
+{
+ if (rd->rp->priv->type == RAPL_IF_TPMI) {
+ if (pl == POWER_LIMIT1)
+ return PL1_LOCK;
+ if (pl == POWER_LIMIT2)
+ return PL2_LOCK;
+ if (pl == POWER_LIMIT4)
+ return PL4_LOCK;
+ }
+
+ /* MSR/MMIO Interface doesn't have Lock bit for PL4 */
+ if (pl == POWER_LIMIT4)
+ return -EINVAL;
+
+ /*
+ * Power Limit register that supports two power limits has a different
+ * bit position for the Lock bit.
+ */
+ if (rd->rp->priv->limits[rd->id] & BIT(POWER_LIMIT2))
+ return FW_HIGH_LOCK;
+ return FW_LOCK;
+}
+
+static int get_pl_prim(struct rapl_domain *rd, int pl, enum pl_prims prim)
+{
+ switch (pl) {
+ case POWER_LIMIT1:
+ if (prim == PL_ENABLE)
+ return PL1_ENABLE;
+ if (prim == PL_CLAMP && rd->rp->priv->type != RAPL_IF_TPMI)
+ return PL1_CLAMP;
+ if (prim == PL_LIMIT)
+ return POWER_LIMIT1;
+ if (prim == PL_TIME_WINDOW)
+ return TIME_WINDOW1;
+ if (prim == PL_MAX_POWER)
+ return THERMAL_SPEC_POWER;
+ if (prim == PL_LOCK)
+ return get_pl_lock_prim(rd, pl);
+ return -EINVAL;
+ case POWER_LIMIT2:
+ if (prim == PL_ENABLE)
+ return PL2_ENABLE;
+ if (prim == PL_CLAMP && rd->rp->priv->type != RAPL_IF_TPMI)
+ return PL2_CLAMP;
+ if (prim == PL_LIMIT)
+ return POWER_LIMIT2;
+ if (prim == PL_TIME_WINDOW)
+ return TIME_WINDOW2;
+ if (prim == PL_MAX_POWER)
+ return MAX_POWER;
+ if (prim == PL_LOCK)
+ return get_pl_lock_prim(rd, pl);
+ return -EINVAL;
+ case POWER_LIMIT4:
+ if (prim == PL_LIMIT)
+ return POWER_LIMIT4;
+ if (prim == PL_ENABLE)
+ return PL4_ENABLE;
+ /* PL4 would be around two times PL2, use same prim as PL2. */
+ if (prim == PL_MAX_POWER)
+ return MAX_POWER;
+ if (prim == PL_LOCK)
+ return get_pl_lock_prim(rd, pl);
+ return -EINVAL;
+ default:
+ return -EINVAL;
+ }
+}
#define power_zone_to_rapl_domain(_zone) \
container_of(_zone, struct rapl_domain, power_zone)
struct rapl_defaults {
u8 floor_freq_reg_addr;
- int (*check_unit)(struct rapl_package *rp, int cpu);
+ int (*check_unit)(struct rapl_domain *rd);
void (*set_floor_freq)(struct rapl_domain *rd, bool mode);
- u64 (*compute_time_window)(struct rapl_package *rp, u64 val,
+ u64 (*compute_time_window)(struct rapl_domain *rd, u64 val,
bool to_raw);
unsigned int dram_domain_energy_unit;
unsigned int psys_domain_energy_unit;
bool spr_psys_bits;
};
-static struct rapl_defaults *rapl_defaults;
+static struct rapl_defaults *defaults_msr;
+static const struct rapl_defaults defaults_tpmi;
+
+static struct rapl_defaults *get_defaults(struct rapl_package *rp)
+{
+ return rp->priv->defaults;
+}
/* Sideband MBI registers */
#define IOSF_CPU_POWER_BUDGET_CTL_BYT (0x2)
@@ -150,6 +253,12 @@ static int rapl_read_data_raw(struct rapl_domain *rd,
static int rapl_write_data_raw(struct rapl_domain *rd,
enum rapl_primitives prim,
unsigned long long value);
+static int rapl_read_pl_data(struct rapl_domain *rd, int pl,
+ enum pl_prims pl_prim,
+ bool xlate, u64 *data);
+static int rapl_write_pl_data(struct rapl_domain *rd, int pl,
+ enum pl_prims pl_prim,
+ unsigned long long value);
static u64 rapl_unit_xlate(struct rapl_domain *rd,
enum unit_type type, u64 value, int to_raw);
static void package_power_limit_irq_save(struct rapl_package *rp);
@@ -217,7 +326,7 @@ static int find_nr_power_limit(struct rapl_domain *rd)
int i, nr_pl = 0;
for (i = 0; i < NR_POWER_LIMITS; i++) {
- if (rd->rpl[i].name)
+ if (is_pl_valid(rd, i))
nr_pl++;
}
@@ -227,37 +336,35 @@ static int find_nr_power_limit(struct rapl_domain *rd)
static int set_domain_enable(struct powercap_zone *power_zone, bool mode)
{
struct rapl_domain *rd = power_zone_to_rapl_domain(power_zone);
-
- if (rd->state & DOMAIN_STATE_BIOS_LOCKED)
- return -EACCES;
+ struct rapl_defaults *defaults = get_defaults(rd->rp);
+ int ret;
cpus_read_lock();
- rapl_write_data_raw(rd, PL1_ENABLE, mode);
- if (rapl_defaults->set_floor_freq)
- rapl_defaults->set_floor_freq(rd, mode);
+ ret = rapl_write_pl_data(rd, POWER_LIMIT1, PL_ENABLE, mode);
+ if (!ret && defaults->set_floor_freq)
+ defaults->set_floor_freq(rd, mode);
cpus_read_unlock();
- return 0;
+ return ret;
}
static int get_domain_enable(struct powercap_zone *power_zone, bool *mode)
{
struct rapl_domain *rd = power_zone_to_rapl_domain(power_zone);
u64 val;
+ int ret;
- if (rd->state & DOMAIN_STATE_BIOS_LOCKED) {
+ if (rd->rpl[POWER_LIMIT1].locked) {
*mode = false;
return 0;
}
cpus_read_lock();
- if (rapl_read_data_raw(rd, PL1_ENABLE, true, &val)) {
- cpus_read_unlock();
- return -EIO;
- }
- *mode = val;
+ ret = rapl_read_pl_data(rd, POWER_LIMIT1, PL_ENABLE, true, &val);
+ if (!ret)
+ *mode = val;
cpus_read_unlock();
- return 0;
+ return ret;
}
/* per RAPL domain ops, in the order of rapl_domain_type */
@@ -313,8 +420,8 @@ static int contraint_to_pl(struct rapl_domain *rd, int cid)
{
int i, j;
- for (i = 0, j = 0; i < NR_POWER_LIMITS; i++) {
- if ((rd->rpl[i].name) && j++ == cid) {
+ for (i = POWER_LIMIT1, j = 0; i < NR_POWER_LIMITS; i++) {
+ if (is_pl_valid(rd, i) && j++ == cid) {
pr_debug("%s: index %d\n", __func__, i);
return i;
}
@@ -335,36 +442,11 @@ static int set_power_limit(struct powercap_zone *power_zone, int cid,
cpus_read_lock();
rd = power_zone_to_rapl_domain(power_zone);
id = contraint_to_pl(rd, cid);
- if (id < 0) {
- ret = id;
- goto set_exit;
- }
-
rp = rd->rp;
- if (rd->state & DOMAIN_STATE_BIOS_LOCKED) {
- dev_warn(&power_zone->dev,
- "%s locked by BIOS, monitoring only\n", rd->name);
- ret = -EACCES;
- goto set_exit;
- }
-
- switch (rd->rpl[id].prim_id) {
- case PL1_ENABLE:
- rapl_write_data_raw(rd, POWER_LIMIT1, power_limit);
- break;
- case PL2_ENABLE:
- rapl_write_data_raw(rd, POWER_LIMIT2, power_limit);
- break;
- case PL4_ENABLE:
- rapl_write_data_raw(rd, POWER_LIMIT4, power_limit);
- break;
- default:
- ret = -EINVAL;
- }
+ ret = rapl_write_pl_data(rd, id, PL_LIMIT, power_limit);
if (!ret)
package_power_limit_irq_save(rp);
-set_exit:
cpus_read_unlock();
return ret;
}
@@ -374,38 +456,17 @@ static int get_current_power_limit(struct powercap_zone *power_zone, int cid,
{
struct rapl_domain *rd;
u64 val;
- int prim;
int ret = 0;
int id;
cpus_read_lock();
rd = power_zone_to_rapl_domain(power_zone);
id = contraint_to_pl(rd, cid);
- if (id < 0) {
- ret = id;
- goto get_exit;
- }
- switch (rd->rpl[id].prim_id) {
- case PL1_ENABLE:
- prim = POWER_LIMIT1;
- break;
- case PL2_ENABLE:
- prim = POWER_LIMIT2;
- break;
- case PL4_ENABLE:
- prim = POWER_LIMIT4;
- break;
- default:
- cpus_read_unlock();
- return -EINVAL;
- }
- if (rapl_read_data_raw(rd, prim, true, &val))
- ret = -EIO;
- else
+ ret = rapl_read_pl_data(rd, id, PL_LIMIT, true, &val);
+ if (!ret)
*data = val;
-get_exit:
cpus_read_unlock();
return ret;
@@ -421,23 +482,9 @@ static int set_time_window(struct powercap_zone *power_zone, int cid,
cpus_read_lock();
rd = power_zone_to_rapl_domain(power_zone);
id = contraint_to_pl(rd, cid);
- if (id < 0) {
- ret = id;
- goto set_time_exit;
- }
- switch (rd->rpl[id].prim_id) {
- case PL1_ENABLE:
- rapl_write_data_raw(rd, TIME_WINDOW1, window);
- break;
- case PL2_ENABLE:
- rapl_write_data_raw(rd, TIME_WINDOW2, window);
- break;
- default:
- ret = -EINVAL;
- }
+ ret = rapl_write_pl_data(rd, id, PL_TIME_WINDOW, window);
-set_time_exit:
cpus_read_unlock();
return ret;
}
@@ -453,33 +500,11 @@ static int get_time_window(struct powercap_zone *power_zone, int cid,
cpus_read_lock();
rd = power_zone_to_rapl_domain(power_zone);
id = contraint_to_pl(rd, cid);
- if (id < 0) {
- ret = id;
- goto get_time_exit;
- }
- switch (rd->rpl[id].prim_id) {
- case PL1_ENABLE:
- ret = rapl_read_data_raw(rd, TIME_WINDOW1, true, &val);
- break;
- case PL2_ENABLE:
- ret = rapl_read_data_raw(rd, TIME_WINDOW2, true, &val);
- break;
- case PL4_ENABLE:
- /*
- * Time window parameter is not applicable for PL4 entry
- * so assigining '0' as default value.
- */
- val = 0;
- break;
- default:
- cpus_read_unlock();
- return -EINVAL;
- }
+ ret = rapl_read_pl_data(rd, id, PL_TIME_WINDOW, true, &val);
if (!ret)
*data = val;
-get_time_exit:
cpus_read_unlock();
return ret;
@@ -499,36 +524,23 @@ static const char *get_constraint_name(struct powercap_zone *power_zone,
return NULL;
}
-static int get_max_power(struct powercap_zone *power_zone, int id, u64 *data)
+static int get_max_power(struct powercap_zone *power_zone, int cid, u64 *data)
{
struct rapl_domain *rd;
u64 val;
- int prim;
int ret = 0;
+ int id;
cpus_read_lock();
rd = power_zone_to_rapl_domain(power_zone);
- switch (rd->rpl[id].prim_id) {
- case PL1_ENABLE:
- prim = THERMAL_SPEC_POWER;
- break;
- case PL2_ENABLE:
- prim = MAX_POWER;
- break;
- case PL4_ENABLE:
- prim = MAX_POWER;
- break;
- default:
- cpus_read_unlock();
- return -EINVAL;
- }
- if (rapl_read_data_raw(rd, prim, true, &val))
- ret = -EIO;
- else
+ id = contraint_to_pl(rd, cid);
+
+ ret = rapl_read_pl_data(rd, id, PL_MAX_POWER, true, &val);
+ if (!ret)
*data = val;
/* As a generalization rule, PL4 would be around two times PL2. */
- if (rd->rpl[id].prim_id == PL4_ENABLE)
+ if (id == POWER_LIMIT4)
*data = *data * 2;
cpus_read_unlock();
@@ -545,6 +557,12 @@ static const struct powercap_zone_constraint_ops constraint_ops = {
.get_name = get_constraint_name,
};
+/* Return the id used for read_raw/write_raw callback */
+static int get_rid(struct rapl_package *rp)
+{
+ return rp->lead_cpu >= 0 ? rp->lead_cpu : rp->id;
+}
+
/* called after domain detection and package level data are set */
static void rapl_init_domains(struct rapl_package *rp)
{
@@ -554,6 +572,7 @@ static void rapl_init_domains(struct rapl_package *rp)
for (i = 0; i < RAPL_DOMAIN_MAX; i++) {
unsigned int mask = rp->domain_map & (1 << i);
+ int t;
if (!mask)
continue;
@@ -562,51 +581,26 @@ static void rapl_init_domains(struct rapl_package *rp)
if (i == RAPL_DOMAIN_PLATFORM && rp->id > 0) {
snprintf(rd->name, RAPL_DOMAIN_NAME_LENGTH, "psys-%d",
- topology_physical_package_id(rp->lead_cpu));
- } else
+ rp->lead_cpu >= 0 ? topology_physical_package_id(rp->lead_cpu) :
+ rp->id);
+ } else {
snprintf(rd->name, RAPL_DOMAIN_NAME_LENGTH, "%s",
rapl_domain_names[i]);
+ }
rd->id = i;
- rd->rpl[0].prim_id = PL1_ENABLE;
- rd->rpl[0].name = pl1_name;
- /*
- * The PL2 power domain is applicable for limits two
- * and limits three
- */
- if (rp->priv->limits[i] >= 2) {
- rd->rpl[1].prim_id = PL2_ENABLE;
- rd->rpl[1].name = pl2_name;
- }
+ /* PL1 is supported by default */
+ rp->priv->limits[i] |= BIT(POWER_LIMIT1);
- /* Enable PL4 domain if the total power limits are three */
- if (rp->priv->limits[i] == 3) {
- rd->rpl[2].prim_id = PL4_ENABLE;
- rd->rpl[2].name = pl4_name;
+ for (t = POWER_LIMIT1; t < NR_POWER_LIMITS; t++) {
+ if (rp->priv->limits[i] & BIT(t))
+ rd->rpl[t].name = pl_names[t];
}
for (j = 0; j < RAPL_DOMAIN_REG_MAX; j++)
rd->regs[j] = rp->priv->regs[i][j];
- switch (i) {
- case RAPL_DOMAIN_DRAM:
- rd->domain_energy_unit =
- rapl_defaults->dram_domain_energy_unit;
- if (rd->domain_energy_unit)
- pr_info("DRAM domain energy unit %dpj\n",
- rd->domain_energy_unit);
- break;
- case RAPL_DOMAIN_PLATFORM:
- rd->domain_energy_unit =
- rapl_defaults->psys_domain_energy_unit;
- if (rd->domain_energy_unit)
- pr_info("Platform domain energy unit %dpj\n",
- rd->domain_energy_unit);
- break;
- default:
- break;
- }
rd++;
}
}
@@ -615,23 +609,19 @@ static u64 rapl_unit_xlate(struct rapl_domain *rd, enum unit_type type,
u64 value, int to_raw)
{
u64 units = 1;
- struct rapl_package *rp = rd->rp;
+ struct rapl_defaults *defaults = get_defaults(rd->rp);
u64 scale = 1;
switch (type) {
case POWER_UNIT:
- units = rp->power_unit;
+ units = rd->power_unit;
break;
case ENERGY_UNIT:
scale = ENERGY_UNIT_SCALE;
- /* per domain unit takes precedence */
- if (rd->domain_energy_unit)
- units = rd->domain_energy_unit;
- else
- units = rp->energy_unit;
+ units = rd->energy_unit;
break;
case TIME_UNIT:
- return rapl_defaults->compute_time_window(rp, value, to_raw);
+ return defaults->compute_time_window(rd, value, to_raw);
case ARBITRARY_UNIT:
default:
return value;
@@ -645,67 +635,141 @@ static u64 rapl_unit_xlate(struct rapl_domain *rd, enum unit_type type,
return div64_u64(value, scale);
}
-/* in the order of enum rapl_primitives */
-static struct rapl_primitive_info rpi[] = {
+/* RAPL primitives for MSR and MMIO I/F */
+static struct rapl_primitive_info rpi_msr[NR_RAPL_PRIMITIVES] = {
/* name, mask, shift, msr index, unit divisor */
- PRIMITIVE_INFO_INIT(ENERGY_COUNTER, ENERGY_STATUS_MASK, 0,
- RAPL_DOMAIN_REG_STATUS, ENERGY_UNIT, 0),
- PRIMITIVE_INFO_INIT(POWER_LIMIT1, POWER_LIMIT1_MASK, 0,
+ [POWER_LIMIT1] = PRIMITIVE_INFO_INIT(POWER_LIMIT1, POWER_LIMIT1_MASK, 0,
RAPL_DOMAIN_REG_LIMIT, POWER_UNIT, 0),
- PRIMITIVE_INFO_INIT(POWER_LIMIT2, POWER_LIMIT2_MASK, 32,
+ [POWER_LIMIT2] = PRIMITIVE_INFO_INIT(POWER_LIMIT2, POWER_LIMIT2_MASK, 32,
RAPL_DOMAIN_REG_LIMIT, POWER_UNIT, 0),
- PRIMITIVE_INFO_INIT(POWER_LIMIT4, POWER_LIMIT4_MASK, 0,
+ [POWER_LIMIT4] = PRIMITIVE_INFO_INIT(POWER_LIMIT4, POWER_LIMIT4_MASK, 0,
RAPL_DOMAIN_REG_PL4, POWER_UNIT, 0),
- PRIMITIVE_INFO_INIT(FW_LOCK, POWER_LOW_LOCK, 31,
+ [ENERGY_COUNTER] = PRIMITIVE_INFO_INIT(ENERGY_COUNTER, ENERGY_STATUS_MASK, 0,
+ RAPL_DOMAIN_REG_STATUS, ENERGY_UNIT, 0),
+ [FW_LOCK] = PRIMITIVE_INFO_INIT(FW_LOCK, POWER_LOW_LOCK, 31,
RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0),
- PRIMITIVE_INFO_INIT(PL1_ENABLE, POWER_LIMIT1_ENABLE, 15,
+ [FW_HIGH_LOCK] = PRIMITIVE_INFO_INIT(FW_LOCK, POWER_HIGH_LOCK, 63,
RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0),
- PRIMITIVE_INFO_INIT(PL1_CLAMP, POWER_LIMIT1_CLAMP, 16,
+ [PL1_ENABLE] = PRIMITIVE_INFO_INIT(PL1_ENABLE, POWER_LIMIT1_ENABLE, 15,
RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0),
- PRIMITIVE_INFO_INIT(PL2_ENABLE, POWER_LIMIT2_ENABLE, 47,
+ [PL1_CLAMP] = PRIMITIVE_INFO_INIT(PL1_CLAMP, POWER_LIMIT1_CLAMP, 16,
RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0),
- PRIMITIVE_INFO_INIT(PL2_CLAMP, POWER_LIMIT2_CLAMP, 48,
+ [PL2_ENABLE] = PRIMITIVE_INFO_INIT(PL2_ENABLE, POWER_LIMIT2_ENABLE, 47,
RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0),
- PRIMITIVE_INFO_INIT(PL4_ENABLE, POWER_LIMIT4_MASK, 0,
+ [PL2_CLAMP] = PRIMITIVE_INFO_INIT(PL2_CLAMP, POWER_LIMIT2_CLAMP, 48,
+ RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0),
+ [PL4_ENABLE] = PRIMITIVE_INFO_INIT(PL4_ENABLE, POWER_LIMIT4_MASK, 0,
RAPL_DOMAIN_REG_PL4, ARBITRARY_UNIT, 0),
- PRIMITIVE_INFO_INIT(TIME_WINDOW1, TIME_WINDOW1_MASK, 17,
+ [TIME_WINDOW1] = PRIMITIVE_INFO_INIT(TIME_WINDOW1, TIME_WINDOW1_MASK, 17,
RAPL_DOMAIN_REG_LIMIT, TIME_UNIT, 0),
- PRIMITIVE_INFO_INIT(TIME_WINDOW2, TIME_WINDOW2_MASK, 49,
+ [TIME_WINDOW2] = PRIMITIVE_INFO_INIT(TIME_WINDOW2, TIME_WINDOW2_MASK, 49,
RAPL_DOMAIN_REG_LIMIT, TIME_UNIT, 0),
- PRIMITIVE_INFO_INIT(THERMAL_SPEC_POWER, POWER_INFO_THERMAL_SPEC_MASK,
+ [THERMAL_SPEC_POWER] = PRIMITIVE_INFO_INIT(THERMAL_SPEC_POWER, POWER_INFO_THERMAL_SPEC_MASK,
0, RAPL_DOMAIN_REG_INFO, POWER_UNIT, 0),
- PRIMITIVE_INFO_INIT(MAX_POWER, POWER_INFO_MAX_MASK, 32,
+ [MAX_POWER] = PRIMITIVE_INFO_INIT(MAX_POWER, POWER_INFO_MAX_MASK, 32,
RAPL_DOMAIN_REG_INFO, POWER_UNIT, 0),
- PRIMITIVE_INFO_INIT(MIN_POWER, POWER_INFO_MIN_MASK, 16,
+ [MIN_POWER] = PRIMITIVE_INFO_INIT(MIN_POWER, POWER_INFO_MIN_MASK, 16,
RAPL_DOMAIN_REG_INFO, POWER_UNIT, 0),
- PRIMITIVE_INFO_INIT(MAX_TIME_WINDOW, POWER_INFO_MAX_TIME_WIN_MASK, 48,
+ [MAX_TIME_WINDOW] = PRIMITIVE_INFO_INIT(MAX_TIME_WINDOW, POWER_INFO_MAX_TIME_WIN_MASK, 48,
RAPL_DOMAIN_REG_INFO, TIME_UNIT, 0),
- PRIMITIVE_INFO_INIT(THROTTLED_TIME, PERF_STATUS_THROTTLE_TIME_MASK, 0,
+ [THROTTLED_TIME] = PRIMITIVE_INFO_INIT(THROTTLED_TIME, PERF_STATUS_THROTTLE_TIME_MASK, 0,
RAPL_DOMAIN_REG_PERF, TIME_UNIT, 0),
- PRIMITIVE_INFO_INIT(PRIORITY_LEVEL, PP_POLICY_MASK, 0,
+ [PRIORITY_LEVEL] = PRIMITIVE_INFO_INIT(PRIORITY_LEVEL, PP_POLICY_MASK, 0,
RAPL_DOMAIN_REG_POLICY, ARBITRARY_UNIT, 0),
- PRIMITIVE_INFO_INIT(PSYS_POWER_LIMIT1, PSYS_POWER_LIMIT1_MASK, 0,
+ [PSYS_POWER_LIMIT1] = PRIMITIVE_INFO_INIT(PSYS_POWER_LIMIT1, PSYS_POWER_LIMIT1_MASK, 0,
RAPL_DOMAIN_REG_LIMIT, POWER_UNIT, 0),
- PRIMITIVE_INFO_INIT(PSYS_POWER_LIMIT2, PSYS_POWER_LIMIT2_MASK, 32,
+ [PSYS_POWER_LIMIT2] = PRIMITIVE_INFO_INIT(PSYS_POWER_LIMIT2, PSYS_POWER_LIMIT2_MASK, 32,
RAPL_DOMAIN_REG_LIMIT, POWER_UNIT, 0),
- PRIMITIVE_INFO_INIT(PSYS_PL1_ENABLE, PSYS_POWER_LIMIT1_ENABLE, 17,
+ [PSYS_PL1_ENABLE] = PRIMITIVE_INFO_INIT(PSYS_PL1_ENABLE, PSYS_POWER_LIMIT1_ENABLE, 17,
RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0),
- PRIMITIVE_INFO_INIT(PSYS_PL2_ENABLE, PSYS_POWER_LIMIT2_ENABLE, 49,
+ [PSYS_PL2_ENABLE] = PRIMITIVE_INFO_INIT(PSYS_PL2_ENABLE, PSYS_POWER_LIMIT2_ENABLE, 49,
RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0),
- PRIMITIVE_INFO_INIT(PSYS_TIME_WINDOW1, PSYS_TIME_WINDOW1_MASK, 19,
+ [PSYS_TIME_WINDOW1] = PRIMITIVE_INFO_INIT(PSYS_TIME_WINDOW1, PSYS_TIME_WINDOW1_MASK, 19,
RAPL_DOMAIN_REG_LIMIT, TIME_UNIT, 0),
- PRIMITIVE_INFO_INIT(PSYS_TIME_WINDOW2, PSYS_TIME_WINDOW2_MASK, 51,
+ [PSYS_TIME_WINDOW2] = PRIMITIVE_INFO_INIT(PSYS_TIME_WINDOW2, PSYS_TIME_WINDOW2_MASK, 51,
RAPL_DOMAIN_REG_LIMIT, TIME_UNIT, 0),
/* non-hardware */
- PRIMITIVE_INFO_INIT(AVERAGE_POWER, 0, 0, 0, POWER_UNIT,
+ [AVERAGE_POWER] = PRIMITIVE_INFO_INIT(AVERAGE_POWER, 0, 0, 0, POWER_UNIT,
RAPL_PRIMITIVE_DERIVED),
- {NULL, 0, 0, 0},
};
+/* RAPL primitives for TPMI I/F */
+static struct rapl_primitive_info rpi_tpmi[NR_RAPL_PRIMITIVES] = {
+ /* name, mask, shift, msr index, unit divisor */
+ [POWER_LIMIT1] = PRIMITIVE_INFO_INIT(POWER_LIMIT1, TPMI_POWER_LIMIT_MASK, 0,
+ RAPL_DOMAIN_REG_LIMIT, POWER_UNIT, 0),
+ [POWER_LIMIT2] = PRIMITIVE_INFO_INIT(POWER_LIMIT2, TPMI_POWER_LIMIT_MASK, 0,
+ RAPL_DOMAIN_REG_PL2, POWER_UNIT, 0),
+ [POWER_LIMIT4] = PRIMITIVE_INFO_INIT(POWER_LIMIT4, TPMI_POWER_LIMIT_MASK, 0,
+ RAPL_DOMAIN_REG_PL4, POWER_UNIT, 0),
+ [ENERGY_COUNTER] = PRIMITIVE_INFO_INIT(ENERGY_COUNTER, ENERGY_STATUS_MASK, 0,
+ RAPL_DOMAIN_REG_STATUS, ENERGY_UNIT, 0),
+ [PL1_LOCK] = PRIMITIVE_INFO_INIT(PL1_LOCK, POWER_HIGH_LOCK, 63,
+ RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0),
+ [PL2_LOCK] = PRIMITIVE_INFO_INIT(PL2_LOCK, POWER_HIGH_LOCK, 63,
+ RAPL_DOMAIN_REG_PL2, ARBITRARY_UNIT, 0),
+ [PL4_LOCK] = PRIMITIVE_INFO_INIT(PL4_LOCK, POWER_HIGH_LOCK, 63,
+ RAPL_DOMAIN_REG_PL4, ARBITRARY_UNIT, 0),
+ [PL1_ENABLE] = PRIMITIVE_INFO_INIT(PL1_ENABLE, TPMI_POWER_LIMIT_ENABLE, 62,
+ RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0),
+ [PL2_ENABLE] = PRIMITIVE_INFO_INIT(PL2_ENABLE, TPMI_POWER_LIMIT_ENABLE, 62,
+ RAPL_DOMAIN_REG_PL2, ARBITRARY_UNIT, 0),
+ [PL4_ENABLE] = PRIMITIVE_INFO_INIT(PL4_ENABLE, TPMI_POWER_LIMIT_ENABLE, 62,
+ RAPL_DOMAIN_REG_PL4, ARBITRARY_UNIT, 0),
+ [TIME_WINDOW1] = PRIMITIVE_INFO_INIT(TIME_WINDOW1, TPMI_TIME_WINDOW_MASK, 18,
+ RAPL_DOMAIN_REG_LIMIT, TIME_UNIT, 0),
+ [TIME_WINDOW2] = PRIMITIVE_INFO_INIT(TIME_WINDOW2, TPMI_TIME_WINDOW_MASK, 18,
+ RAPL_DOMAIN_REG_PL2, TIME_UNIT, 0),
+ [THERMAL_SPEC_POWER] = PRIMITIVE_INFO_INIT(THERMAL_SPEC_POWER, TPMI_INFO_SPEC_MASK, 0,
+ RAPL_DOMAIN_REG_INFO, POWER_UNIT, 0),
+ [MAX_POWER] = PRIMITIVE_INFO_INIT(MAX_POWER, TPMI_INFO_MAX_MASK, 36,
+ RAPL_DOMAIN_REG_INFO, POWER_UNIT, 0),
+ [MIN_POWER] = PRIMITIVE_INFO_INIT(MIN_POWER, TPMI_INFO_MIN_MASK, 18,
+ RAPL_DOMAIN_REG_INFO, POWER_UNIT, 0),
+ [MAX_TIME_WINDOW] = PRIMITIVE_INFO_INIT(MAX_TIME_WINDOW, TPMI_INFO_MAX_TIME_WIN_MASK, 54,
+ RAPL_DOMAIN_REG_INFO, TIME_UNIT, 0),
+ [THROTTLED_TIME] = PRIMITIVE_INFO_INIT(THROTTLED_TIME, PERF_STATUS_THROTTLE_TIME_MASK, 0,
+ RAPL_DOMAIN_REG_PERF, TIME_UNIT, 0),
+ /* non-hardware */
+ [AVERAGE_POWER] = PRIMITIVE_INFO_INIT(AVERAGE_POWER, 0, 0, 0,
+ POWER_UNIT, RAPL_PRIMITIVE_DERIVED),
+};
+
+static struct rapl_primitive_info *get_rpi(struct rapl_package *rp, int prim)
+{
+ struct rapl_primitive_info *rpi = rp->priv->rpi;
+
+ if (prim < 0 || prim >= NR_RAPL_PRIMITIVES || !rpi)
+ return NULL;
+
+ return &rpi[prim];
+}
+
+static int rapl_config(struct rapl_package *rp)
+{
+ switch (rp->priv->type) {
+ /* MMIO I/F shares the same register layout as MSR registers */
+ case RAPL_IF_MMIO:
+ case RAPL_IF_MSR:
+ rp->priv->defaults = (void *)defaults_msr;
+ rp->priv->rpi = (void *)rpi_msr;
+ break;
+ case RAPL_IF_TPMI:
+ rp->priv->defaults = (void *)&defaults_tpmi;
+ rp->priv->rpi = (void *)rpi_tpmi;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
static enum rapl_primitives
prim_fixups(struct rapl_domain *rd, enum rapl_primitives prim)
{
- if (!rapl_defaults->spr_psys_bits)
+ struct rapl_defaults *defaults = get_defaults(rd->rp);
+
+ if (!defaults->spr_psys_bits)
return prim;
if (rd->id != RAPL_DOMAIN_PLATFORM)
@@ -747,41 +811,33 @@ static int rapl_read_data_raw(struct rapl_domain *rd,
{
u64 value;
enum rapl_primitives prim_fixed = prim_fixups(rd, prim);
- struct rapl_primitive_info *rp = &rpi[prim_fixed];
+ struct rapl_primitive_info *rpi = get_rpi(rd->rp, prim_fixed);
struct reg_action ra;
- int cpu;
- if (!rp->name || rp->flag & RAPL_PRIMITIVE_DUMMY)
+ if (!rpi || !rpi->name || rpi->flag & RAPL_PRIMITIVE_DUMMY)
return -EINVAL;
- ra.reg = rd->regs[rp->id];
+ ra.reg = rd->regs[rpi->id];
if (!ra.reg)
return -EINVAL;
- cpu = rd->rp->lead_cpu;
-
- /* domain with 2 limits has different bit */
- if (prim == FW_LOCK && rd->rp->priv->limits[rd->id] == 2) {
- rp->mask = POWER_HIGH_LOCK;
- rp->shift = 63;
- }
/* non-hardware data are collected by the polling thread */
- if (rp->flag & RAPL_PRIMITIVE_DERIVED) {
+ if (rpi->flag & RAPL_PRIMITIVE_DERIVED) {
*data = rd->rdd.primitives[prim];
return 0;
}
- ra.mask = rp->mask;
+ ra.mask = rpi->mask;
- if (rd->rp->priv->read_raw(cpu, &ra)) {
- pr_debug("failed to read reg 0x%llx on cpu %d\n", ra.reg, cpu);
+ if (rd->rp->priv->read_raw(get_rid(rd->rp), &ra)) {
+ pr_debug("failed to read reg 0x%llx for %s:%s\n", ra.reg, rd->rp->name, rd->name);
return -EIO;
}
- value = ra.value >> rp->shift;
+ value = ra.value >> rpi->shift;
if (xlate)
- *data = rapl_unit_xlate(rd, rp->unit, value, 0);
+ *data = rapl_unit_xlate(rd, rpi->unit, value, 0);
else
*data = value;
@@ -794,28 +850,56 @@ static int rapl_write_data_raw(struct rapl_domain *rd,
unsigned long long value)
{
enum rapl_primitives prim_fixed = prim_fixups(rd, prim);
- struct rapl_primitive_info *rp = &rpi[prim_fixed];
- int cpu;
+ struct rapl_primitive_info *rpi = get_rpi(rd->rp, prim_fixed);
u64 bits;
struct reg_action ra;
int ret;
- cpu = rd->rp->lead_cpu;
- bits = rapl_unit_xlate(rd, rp->unit, value, 1);
- bits <<= rp->shift;
- bits &= rp->mask;
+ if (!rpi || !rpi->name || rpi->flag & RAPL_PRIMITIVE_DUMMY)
+ return -EINVAL;
+
+ bits = rapl_unit_xlate(rd, rpi->unit, value, 1);
+ bits <<= rpi->shift;
+ bits &= rpi->mask;
memset(&ra, 0, sizeof(ra));
- ra.reg = rd->regs[rp->id];
- ra.mask = rp->mask;
+ ra.reg = rd->regs[rpi->id];
+ ra.mask = rpi->mask;
ra.value = bits;
- ret = rd->rp->priv->write_raw(cpu, &ra);
+ ret = rd->rp->priv->write_raw(get_rid(rd->rp), &ra);
return ret;
}
+static int rapl_read_pl_data(struct rapl_domain *rd, int pl,
+ enum pl_prims pl_prim, bool xlate, u64 *data)
+{
+ enum rapl_primitives prim = get_pl_prim(rd, pl, pl_prim);
+
+ if (!is_pl_valid(rd, pl))
+ return -EINVAL;
+
+ return rapl_read_data_raw(rd, prim, xlate, data);
+}
+
+static int rapl_write_pl_data(struct rapl_domain *rd, int pl,
+ enum pl_prims pl_prim,
+ unsigned long long value)
+{
+ enum rapl_primitives prim = get_pl_prim(rd, pl, pl_prim);
+
+ if (!is_pl_valid(rd, pl))
+ return -EINVAL;
+
+ if (rd->rpl[pl].locked) {
+ pr_warn("%s:%s:%s locked by BIOS\n", rd->rp->name, rd->name, pl_names[pl]);
+ return -EACCES;
+ }
+
+ return rapl_write_data_raw(rd, prim, value);
+}
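
A usage sketch for the two wrappers above (rd assumed to be a valid rapl_domain, new_limit_uw illustrative): callers name a power limit plus a logical field, and the wrappers validate the limit via is_pl_valid(), map it to the interface-specific primitive with get_pl_prim(), and refuse writes to BIOS-locked limits:

	u64 window;
	int ret;

	if (!rapl_read_pl_data(rd, POWER_LIMIT2, PL_TIME_WINDOW, true, &window))
		pr_debug("PL2 time window: %llu\n", window);

	/* returns -EACCES instead of silently touching a locked limit */
	ret = rapl_write_pl_data(rd, POWER_LIMIT2, PL_LIMIT, new_limit_uw);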
/*
* Raw RAPL data stored in MSRs are in certain scales. We need to
* convert them into standard units based on the units reported in
@@ -827,58 +911,58 @@ static int rapl_write_data_raw(struct rapl_domain *rd,
* power unit : microWatts : Represented in milliWatts by default
* time unit : microseconds: Represented in seconds by default
*/
-static int rapl_check_unit_core(struct rapl_package *rp, int cpu)
+static int rapl_check_unit_core(struct rapl_domain *rd)
{
struct reg_action ra;
u32 value;
- ra.reg = rp->priv->reg_unit;
+ ra.reg = rd->regs[RAPL_DOMAIN_REG_UNIT];
ra.mask = ~0;
- if (rp->priv->read_raw(cpu, &ra)) {
- pr_err("Failed to read power unit REG 0x%llx on CPU %d, exit.\n",
- rp->priv->reg_unit, cpu);
+ if (rd->rp->priv->read_raw(get_rid(rd->rp), &ra)) {
+ pr_err("Failed to read power unit REG 0x%llx on %s:%s, exit.\n",
+ ra.reg, rd->rp->name, rd->name);
return -ENODEV;
}
value = (ra.value & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET;
- rp->energy_unit = ENERGY_UNIT_SCALE * 1000000 / (1 << value);
+ rd->energy_unit = ENERGY_UNIT_SCALE * 1000000 / (1 << value);
value = (ra.value & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET;
- rp->power_unit = 1000000 / (1 << value);
+ rd->power_unit = 1000000 / (1 << value);
value = (ra.value & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET;
- rp->time_unit = 1000000 / (1 << value);
+ rd->time_unit = 1000000 / (1 << value);
- pr_debug("Core CPU %s energy=%dpJ, time=%dus, power=%duW\n",
- rp->name, rp->energy_unit, rp->time_unit, rp->power_unit);
+ pr_debug("Core CPU %s:%s energy=%dpJ, time=%dus, power=%duW\n",
+ rd->rp->name, rd->name, rd->energy_unit, rd->time_unit, rd->power_unit);
return 0;
}
-static int rapl_check_unit_atom(struct rapl_package *rp, int cpu)
+static int rapl_check_unit_atom(struct rapl_domain *rd)
{
struct reg_action ra;
u32 value;
- ra.reg = rp->priv->reg_unit;
+ ra.reg = rd->regs[RAPL_DOMAIN_REG_UNIT];
ra.mask = ~0;
- if (rp->priv->read_raw(cpu, &ra)) {
- pr_err("Failed to read power unit REG 0x%llx on CPU %d, exit.\n",
- rp->priv->reg_unit, cpu);
+ if (rd->rp->priv->read_raw(get_rid(rd->rp), &ra)) {
+ pr_err("Failed to read power unit REG 0x%llx on %s:%s, exit.\n",
+ ra.reg, rd->rp->name, rd->name);
return -ENODEV;
}
value = (ra.value & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET;
- rp->energy_unit = ENERGY_UNIT_SCALE * 1 << value;
+ rd->energy_unit = ENERGY_UNIT_SCALE * 1 << value;
value = (ra.value & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET;
- rp->power_unit = (1 << value) * 1000;
+ rd->power_unit = (1 << value) * 1000;
value = (ra.value & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET;
- rp->time_unit = 1000000 / (1 << value);
+ rd->time_unit = 1000000 / (1 << value);
- pr_debug("Atom %s energy=%dpJ, time=%dus, power=%duW\n",
- rp->name, rp->energy_unit, rp->time_unit, rp->power_unit);
+ pr_debug("Atom %s:%s energy=%dpJ, time=%dus, power=%duW\n",
+ rd->rp->name, rd->name, rd->energy_unit, rd->time_unit, rd->power_unit);
return 0;
}
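The core and Atom variants differ only in the scaling formulas. Below is a self-contained user-space sketch of the core-style decoding; the bit layout matches the ENERGY/POWER/TIME_UNIT masks used above, ENERGY_UNIT_SCALE is 1000 in the driver, and 0x000A0E03 is a typical raw MSR_RAPL_POWER_UNIT value used here purely for illustration:

#include <stdio.h>
#include <stdint.h>

#define POWER_UNIT_MASK     0x0F
#define POWER_UNIT_OFFSET   0
#define ENERGY_UNIT_MASK    0x1F00
#define ENERGY_UNIT_OFFSET  8
#define TIME_UNIT_MASK      0xF0000
#define TIME_UNIT_OFFSET    16

int main(void)
{
	uint64_t raw = 0x000A0E03; /* power exp 3, energy exp 14, time exp 10 */
	uint32_t p = (raw & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET;
	uint32_t e = (raw & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET;
	uint32_t t = (raw & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET;

	/* same math as rapl_check_unit_core() */
	printf("power_unit  = %u uW\n", 1000000 / (1 << p));      /* 125000 */
	printf("energy_unit = %u\n", 1000 * 1000000 / (1 << e));  /* 61035 */
	printf("time_unit   = %u us\n", 1000000 / (1 << t));      /* 976 */
	return 0;
}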
@@ -910,6 +994,9 @@ static void power_limit_irq_save_cpu(void *info)
static void package_power_limit_irq_save(struct rapl_package *rp)
{
+ if (rp->lead_cpu < 0)
+ return;
+
if (!boot_cpu_has(X86_FEATURE_PTS) || !boot_cpu_has(X86_FEATURE_PLN))
return;
@@ -924,6 +1011,9 @@ static void package_power_limit_irq_restore(struct rapl_package *rp)
{
u32 l, h;
+ if (rp->lead_cpu < 0)
+ return;
+
if (!boot_cpu_has(X86_FEATURE_PTS) || !boot_cpu_has(X86_FEATURE_PLN))
return;
@@ -943,33 +1033,33 @@ static void package_power_limit_irq_restore(struct rapl_package *rp)
static void set_floor_freq_default(struct rapl_domain *rd, bool mode)
{
- int nr_powerlimit = find_nr_power_limit(rd);
+ int i;
 /* Always enable clamp such that p-state can go below the OS-requested
 * range. Power capping takes priority over guaranteed frequency.
 */
- rapl_write_data_raw(rd, PL1_CLAMP, mode);
+ rapl_write_pl_data(rd, POWER_LIMIT1, PL_CLAMP, mode);
- /* some domains have pl2 */
- if (nr_powerlimit > 1) {
- rapl_write_data_raw(rd, PL2_ENABLE, mode);
- rapl_write_data_raw(rd, PL2_CLAMP, mode);
+ for (i = POWER_LIMIT2; i < NR_POWER_LIMITS; i++) {
+ rapl_write_pl_data(rd, i, PL_ENABLE, mode);
+ rapl_write_pl_data(rd, i, PL_CLAMP, mode);
}
}
static void set_floor_freq_atom(struct rapl_domain *rd, bool enable)
{
static u32 power_ctrl_orig_val;
+ struct rapl_defaults *defaults = get_defaults(rd->rp);
u32 mdata;
- if (!rapl_defaults->floor_freq_reg_addr) {
+ if (!defaults->floor_freq_reg_addr) {
pr_err("Invalid floor frequency config register\n");
return;
}
if (!power_ctrl_orig_val)
iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_CR_READ,
- rapl_defaults->floor_freq_reg_addr,
+ defaults->floor_freq_reg_addr,
&power_ctrl_orig_val);
mdata = power_ctrl_orig_val;
if (enable) {
@@ -977,10 +1067,10 @@ static void set_floor_freq_atom(struct rapl_domain *rd, bool enable)
mdata |= 1 << 8;
}
iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_CR_WRITE,
- rapl_defaults->floor_freq_reg_addr, mdata);
+ defaults->floor_freq_reg_addr, mdata);
}
-static u64 rapl_compute_time_window_core(struct rapl_package *rp, u64 value,
+static u64 rapl_compute_time_window_core(struct rapl_domain *rd, u64 value,
bool to_raw)
{
u64 f, y; /* fraction and exp. used for time unit */
@@ -992,12 +1082,12 @@ static u64 rapl_compute_time_window_core(struct rapl_package *rp, u64 value,
if (!to_raw) {
f = (value & 0x60) >> 5;
y = value & 0x1f;
- value = (1 << y) * (4 + f) * rp->time_unit / 4;
+ value = (1 << y) * (4 + f) * rd->time_unit / 4;
} else {
- if (value < rp->time_unit)
+ if (value < rd->time_unit)
return 0;
- do_div(value, rp->time_unit);
+ do_div(value, rd->time_unit);
y = ilog2(value);
/*
@@ -1013,7 +1103,7 @@ static u64 rapl_compute_time_window_core(struct rapl_package *rp, u64 value,
return value;
}
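The core time-window format packs a window as (1 << Y) * (4 + F) / 4 time units, with Y in bits 0..4 and F in bits 5..6, exactly as the decode branch above shows. A self-contained worked example (the 976 us time unit comes from the unit-decoding sketch earlier):

#include <stdio.h>
#include <stdint.h>

static uint64_t decode_time_window_us(uint64_t raw, uint64_t time_unit_us)
{
	uint64_t f = (raw & 0x60) >> 5;	/* 2-bit fraction */
	uint64_t y = raw & 0x1f;	/* 5-bit exponent */

	return (1ULL << y) * (4 + f) * time_unit_us / 4;
}

int main(void)
{
	/* Y = 3, F = 2 -> 2^3 * 1.5 = 12 time units -> 11712 us */
	printf("%llu us\n",
	       (unsigned long long)decode_time_window_us(0x43, 976));
	return 0;
}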
-static u64 rapl_compute_time_window_atom(struct rapl_package *rp, u64 value,
+static u64 rapl_compute_time_window_atom(struct rapl_domain *rd, u64 value,
bool to_raw)
{
/*
@@ -1021,13 +1111,56 @@ static u64 rapl_compute_time_window_atom(struct rapl_package *rp, u64 value,
 * where time_unit defaults to 1 sec. Never 0.
*/
if (!to_raw)
- return (value) ? value * rp->time_unit : rp->time_unit;
+ return (value) ? value * rd->time_unit : rd->time_unit;
- value = div64_u64(value, rp->time_unit);
+ value = div64_u64(value, rd->time_unit);
return value;
}
+/* TPMI Unit register has different layout */
+#define TPMI_POWER_UNIT_OFFSET POWER_UNIT_OFFSET
+#define TPMI_POWER_UNIT_MASK POWER_UNIT_MASK
+#define TPMI_ENERGY_UNIT_OFFSET 0x06
+#define TPMI_ENERGY_UNIT_MASK 0x7C0
+#define TPMI_TIME_UNIT_OFFSET 0x0C
+#define TPMI_TIME_UNIT_MASK 0xF000
+
+static int rapl_check_unit_tpmi(struct rapl_domain *rd)
+{
+ struct reg_action ra;
+ u32 value;
+
+ ra.reg = rd->regs[RAPL_DOMAIN_REG_UNIT];
+ ra.mask = ~0;
+ if (rd->rp->priv->read_raw(get_rid(rd->rp), &ra)) {
+ pr_err("Failed to read power unit REG 0x%llx on %s:%s, exit.\n",
+ ra.reg, rd->rp->name, rd->name);
+ return -ENODEV;
+ }
+
+ value = (ra.value & TPMI_ENERGY_UNIT_MASK) >> TPMI_ENERGY_UNIT_OFFSET;
+ rd->energy_unit = ENERGY_UNIT_SCALE * 1000000 / (1 << value);
+
+ value = (ra.value & TPMI_POWER_UNIT_MASK) >> TPMI_POWER_UNIT_OFFSET;
+ rd->power_unit = 1000000 / (1 << value);
+
+ value = (ra.value & TPMI_TIME_UNIT_MASK) >> TPMI_TIME_UNIT_OFFSET;
+ rd->time_unit = 1000000 / (1 << value);
+
+ pr_debug("Core CPU %s:%s energy=%dpJ, time=%dus, power=%duW\n",
+ rd->rp->name, rd->name, rd->energy_unit, rd->time_unit, rd->power_unit);
+
+ return 0;
+}
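A quick self-contained check of the TPMI layout above (exponent values invented for illustration): packing power exponent 3, energy exponent 14 and time exponent 10 yields 0xA383, and the TPMI masks recover the same three exponents the MSR path would see at its own offsets.

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t raw = (3ULL << 0) | (14ULL << 6) | (10ULL << 12); /* 0xA383 */

	assert(((raw & 0x00F)  >> 0)  == 3);	/* TPMI_POWER_UNIT_*  */
	assert(((raw & 0x7C0)  >> 6)  == 14);	/* TPMI_ENERGY_UNIT_* */
	assert(((raw & 0xF000) >> 12) == 10);	/* TPMI_TIME_UNIT_*   */
	return 0;
}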
+
+static const struct rapl_defaults defaults_tpmi = {
+ .check_unit = rapl_check_unit_tpmi,
+ /* Reuse the existing logic; ignore PL_CLAMP failures and enable all Power Limits */
+ .set_floor_freq = set_floor_freq_default,
+ .compute_time_window = rapl_compute_time_window_core,
+};
+
static const struct rapl_defaults rapl_defaults_core = {
.floor_freq_reg_addr = 0,
.check_unit = rapl_check_unit_core,
@@ -1159,8 +1292,10 @@ static void rapl_update_domain_data(struct rapl_package *rp)
rp->domains[dmn].name);
/* exclude non-raw primitives */
for (prim = 0; prim < NR_RAW_PRIMITIVES; prim++) {
+ struct rapl_primitive_info *rpi = get_rpi(rp, prim);
+
if (!rapl_read_data_raw(&rp->domains[dmn], prim,
- rpi[prim].unit, &val))
+ rpi->unit, &val))
rp->domains[dmn].rdd.primitives[prim] = val;
}
}
@@ -1239,7 +1374,7 @@ err_cleanup:
return ret;
}
-static int rapl_check_domain(int cpu, int domain, struct rapl_package *rp)
+static int rapl_check_domain(int domain, struct rapl_package *rp)
{
struct reg_action ra;
@@ -1260,9 +1395,43 @@ static int rapl_check_domain(int cpu, int domain, struct rapl_package *rp)
*/
ra.mask = ENERGY_STATUS_MASK;
- if (rp->priv->read_raw(cpu, &ra) || !ra.value)
+ if (rp->priv->read_raw(get_rid(rp), &ra) || !ra.value)
+ return -ENODEV;
+
+ return 0;
+}
+
+/*
+ * Get the per-domain energy/power/time unit.
+ * RAPL interfaces without a per-domain unit register use the package-scope
+ * unit register to set the per-domain units.
+ */
+static int rapl_get_domain_unit(struct rapl_domain *rd)
+{
+ struct rapl_defaults *defaults = get_defaults(rd->rp);
+ int ret;
+
+ if (!rd->regs[RAPL_DOMAIN_REG_UNIT]) {
+ if (!rd->rp->priv->reg_unit) {
+ pr_err("No valid Unit register found\n");
+ return -ENODEV;
+ }
+ rd->regs[RAPL_DOMAIN_REG_UNIT] = rd->rp->priv->reg_unit;
+ }
+
+ if (!defaults->check_unit) {
+ pr_err("missing .check_unit() callback\n");
return -ENODEV;
+ }
+
+ ret = defaults->check_unit(rd);
+ if (ret)
+ return ret;
+ if (rd->id == RAPL_DOMAIN_DRAM && defaults->dram_domain_energy_unit)
+ rd->energy_unit = defaults->dram_domain_energy_unit;
+ if (rd->id == RAPL_DOMAIN_PLATFORM && defaults->psys_domain_energy_unit)
+ rd->energy_unit = defaults->psys_domain_energy_unit;
return 0;
}
@@ -1280,19 +1449,16 @@ static void rapl_detect_powerlimit(struct rapl_domain *rd)
u64 val64;
int i;
- /* check if the domain is locked by BIOS, ignore if MSR doesn't exist */
- if (!rapl_read_data_raw(rd, FW_LOCK, false, &val64)) {
- if (val64) {
- pr_info("RAPL %s domain %s locked by BIOS\n",
- rd->rp->name, rd->name);
- rd->state |= DOMAIN_STATE_BIOS_LOCKED;
+ for (i = POWER_LIMIT1; i < NR_POWER_LIMITS; i++) {
+ if (!rapl_read_pl_data(rd, i, PL_LOCK, false, &val64)) {
+ if (val64) {
+ rd->rpl[i].locked = true;
+ pr_info("%s:%s:%s locked by BIOS\n",
+ rd->rp->name, rd->name, pl_names[i]);
+ }
}
- }
- /* check if power limit MSR exists, otherwise domain is monitoring only */
- for (i = 0; i < NR_POWER_LIMITS; i++) {
- int prim = rd->rpl[i].prim_id;
- if (rapl_read_data_raw(rd, prim, false, &val64))
+ if (rapl_read_pl_data(rd, i, PL_ENABLE, false, &val64))
rd->rpl[i].name = NULL;
}
}
@@ -1300,14 +1466,14 @@ static void rapl_detect_powerlimit(struct rapl_domain *rd)
/* Detect active and valid domains for the given CPU; the caller must
* ensure the CPU belongs to the targeted package and CPU hotplug is disabled.
*/
-static int rapl_detect_domains(struct rapl_package *rp, int cpu)
+static int rapl_detect_domains(struct rapl_package *rp)
{
struct rapl_domain *rd;
int i;
for (i = 0; i < RAPL_DOMAIN_MAX; i++) {
/* use physical package id to read counters */
- if (!rapl_check_domain(cpu, i, rp)) {
+ if (!rapl_check_domain(i, rp)) {
rp->domain_map |= 1 << i;
pr_info("Found RAPL domain %s\n", rapl_domain_names[i]);
}
@@ -1326,8 +1492,10 @@ static int rapl_detect_domains(struct rapl_package *rp, int cpu)
rapl_init_domains(rp);
- for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++)
+ for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
+ rapl_get_domain_unit(rd);
rapl_detect_powerlimit(rd);
+ }
return 0;
}
@@ -1340,13 +1508,13 @@ void rapl_remove_package(struct rapl_package *rp)
package_power_limit_irq_restore(rp);
for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
- rapl_write_data_raw(rd, PL1_ENABLE, 0);
- rapl_write_data_raw(rd, PL1_CLAMP, 0);
- if (find_nr_power_limit(rd) > 1) {
- rapl_write_data_raw(rd, PL2_ENABLE, 0);
- rapl_write_data_raw(rd, PL2_CLAMP, 0);
- rapl_write_data_raw(rd, PL4_ENABLE, 0);
+ int i;
+
+ for (i = POWER_LIMIT1; i < NR_POWER_LIMITS; i++) {
+ rapl_write_pl_data(rd, i, PL_ENABLE, 0);
+ rapl_write_pl_data(rd, i, PL_CLAMP, 0);
}
+
if (rd->id == RAPL_DOMAIN_PACKAGE) {
rd_package = rd;
continue;
@@ -1365,13 +1533,18 @@ void rapl_remove_package(struct rapl_package *rp)
EXPORT_SYMBOL_GPL(rapl_remove_package);
/* caller to ensure CPU hotplug lock is held */
-struct rapl_package *rapl_find_package_domain(int cpu, struct rapl_if_priv *priv)
+struct rapl_package *rapl_find_package_domain(int id, struct rapl_if_priv *priv, bool id_is_cpu)
{
- int id = topology_logical_die_id(cpu);
struct rapl_package *rp;
+ int uid;
+
+ if (id_is_cpu)
+ uid = topology_logical_die_id(id);
+ else
+ uid = id;
list_for_each_entry(rp, &rapl_packages, plist) {
- if (rp->id == id
+ if (rp->id == uid
&& rp->priv->control_type == priv->control_type)
return rp;
}
@@ -1381,34 +1554,37 @@ struct rapl_package *rapl_find_package_domain(int cpu, struct rapl_if_priv *priv
EXPORT_SYMBOL_GPL(rapl_find_package_domain);
/* called from CPU hotplug notifier, hotplug lock held */
-struct rapl_package *rapl_add_package(int cpu, struct rapl_if_priv *priv)
+struct rapl_package *rapl_add_package(int id, struct rapl_if_priv *priv, bool id_is_cpu)
{
- int id = topology_logical_die_id(cpu);
struct rapl_package *rp;
int ret;
- if (!rapl_defaults)
- return ERR_PTR(-ENODEV);
-
rp = kzalloc(sizeof(struct rapl_package), GFP_KERNEL);
if (!rp)
return ERR_PTR(-ENOMEM);
- /* add the new package to the list */
- rp->id = id;
- rp->lead_cpu = cpu;
- rp->priv = priv;
+ if (id_is_cpu) {
+ rp->id = topology_logical_die_id(id);
+ rp->lead_cpu = id;
+ if (topology_max_die_per_package() > 1)
+ snprintf(rp->name, PACKAGE_DOMAIN_NAME_LENGTH, "package-%d-die-%d",
+ topology_physical_package_id(id), topology_die_id(id));
+ else
+ snprintf(rp->name, PACKAGE_DOMAIN_NAME_LENGTH, "package-%d",
+ topology_physical_package_id(id));
+ } else {
+ rp->id = id;
+ rp->lead_cpu = -1;
+ snprintf(rp->name, PACKAGE_DOMAIN_NAME_LENGTH, "package-%d", id);
+ }
- if (topology_max_die_per_package() > 1)
- snprintf(rp->name, PACKAGE_DOMAIN_NAME_LENGTH,
- "package-%d-die-%d",
- topology_physical_package_id(cpu), topology_die_id(cpu));
- else
- snprintf(rp->name, PACKAGE_DOMAIN_NAME_LENGTH, "package-%d",
- topology_physical_package_id(cpu));
+ rp->priv = priv;
+ ret = rapl_config(rp);
+ if (ret)
+ goto err_free_package;
/* check if the package contains valid domains */
- if (rapl_detect_domains(rp, cpu) || rapl_defaults->check_unit(rp, cpu)) {
+ if (rapl_detect_domains(rp)) {
ret = -ENODEV;
goto err_free_package;
}
@@ -1430,38 +1606,18 @@ static void power_limit_state_save(void)
{
struct rapl_package *rp;
struct rapl_domain *rd;
- int nr_pl, ret, i;
+ int ret, i;
cpus_read_lock();
list_for_each_entry(rp, &rapl_packages, plist) {
if (!rp->power_zone)
continue;
rd = power_zone_to_rapl_domain(rp->power_zone);
- nr_pl = find_nr_power_limit(rd);
- for (i = 0; i < nr_pl; i++) {
- switch (rd->rpl[i].prim_id) {
- case PL1_ENABLE:
- ret = rapl_read_data_raw(rd,
- POWER_LIMIT1, true,
- &rd->rpl[i].last_power_limit);
- if (ret)
- rd->rpl[i].last_power_limit = 0;
- break;
- case PL2_ENABLE:
- ret = rapl_read_data_raw(rd,
- POWER_LIMIT2, true,
+ for (i = POWER_LIMIT1; i < NR_POWER_LIMITS; i++) {
+ ret = rapl_read_pl_data(rd, i, PL_LIMIT, true,
&rd->rpl[i].last_power_limit);
- if (ret)
- rd->rpl[i].last_power_limit = 0;
- break;
- case PL4_ENABLE:
- ret = rapl_read_data_raw(rd,
- POWER_LIMIT4, true,
- &rd->rpl[i].last_power_limit);
- if (ret)
- rd->rpl[i].last_power_limit = 0;
- break;
- }
+ if (ret)
+ rd->rpl[i].last_power_limit = 0;
}
}
cpus_read_unlock();
@@ -1471,33 +1627,17 @@ static void power_limit_state_restore(void)
{
struct rapl_package *rp;
struct rapl_domain *rd;
- int nr_pl, i;
+ int i;
cpus_read_lock();
list_for_each_entry(rp, &rapl_packages, plist) {
if (!rp->power_zone)
continue;
rd = power_zone_to_rapl_domain(rp->power_zone);
- nr_pl = find_nr_power_limit(rd);
- for (i = 0; i < nr_pl; i++) {
- switch (rd->rpl[i].prim_id) {
- case PL1_ENABLE:
- if (rd->rpl[i].last_power_limit)
- rapl_write_data_raw(rd, POWER_LIMIT1,
- rd->rpl[i].last_power_limit);
- break;
- case PL2_ENABLE:
- if (rd->rpl[i].last_power_limit)
- rapl_write_data_raw(rd, POWER_LIMIT2,
- rd->rpl[i].last_power_limit);
- break;
- case PL4_ENABLE:
- if (rd->rpl[i].last_power_limit)
- rapl_write_data_raw(rd, POWER_LIMIT4,
- rd->rpl[i].last_power_limit);
- break;
- }
- }
+ for (i = POWER_LIMIT1; i < NR_POWER_LIMITS; i++)
+ if (rd->rpl[i].last_power_limit)
+ rapl_write_pl_data(rd, i, PL_LIMIT,
+ rd->rpl[i].last_power_limit);
}
cpus_read_unlock();
}
@@ -1528,32 +1668,25 @@ static int __init rapl_init(void)
int ret;
id = x86_match_cpu(rapl_ids);
- if (!id) {
- pr_err("driver does not support CPU family %d model %d\n",
- boot_cpu_data.x86, boot_cpu_data.x86_model);
-
- return -ENODEV;
- }
+ if (id) {
+ defaults_msr = (struct rapl_defaults *)id->driver_data;
- rapl_defaults = (struct rapl_defaults *)id->driver_data;
-
- ret = register_pm_notifier(&rapl_pm_notifier);
- if (ret)
- return ret;
+ rapl_msr_platdev = platform_device_alloc("intel_rapl_msr", 0);
+ if (!rapl_msr_platdev)
+ return -ENOMEM;
- rapl_msr_platdev = platform_device_alloc("intel_rapl_msr", 0);
- if (!rapl_msr_platdev) {
- ret = -ENOMEM;
- goto end;
+ ret = platform_device_add(rapl_msr_platdev);
+ if (ret) {
+ platform_device_put(rapl_msr_platdev);
+ return ret;
+ }
}
- ret = platform_device_add(rapl_msr_platdev);
- if (ret)
+ ret = register_pm_notifier(&rapl_pm_notifier);
+ if (ret && rapl_msr_platdev) {
+ platform_device_del(rapl_msr_platdev);
platform_device_put(rapl_msr_platdev);
-
-end:
- if (ret)
- unregister_pm_notifier(&rapl_pm_notifier);
+ }
return ret;
}
diff --git a/drivers/powercap/intel_rapl_msr.c b/drivers/powercap/intel_rapl_msr.c
index a27673706c3d..569e25eab1e1 100644
--- a/drivers/powercap/intel_rapl_msr.c
+++ b/drivers/powercap/intel_rapl_msr.c
@@ -22,7 +22,6 @@
#include <linux/processor.h>
#include <linux/platform_device.h>
-#include <asm/iosf_mbi.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
@@ -34,6 +33,7 @@
static struct rapl_if_priv *rapl_msr_priv;
static struct rapl_if_priv rapl_msr_priv_intel = {
+ .type = RAPL_IF_MSR,
.reg_unit = MSR_RAPL_POWER_UNIT,
.regs[RAPL_DOMAIN_PACKAGE] = {
MSR_PKG_POWER_LIMIT, MSR_PKG_ENERGY_STATUS, MSR_PKG_PERF_STATUS, 0, MSR_PKG_POWER_INFO },
@@ -45,11 +45,12 @@ static struct rapl_if_priv rapl_msr_priv_intel = {
MSR_DRAM_POWER_LIMIT, MSR_DRAM_ENERGY_STATUS, MSR_DRAM_PERF_STATUS, 0, MSR_DRAM_POWER_INFO },
.regs[RAPL_DOMAIN_PLATFORM] = {
MSR_PLATFORM_POWER_LIMIT, MSR_PLATFORM_ENERGY_STATUS, 0, 0, 0},
- .limits[RAPL_DOMAIN_PACKAGE] = 2,
- .limits[RAPL_DOMAIN_PLATFORM] = 2,
+ .limits[RAPL_DOMAIN_PACKAGE] = BIT(POWER_LIMIT2),
+ .limits[RAPL_DOMAIN_PLATFORM] = BIT(POWER_LIMIT2),
};
static struct rapl_if_priv rapl_msr_priv_amd = {
+ .type = RAPL_IF_MSR,
.reg_unit = MSR_AMD_RAPL_POWER_UNIT,
.regs[RAPL_DOMAIN_PACKAGE] = {
0, MSR_AMD_PKG_ENERGY_STATUS, 0, 0, 0 },
@@ -68,9 +69,9 @@ static int rapl_cpu_online(unsigned int cpu)
{
struct rapl_package *rp;
- rp = rapl_find_package_domain(cpu, rapl_msr_priv);
+ rp = rapl_find_package_domain(cpu, rapl_msr_priv, true);
if (!rp) {
- rp = rapl_add_package(cpu, rapl_msr_priv);
+ rp = rapl_add_package(cpu, rapl_msr_priv, true);
if (IS_ERR(rp))
return PTR_ERR(rp);
}
@@ -83,7 +84,7 @@ static int rapl_cpu_down_prep(unsigned int cpu)
struct rapl_package *rp;
int lead_cpu;
- rp = rapl_find_package_domain(cpu, rapl_msr_priv);
+ rp = rapl_find_package_domain(cpu, rapl_msr_priv, true);
if (!rp)
return 0;
@@ -137,14 +138,14 @@ static int rapl_msr_write_raw(int cpu, struct reg_action *ra)
/* List of verified CPUs. */
static const struct x86_cpu_id pl4_support_ids[] = {
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_TIGERLAKE_L, X86_FEATURE_ANY },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ALDERLAKE, X86_FEATURE_ANY },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ALDERLAKE_L, X86_FEATURE_ANY },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ALDERLAKE_N, X86_FEATURE_ANY },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_RAPTORLAKE, X86_FEATURE_ANY },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_RAPTORLAKE_P, X86_FEATURE_ANY },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_METEORLAKE, X86_FEATURE_ANY },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_METEORLAKE_L, X86_FEATURE_ANY },
+ X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, NULL),
{}
};
@@ -169,7 +170,7 @@ static int rapl_msr_probe(struct platform_device *pdev)
rapl_msr_priv->write_raw = rapl_msr_write_raw;
if (id) {
- rapl_msr_priv->limits[RAPL_DOMAIN_PACKAGE] = 3;
+ rapl_msr_priv->limits[RAPL_DOMAIN_PACKAGE] |= BIT(POWER_LIMIT4);
rapl_msr_priv->regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_PL4] =
MSR_VR_CURRENT_CONFIG;
pr_info("PL4 support detected.\n");
diff --git a/drivers/powercap/intel_rapl_tpmi.c b/drivers/powercap/intel_rapl_tpmi.c
new file mode 100644
index 000000000000..4f4f13ded225
--- /dev/null
+++ b/drivers/powercap/intel_rapl_tpmi.c
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * intel_rapl_tpmi: Intel RAPL driver via TPMI interface
+ *
+ * Copyright (c) 2023, Intel Corporation.
+ * All Rights Reserved.
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/auxiliary_bus.h>
+#include <linux/io.h>
+#include <linux/intel_tpmi.h>
+#include <linux/intel_rapl.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#define TPMI_RAPL_VERSION 1
+
+/* 1 header + 10 registers + 5 reserved. 8 bytes for each. */
+#define TPMI_RAPL_DOMAIN_SIZE 128
+
+enum tpmi_rapl_domain_type {
+ TPMI_RAPL_DOMAIN_INVALID,
+ TPMI_RAPL_DOMAIN_SYSTEM,
+ TPMI_RAPL_DOMAIN_PACKAGE,
+ TPMI_RAPL_DOMAIN_RESERVED,
+ TPMI_RAPL_DOMAIN_MEMORY,
+ TPMI_RAPL_DOMAIN_MAX,
+};
+
+enum tpmi_rapl_register {
+ TPMI_RAPL_REG_HEADER,
+ TPMI_RAPL_REG_UNIT,
+ TPMI_RAPL_REG_PL1,
+ TPMI_RAPL_REG_PL2,
+ TPMI_RAPL_REG_PL3,
+ TPMI_RAPL_REG_PL4,
+ TPMI_RAPL_REG_RESERVED,
+ TPMI_RAPL_REG_ENERGY_STATUS,
+ TPMI_RAPL_REG_PERF_STATUS,
+ TPMI_RAPL_REG_POWER_INFO,
+ TPMI_RAPL_REG_INTERRUPT,
+ TPMI_RAPL_REG_MAX = 15,
+};
+
+struct tpmi_rapl_package {
+ struct rapl_if_priv priv;
+ struct intel_tpmi_plat_info *tpmi_info;
+ struct rapl_package *rp;
+ void __iomem *base;
+ struct list_head node;
+};
+
+static LIST_HEAD(tpmi_rapl_packages);
+static DEFINE_MUTEX(tpmi_rapl_lock);
+
+static struct powercap_control_type *tpmi_control_type;
+
+static int tpmi_rapl_read_raw(int id, struct reg_action *ra)
+{
+ if (!ra->reg)
+ return -EINVAL;
+
+ ra->value = readq((void __iomem *)ra->reg);
+
+ ra->value &= ra->mask;
+ return 0;
+}
+
+static int tpmi_rapl_write_raw(int id, struct reg_action *ra)
+{
+ u64 val;
+
+ if (!ra->reg)
+ return -EINVAL;
+
+ val = readq((void __iomem *)ra->reg);
+
+ val &= ~ra->mask;
+ val |= ra->value;
+
+ writeq(val, (void __iomem *)ra->reg);
+ return 0;
+}
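These two callbacks treat ra->reg as a kernel virtual (MMIO) address rather than an MSR index. A hypothetical read-modify-write caller, to show the reg_action contract (the 0x7FFF mask is an invented PL1 power field, for illustration only):

	struct reg_action ra;

	ra.reg = trp->priv.regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_LIMIT];
	ra.mask = 0x7FFF;			/* hypothetical PL1 power field */
	if (!tpmi_rapl_read_raw(0, &ra)) {	/* id is unused by this backend */
		ra.value = (ra.value + 1) & ra.mask;
		tpmi_rapl_write_raw(0, &ra);	/* RMW of the masked bits only */
	}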
+
+static struct tpmi_rapl_package *trp_alloc(int pkg_id)
+{
+ struct tpmi_rapl_package *trp;
+ int ret;
+
+ mutex_lock(&tpmi_rapl_lock);
+
+ if (list_empty(&tpmi_rapl_packages)) {
+ tpmi_control_type = powercap_register_control_type(NULL, "intel-rapl", NULL);
+ if (IS_ERR(tpmi_control_type)) {
+ ret = PTR_ERR(tpmi_control_type);
+ goto err_unlock;
+ }
+ }
+
+ trp = kzalloc(sizeof(*trp), GFP_KERNEL);
+ if (!trp) {
+ ret = -ENOMEM;
+ goto err_del_powercap;
+ }
+
+ list_add(&trp->node, &tpmi_rapl_packages);
+
+ mutex_unlock(&tpmi_rapl_lock);
+ return trp;
+
+err_del_powercap:
+ if (list_empty(&tpmi_rapl_packages))
+ powercap_unregister_control_type(tpmi_control_type);
+err_unlock:
+ mutex_unlock(&tpmi_rapl_lock);
+ return ERR_PTR(ret);
+}
+
+static void trp_release(struct tpmi_rapl_package *trp)
+{
+ mutex_lock(&tpmi_rapl_lock);
+ list_del(&trp->node);
+
+ if (list_empty(&tpmi_rapl_packages))
+ powercap_unregister_control_type(tpmi_control_type);
+
+ kfree(trp);
+ mutex_unlock(&tpmi_rapl_lock);
+}
+
+static int parse_one_domain(struct tpmi_rapl_package *trp, u32 offset)
+{
+ u8 tpmi_domain_version;
+ enum rapl_domain_type domain_type;
+ enum tpmi_rapl_domain_type tpmi_domain_type;
+ enum tpmi_rapl_register reg_index;
+ enum rapl_domain_reg_id reg_id;
+ int tpmi_domain_size, tpmi_domain_flags;
+ u64 *tpmi_rapl_regs = trp->base + offset;
+ u64 tpmi_domain_header = readq((void __iomem *)tpmi_rapl_regs);
+
+ /* Domain Parent bits are ignored for now */
+ tpmi_domain_version = tpmi_domain_header & 0xff;
+ tpmi_domain_type = tpmi_domain_header >> 8 & 0xff;
+ tpmi_domain_size = tpmi_domain_header >> 16 & 0xff;
+ tpmi_domain_flags = tpmi_domain_header >> 32 & 0xffff;
+
+ if (tpmi_domain_version != TPMI_RAPL_VERSION) {
+ pr_warn(FW_BUG "Unsupported version:%d\n", tpmi_domain_version);
+ return -ENODEV;
+ }
+
+ /* Domain size: in units of 128 bytes */
+ if (tpmi_domain_size != 1) {
+ pr_warn(FW_BUG "Invalid Domain size %d\n", tpmi_domain_size);
+ return -EINVAL;
+ }
+
+ /* Unit register and Energy Status register are mandatory for each domain */
+ if (!(tpmi_domain_flags & BIT(TPMI_RAPL_REG_UNIT)) ||
+ !(tpmi_domain_flags & BIT(TPMI_RAPL_REG_ENERGY_STATUS))) {
+ pr_warn(FW_BUG "Invalid Domain flag 0x%x\n", tpmi_domain_flags);
+ return -EINVAL;
+ }
+
+ switch (tpmi_domain_type) {
+ case TPMI_RAPL_DOMAIN_PACKAGE:
+ domain_type = RAPL_DOMAIN_PACKAGE;
+ break;
+ case TPMI_RAPL_DOMAIN_SYSTEM:
+ domain_type = RAPL_DOMAIN_PLATFORM;
+ break;
+ case TPMI_RAPL_DOMAIN_MEMORY:
+ domain_type = RAPL_DOMAIN_DRAM;
+ break;
+ default:
+ pr_warn(FW_BUG "Unsupported Domain type %d\n", tpmi_domain_type);
+ return -EINVAL;
+ }
+
+ if (trp->priv.regs[domain_type][RAPL_DOMAIN_REG_UNIT]) {
+ pr_warn(FW_BUG "Duplicate Domain type %d\n", tpmi_domain_type);
+ return -EINVAL;
+ }
+
+ reg_index = TPMI_RAPL_REG_HEADER;
+ while (++reg_index != TPMI_RAPL_REG_MAX) {
+ if (!(tpmi_domain_flags & BIT(reg_index)))
+ continue;
+
+ switch (reg_index) {
+ case TPMI_RAPL_REG_UNIT:
+ reg_id = RAPL_DOMAIN_REG_UNIT;
+ break;
+ case TPMI_RAPL_REG_PL1:
+ reg_id = RAPL_DOMAIN_REG_LIMIT;
+ trp->priv.limits[domain_type] |= BIT(POWER_LIMIT1);
+ break;
+ case TPMI_RAPL_REG_PL2:
+ reg_id = RAPL_DOMAIN_REG_PL2;
+ trp->priv.limits[domain_type] |= BIT(POWER_LIMIT2);
+ break;
+ case TPMI_RAPL_REG_PL4:
+ reg_id = RAPL_DOMAIN_REG_PL4;
+ trp->priv.limits[domain_type] |= BIT(POWER_LIMIT4);
+ break;
+ case TPMI_RAPL_REG_ENERGY_STATUS:
+ reg_id = RAPL_DOMAIN_REG_STATUS;
+ break;
+ case TPMI_RAPL_REG_PERF_STATUS:
+ reg_id = RAPL_DOMAIN_REG_PERF;
+ break;
+ case TPMI_RAPL_REG_POWER_INFO:
+ reg_id = RAPL_DOMAIN_REG_INFO;
+ break;
+ default:
+ continue;
+ }
+ trp->priv.regs[domain_type][reg_id] = (u64)&tpmi_rapl_regs[reg_index];
+ }
+
+ return 0;
+}
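The header decode above slices one 64-bit register: version in bits 0..7, domain type in 8..15, size in 16..23 and the register-present flags in 32..47. A self-contained sanity check with an invented header value:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* version 1, type 2 (PACKAGE), size 1, UNIT + ENERGY_STATUS present */
	uint64_t hdr = 1ULL | (2ULL << 8) | (1ULL << 16) |
		       (((1ULL << 1) | (1ULL << 7)) << 32);

	assert((hdr & 0xff) == 1);		/* TPMI_RAPL_VERSION        */
	assert((hdr >> 8 & 0xff) == 2);		/* TPMI_RAPL_DOMAIN_PACKAGE */
	assert((hdr >> 16 & 0xff) == 1);	/* one 128-byte chunk       */
	assert(hdr >> 32 & (1 << 1));		/* TPMI_RAPL_REG_UNIT       */
	assert(hdr >> 32 & (1 << 7));		/* TPMI_RAPL_REG_ENERGY_STATUS */
	return 0;
}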
+
+static int intel_rapl_tpmi_probe(struct auxiliary_device *auxdev,
+ const struct auxiliary_device_id *id)
+{
+ struct tpmi_rapl_package *trp;
+ struct intel_tpmi_plat_info *info;
+ struct resource *res;
+ u32 offset;
+ int ret;
+
+ info = tpmi_get_platform_data(auxdev);
+ if (!info)
+ return -ENODEV;
+
+ trp = trp_alloc(info->package_id);
+ if (IS_ERR(trp))
+ return PTR_ERR(trp);
+
+ if (tpmi_get_resource_count(auxdev) > 1) {
+ dev_err(&auxdev->dev, "does not support multiple resources\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ res = tpmi_get_resource_at_index(auxdev, 0);
+ if (!res) {
+ dev_err(&auxdev->dev, "can't fetch device resource info\n");
+ ret = -EIO;
+ goto err;
+ }
+
+ trp->base = devm_ioremap_resource(&auxdev->dev, res);
+ if (IS_ERR(trp->base)) {
+ ret = PTR_ERR(trp->base);
+ goto err;
+ }
+
+ for (offset = 0; offset < resource_size(res); offset += TPMI_RAPL_DOMAIN_SIZE) {
+ ret = parse_one_domain(trp, offset);
+ if (ret)
+ goto err;
+ }
+
+ trp->tpmi_info = info;
+ trp->priv.type = RAPL_IF_TPMI;
+ trp->priv.read_raw = tpmi_rapl_read_raw;
+ trp->priv.write_raw = tpmi_rapl_write_raw;
+ trp->priv.control_type = tpmi_control_type;
+
+ /* RAPL TPMI I/F is per physical package */
+ trp->rp = rapl_find_package_domain(info->package_id, &trp->priv, false);
+ if (trp->rp) {
+ dev_err(&auxdev->dev, "Domain for Package%d already exists\n", info->package_id);
+ ret = -EEXIST;
+ goto err;
+ }
+
+ trp->rp = rapl_add_package(info->package_id, &trp->priv, false);
+ if (IS_ERR(trp->rp)) {
+ dev_err(&auxdev->dev, "Failed to add RAPL Domain for Package%d, %ld\n",
+ info->package_id, PTR_ERR(trp->rp));
+ ret = PTR_ERR(trp->rp);
+ goto err;
+ }
+
+ auxiliary_set_drvdata(auxdev, trp);
+
+ return 0;
+err:
+ trp_release(trp);
+ return ret;
+}
+
+static void intel_rapl_tpmi_remove(struct auxiliary_device *auxdev)
+{
+ struct tpmi_rapl_package *trp = auxiliary_get_drvdata(auxdev);
+
+ rapl_remove_package(trp->rp);
+ trp_release(trp);
+}
+
+static const struct auxiliary_device_id intel_rapl_tpmi_ids[] = {
+ {.name = "intel_vsec.tpmi-rapl" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(auxiliary, intel_rapl_tpmi_ids);
+
+static struct auxiliary_driver intel_rapl_tpmi_driver = {
+ .probe = intel_rapl_tpmi_probe,
+ .remove = intel_rapl_tpmi_remove,
+ .id_table = intel_rapl_tpmi_ids,
+};
+
+module_auxiliary_driver(intel_rapl_tpmi_driver)
+
+MODULE_IMPORT_NS(INTEL_TPMI);
+
+MODULE_DESCRIPTION("Intel RAPL TPMI Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pwm/pwm-atmel.c b/drivers/pwm/pwm-atmel.c
index 0c567d9623cd..5f7d286871cf 100644
--- a/drivers/pwm/pwm-atmel.c
+++ b/drivers/pwm/pwm-atmel.c
@@ -6,7 +6,7 @@
* Bo Shen <voice.shen@atmel.com>
*
* Links to reference manuals for the supported PWM chips can be found in
- * Documentation/arm/microchip.rst.
+ * Documentation/arch/arm/microchip.rst.
*
* Limitations:
* - Periods start with the inactive level.
diff --git a/drivers/pwm/pwm-pxa.c b/drivers/pwm/pwm-pxa.c
index 46ed668bd141..762429d5647f 100644
--- a/drivers/pwm/pwm-pxa.c
+++ b/drivers/pwm/pwm-pxa.c
@@ -8,7 +8,7 @@
* eric miao <eric.miao@marvell.com>
*
* Links to reference manuals for some of the supported PWM chips can be found
- * in Documentation/arm/marvell.rst.
+ * in Documentation/arch/arm/marvell.rst.
*
* Limitations:
* - When PWM is stopped, the current PWM period stops abruptly at the next
diff --git a/drivers/ras/debugfs.c b/drivers/ras/debugfs.c
index f0a6391b1146..ffb973c328e3 100644
--- a/drivers/ras/debugfs.c
+++ b/drivers/ras/debugfs.c
@@ -46,7 +46,7 @@ int __init ras_add_daemon_trace(void)
fentry = debugfs_create_file("daemon_active", S_IRUSR, ras_debugfs_dir,
NULL, &trace_fops);
- if (!fentry)
+ if (IS_ERR(fentry))
return -ENODEV;
return 0;
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index dc741ac156c3..698ab7f5004b 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -5256,7 +5256,7 @@ static void rdev_init_debugfs(struct regulator_dev *rdev)
}
rdev->debugfs = debugfs_create_dir(rname, debugfs_root);
- if (!rdev->debugfs) {
+ if (IS_ERR(rdev->debugfs)) {
rdev_warn(rdev, "Failed to create debugfs directory\n");
return;
}
@@ -6178,7 +6178,7 @@ static int __init regulator_init(void)
ret = class_register(&regulator_class);
debugfs_root = debugfs_create_dir("regulator", NULL);
- if (!debugfs_root)
+ if (IS_ERR(debugfs_root))
pr_warn("regulator: Failed to create debugfs directory\n");
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/regulator/mt6359-regulator.c b/drivers/regulator/mt6359-regulator.c
index 1849566784ab..3eb86ec21d08 100644
--- a/drivers/regulator/mt6359-regulator.c
+++ b/drivers/regulator/mt6359-regulator.c
@@ -951,9 +951,12 @@ static int mt6359_regulator_probe(struct platform_device *pdev)
struct regulator_config config = {};
struct regulator_dev *rdev;
struct mt6359_regulator_info *mt6359_info;
- int i, hw_ver;
+ int i, hw_ver, ret;
+
+ ret = regmap_read(mt6397->regmap, MT6359P_HWCID, &hw_ver);
+ if (ret)
+ return ret;
- regmap_read(mt6397->regmap, MT6359P_HWCID, &hw_ver);
if (hw_ver >= MT6359P_CHIP_VER)
mt6359_info = mt6359p_regulators;
else
diff --git a/drivers/regulator/pca9450-regulator.c b/drivers/regulator/pca9450-regulator.c
index 87a746dcb516..e75dd92f86ca 100644
--- a/drivers/regulator/pca9450-regulator.c
+++ b/drivers/regulator/pca9450-regulator.c
@@ -264,7 +264,7 @@ static const struct pca9450_regulator_desc pca9450a_regulators[] = {
.vsel_reg = PCA9450_REG_BUCK2OUT_DVS0,
.vsel_mask = BUCK2OUT_DVS0_MASK,
.enable_reg = PCA9450_REG_BUCK2CTRL,
- .enable_mask = BUCK1_ENMODE_MASK,
+ .enable_mask = BUCK2_ENMODE_MASK,
.ramp_reg = PCA9450_REG_BUCK2CTRL,
.ramp_mask = BUCK2_RAMP_MASK,
.ramp_delay_table = pca9450_dvs_buck_ramp_table,
@@ -502,7 +502,7 @@ static const struct pca9450_regulator_desc pca9450bc_regulators[] = {
.vsel_reg = PCA9450_REG_BUCK2OUT_DVS0,
.vsel_mask = BUCK2OUT_DVS0_MASK,
.enable_reg = PCA9450_REG_BUCK2CTRL,
- .enable_mask = BUCK1_ENMODE_MASK,
+ .enable_mask = BUCK2_ENMODE_MASK,
.ramp_reg = PCA9450_REG_BUCK2CTRL,
.ramp_mask = BUCK2_RAMP_MASK,
.ramp_delay_table = pca9450_dvs_buck_ramp_table,
diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
index b0a58c62b1e2..f3b280af0773 100644
--- a/drivers/regulator/qcom-rpmh-regulator.c
+++ b/drivers/regulator/qcom-rpmh-regulator.c
@@ -1057,21 +1057,21 @@ static const struct rpmh_vreg_init_data pm8450_vreg_data[] = {
};
static const struct rpmh_vreg_init_data pm8550_vreg_data[] = {
- RPMH_VREG("ldo1", "ldo%s1", &pmic5_pldo, "vdd-l1-l4-l10"),
+ RPMH_VREG("ldo1", "ldo%s1", &pmic5_nldo515, "vdd-l1-l4-l10"),
RPMH_VREG("ldo2", "ldo%s2", &pmic5_pldo, "vdd-l2-l13-l14"),
- RPMH_VREG("ldo3", "ldo%s3", &pmic5_nldo, "vdd-l3"),
- RPMH_VREG("ldo4", "ldo%s4", &pmic5_nldo, "vdd-l1-l4-l10"),
+ RPMH_VREG("ldo3", "ldo%s3", &pmic5_nldo515, "vdd-l3"),
+ RPMH_VREG("ldo4", "ldo%s4", &pmic5_nldo515, "vdd-l1-l4-l10"),
RPMH_VREG("ldo5", "ldo%s5", &pmic5_pldo, "vdd-l5-l16"),
- RPMH_VREG("ldo6", "ldo%s6", &pmic5_pldo_lv, "vdd-l6-l7"),
- RPMH_VREG("ldo7", "ldo%s7", &pmic5_pldo_lv, "vdd-l6-l7"),
- RPMH_VREG("ldo8", "ldo%s8", &pmic5_pldo_lv, "vdd-l8-l9"),
+ RPMH_VREG("ldo6", "ldo%s6", &pmic5_pldo, "vdd-l6-l7"),
+ RPMH_VREG("ldo7", "ldo%s7", &pmic5_pldo, "vdd-l6-l7"),
+ RPMH_VREG("ldo8", "ldo%s8", &pmic5_pldo, "vdd-l8-l9"),
RPMH_VREG("ldo9", "ldo%s9", &pmic5_pldo, "vdd-l8-l9"),
- RPMH_VREG("ldo10", "ldo%s10", &pmic5_nldo, "vdd-l1-l4-l10"),
- RPMH_VREG("ldo11", "ldo%s11", &pmic5_nldo, "vdd-l11"),
+ RPMH_VREG("ldo10", "ldo%s10", &pmic5_nldo515, "vdd-l1-l4-l10"),
+ RPMH_VREG("ldo11", "ldo%s11", &pmic5_nldo515, "vdd-l11"),
RPMH_VREG("ldo12", "ldo%s12", &pmic5_pldo, "vdd-l12"),
RPMH_VREG("ldo13", "ldo%s13", &pmic5_pldo, "vdd-l2-l13-l14"),
RPMH_VREG("ldo14", "ldo%s14", &pmic5_pldo, "vdd-l2-l13-l14"),
- RPMH_VREG("ldo15", "ldo%s15", &pmic5_pldo, "vdd-l15"),
+ RPMH_VREG("ldo15", "ldo%s15", &pmic5_nldo515, "vdd-l15"),
RPMH_VREG("ldo16", "ldo%s16", &pmic5_pldo, "vdd-l5-l16"),
RPMH_VREG("ldo17", "ldo%s17", &pmic5_pldo, "vdd-l17"),
RPMH_VREG("bob1", "bob%s1", &pmic5_bob, "vdd-bob1"),
@@ -1086,9 +1086,9 @@ static const struct rpmh_vreg_init_data pm8550vs_vreg_data[] = {
RPMH_VREG("smps4", "smp%s4", &pmic5_ftsmps525_lv, "vdd-s4"),
RPMH_VREG("smps5", "smp%s5", &pmic5_ftsmps525_lv, "vdd-s5"),
RPMH_VREG("smps6", "smp%s6", &pmic5_ftsmps525_mv, "vdd-s6"),
- RPMH_VREG("ldo1", "ldo%s1", &pmic5_nldo, "vdd-l1"),
- RPMH_VREG("ldo2", "ldo%s2", &pmic5_nldo, "vdd-l2"),
- RPMH_VREG("ldo3", "ldo%s3", &pmic5_nldo, "vdd-l3"),
+ RPMH_VREG("ldo1", "ldo%s1", &pmic5_nldo515, "vdd-l1"),
+ RPMH_VREG("ldo2", "ldo%s2", &pmic5_nldo515, "vdd-l2"),
+ RPMH_VREG("ldo3", "ldo%s3", &pmic5_nldo515, "vdd-l3"),
{}
};
@@ -1101,9 +1101,9 @@ static const struct rpmh_vreg_init_data pm8550ve_vreg_data[] = {
RPMH_VREG("smps6", "smp%s6", &pmic5_ftsmps525_lv, "vdd-s6"),
RPMH_VREG("smps7", "smp%s7", &pmic5_ftsmps525_lv, "vdd-s7"),
RPMH_VREG("smps8", "smp%s8", &pmic5_ftsmps525_lv, "vdd-s8"),
- RPMH_VREG("ldo1", "ldo%s1", &pmic5_nldo, "vdd-l1"),
- RPMH_VREG("ldo2", "ldo%s2", &pmic5_nldo, "vdd-l2"),
- RPMH_VREG("ldo3", "ldo%s3", &pmic5_nldo, "vdd-l3"),
+ RPMH_VREG("ldo1", "ldo%s1", &pmic5_nldo515, "vdd-l1"),
+ RPMH_VREG("ldo2", "ldo%s2", &pmic5_nldo515, "vdd-l2"),
+ RPMH_VREG("ldo3", "ldo%s3", &pmic5_nldo515, "vdd-l3"),
{}
};
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 9fbfce735d56..45788955c4e6 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -3234,12 +3234,12 @@ struct blk_mq_ops dasd_mq_ops = {
.exit_hctx = dasd_exit_hctx,
};
-static int dasd_open(struct block_device *bdev, fmode_t mode)
+static int dasd_open(struct gendisk *disk, blk_mode_t mode)
{
struct dasd_device *base;
int rc;
- base = dasd_device_from_gendisk(bdev->bd_disk);
+ base = dasd_device_from_gendisk(disk);
if (!base)
return -ENODEV;
@@ -3268,14 +3268,12 @@ static int dasd_open(struct block_device *bdev, fmode_t mode)
rc = -ENODEV;
goto out;
}
-
- if ((mode & FMODE_WRITE) &&
+ if ((mode & BLK_OPEN_WRITE) &&
(test_bit(DASD_FLAG_DEVICE_RO, &base->flags) ||
(base->features & DASD_FEATURE_READONLY))) {
rc = -EROFS;
goto out;
}
-
dasd_put_device(base);
return 0;
@@ -3287,7 +3285,7 @@ unlock:
return rc;
}
-static void dasd_release(struct gendisk *disk, fmode_t mode)
+static void dasd_release(struct gendisk *disk)
{
struct dasd_device *base = dasd_device_from_gendisk(disk);
if (base) {
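With ->open() now taking a gendisk and blk_mode_t, and ->release() losing its mode argument, the block_device_operations wiring stays structurally the same. A trimmed sketch of what that implies for this driver (dasd's real ops table has more members):

static const struct block_device_operations dasd_device_operations = {
	.owner   = THIS_MODULE,
	.open    = dasd_open,		/* int (*)(struct gendisk *, blk_mode_t) */
	.release = dasd_release,	/* void (*)(struct gendisk *) */
};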
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index ade1369fe5ed..113c509bf6d0 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -127,6 +127,8 @@ static int prepare_itcw(struct itcw *, unsigned int, unsigned int, int,
struct dasd_device *, struct dasd_device *,
unsigned int, int, unsigned int, unsigned int,
unsigned int, unsigned int);
+static int dasd_eckd_query_pprc_status(struct dasd_device *,
+ struct dasd_pprc_data_sc4 *);
/* initial attempt at a probe function. this can be simplified once
* the other detection code is gone */
@@ -3733,6 +3735,26 @@ static int count_exts(unsigned int from, unsigned int to, int trks_per_ext)
return count;
}
+static int dasd_in_copy_relation(struct dasd_device *device)
+{
+ struct dasd_pprc_data_sc4 *temp;
+ int rc;
+
+ if (!dasd_eckd_pprc_enabled(device))
+ return 0;
+
+ temp = kzalloc(sizeof(*temp), GFP_KERNEL);
+ if (!temp)
+ return -ENOMEM;
+
+ rc = dasd_eckd_query_pprc_status(device, temp);
+ if (!rc)
+ rc = temp->dev_info[0].state;
+
+ kfree(temp);
+ return rc;
+}
+
/*
* Release allocated space for a given range or an entire volume.
*/
@@ -3749,6 +3771,7 @@ dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block,
int cur_to_trk, cur_from_trk;
struct dasd_ccw_req *cqr;
u32 beg_cyl, end_cyl;
+ int copy_relation;
struct ccw1 *ccw;
int trks_per_ext;
size_t ras_size;
@@ -3760,6 +3783,10 @@ dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block,
if (dasd_eckd_ras_sanity_checks(device, first_trk, last_trk))
return ERR_PTR(-EINVAL);
+ copy_relation = dasd_in_copy_relation(device);
+ if (copy_relation < 0)
+ return ERR_PTR(copy_relation);
+
rq = req ? blk_mq_rq_to_pdu(req) : NULL;
features = &private->features;
@@ -3788,9 +3815,11 @@ dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block,
/*
* This bit guarantees initialisation of tracks within an extent that is
* not fully specified, but is only supported with a certain feature
- * subset.
+ * subset and for devices not in a copy relation.
*/
- ras_data->op_flags.guarantee_init = !!(features->feature[56] & 0x01);
+ if (features->feature[56] & 0x01 && !copy_relation)
+ ras_data->op_flags.guarantee_init = 1;
+
ras_data->lss = private->conf.ned->ID;
ras_data->dev_addr = private->conf.ned->unit_addr;
ras_data->nr_exts = nr_exts;
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index 998a961e1704..fe5108a1b332 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -130,7 +130,8 @@ int dasd_scan_partitions(struct dasd_block *block)
struct block_device *bdev;
int rc;
- bdev = blkdev_get_by_dev(disk_devt(block->gdp), FMODE_READ, NULL);
+ bdev = blkdev_get_by_dev(disk_devt(block->gdp), BLK_OPEN_READ, NULL,
+ NULL);
if (IS_ERR(bdev)) {
DBF_DEV_EVENT(DBF_ERR, block->base,
"scan partitions error, blkdev_get returned %ld",
@@ -179,7 +180,7 @@ void dasd_destroy_partitions(struct dasd_block *block)
mutex_unlock(&bdev->bd_disk->open_mutex);
/* Matching blkdev_put to the blkdev_get in dasd_scan_partitions. */
- blkdev_put(bdev, FMODE_READ);
+ blkdev_put(bdev, NULL);
}
int dasd_gendisk_init(void)
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 33f812f0e515..0aa56351da72 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -965,7 +965,8 @@ int dasd_scan_partitions(struct dasd_block *);
void dasd_destroy_partitions(struct dasd_block *);
/* externals in dasd_ioctl.c */
-int dasd_ioctl(struct block_device *, fmode_t, unsigned int, unsigned long);
+int dasd_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd,
+ unsigned long arg);
int dasd_set_read_only(struct block_device *bdev, bool ro);
/* externals in dasd_proc.c */
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 9327dcdd6e5e..513a7e6eee63 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -552,10 +552,10 @@ static int __dasd_ioctl_information(struct dasd_block *block,
memcpy(dasd_info->type, base->discipline->name, 4);
- spin_lock_irqsave(&block->queue_lock, flags);
+ spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
list_for_each(l, &base->ccw_queue)
dasd_info->chanq_len++;
- spin_unlock_irqrestore(&block->queue_lock, flags);
+ spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
return 0;
}
@@ -612,7 +612,7 @@ static int dasd_ioctl_readall_cmb(struct dasd_block *block, unsigned int cmd,
return ret;
}
-int dasd_ioctl(struct block_device *bdev, fmode_t mode,
+int dasd_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long arg)
{
struct dasd_block *block;
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index c09f2e053bf8..200f88f0e451 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -28,8 +28,8 @@
#define DCSSBLK_PARM_LEN 400
#define DCSS_BUS_ID_SIZE 20
-static int dcssblk_open(struct block_device *bdev, fmode_t mode);
-static void dcssblk_release(struct gendisk *disk, fmode_t mode);
+static int dcssblk_open(struct gendisk *disk, blk_mode_t mode);
+static void dcssblk_release(struct gendisk *disk);
static void dcssblk_submit_bio(struct bio *bio);
static long dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
long nr_pages, enum dax_access_mode mode, void **kaddr,
@@ -809,12 +809,11 @@ out_buf:
}
static int
-dcssblk_open(struct block_device *bdev, fmode_t mode)
+dcssblk_open(struct gendisk *disk, blk_mode_t mode)
{
- struct dcssblk_dev_info *dev_info;
+ struct dcssblk_dev_info *dev_info = disk->private_data;
int rc;
- dev_info = bdev->bd_disk->private_data;
if (NULL == dev_info) {
rc = -ENODEV;
goto out;
@@ -826,7 +825,7 @@ out:
}
static void
-dcssblk_release(struct gendisk *disk, fmode_t mode)
+dcssblk_release(struct gendisk *disk)
{
struct dcssblk_dev_info *dev_info = disk->private_data;
struct segment_info *entry;
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 8eb089b99cde..c0d620ffea61 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1111,6 +1111,8 @@ static void io_subchannel_verify(struct subchannel *sch)
cdev = sch_get_cdev(sch);
if (cdev)
dev_fsm_event(cdev, DEV_EVENT_VERIFY);
+ else
+ css_schedule_eval(sch->schid);
}
static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
@@ -1374,6 +1376,7 @@ void ccw_device_set_notoper(struct ccw_device *cdev)
enum io_sch_action {
IO_SCH_UNREG,
IO_SCH_ORPH_UNREG,
+ IO_SCH_UNREG_CDEV,
IO_SCH_ATTACH,
IO_SCH_UNREG_ATTACH,
IO_SCH_ORPH_ATTACH,
@@ -1406,7 +1409,7 @@ static enum io_sch_action sch_get_action(struct subchannel *sch)
}
if ((sch->schib.pmcw.pam & sch->opm) == 0) {
if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK)
- return IO_SCH_UNREG;
+ return IO_SCH_UNREG_CDEV;
return IO_SCH_DISC;
}
if (device_is_disconnected(cdev))
@@ -1468,6 +1471,7 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
case IO_SCH_ORPH_ATTACH:
ccw_device_set_disconnected(cdev);
break;
+ case IO_SCH_UNREG_CDEV:
case IO_SCH_UNREG_ATTACH:
case IO_SCH_UNREG:
if (!cdev)
@@ -1501,6 +1505,7 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
if (rc)
goto out;
break;
+ case IO_SCH_UNREG_CDEV:
case IO_SCH_UNREG_ATTACH:
spin_lock_irqsave(sch->lock, flags);
sch_set_cdev(sch, NULL);
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 5ea6249d8180..641f0dbb65a9 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -95,7 +95,7 @@ static inline int do_sqbs(u64 token, unsigned char state, int queue,
" lgr 1,%[token]\n"
" .insn rsy,0xeb000000008a,%[qs],%[ccq],0(%[state])"
: [ccq] "+&d" (_ccq), [qs] "+&d" (_queuestart)
- : [state] "d" ((unsigned long)state), [token] "d" (token)
+ : [state] "a" ((unsigned long)state), [token] "d" (token)
: "memory", "cc", "1");
*count = _ccq & 0xff;
*start = _queuestart & 0xff;
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index 5a05d1cdfec2..a8def50c149b 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -1293,6 +1293,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
return PTR_ERR(kkey);
rc = pkey_keyblob2pkey(kkey, ktp.keylen, &ktp.protkey);
DEBUG_DBG("%s pkey_keyblob2pkey()=%d\n", __func__, rc);
+ memzero_explicit(kkey, ktp.keylen);
kfree(kkey);
if (rc)
break;
@@ -1426,6 +1427,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
kkey, ktp.keylen, &ktp.protkey);
DEBUG_DBG("%s pkey_keyblob2pkey2()=%d\n", __func__, rc);
kfree(apqns);
+ memzero_explicit(kkey, ktp.keylen);
kfree(kkey);
if (rc)
break;
@@ -1552,6 +1554,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
protkey, &protkeylen);
DEBUG_DBG("%s pkey_keyblob2pkey3()=%d\n", __func__, rc);
kfree(apqns);
+ memzero_explicit(kkey, ktp.keylen);
kfree(kkey);
if (rc) {
kfree(protkey);
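All three ioctl paths gain the same scrub-before-free step. memzero_explicit() is used rather than a plain memset(), which the compiler may legally elide right before kfree(). The pattern in isolation (a sketch; use_clear_key() is a hypothetical consumer):

	rc = use_clear_key(kkey, ktp.keylen);	/* hypothetical consumer */
	memzero_explicit(kkey, ktp.keylen);	/* wipe key material; not elided */
	kfree(kkey);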
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
index 8acb9eba691b..c2096e4bba31 100644
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -771,14 +771,6 @@ static int __init ism_init(void)
static void __exit ism_exit(void)
{
- struct ism_dev *ism;
-
- mutex_lock(&ism_dev_list.mutex);
- list_for_each_entry(ism, &ism_dev_list.list, list) {
- ism_dev_exit(ism);
- }
- mutex_unlock(&ism_dev_list.mutex);
-
pci_unregister_driver(&ism_driver);
debug_unregister(ism_debug_info);
}
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 5e115e8b2ba4..7c6efde75da6 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -1678,6 +1678,7 @@ struct aac_dev
u32 handle_pci_error;
bool init_reset;
u8 soft_reset_support;
+ u8 use_map_queue;
};
#define aac_adapter_interrupt(dev) \
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index deb32c9f4b3e..3f062e4013ab 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -223,8 +223,12 @@ int aac_fib_setup(struct aac_dev * dev)
struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd)
{
struct fib *fibptr;
+ u32 blk_tag;
+ int i;
- fibptr = &dev->fibs[scsi_cmd_to_rq(scmd)->tag];
+ blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
+ i = blk_mq_unique_tag_to_tag(blk_tag);
+ fibptr = &dev->fibs[i];
/*
* Null out fields that depend on being zero at the start of
* each I/O
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 68f4dbcfff49..c4a36c0be527 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -19,6 +19,7 @@
#include <linux/compat.h>
#include <linux/blkdev.h>
+#include <linux/blk-mq-pci.h>
#include <linux/completion.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -504,6 +505,15 @@ common_config:
return 0;
}
+static void aac_map_queues(struct Scsi_Host *shost)
+{
+ struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
+
+ blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
+ aac->pdev, 0);
+ aac->use_map_queue = true;
+}
+
/**
* aac_change_queue_depth - alter queue depths
* @sdev: SCSI device we are considering
@@ -1488,6 +1498,7 @@ static const struct scsi_host_template aac_driver_template = {
.bios_param = aac_biosparm,
.shost_groups = aac_host_groups,
.slave_configure = aac_slave_configure,
+ .map_queues = aac_map_queues,
.change_queue_depth = aac_change_queue_depth,
.sdev_groups = aac_dev_groups,
.eh_abort_handler = aac_eh_abort,
@@ -1775,6 +1786,8 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
shost->max_lun = AAC_MAX_LUN;
pci_set_drvdata(pdev, shost);
+ shost->nr_hw_queues = aac->max_msix;
+ shost->host_tagset = 1;
error = scsi_add_host(shost, &pdev->dev);
if (error)
@@ -1906,6 +1919,7 @@ static void aac_remove_one(struct pci_dev *pdev)
struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
aac_cancel_rescan_worker(aac);
+ aac->use_map_queue = false;
scsi_remove_host(shost);
__aac_shutdown(aac);
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 11ef58204e96..61949f374188 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -493,6 +493,10 @@ static int aac_src_deliver_message(struct fib *fib)
#endif
u16 vector_no;
+ struct scsi_cmnd *scmd;
+ u32 blk_tag;
+ struct Scsi_Host *shost = dev->scsi_host_ptr;
+ struct blk_mq_queue_map *qmap;
atomic_inc(&q->numpending);
@@ -505,8 +509,25 @@ static int aac_src_deliver_message(struct fib *fib)
if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE3)
&& dev->sa_firmware)
vector_no = aac_get_vector(dev);
- else
- vector_no = fib->vector_no;
+ else {
+ if (!fib->vector_no || !fib->callback_data) {
+ if (shost && dev->use_map_queue) {
+ qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
+ vector_no = qmap->mq_map[raw_smp_processor_id()];
+ }
+ /*
+ * We hardcode the vector_no for
+ * reserved commands, as a valid shost
+ * is absent during init.
+ */
+ else
+ vector_no = 0;
+ } else {
+ scmd = (struct scsi_cmnd *)fib->callback_data;
+ blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
+ vector_no = blk_mq_unique_tag_to_hwq(blk_tag);
+ }
+ }
if (native_hba) {
if (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF) {
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index ac648bb8f7e7..cb0a399be1cc 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -877,7 +877,8 @@ static long ch_ioctl(struct file *file,
}
default:
- return scsi_ioctl(ch->device, file->f_mode, cmd, argp);
+ return scsi_ioctl(ch->device, file->f_mode & FMODE_WRITE, cmd,
+ argp);
}
}
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 9a322a3a2150..595dca92e8db 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -889,7 +889,7 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *piocbq)
{
uint32_t evt_req_id = 0;
- uint32_t cmd;
+ u16 cmd;
struct lpfc_dmabuf *dmabuf = NULL;
struct lpfc_bsg_event *evt;
struct event_data *evt_dat = NULL;
@@ -915,7 +915,7 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
ct_req = (struct lpfc_sli_ct_request *)bdeBuf1->virt;
evt_req_id = ct_req->FsType;
- cmd = ct_req->CommandResponse.bits.CmdRsp;
+ cmd = be16_to_cpu(ct_req->CommandResponse.bits.CmdRsp);
spin_lock_irqsave(&phba->ct_ev_lock, flags);
list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
@@ -3186,8 +3186,8 @@ lpfc_bsg_diag_loopback_run(struct bsg_job *job)
ctreq->RevisionId.bits.InId = 0;
ctreq->FsType = SLI_CT_ELX_LOOPBACK;
ctreq->FsSubType = 0;
- ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA;
- ctreq->CommandResponse.bits.Size = size;
+ ctreq->CommandResponse.bits.CmdRsp = cpu_to_be16(ELX_LOOPBACK_DATA);
+ ctreq->CommandResponse.bits.Size = cpu_to_be16(size);
segment_offset = ELX_LOOPBACK_HEADER_SZ;
} else
segment_offset = 0;
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index df5e5b7fdcfe..84aa3571be6d 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -3796,6 +3796,7 @@ struct qla_qpair {
uint64_t retry_term_jiff;
struct qla_tgt_counters tgt_counters;
uint16_t cpuid;
+ bool cpu_mapped;
struct qla_fw_resources fwres ____cacheline_aligned;
struct qla_buf_pool buf_pool;
u32 cmd_cnt;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index ec0423ec6681..1a955c3ff3d6 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -9426,6 +9426,9 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
qpair->rsp->req = qpair->req;
qpair->rsp->qpair = qpair;
+ if (!qpair->cpu_mapped)
+ qla_cpu_update(qpair, raw_smp_processor_id());
+
if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
if (ha->fw_attributes & BIT_4)
qpair->difdix_supported = 1;
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index cce6e425c121..7b42558a8839 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -539,11 +539,14 @@ qla_mapq_init_qp_cpu_map(struct qla_hw_data *ha,
if (!ha->qp_cpu_map)
return;
mask = pci_irq_get_affinity(ha->pdev, msix->vector_base0);
+ if (!mask)
+ return;
qpair->cpuid = cpumask_first(mask);
for_each_cpu(cpu, mask) {
ha->qp_cpu_map[cpu] = qpair;
}
msix->cpuid = qpair->cpuid;
+ qpair->cpu_mapped = true;
}
static inline void
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 71feda2cdb63..245e3a5d81fd 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -3770,6 +3770,9 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
if (rsp->qpair->cpuid != smp_processor_id() || !rsp->qpair->rcv_intr) {
rsp->qpair->rcv_intr = 1;
+
+ if (!rsp->qpair->cpu_mapped)
+ qla_cpu_update(rsp->qpair, raw_smp_processor_id());
}
#define __update_rsp_in(_is_shadow_hba, _rsp, _rsp_in) \
diff --git a/drivers/scsi/scsi_bsg.c b/drivers/scsi/scsi_bsg.c
index 96ee35256a16..a9a9ec086a7e 100644
--- a/drivers/scsi/scsi_bsg.c
+++ b/drivers/scsi/scsi_bsg.c
@@ -10,7 +10,7 @@
#define uptr64(val) ((void __user *)(uintptr_t)(val))
static int scsi_bsg_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,
- fmode_t mode, unsigned int timeout)
+ bool open_for_write, unsigned int timeout)
{
struct scsi_cmnd *scmd;
struct request *rq;
@@ -42,7 +42,7 @@ static int scsi_bsg_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,
if (copy_from_user(scmd->cmnd, uptr64(hdr->request), scmd->cmd_len))
goto out_put_request;
ret = -EPERM;
- if (!scsi_cmd_allowed(scmd->cmnd, mode))
+ if (!scsi_cmd_allowed(scmd->cmnd, open_for_write))
goto out_put_request;
ret = 0;
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index e3b31d32b6a9..6f6c5973c3ea 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -248,7 +248,7 @@ static int scsi_send_start_stop(struct scsi_device *sdev, int data)
* Only a subset of commands are allowed for unprivileged users. Commands used
* to format the media, update the firmware, etc. are not permitted.
*/
-bool scsi_cmd_allowed(unsigned char *cmd, fmode_t mode)
+bool scsi_cmd_allowed(unsigned char *cmd, bool open_for_write)
{
/* root can do any command. */
if (capable(CAP_SYS_RAWIO))
@@ -338,7 +338,7 @@ bool scsi_cmd_allowed(unsigned char *cmd, fmode_t mode)
case GPCMD_SET_READ_AHEAD:
/* ZBC */
case ZBC_OUT:
- return (mode & FMODE_WRITE);
+ return open_for_write;
default:
return false;
}
@@ -346,7 +346,7 @@ bool scsi_cmd_allowed(unsigned char *cmd, fmode_t mode)
EXPORT_SYMBOL(scsi_cmd_allowed);
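Callers now pass an explicit boolean instead of the whole fmode_t. Character-device paths derive it from the file mode and block-device paths from blk_mode_t; two hedged sketches of the implied call sites (sdev, argp and mode are stand-ins; the char-device form appears verbatim in the ch.c hunk above):

	/* char-device ioctl path */
	ret = scsi_ioctl(sdev, file->f_mode & FMODE_WRITE, cmd, argp);

	/* block-device ioctl path (blk_mode_t mode) */
	ret = scsi_ioctl(sdev, mode & BLK_OPEN_WRITE, cmd, argp);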
static int scsi_fill_sghdr_rq(struct scsi_device *sdev, struct request *rq,
- struct sg_io_hdr *hdr, fmode_t mode)
+ struct sg_io_hdr *hdr, bool open_for_write)
{
struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
@@ -354,7 +354,7 @@ static int scsi_fill_sghdr_rq(struct scsi_device *sdev, struct request *rq,
return -EMSGSIZE;
if (copy_from_user(scmd->cmnd, hdr->cmdp, hdr->cmd_len))
return -EFAULT;
- if (!scsi_cmd_allowed(scmd->cmnd, mode))
+ if (!scsi_cmd_allowed(scmd->cmnd, open_for_write))
return -EPERM;
scmd->cmd_len = hdr->cmd_len;
@@ -407,7 +407,8 @@ static int scsi_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
return ret;
}
-static int sg_io(struct scsi_device *sdev, struct sg_io_hdr *hdr, fmode_t mode)
+static int sg_io(struct scsi_device *sdev, struct sg_io_hdr *hdr,
+ bool open_for_write)
{
unsigned long start_time;
ssize_t ret = 0;
@@ -448,7 +449,7 @@ static int sg_io(struct scsi_device *sdev, struct sg_io_hdr *hdr, fmode_t mode)
goto out_put_request;
}
- ret = scsi_fill_sghdr_rq(sdev, rq, hdr, mode);
+ ret = scsi_fill_sghdr_rq(sdev, rq, hdr, open_for_write);
if (ret < 0)
goto out_put_request;
@@ -477,8 +478,7 @@ out_put_request:
/**
* sg_scsi_ioctl -- handle deprecated SCSI_IOCTL_SEND_COMMAND ioctl
* @q: request queue to send scsi commands down
- * @mode: mode used to open the file through which the ioctl has been
- * submitted
+ * @open_for_write: is the file / block device opened for writing?
* @sic: userspace structure describing the command to perform
*
* Send down the scsi command described by @sic to the device below
@@ -501,7 +501,7 @@ out_put_request:
* Positive numbers returned are the compacted SCSI error codes (4
* bytes in one int) where the lowest byte is the SCSI status.
*/
-static int sg_scsi_ioctl(struct request_queue *q, fmode_t mode,
+static int sg_scsi_ioctl(struct request_queue *q, bool open_for_write,
struct scsi_ioctl_command __user *sic)
{
struct request *rq;
@@ -554,7 +554,7 @@ static int sg_scsi_ioctl(struct request_queue *q, fmode_t mode,
goto error;
err = -EPERM;
- if (!scsi_cmd_allowed(scmd->cmnd, mode))
+ if (!scsi_cmd_allowed(scmd->cmnd, open_for_write))
goto error;
/* default. possible overridden later */
@@ -776,7 +776,7 @@ static int scsi_put_cdrom_generic_arg(const struct cdrom_generic_command *cgc,
return 0;
}
-static int scsi_cdrom_send_packet(struct scsi_device *sdev, fmode_t mode,
+static int scsi_cdrom_send_packet(struct scsi_device *sdev, bool open_for_write,
void __user *arg)
{
struct cdrom_generic_command cgc;
@@ -817,7 +817,7 @@ static int scsi_cdrom_send_packet(struct scsi_device *sdev, fmode_t mode,
hdr.cmdp = ((struct cdrom_generic_command __user *) arg)->cmd;
hdr.cmd_len = sizeof(cgc.cmd);
- err = sg_io(sdev, &hdr, mode);
+ err = sg_io(sdev, &hdr, open_for_write);
if (err == -EFAULT)
return -EFAULT;
@@ -832,7 +832,7 @@ static int scsi_cdrom_send_packet(struct scsi_device *sdev, fmode_t mode,
return err;
}
-static int scsi_ioctl_sg_io(struct scsi_device *sdev, fmode_t mode,
+static int scsi_ioctl_sg_io(struct scsi_device *sdev, bool open_for_write,
void __user *argp)
{
struct sg_io_hdr hdr;
@@ -841,7 +841,7 @@ static int scsi_ioctl_sg_io(struct scsi_device *sdev, fmode_t mode,
error = get_sg_io_hdr(&hdr, argp);
if (error)
return error;
- error = sg_io(sdev, &hdr, mode);
+ error = sg_io(sdev, &hdr, open_for_write);
if (error == -EFAULT)
return error;
if (put_sg_io_hdr(&hdr, argp))
@@ -852,7 +852,7 @@ static int scsi_ioctl_sg_io(struct scsi_device *sdev, fmode_t mode,
/**
* scsi_ioctl - Dispatch ioctl to scsi device
* @sdev: scsi device receiving ioctl
- * @mode: mode the block/char device is opened with
+ * @open_for_write: is the file / block device opened for writing?
* @cmd: which ioctl is it
* @arg: data associated with ioctl
*
@@ -860,7 +860,7 @@ static int scsi_ioctl_sg_io(struct scsi_device *sdev, fmode_t mode,
* does not take a major/minor number as the dev field. Rather, it takes
* a pointer to a &struct scsi_device.
*/
-int scsi_ioctl(struct scsi_device *sdev, fmode_t mode, int cmd,
+int scsi_ioctl(struct scsi_device *sdev, bool open_for_write, int cmd,
void __user *arg)
{
struct request_queue *q = sdev->request_queue;
@@ -896,11 +896,11 @@ int scsi_ioctl(struct scsi_device *sdev, fmode_t mode, int cmd,
case SG_EMULATED_HOST:
return sg_emulated_host(q, arg);
case SG_IO:
- return scsi_ioctl_sg_io(sdev, mode, arg);
+ return scsi_ioctl_sg_io(sdev, open_for_write, arg);
case SCSI_IOCTL_SEND_COMMAND:
- return sg_scsi_ioctl(q, mode, arg);
+ return sg_scsi_ioctl(q, open_for_write, arg);
case CDROM_SEND_PACKET:
- return scsi_cdrom_send_packet(sdev, mode, arg);
+ return scsi_cdrom_send_packet(sdev, open_for_write, arg);
case CDROMCLOSETRAY:
return scsi_send_start_stop(sdev, 3);
case CDROMEJECT:
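
A minimal sketch (not part of this patch; the lookup helpers are hypothetical) of what the fmode_t to bool conversion above asks of callers: each entry point reduces its own open-mode representation to a single opened-for-write bit before calling scsi_ioctl().

/* Sketch only, assuming the post-conversion scsi_ioctl() prototype;
 * example_lookup_sdev*() are hypothetical helpers. */
static int example_blk_ioctl(struct block_device *bdev, blk_mode_t mode,
			     unsigned int cmd, unsigned long arg)
{
	struct scsi_device *sdev = example_lookup_sdev(bdev);

	/* block-layer opens carry BLK_OPEN_WRITE ... */
	return scsi_ioctl(sdev, mode & BLK_OPEN_WRITE, cmd,
			  (void __user *)arg);
}

static long example_chr_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg)
{
	struct scsi_device *sdev = example_lookup_sdev_file(filp);

	/* ... while character devices test FMODE_WRITE on the file */
	return scsi_ioctl(sdev, filp->f_mode & FMODE_WRITE, cmd,
			  (void __user *)arg);
}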
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index b7c569a42aa4..0226c9279cef 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1463,6 +1463,8 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
struct Scsi_Host *host = cmd->device->host;
int rtn = 0;
+ atomic_inc(&cmd->device->iorequest_cnt);
+
/* check if the device is still usable */
if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
/* in SDEV_DEL we error all commands. DID_NO_CONNECT
@@ -1483,6 +1485,7 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
*/
SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
"queuecommand : device blocked\n"));
+ atomic_dec(&cmd->device->iorequest_cnt);
return SCSI_MLQUEUE_DEVICE_BUSY;
}
@@ -1515,6 +1518,7 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
trace_scsi_dispatch_cmd_start(cmd);
rtn = host->hostt->queuecommand(host, cmd);
if (rtn) {
+ atomic_dec(&cmd->device->iorequest_cnt);
trace_scsi_dispatch_cmd_error(cmd, rtn);
if (rtn != SCSI_MLQUEUE_DEVICE_BUSY &&
rtn != SCSI_MLQUEUE_TARGET_BUSY)
@@ -1761,7 +1765,6 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
goto out_dec_host_busy;
}
- atomic_inc(&cmd->device->iorequest_cnt);
return BLK_STS_OK;
out_dec_host_busy:
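
The two hunks above move the iorequest_cnt accounting from scsi_queue_rq() into scsi_dispatch_cmd(); a hedged sketch of the resulting invariant (the blocked-device condition is hypothetical):

/* Sketch only: bump the counter on entry and undo it on every path
 * that never hands the command to the LLD, so iorequest_cnt counts
 * only dispatch attempts that actually reached ->queuecommand(). */
static int example_dispatch(struct scsi_cmnd *cmd, bool device_is_blocked)
{
	struct Scsi_Host *host = cmd->device->host;
	int rtn;

	atomic_inc(&cmd->device->iorequest_cnt);

	if (device_is_blocked) {
		/* never reached the LLD: undo the increment */
		atomic_dec(&cmd->device->iorequest_cnt);
		return SCSI_MLQUEUE_DEVICE_BUSY;
	}

	rtn = host->hostt->queuecommand(host, cmd);
	if (rtn)	/* LLD refused the command */
		atomic_dec(&cmd->device->iorequest_cnt);
	return rtn;
}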
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 1624d528aa1f..ab216976dbdc 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1280,11 +1280,10 @@ static void sd_uninit_command(struct scsi_cmnd *SCpnt)
mempool_free(rq->special_vec.bv_page, sd_page_pool);
}
-static bool sd_need_revalidate(struct block_device *bdev,
- struct scsi_disk *sdkp)
+static bool sd_need_revalidate(struct gendisk *disk, struct scsi_disk *sdkp)
{
if (sdkp->device->removable || sdkp->write_prot) {
- if (bdev_check_media_change(bdev))
+ if (disk_check_media_change(disk))
return true;
}
@@ -1293,13 +1292,13 @@ static bool sd_need_revalidate(struct block_device *bdev,
* nothing to do with partitions, BLKRRPART is used to force a full
* revalidate after things like a format for historical reasons.
*/
- return test_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
+ return test_bit(GD_NEED_PART_SCAN, &disk->state);
}
/**
* sd_open - open a scsi disk device
- * @bdev: Block device of the scsi disk to open
- * @mode: FMODE_* mask
+ * @disk: disk to open
+ * @mode: open mode
*
* Returns 0 if successful. Returns a negated errno value in case
* of error.
@@ -1309,11 +1308,11 @@ static bool sd_need_revalidate(struct block_device *bdev,
* In the latter case @inode and @filp carry an abridged amount
* of information as noted above.
*
- * Locking: called with bdev->bd_disk->open_mutex held.
+ * Locking: called with disk->open_mutex held.
**/
-static int sd_open(struct block_device *bdev, fmode_t mode)
+static int sd_open(struct gendisk *disk, blk_mode_t mode)
{
- struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
+ struct scsi_disk *sdkp = scsi_disk(disk);
struct scsi_device *sdev = sdkp->device;
int retval;
@@ -1330,14 +1329,15 @@ static int sd_open(struct block_device *bdev, fmode_t mode)
if (!scsi_block_when_processing_errors(sdev))
goto error_out;
- if (sd_need_revalidate(bdev, sdkp))
- sd_revalidate_disk(bdev->bd_disk);
+ if (sd_need_revalidate(disk, sdkp))
+ sd_revalidate_disk(disk);
/*
* If the drive is empty, just let the open fail.
*/
retval = -ENOMEDIUM;
- if (sdev->removable && !sdkp->media_present && !(mode & FMODE_NDELAY))
+ if (sdev->removable && !sdkp->media_present &&
+ !(mode & BLK_OPEN_NDELAY))
goto error_out;
/*
@@ -1345,7 +1345,7 @@ static int sd_open(struct block_device *bdev, fmode_t mode)
* if the user expects to be able to write to the thing.
*/
retval = -EROFS;
- if (sdkp->write_prot && (mode & FMODE_WRITE))
+ if (sdkp->write_prot && (mode & BLK_OPEN_WRITE))
goto error_out;
/*
@@ -1374,16 +1374,15 @@ error_out:
* sd_release - invoked when the (last) close(2) is called on this
* scsi disk.
* @disk: disk to release
- * @mode: FMODE_* mask
*
* Returns 0.
*
* Note: may block (uninterruptible) if error recovery is underway
* on this disk.
*
- * Locking: called with bdev->bd_disk->open_mutex held.
+ * Locking: called with disk->open_mutex held.
**/
-static void sd_release(struct gendisk *disk, fmode_t mode)
+static void sd_release(struct gendisk *disk)
{
struct scsi_disk *sdkp = scsi_disk(disk);
struct scsi_device *sdev = sdkp->device;
@@ -1426,7 +1425,7 @@ static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
/**
* sd_ioctl - process an ioctl
* @bdev: target block device
- * @mode: FMODE_* mask
+ * @mode: open mode
* @cmd: ioctl command number
* @arg: this is third argument given to ioctl(2) system call.
* Often contains a pointer.
@@ -1437,7 +1436,7 @@ static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
* Note: most ioctls are forwarded to the block subsystem or further
* down in the scsi subsystem.
**/
-static int sd_ioctl(struct block_device *bdev, fmode_t mode,
+static int sd_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long arg)
{
struct gendisk *disk = bdev->bd_disk;
@@ -1459,13 +1458,13 @@ static int sd_ioctl(struct block_device *bdev, fmode_t mode,
* access to the device is prohibited.
*/
error = scsi_ioctl_block_when_processing_errors(sdp, cmd,
- (mode & FMODE_NDELAY) != 0);
+ (mode & BLK_OPEN_NDELAY));
if (error)
return error;
if (is_sed_ioctl(cmd))
return sed_ioctl(sdkp->opal_dev, cmd, p);
- return scsi_ioctl(sdp, mode, cmd, p);
+ return scsi_ioctl(sdp, mode & BLK_OPEN_WRITE, cmd, p);
}
static void set_media_not_present(struct scsi_disk *sdkp)
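
The sd conversion above targets the gendisk-based block_device_operations signatures; a minimal sketch of the assumed shape (helper names hypothetical):

static int example_open(struct gendisk *disk, blk_mode_t mode)
{
	/* media-change checks now key off the gendisk, not a bdev */
	if (disk_check_media_change(disk))
		example_revalidate(disk);		/* hypothetical */

	/* open flags are blk_mode_t bits (BLK_OPEN_*), not FMODE_* */
	if ((mode & BLK_OPEN_WRITE) && example_write_protected(disk))
		return -EROFS;				/* hypothetical check */
	return 0;
}

static void example_release(struct gendisk *disk)
{
	/* release no longer receives the open mode */
}

static const struct block_device_operations example_fops = {
	.owner   = THIS_MODULE,
	.open    = example_open,
	.release = example_release,
};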
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 037f8c98a6d3..dcb73787c29d 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -237,7 +237,7 @@ static int sg_allow_access(struct file *filp, unsigned char *cmd)
if (sfp->parentdp->device->type == TYPE_SCANNER)
return 0;
- if (!scsi_cmd_allowed(cmd, filp->f_mode))
+ if (!scsi_cmd_allowed(cmd, filp->f_mode & FMODE_WRITE))
return -EPERM;
return 0;
}
@@ -1103,7 +1103,8 @@ sg_ioctl_common(struct file *filp, Sg_device *sdp, Sg_fd *sfp,
case SCSI_IOCTL_SEND_COMMAND:
if (atomic_read(&sdp->detaching))
return -ENODEV;
- return scsi_ioctl(sdp->device, filp->f_mode, cmd_in, p);
+ return scsi_ioctl(sdp->device, filp->f_mode & FMODE_WRITE,
+ cmd_in, p);
case SG_SET_DEBUG:
result = get_user(val, ip);
if (result)
@@ -1159,7 +1160,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
ret = sg_ioctl_common(filp, sdp, sfp, cmd_in, p);
if (ret != -ENOIOCTLCMD)
return ret;
- return scsi_ioctl(sdp->device, filp->f_mode, cmd_in, p);
+ return scsi_ioctl(sdp->device, filp->f_mode & FMODE_WRITE, cmd_in, p);
}
static __poll_t
@@ -1496,6 +1497,10 @@ sg_add_device(struct device *cl_dev)
int error;
unsigned long iflags;
+ error = blk_get_queue(scsidp->request_queue);
+ if (error)
+ return error;
+
error = -ENOMEM;
cdev = cdev_alloc();
if (!cdev) {
@@ -1553,6 +1558,7 @@ cdev_add_err:
out:
if (cdev)
cdev_del(cdev);
+ blk_put_queue(scsidp->request_queue);
return error;
}
@@ -1560,6 +1566,7 @@ static void
sg_device_destroy(struct kref *kref)
{
struct sg_device *sdp = container_of(kref, struct sg_device, d_ref);
+ struct request_queue *q = sdp->device->request_queue;
unsigned long flags;
/* CAUTION! Note that the device can still be found via idr_find()
@@ -1567,6 +1574,9 @@ sg_device_destroy(struct kref *kref)
* any other cleanup.
*/
+ blk_trace_remove(q);
+ blk_put_queue(q);
+
write_lock_irqsave(&sg_index_lock, flags);
idr_remove(&sg_index_idr, sdp->index);
write_unlock_irqrestore(&sg_index_lock, flags);
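
A hedged sketch of the lifetime rule the sg changes above implement: the char device pins the request_queue for as long as the sg_device can be looked up, so the blk_trace_remove() in the final kref release always sees a live queue.

static int example_add(struct scsi_device *sdp)
{
	int error;

	/* pin the queue before exposing the char device */
	error = blk_get_queue(sdp->request_queue);
	if (error)
		return error;	/* queue is already dying */

	/* ... cdev_alloc()/cdev_add(), sysfs registration ... */
	return 0;
}

static void example_destroy(struct scsi_device *sdp)
{
	struct request_queue *q = sdp->request_queue;

	/* tear down blktrace state, then drop the pin taken at add time */
	blk_trace_remove(q);
	blk_put_queue(q);
}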
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 12869e6d4ebd..ce886c8c9dbe 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -484,9 +484,9 @@ static void sr_revalidate_disk(struct scsi_cd *cd)
get_sectorsize(cd);
}
-static int sr_block_open(struct block_device *bdev, fmode_t mode)
+static int sr_block_open(struct gendisk *disk, blk_mode_t mode)
{
- struct scsi_cd *cd = scsi_cd(bdev->bd_disk);
+ struct scsi_cd *cd = scsi_cd(disk);
struct scsi_device *sdev = cd->device;
int ret;
@@ -494,11 +494,11 @@ static int sr_block_open(struct block_device *bdev, fmode_t mode)
return -ENXIO;
scsi_autopm_get_device(sdev);
- if (bdev_check_media_change(bdev))
+ if (disk_check_media_change(disk))
sr_revalidate_disk(cd);
mutex_lock(&cd->lock);
- ret = cdrom_open(&cd->cdi, bdev, mode);
+ ret = cdrom_open(&cd->cdi, mode);
mutex_unlock(&cd->lock);
scsi_autopm_put_device(sdev);
@@ -507,19 +507,19 @@ static int sr_block_open(struct block_device *bdev, fmode_t mode)
return ret;
}
-static void sr_block_release(struct gendisk *disk, fmode_t mode)
+static void sr_block_release(struct gendisk *disk)
{
struct scsi_cd *cd = scsi_cd(disk);
mutex_lock(&cd->lock);
- cdrom_release(&cd->cdi, mode);
+ cdrom_release(&cd->cdi);
mutex_unlock(&cd->lock);
scsi_device_put(cd->device);
}
-static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
- unsigned long arg)
+static int sr_block_ioctl(struct block_device *bdev, blk_mode_t mode,
+ unsigned cmd, unsigned long arg)
{
struct scsi_cd *cd = scsi_cd(bdev->bd_disk);
struct scsi_device *sdev = cd->device;
@@ -532,18 +532,18 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
mutex_lock(&cd->lock);
ret = scsi_ioctl_block_when_processing_errors(sdev, cmd,
- (mode & FMODE_NDELAY) != 0);
+ (mode & BLK_OPEN_NDELAY));
if (ret)
goto out;
scsi_autopm_get_device(sdev);
if (cmd != CDROMCLOSETRAY && cmd != CDROMEJECT) {
- ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg);
+ ret = cdrom_ioctl(&cd->cdi, bdev, cmd, arg);
if (ret != -ENOSYS)
goto put;
}
- ret = scsi_ioctl(sdev, mode, cmd, argp);
+ ret = scsi_ioctl(sdev, mode & BLK_OPEN_WRITE, cmd, argp);
put:
scsi_autopm_put_device(sdev);
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index b90a440e135d..14d7981ddcdd 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -3832,7 +3832,7 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
break;
}
- retval = scsi_ioctl(STp->device, file->f_mode, cmd_in, p);
+ retval = scsi_ioctl(STp->device, file->f_mode & FMODE_WRITE, cmd_in, p);
if (!retval && cmd_in == SCSI_IOCTL_STOP_UNIT) {
/* unload */
STp->rew_at_close = 0;
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 5b230e149c3d..8ffb75be99bc 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -109,7 +109,9 @@ enum {
TASK_ATTRIBUTE_HEADOFQUEUE = 0x1,
TASK_ATTRIBUTE_ORDERED = 0x2,
TASK_ATTRIBUTE_ACA = 0x4,
+};
+enum {
SS_STS_NORMAL = 0x80000000,
SS_STS_DONE = 0x40000000,
SS_STS_HANDSHAKE = 0x20000000,
@@ -121,7 +123,9 @@ enum {
SS_I2H_REQUEST_RESET = 0x2000,
SS_MU_OPERATIONAL = 0x80000000,
+};
+enum {
STEX_CDB_LENGTH = 16,
STATUS_VAR_LEN = 128,
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index d9ce379c4d2e..659196a2f63a 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1567,6 +1567,8 @@ static int storvsc_device_configure(struct scsi_device *sdevice)
{
blk_queue_rq_timeout(sdevice->request_queue, (storvsc_timeout * HZ));
+ /* storvsc devices don't support MAINTENANCE_IN SCSI cmd */
+ sdevice->no_report_opcodes = 1;
sdevice->no_write_same = 1;
/*
@@ -1780,7 +1782,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
length = scsi_bufflen(scmnd);
payload = (struct vmbus_packet_mpb_array *)&cmd_request->mpb;
- payload_sz = sizeof(cmd_request->mpb);
+ payload_sz = 0;
if (scsi_sg_count(scmnd)) {
unsigned long offset_in_hvpg = offset_in_hvpage(sgl->offset);
@@ -1789,10 +1791,10 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
unsigned long hvpfn, hvpfns_to_add;
int j, i = 0, sg_count;
- if (hvpg_count > MAX_PAGE_BUFFER_COUNT) {
+ payload_sz = (hvpg_count * sizeof(u64) +
+ sizeof(struct vmbus_packet_mpb_array));
- payload_sz = (hvpg_count * sizeof(u64) +
- sizeof(struct vmbus_packet_mpb_array));
+ if (hvpg_count > MAX_PAGE_BUFFER_COUNT) {
payload = kzalloc(payload_sz, GFP_ATOMIC);
if (!payload)
return SCSI_MLQUEUE_DEVICE_BUSY;
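
A hedged worked example of the sizing rule the storvsc hunk above establishes: payload_sz is the descriptor header plus one 64-bit PFN slot per Hyper-V page, and it is now computed for every SG transfer rather than only in the separate-allocation path.

/* Sketch only: e.g. a hypothetical 8-page transfer is sized as
 * header + 8 PFN slots, whether or not it fits the preallocated
 * cmd_request->mpb buffer. */
static u32 example_payload_size(u32 hvpg_count)
{
	return sizeof(struct vmbus_packet_mpb_array) +
	       hvpg_count * sizeof(u64);
}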
diff --git a/drivers/soc/fsl/qe/Kconfig b/drivers/soc/fsl/qe/Kconfig
index 7268c2fbcbc1..e0d096607fef 100644
--- a/drivers/soc/fsl/qe/Kconfig
+++ b/drivers/soc/fsl/qe/Kconfig
@@ -36,7 +36,7 @@ config UCC
config CPM_TSA
tristate "CPM TSA support"
depends on OF && HAS_IOMEM
- depends on CPM1 || COMPILE_TEST
+ depends on CPM1 || (CPM && COMPILE_TEST)
help
Freescale CPM Time Slot Assigner (TSA)
controller.
@@ -47,7 +47,7 @@ config CPM_TSA
config CPM_QMC
tristate "CPM QMC support"
depends on OF && HAS_IOMEM
- depends on CPM1 || (FSL_SOC && COMPILE_TEST)
+ depends on CPM1 || (FSL_SOC && CPM && COMPILE_TEST)
depends on CPM_TSA
help
Freescale CPM QUICC Multichannel Controller
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 0f43a88b4894..89b775512bef 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -32,4 +32,5 @@ obj-$(CONFIG_QCOM_RPMHPD) += rpmhpd.o
obj-$(CONFIG_QCOM_RPMPD) += rpmpd.o
obj-$(CONFIG_QCOM_KRYO_L2_ACCESSORS) += kryo-l2-accessors.o
obj-$(CONFIG_QCOM_ICC_BWMON) += icc-bwmon.o
-obj-$(CONFIG_QCOM_INLINE_CRYPTO_ENGINE) += ice.o
+qcom_ice-objs += ice.o
+obj-$(CONFIG_QCOM_INLINE_CRYPTO_ENGINE) += qcom_ice.o
diff --git a/drivers/soc/qcom/icc-bwmon.c b/drivers/soc/qcom/icc-bwmon.c
index fd58c5b69897..f65bfeca7ed6 100644
--- a/drivers/soc/qcom/icc-bwmon.c
+++ b/drivers/soc/qcom/icc-bwmon.c
@@ -773,12 +773,12 @@ static int bwmon_probe(struct platform_device *pdev)
bwmon->max_bw_kbps = UINT_MAX;
opp = dev_pm_opp_find_bw_floor(dev, &bwmon->max_bw_kbps, 0);
if (IS_ERR(opp))
- return dev_err_probe(dev, ret, "failed to find max peak bandwidth\n");
+ return dev_err_probe(dev, PTR_ERR(opp), "failed to find max peak bandwidth\n");
bwmon->min_bw_kbps = 0;
opp = dev_pm_opp_find_bw_ceil(dev, &bwmon->min_bw_kbps, 0);
if (IS_ERR(opp))
- return dev_err_probe(dev, ret, "failed to find min peak bandwidth\n");
+ return dev_err_probe(dev, PTR_ERR(opp), "failed to find min peak bandwidth\n");
bwmon->dev = dev;
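
The icc-bwmon fix above corrects a stale-variable bug; a minimal sketch of the pattern (the bandwidth value is hypothetical):

static int example_probe_step(struct device *dev)
{
	unsigned int bw_kbps = UINT_MAX;
	struct dev_pm_opp *opp;

	opp = dev_pm_opp_find_bw_floor(dev, &bw_kbps, 0);
	if (IS_ERR(opp))
		/* extract the error from the ERR_PTR itself, never from
		 * an unrelated (possibly zero) status variable */
		return dev_err_probe(dev, PTR_ERR(opp),
				     "failed to find max peak bandwidth\n");

	dev_pm_opp_put(opp);
	return 0;
}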
diff --git a/drivers/soc/qcom/ramp_controller.c b/drivers/soc/qcom/ramp_controller.c
index dc74d2a19de2..5e3ba0be0903 100644
--- a/drivers/soc/qcom/ramp_controller.c
+++ b/drivers/soc/qcom/ramp_controller.c
@@ -296,7 +296,7 @@ static int qcom_ramp_controller_probe(struct platform_device *pdev)
return -ENOMEM;
qrc->desc = device_get_match_data(&pdev->dev);
- if (!qrc)
+ if (!qrc->desc)
return -EINVAL;
qrc->regmap = devm_regmap_init_mmio(&pdev->dev, base, &qrc_regmap_config);
diff --git a/drivers/soc/qcom/rmtfs_mem.c b/drivers/soc/qcom/rmtfs_mem.c
index ce48a9f3b4c8..f83811f51175 100644
--- a/drivers/soc/qcom/rmtfs_mem.c
+++ b/drivers/soc/qcom/rmtfs_mem.c
@@ -233,6 +233,7 @@ static int qcom_rmtfs_mem_probe(struct platform_device *pdev)
num_vmids = 0;
} else if (num_vmids < 0) {
dev_err(&pdev->dev, "failed to count qcom,vmid elements: %d\n", num_vmids);
+ ret = num_vmids;
goto remove_cdev;
} else if (num_vmids > NUM_MAX_VMIDS) {
dev_warn(&pdev->dev,
diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
index f93544f6d796..0dd4363ebac8 100644
--- a/drivers/soc/qcom/rpmh-rsc.c
+++ b/drivers/soc/qcom/rpmh-rsc.c
@@ -1073,7 +1073,7 @@ static int rpmh_rsc_probe(struct platform_device *pdev)
drv->ver.minor = rsc_id & (MINOR_VER_MASK << MINOR_VER_SHIFT);
drv->ver.minor >>= MINOR_VER_SHIFT;
- if (drv->ver.major == 3 && drv->ver.minor >= 0)
+ if (drv->ver.major == 3)
drv->regs = rpmh_rsc_reg_offset_ver_3_0;
else
drv->regs = rpmh_rsc_reg_offset_ver_2_7;
diff --git a/drivers/soc/qcom/rpmhpd.c b/drivers/soc/qcom/rpmhpd.c
index f20e2a49a669..63c35a32065b 100644
--- a/drivers/soc/qcom/rpmhpd.c
+++ b/drivers/soc/qcom/rpmhpd.c
@@ -342,6 +342,21 @@ static const struct rpmhpd_desc sm8150_desc = {
.num_pds = ARRAY_SIZE(sm8150_rpmhpds),
};
+static struct rpmhpd *sa8155p_rpmhpds[] = {
+ [SA8155P_CX] = &cx_w_mx_parent,
+ [SA8155P_CX_AO] = &cx_ao_w_mx_parent,
+ [SA8155P_EBI] = &ebi,
+ [SA8155P_GFX] = &gfx,
+ [SA8155P_MSS] = &mss,
+ [SA8155P_MX] = &mx,
+ [SA8155P_MX_AO] = &mx_ao,
+};
+
+static const struct rpmhpd_desc sa8155p_desc = {
+ .rpmhpds = sa8155p_rpmhpds,
+ .num_pds = ARRAY_SIZE(sa8155p_rpmhpds),
+};
+
/* SM8250 RPMH powerdomains */
static struct rpmhpd *sm8250_rpmhpds[] = {
[SM8250_CX] = &cx_w_mx_parent,
@@ -519,6 +534,7 @@ static const struct rpmhpd_desc sc8280xp_desc = {
static const struct of_device_id rpmhpd_match_table[] = {
{ .compatible = "qcom,qdu1000-rpmhpd", .data = &qdu1000_desc },
+ { .compatible = "qcom,sa8155p-rpmhpd", .data = &sa8155p_desc },
{ .compatible = "qcom,sa8540p-rpmhpd", .data = &sa8540p_desc },
{ .compatible = "qcom,sa8775p-rpmhpd", .data = &sa8775p_desc },
{ .compatible = "qcom,sc7180-rpmhpd", .data = &sc7180_desc },
diff --git a/drivers/soundwire/dmi-quirks.c b/drivers/soundwire/dmi-quirks.c
index 58ea013fa918..2a1096dab63d 100644
--- a/drivers/soundwire/dmi-quirks.c
+++ b/drivers/soundwire/dmi-quirks.c
@@ -100,6 +100,13 @@ static const struct dmi_system_id adr_remap_quirk_table[] = {
.driver_data = (void *)intel_tgl_bios,
},
{
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_BOARD_NAME, "8709"),
+ },
+ .driver_data = (void *)intel_tgl_bios,
+ },
+ {
/* quirk used for NUC15 'Bishop County' LAPBC510 and LAPBC710 SKUs */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Intel(R) Client Systems"),
diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c
index c296e0bf897b..280455f047a3 100644
--- a/drivers/soundwire/qcom.c
+++ b/drivers/soundwire/qcom.c
@@ -1099,8 +1099,10 @@ static int qcom_swrm_startup(struct snd_pcm_substream *substream,
}
sruntime = sdw_alloc_stream(dai->name);
- if (!sruntime)
- return -ENOMEM;
+ if (!sruntime) {
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
ctrl->sruntime[dai->id] = sruntime;
@@ -1110,12 +1112,19 @@ static int qcom_swrm_startup(struct snd_pcm_substream *substream,
if (ret < 0 && ret != -ENOTSUPP) {
dev_err(dai->dev, "Failed to set sdw stream on %s\n",
codec_dai->name);
- sdw_release_stream(sruntime);
- return ret;
+ goto err_set_stream;
}
}
return 0;
+
+err_set_stream:
+ sdw_release_stream(sruntime);
+err_alloc:
+ pm_runtime_mark_last_busy(ctrl->dev);
+ pm_runtime_put_autosuspend(ctrl->dev);
+
+ return ret;
}
static void qcom_swrm_shutdown(struct snd_pcm_substream *substream,
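
The qcom_swrm_startup() rework above adds the missing pm_runtime unwind; a hedged sketch of the pattern (allocation helper hypothetical):

/* Sketch only: a startup path that took a pm_runtime reference must
 * drop it on every error exit, in reverse order of acquisition. */
static int example_startup(struct device *dev)
{
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	ret = example_alloc(dev);		/* hypothetical */
	if (ret)
		goto err_put;
	return 0;

err_put:
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return ret;
}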
diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
index c2191c07442b..379228f22186 100644
--- a/drivers/soundwire/stream.c
+++ b/drivers/soundwire/stream.c
@@ -2021,8 +2021,10 @@ int sdw_stream_add_slave(struct sdw_slave *slave,
skip_alloc_master_rt:
s_rt = sdw_slave_rt_find(slave, stream);
- if (s_rt)
+ if (s_rt) {
+ alloc_slave_rt = false;
goto skip_alloc_slave_rt;
+ }
s_rt = sdw_slave_rt_alloc(slave, m_rt);
if (!s_rt) {
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index 6ddb2dfc0f00..32449bef4415 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -1756,8 +1756,11 @@ static int cqspi_probe(struct platform_device *pdev)
cqspi->slow_sram = true;
if (of_device_is_compatible(pdev->dev.of_node,
- "xlnx,versal-ospi-1.0"))
- dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ "xlnx,versal-ospi-1.0")) {
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret)
+ goto probe_reset_failed;
+ }
}
ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0,
diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
index ac85d5562212..26e663369319 100644
--- a/drivers/spi/spi-cadence.c
+++ b/drivers/spi/spi-cadence.c
@@ -12,6 +12,7 @@
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
@@ -301,49 +302,43 @@ static int cdns_spi_setup_transfer(struct spi_device *spi,
}
/**
- * cdns_spi_fill_tx_fifo - Fills the TX FIFO with as many bytes as possible
+ * cdns_spi_process_fifo - Fill the TX FIFO and drain the RX FIFO
* @xspi: Pointer to the cdns_spi structure
+ * @ntx: Number of bytes to pack into the TX FIFO
+ * @nrx: Number of bytes to drain from the RX FIFO
*/
-static void cdns_spi_fill_tx_fifo(struct cdns_spi *xspi)
+static void cdns_spi_process_fifo(struct cdns_spi *xspi, int ntx, int nrx)
{
- unsigned long trans_cnt = 0;
+ ntx = clamp(ntx, 0, xspi->tx_bytes);
+ nrx = clamp(nrx, 0, xspi->rx_bytes);
- while ((trans_cnt < xspi->tx_fifo_depth) &&
- (xspi->tx_bytes > 0)) {
+ xspi->tx_bytes -= ntx;
+ xspi->rx_bytes -= nrx;
+ while (ntx || nrx) {
/* When xspi is busy, sending bytes may fail and the SPI
 * controller may not work correctly, so add a one-byte delay.
 */
- if (cdns_spi_read(xspi, CDNS_SPI_ISR) &
- CDNS_SPI_IXR_TXFULL)
+ if (cdns_spi_read(xspi, CDNS_SPI_ISR) & CDNS_SPI_IXR_TXFULL)
udelay(10);
- if (xspi->txbuf)
- cdns_spi_write(xspi, CDNS_SPI_TXD, *xspi->txbuf++);
- else
- cdns_spi_write(xspi, CDNS_SPI_TXD, 0);
+ if (ntx) {
+ if (xspi->txbuf)
+ cdns_spi_write(xspi, CDNS_SPI_TXD, *xspi->txbuf++);
+ else
+ cdns_spi_write(xspi, CDNS_SPI_TXD, 0);
- xspi->tx_bytes--;
- trans_cnt++;
- }
-}
+ ntx--;
+ }
-/**
- * cdns_spi_read_rx_fifo - Reads the RX FIFO with as many bytes as possible
- * @xspi: Pointer to the cdns_spi structure
- * @count: Read byte count
- */
-static void cdns_spi_read_rx_fifo(struct cdns_spi *xspi, unsigned long count)
-{
- u8 data;
-
- /* Read out the data from the RX FIFO */
- while (count > 0) {
- data = cdns_spi_read(xspi, CDNS_SPI_RXD);
- if (xspi->rxbuf)
- *xspi->rxbuf++ = data;
- xspi->rx_bytes--;
- count--;
+ if (nrx) {
+ u8 data = cdns_spi_read(xspi, CDNS_SPI_RXD);
+
+ if (xspi->rxbuf)
+ *xspi->rxbuf++ = data;
+
+ nrx--;
+ }
}
}
@@ -381,33 +376,22 @@ static irqreturn_t cdns_spi_irq(int irq, void *dev_id)
spi_finalize_current_transfer(ctlr);
status = IRQ_HANDLED;
} else if (intr_status & CDNS_SPI_IXR_TXOW) {
- int trans_cnt = cdns_spi_read(xspi, CDNS_SPI_THLD);
+ int threshold = cdns_spi_read(xspi, CDNS_SPI_THLD);
+ int trans_cnt = xspi->rx_bytes - xspi->tx_bytes;
+
+ if (threshold > 1)
+ trans_cnt -= threshold;
+
/* Set the threshold to one if the number of pending
 * bytes is less than half the FIFO depth.
 */
if (xspi->tx_bytes < xspi->tx_fifo_depth >> 1)
cdns_spi_write(xspi, CDNS_SPI_THLD, 1);
- while (trans_cnt) {
- cdns_spi_read_rx_fifo(xspi, 1);
-
- if (xspi->tx_bytes) {
- if (xspi->txbuf)
- cdns_spi_write(xspi, CDNS_SPI_TXD,
- *xspi->txbuf++);
- else
- cdns_spi_write(xspi, CDNS_SPI_TXD, 0);
- xspi->tx_bytes--;
- }
- trans_cnt--;
- }
- if (!xspi->tx_bytes) {
- /* Fixed delay due to controller limitation with
- * RX_NEMPTY incorrect status
- * Xilinx AR:65885 contains more details
- */
- udelay(10);
- cdns_spi_read_rx_fifo(xspi, xspi->rx_bytes);
+ if (xspi->tx_bytes) {
+ cdns_spi_process_fifo(xspi, trans_cnt, trans_cnt);
+ } else {
+ cdns_spi_process_fifo(xspi, 0, trans_cnt);
cdns_spi_write(xspi, CDNS_SPI_IDR,
CDNS_SPI_IXR_DEFAULT);
spi_finalize_current_transfer(ctlr);
@@ -450,16 +434,17 @@ static int cdns_transfer_one(struct spi_controller *ctlr,
xspi->tx_bytes = transfer->len;
xspi->rx_bytes = transfer->len;
- if (!spi_controller_is_slave(ctlr))
+ if (!spi_controller_is_slave(ctlr)) {
cdns_spi_setup_transfer(spi, transfer);
+ } else {
+ /* Set TX empty threshold to half of FIFO depth
+ * only if TX bytes exceed the FIFO depth.
+ */
+ if (xspi->tx_bytes > xspi->tx_fifo_depth)
+ cdns_spi_write(xspi, CDNS_SPI_THLD, xspi->tx_fifo_depth >> 1);
+ }
- /* Set TX empty threshold to half of FIFO depth
- * only if TX bytes are more than half FIFO depth.
- */
- if (xspi->tx_bytes > (xspi->tx_fifo_depth >> 1))
- cdns_spi_write(xspi, CDNS_SPI_THLD, xspi->tx_fifo_depth >> 1);
-
- cdns_spi_fill_tx_fifo(xspi);
+ cdns_spi_process_fifo(xspi, xspi->tx_fifo_depth, 0);
spi_transfer_delay_exec(transfer);
cdns_spi_write(xspi, CDNS_SPI_IER, CDNS_SPI_IXR_DEFAULT);
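
A hedged note on the refactor above: because cdns_spi_process_fifo() clamps its arguments to the bytes still outstanding, callers may safely over-ask, as the initial fill does.

/* Sketch only: requested counts are trimmed to what is outstanding,
 * so over-asking is harmless and the counters cannot underflow. */
static void example_initial_fill(struct cdns_spi *xspi)
{
	/* request a full FIFO; clamp() inside limits it to tx_bytes */
	cdns_spi_process_fifo(xspi, xspi->tx_fifo_depth, 0);
}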
diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
index 5e6faa98aa85..15f5e9cb54ad 100644
--- a/drivers/spi/spi-dw-mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -264,17 +264,17 @@ static void dw_spi_elba_set_cs(struct spi_device *spi, bool enable)
struct regmap *syscon = dwsmmio->priv;
u8 cs;
- cs = spi->chip_select;
+ cs = spi_get_chipselect(spi, 0);
if (cs < 2)
- dw_spi_elba_override_cs(syscon, spi->chip_select, enable);
+ dw_spi_elba_override_cs(syscon, spi_get_chipselect(spi, 0), enable);
/*
* The DW SPI controller needs a native CS bit selected to start
* the serial engine.
*/
- spi->chip_select = 0;
+ spi_set_chipselect(spi, 0, 0);
dw_spi_set_cs(spi, enable);
- spi->chip_select = cs;
+ spi_set_chipselect(spi, 0, cs);
}
static int dw_spi_elba_init(struct platform_device *pdev,
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 4339485d202c..674cfe05f411 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -1002,7 +1002,9 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
static int dspi_setup(struct spi_device *spi)
{
struct fsl_dspi *dspi = spi_controller_get_devdata(spi->controller);
+ u32 period_ns = DIV_ROUND_UP(NSEC_PER_SEC, spi->max_speed_hz);
unsigned char br = 0, pbr = 0, pcssck = 0, cssck = 0;
+ u32 quarter_period_ns = DIV_ROUND_UP(period_ns, 4);
u32 cs_sck_delay = 0, sck_cs_delay = 0;
struct fsl_dspi_platform_data *pdata;
unsigned char pasc = 0, asc = 0;
@@ -1031,6 +1033,19 @@ static int dspi_setup(struct spi_device *spi)
sck_cs_delay = pdata->sck_cs_delay;
}
+ /* Since tCSC and tASC apply to continuous transfers too, avoid SCK
+ * glitches of half a cycle by never allowing tCSC + tASC to go below
+ * half a SCK period.
+ */
+ if (cs_sck_delay < quarter_period_ns)
+ cs_sck_delay = quarter_period_ns;
+ if (sck_cs_delay < quarter_period_ns)
+ sck_cs_delay = quarter_period_ns;
+
+ dev_dbg(&spi->dev,
+ "DSPI controller timing params: CS-to-SCK delay %u ns, SCK-to-CS delay %u ns\n",
+ cs_sck_delay, sck_cs_delay);
+
clkrate = clk_get_rate(dspi->clk);
hz_to_spi_baud(&pbr, &br, spi->max_speed_hz, clkrate);
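
A hedged worked example of the clamp above, at a hypothetical max_speed_hz of 10 MHz: the SCK period is 100 ns, so each delay is raised to at least 25 ns and the enforced minimum tCSC + tASC is 50 ns, half a SCK period, which is exactly the glitch-free bound the comment describes.

static u32 example_quarter_period_ns(u32 speed_hz)
{
	u32 period_ns = DIV_ROUND_UP(NSEC_PER_SEC, speed_hz);	/* 100 ns at 10 MHz */

	return DIV_ROUND_UP(period_ns, 4);			/* 25 ns at 10 MHz */
}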
diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
index f2341ab99556..4b70038ceb6b 100644
--- a/drivers/spi/spi-fsl-lpspi.c
+++ b/drivers/spi/spi-fsl-lpspi.c
@@ -910,9 +910,14 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
ret = fsl_lpspi_dma_init(&pdev->dev, fsl_lpspi, controller);
if (ret == -EPROBE_DEFER)
goto out_pm_get;
-
if (ret < 0)
dev_err(&pdev->dev, "dma setup error %d, use pio\n", ret);
+ else
+ /*
+ * Disable the LPSPI module IRQ when DMA mode is enabled
+ * successfully, to prevent unexpected LPSPI module IRQ events.
+ */
+ disable_irq(irq);
ret = devm_spi_register_controller(&pdev->dev, controller);
if (ret < 0) {
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index ba7be505ec4e..b293428760bc 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -294,6 +294,8 @@ static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
mas->cs_flag = set_flag;
/* set xfer_mode to FIFO to complete cs_done in isr */
mas->cur_xfer_mode = GENI_SE_FIFO;
+ geni_se_select_mode(se, mas->cur_xfer_mode);
+
reinit_completion(&mas->cs_done);
if (set_flag)
geni_se_setup_m_cmd(se, SPI_CS_ASSERT, 0);
@@ -644,6 +646,8 @@ static int spi_geni_init(struct spi_geni_master *mas)
geni_se_select_mode(se, GENI_GPI_DMA);
dev_dbg(mas->dev, "Using GPI DMA mode for SPI\n");
break;
+ } else if (ret == -EPROBE_DEFER) {
+ goto out_pm;
}
/*
* in case of failure to get gpi dma channel, we can still do the
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 21c321f43766..d7432e2219d8 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -1275,6 +1275,9 @@ static int mtk_spi_remove(struct platform_device *pdev)
struct mtk_spi *mdata = spi_master_get_devdata(master);
int ret;
+ if (mdata->use_spimem && !completion_done(&mdata->spimem_done))
+ complete(&mdata->spimem_done);
+
ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0)
return ret;
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index 944ef6b42bce..00e5e88e72c4 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -1028,23 +1028,8 @@ static int spi_qup_probe(struct platform_device *pdev)
return -ENXIO;
}
- ret = clk_prepare_enable(cclk);
- if (ret) {
- dev_err(dev, "cannot enable core clock\n");
- return ret;
- }
-
- ret = clk_prepare_enable(iclk);
- if (ret) {
- clk_disable_unprepare(cclk);
- dev_err(dev, "cannot enable iface clock\n");
- return ret;
- }
-
master = spi_alloc_master(dev, sizeof(struct spi_qup));
if (!master) {
- clk_disable_unprepare(cclk);
- clk_disable_unprepare(iclk);
dev_err(dev, "cannot allocate master\n");
return -ENOMEM;
}
@@ -1092,6 +1077,19 @@ static int spi_qup_probe(struct platform_device *pdev)
spin_lock_init(&controller->lock);
init_completion(&controller->done);
+ ret = clk_prepare_enable(cclk);
+ if (ret) {
+ dev_err(dev, "cannot enable core clock\n");
+ goto error_dma;
+ }
+
+ ret = clk_prepare_enable(iclk);
+ if (ret) {
+ clk_disable_unprepare(cclk);
+ dev_err(dev, "cannot enable iface clock\n");
+ goto error_dma;
+ }
+
iomode = readl_relaxed(base + QUP_IO_M_MODES);
size = QUP_IO_M_OUTPUT_BLOCK_SIZE(iomode);
@@ -1121,7 +1119,7 @@ static int spi_qup_probe(struct platform_device *pdev)
ret = spi_qup_set_state(controller, QUP_STATE_RESET);
if (ret) {
dev_err(dev, "cannot set RESET state\n");
- goto error_dma;
+ goto error_clk;
}
writel_relaxed(0, base + QUP_OPERATIONAL);
@@ -1145,7 +1143,7 @@ static int spi_qup_probe(struct platform_device *pdev)
ret = devm_request_irq(dev, irq, spi_qup_qup_irq,
IRQF_TRIGGER_HIGH, pdev->name, controller);
if (ret)
- goto error_dma;
+ goto error_clk;
pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
pm_runtime_use_autosuspend(dev);
@@ -1160,11 +1158,12 @@ static int spi_qup_probe(struct platform_device *pdev)
disable_pm:
pm_runtime_disable(&pdev->dev);
+error_clk:
+ clk_disable_unprepare(cclk);
+ clk_disable_unprepare(iclk);
error_dma:
spi_qup_release_dma(master);
error:
- clk_disable_unprepare(cclk);
- clk_disable_unprepare(iclk);
spi_master_put(master);
return ret;
}
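
A hedged sketch (resource helpers hypothetical) of the unwind ordering the spi-qup rework above restores: release in reverse order of acquisition, so clocks enabled after DMA setup get their own label that falls through into the DMA cleanup.

static int example_probe(struct device *dev, struct clk *cclk,
			 struct clk *iclk, struct spi_master *master)
{
	int ret;

	ret = example_setup_dma(master);	/* hypothetical */
	if (ret)
		goto error;

	ret = clk_prepare_enable(cclk);
	if (ret)
		goto error_dma;
	ret = clk_prepare_enable(iclk);
	if (ret) {
		clk_disable_unprepare(cclk);
		goto error_dma;
	}

	ret = example_request_irq(dev);		/* hypothetical */
	if (ret)
		goto error_clk;
	return 0;

error_clk:
	clk_disable_unprepare(cclk);
	clk_disable_unprepare(iclk);
error_dma:
	example_release_dma(master);		/* hypothetical */
error:
	spi_master_put(master);
	return ret;
}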
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c b/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c
index 63de214916f5..c079368019e8 100644
--- a/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c
@@ -373,7 +373,7 @@ static int ov2680_get_fmt(struct v4l2_subdev *sd,
static int ov2680_detect(struct i2c_client *client)
{
struct i2c_adapter *adapter = client->adapter;
- u32 high, low;
+ u32 high = 0, low = 0;
int ret;
u16 id;
u8 revision;
@@ -383,7 +383,7 @@ static int ov2680_detect(struct i2c_client *client)
ret = ov_read_reg8(client, OV2680_SC_CMMN_CHIP_ID_H, &high);
if (ret) {
- dev_err(&client->dev, "sensor_id_high = 0x%x\n", high);
+ dev_err(&client->dev, "sensor_id_high read failed (%d)\n", ret);
return -ENODEV;
}
ret = ov_read_reg8(client, OV2680_SC_CMMN_CHIP_ID_L, &low);
diff --git a/drivers/staging/media/imx/imx8mq-mipi-csi2.c b/drivers/staging/media/imx/imx8mq-mipi-csi2.c
index 32700cb8bc4d..ca2efcc21efe 100644
--- a/drivers/staging/media/imx/imx8mq-mipi-csi2.c
+++ b/drivers/staging/media/imx/imx8mq-mipi-csi2.c
@@ -354,7 +354,7 @@ static int imx8mq_mipi_csi_start_stream(struct csi_state *state,
struct v4l2_subdev_state *sd_state)
{
int ret;
- u32 hs_settle;
+ u32 hs_settle = 0;
ret = imx8mq_mipi_csi_sw_reset(state);
if (ret)
diff --git a/drivers/staging/octeon/TODO b/drivers/staging/octeon/TODO
index 67a0a1f6b922..044e48e3d65f 100644
--- a/drivers/staging/octeon/TODO
+++ b/drivers/staging/octeon/TODO
@@ -6,4 +6,3 @@ TODO:
- make driver self-contained instead of being split between staging and
arch/mips/cavium-octeon.
-Contact: Aaro Koskinen <aaro.koskinen@iki.fi>
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 834cce50f9b0..b516c2893420 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -364,8 +364,6 @@ struct iscsi_np *iscsit_add_np(
init_completion(&np->np_restart_comp);
INIT_LIST_HEAD(&np->np_list);
- timer_setup(&np->np_login_timer, iscsi_handle_login_thread_timeout, 0);
-
ret = iscsi_target_setup_login_socket(np, sockaddr);
if (ret != 0) {
kfree(np);
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 274bdd7845ca..90b870f234f0 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -811,59 +811,6 @@ void iscsi_post_login_handler(
iscsit_dec_conn_usage_count(conn);
}
-void iscsi_handle_login_thread_timeout(struct timer_list *t)
-{
- struct iscsi_np *np = from_timer(np, t, np_login_timer);
-
- spin_lock_bh(&np->np_thread_lock);
- pr_err("iSCSI Login timeout on Network Portal %pISpc\n",
- &np->np_sockaddr);
-
- if (np->np_login_timer_flags & ISCSI_TF_STOP) {
- spin_unlock_bh(&np->np_thread_lock);
- return;
- }
-
- if (np->np_thread)
- send_sig(SIGINT, np->np_thread, 1);
-
- np->np_login_timer_flags &= ~ISCSI_TF_RUNNING;
- spin_unlock_bh(&np->np_thread_lock);
-}
-
-static void iscsi_start_login_thread_timer(struct iscsi_np *np)
-{
- /*
- * This used the TA_LOGIN_TIMEOUT constant because at this
- * point we do not have access to ISCSI_TPG_ATTRIB(tpg)->login_timeout
- */
- spin_lock_bh(&np->np_thread_lock);
- np->np_login_timer_flags &= ~ISCSI_TF_STOP;
- np->np_login_timer_flags |= ISCSI_TF_RUNNING;
- mod_timer(&np->np_login_timer, jiffies + TA_LOGIN_TIMEOUT * HZ);
-
- pr_debug("Added timeout timer to iSCSI login request for"
- " %u seconds.\n", TA_LOGIN_TIMEOUT);
- spin_unlock_bh(&np->np_thread_lock);
-}
-
-static void iscsi_stop_login_thread_timer(struct iscsi_np *np)
-{
- spin_lock_bh(&np->np_thread_lock);
- if (!(np->np_login_timer_flags & ISCSI_TF_RUNNING)) {
- spin_unlock_bh(&np->np_thread_lock);
- return;
- }
- np->np_login_timer_flags |= ISCSI_TF_STOP;
- spin_unlock_bh(&np->np_thread_lock);
-
- del_timer_sync(&np->np_login_timer);
-
- spin_lock_bh(&np->np_thread_lock);
- np->np_login_timer_flags &= ~ISCSI_TF_RUNNING;
- spin_unlock_bh(&np->np_thread_lock);
-}
-
int iscsit_setup_np(
struct iscsi_np *np,
struct sockaddr_storage *sockaddr)
@@ -1123,10 +1070,13 @@ static struct iscsit_conn *iscsit_alloc_conn(struct iscsi_np *np)
spin_lock_init(&conn->nopin_timer_lock);
spin_lock_init(&conn->response_queue_lock);
spin_lock_init(&conn->state_lock);
+ spin_lock_init(&conn->login_worker_lock);
+ spin_lock_init(&conn->login_timer_lock);
timer_setup(&conn->nopin_response_timer,
iscsit_handle_nopin_response_timeout, 0);
timer_setup(&conn->nopin_timer, iscsit_handle_nopin_timeout, 0);
+ timer_setup(&conn->login_timer, iscsit_login_timeout, 0);
if (iscsit_conn_set_transport(conn, np->np_transport) < 0)
goto free_conn;
@@ -1304,7 +1254,7 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
goto new_sess_out;
}
- iscsi_start_login_thread_timer(np);
+ iscsit_start_login_timer(conn, current);
pr_debug("Moving to TARG_CONN_STATE_XPT_UP.\n");
conn->conn_state = TARG_CONN_STATE_XPT_UP;
@@ -1417,8 +1367,6 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
if (ret < 0)
goto new_sess_out;
- iscsi_stop_login_thread_timer(np);
-
if (ret == 1) {
tpg_np = conn->tpg_np;
@@ -1434,7 +1382,7 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
new_sess_out:
new_sess = true;
old_sess_out:
- iscsi_stop_login_thread_timer(np);
+ iscsit_stop_login_timer(conn);
tpg_np = conn->tpg_np;
iscsi_target_login_sess_out(conn, zero_tsih, new_sess);
new_sess = false;
@@ -1448,7 +1396,6 @@ old_sess_out:
return 1;
exit:
- iscsi_stop_login_thread_timer(np);
spin_lock_bh(&np->np_thread_lock);
np->np_thread_state = ISCSI_NP_THREAD_EXIT;
spin_unlock_bh(&np->np_thread_lock);
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 24040c118e49..fa3fb5f4e6bc 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -535,25 +535,6 @@ static void iscsi_target_login_drop(struct iscsit_conn *conn, struct iscsi_login
iscsi_target_login_sess_out(conn, zero_tsih, true);
}
-struct conn_timeout {
- struct timer_list timer;
- struct iscsit_conn *conn;
-};
-
-static void iscsi_target_login_timeout(struct timer_list *t)
-{
- struct conn_timeout *timeout = from_timer(timeout, t, timer);
- struct iscsit_conn *conn = timeout->conn;
-
- pr_debug("Entering iscsi_target_login_timeout >>>>>>>>>>>>>>>>>>>\n");
-
- if (conn->login_kworker) {
- pr_debug("Sending SIGINT to conn->login_kworker %s/%d\n",
- conn->login_kworker->comm, conn->login_kworker->pid);
- send_sig(SIGINT, conn->login_kworker, 1);
- }
-}
-
static void iscsi_target_do_login_rx(struct work_struct *work)
{
struct iscsit_conn *conn = container_of(work,
@@ -562,12 +543,15 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
struct iscsi_np *np = login->np;
struct iscsi_portal_group *tpg = conn->tpg;
struct iscsi_tpg_np *tpg_np = conn->tpg_np;
- struct conn_timeout timeout;
int rc, zero_tsih = login->zero_tsih;
bool state;
pr_debug("entering iscsi_target_do_login_rx, conn: %p, %s:%d\n",
conn, current->comm, current->pid);
+
+ spin_lock(&conn->login_worker_lock);
+ set_bit(LOGIN_FLAGS_WORKER_RUNNING, &conn->login_flags);
+ spin_unlock(&conn->login_worker_lock);
/*
* If iscsi_target_do_login_rx() has been invoked by ->sk_data_ready()
* before initial PDU processing in iscsi_target_start_negotiation()
@@ -597,19 +581,16 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
goto err;
}
- conn->login_kworker = current;
allow_signal(SIGINT);
-
- timeout.conn = conn;
- timer_setup_on_stack(&timeout.timer, iscsi_target_login_timeout, 0);
- mod_timer(&timeout.timer, jiffies + TA_LOGIN_TIMEOUT * HZ);
- pr_debug("Starting login timer for %s/%d\n", current->comm, current->pid);
+ rc = iscsit_set_login_timer_kworker(conn, current);
+ if (rc < 0) {
+ /* The login timer has already expired */
+ pr_debug("iscsi_target_do_login_rx, login failed\n");
+ goto err;
+ }
rc = conn->conn_transport->iscsit_get_login_rx(conn, login);
- del_timer_sync(&timeout.timer);
- destroy_timer_on_stack(&timeout.timer);
flush_signals(current);
- conn->login_kworker = NULL;
if (rc < 0)
goto err;
@@ -646,7 +627,17 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
if (iscsi_target_sk_check_and_clear(conn,
LOGIN_FLAGS_WRITE_ACTIVE))
goto err;
+
+ /*
+ * Set the login timer thread pointer to NULL to prevent the
+ * login process from getting stuck if the initiator
+ * stops sending data.
+ */
+ rc = iscsit_set_login_timer_kworker(conn, NULL);
+ if (rc < 0)
+ goto err;
} else if (rc == 1) {
+ iscsit_stop_login_timer(conn);
cancel_delayed_work(&conn->login_work);
iscsi_target_nego_release(conn);
iscsi_post_login_handler(np, conn, zero_tsih);
@@ -656,6 +647,7 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
err:
iscsi_target_restore_sock_callbacks(conn);
+ iscsit_stop_login_timer(conn);
cancel_delayed_work(&conn->login_work);
iscsi_target_login_drop(conn, login);
iscsit_deaccess_np(np, tpg, tpg_np);
@@ -1130,6 +1122,7 @@ int iscsi_target_locate_portal(
iscsi_target_set_sock_callbacks(conn);
login->np = np;
+ conn->tpg = NULL;
login_req = (struct iscsi_login_req *) login->req;
payload_length = ntoh24(login_req->dlength);
@@ -1197,7 +1190,6 @@ int iscsi_target_locate_portal(
*/
sessiontype = strncmp(s_buf, DISCOVERY, 9);
if (!sessiontype) {
- conn->tpg = iscsit_global->discovery_tpg;
if (!login->leading_connection)
goto get_target;
@@ -1214,9 +1206,11 @@ int iscsi_target_locate_portal(
* Serialize access across the discovery struct iscsi_portal_group to
* process login attempt.
*/
+ conn->tpg = iscsit_global->discovery_tpg;
if (iscsit_access_np(np, conn->tpg) < 0) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
+ conn->tpg = NULL;
ret = -1;
goto out;
}
@@ -1368,14 +1362,30 @@ int iscsi_target_start_negotiation(
* and perform connection cleanup now.
*/
ret = iscsi_target_do_login(conn, login);
- if (!ret && iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_INITIAL_PDU))
- ret = -1;
+ if (!ret) {
+ spin_lock(&conn->login_worker_lock);
+
+ if (iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_INITIAL_PDU))
+ ret = -1;
+ else if (!test_bit(LOGIN_FLAGS_WORKER_RUNNING, &conn->login_flags)) {
+ if (iscsit_set_login_timer_kworker(conn, NULL) < 0) {
+ /*
+ * The timeout has expired already.
+ * Schedule login_work to perform the cleanup.
+ */
+ schedule_delayed_work(&conn->login_work, 0);
+ }
+ }
+
+ spin_unlock(&conn->login_worker_lock);
+ }
if (ret < 0) {
iscsi_target_restore_sock_callbacks(conn);
iscsi_remove_failed_auth_entry(conn);
}
if (ret != 0) {
+ iscsit_stop_login_timer(conn);
cancel_delayed_work_sync(&conn->login_work);
iscsi_target_nego_release(conn);
}
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 26dc8ed3045b..b14835fcb033 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -1040,6 +1040,57 @@ void iscsit_stop_nopin_timer(struct iscsit_conn *conn)
spin_unlock_bh(&conn->nopin_timer_lock);
}
+void iscsit_login_timeout(struct timer_list *t)
+{
+ struct iscsit_conn *conn = from_timer(conn, t, login_timer);
+ struct iscsi_login *login = conn->login;
+
+ pr_debug("Entering iscsi_target_login_timeout >>>>>>>>>>>>>>>>>>>\n");
+
+ spin_lock_bh(&conn->login_timer_lock);
+ login->login_failed = 1;
+
+ if (conn->login_kworker) {
+ pr_debug("Sending SIGINT to conn->login_kworker %s/%d\n",
+ conn->login_kworker->comm, conn->login_kworker->pid);
+ send_sig(SIGINT, conn->login_kworker, 1);
+ } else {
+ schedule_delayed_work(&conn->login_work, 0);
+ }
+ spin_unlock_bh(&conn->login_timer_lock);
+}
+
+void iscsit_start_login_timer(struct iscsit_conn *conn, struct task_struct *kthr)
+{
+ pr_debug("Login timer started\n");
+
+ conn->login_kworker = kthr;
+ mod_timer(&conn->login_timer, jiffies + TA_LOGIN_TIMEOUT * HZ);
+}
+
+int iscsit_set_login_timer_kworker(struct iscsit_conn *conn, struct task_struct *kthr)
+{
+ struct iscsi_login *login = conn->login;
+ int ret = 0;
+
+ spin_lock_bh(&conn->login_timer_lock);
+ if (login->login_failed) {
+ /* The timer has already expired */
+ ret = -1;
+ } else {
+ conn->login_kworker = kthr;
+ }
+ spin_unlock_bh(&conn->login_timer_lock);
+
+ return ret;
+}
+
+void iscsit_stop_login_timer(struct iscsit_conn *conn)
+{
+ pr_debug("Login timer stopped\n");
+ timer_delete_sync(&conn->login_timer);
+}
+
int iscsit_send_tx_data(
struct iscsit_cmd *cmd,
struct iscsit_conn *conn,
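
A hedged sketch of the intended flow for the new per-connection login timer above (the blocking receive helper is hypothetical): the accept path arms the timer, each login kworker registers itself and must bail out if the timer already fired, and the final success or error path stops it.

static int example_login_path(struct iscsit_conn *conn)
{
	int rc;

	/* arm the per-connection timer at connection accept */
	iscsit_start_login_timer(conn, current);

	/* each kworker registers itself; fail if the timer already fired */
	if (iscsit_set_login_timer_kworker(conn, current) < 0)
		goto err;

	rc = example_login_rx(conn);		/* hypothetical blocking RX */
	if (rc < 0)
		goto err;

	/* detach before the thread goes back to waiting on the socket */
	iscsit_set_login_timer_kworker(conn, NULL);
	return 0;

err:
	iscsit_stop_login_timer(conn);
	return -1;
}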
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
index 33ea799a0850..24b8e577575a 100644
--- a/drivers/target/iscsi/iscsi_target_util.h
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -56,6 +56,10 @@ extern void iscsit_handle_nopin_timeout(struct timer_list *t);
extern void __iscsit_start_nopin_timer(struct iscsit_conn *);
extern void iscsit_start_nopin_timer(struct iscsit_conn *);
extern void iscsit_stop_nopin_timer(struct iscsit_conn *);
+extern void iscsit_login_timeout(struct timer_list *t);
+extern void iscsit_start_login_timer(struct iscsit_conn *, struct task_struct *kthr);
+extern void iscsit_stop_login_timer(struct iscsit_conn *);
+extern int iscsit_set_login_timer_kworker(struct iscsit_conn *, struct task_struct *kthr);
extern int iscsit_send_tx_data(struct iscsit_cmd *, struct iscsit_conn *, int);
extern int iscsit_fe_sendpage_sg(struct iscsit_cmd *, struct iscsit_conn *);
extern int iscsit_tx_login_rsp(struct iscsit_conn *, u8, u8);
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index cc838ffd1294..3c462d69daca 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -90,7 +90,7 @@ static int iblock_configure_device(struct se_device *dev)
struct request_queue *q;
struct block_device *bd = NULL;
struct blk_integrity *bi;
- fmode_t mode;
+ blk_mode_t mode = BLK_OPEN_READ;
unsigned int max_write_zeroes_sectors;
int ret;
@@ -108,13 +108,12 @@ static int iblock_configure_device(struct se_device *dev)
pr_debug( "IBLOCK: Claiming struct block_device: %s\n",
ib_dev->ibd_udev_path);
- mode = FMODE_READ|FMODE_EXCL;
if (!ib_dev->ibd_readonly)
- mode |= FMODE_WRITE;
+ mode |= BLK_OPEN_WRITE;
else
dev->dev_flags |= DF_READ_ONLY;
- bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
+ bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev, NULL);
if (IS_ERR(bd)) {
ret = PTR_ERR(bd);
goto out_free_bioset;
@@ -175,7 +174,7 @@ static int iblock_configure_device(struct se_device *dev)
return 0;
out_blkdev_put:
- blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
+ blkdev_put(ib_dev->ibd_bd, ib_dev);
out_free_bioset:
bioset_exit(&ib_dev->ibd_bio_set);
out:
@@ -201,7 +200,7 @@ static void iblock_destroy_device(struct se_device *dev)
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
if (ib_dev->ibd_bd != NULL)
- blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
+ blkdev_put(ib_dev->ibd_bd, ib_dev);
bioset_exit(&ib_dev->ibd_bio_set);
}
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index e7425549e39c..0d4f09693ef4 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -366,8 +366,8 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
* Claim exclusive struct block_device access to struct scsi_device
* for TYPE_DISK and TYPE_ZBC using supplied udev_path
*/
- bd = blkdev_get_by_path(dev->udev_path,
- FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv);
+ bd = blkdev_get_by_path(dev->udev_path, BLK_OPEN_WRITE | BLK_OPEN_READ,
+ pdv, NULL);
if (IS_ERR(bd)) {
pr_err("pSCSI: blkdev_get_by_path() failed\n");
scsi_device_put(sd);
@@ -377,7 +377,7 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
ret = pscsi_add_device_to_list(dev, sd);
if (ret) {
- blkdev_put(pdv->pdv_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
+ blkdev_put(pdv->pdv_bd, pdv);
scsi_device_put(sd);
return ret;
}
@@ -565,8 +565,7 @@ static void pscsi_destroy_device(struct se_device *dev)
*/
if ((sd->type == TYPE_DISK || sd->type == TYPE_ZBC) &&
pdv->pdv_bd) {
- blkdev_put(pdv->pdv_bd,
- FMODE_WRITE|FMODE_READ|FMODE_EXCL);
+ blkdev_put(pdv->pdv_bd, pdv);
pdv->pdv_bd = NULL;
}
/*
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 86adff2a86ed..687adc9e086c 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -504,6 +504,8 @@ target_setup_session(struct se_portal_group *tpg,
free_sess:
transport_free_session(sess);
+ return ERR_PTR(rc);
+
free_cnt:
target_free_cmd_counter(cmd_cnt);
return ERR_PTR(rc);
diff --git a/drivers/tee/amdtee/amdtee_if.h b/drivers/tee/amdtee/amdtee_if.h
index ff48c3e47375..e2014e21530a 100644
--- a/drivers/tee/amdtee/amdtee_if.h
+++ b/drivers/tee/amdtee/amdtee_if.h
@@ -118,16 +118,18 @@ struct tee_cmd_unmap_shared_mem {
/**
* struct tee_cmd_load_ta - load Trusted Application (TA) binary into TEE
- * @low_addr: [in] bits [31:0] of the physical address of the TA binary
- * @hi_addr: [in] bits [63:32] of the physical address of the TA binary
- * @size: [in] size of TA binary in bytes
- * @ta_handle: [out] return handle of the loaded TA
+ * @low_addr: [in] bits [31:0] of the physical address of the TA binary
+ * @hi_addr: [in] bits [63:32] of the physical address of the TA binary
+ * @size: [in] size of TA binary in bytes
+ * @ta_handle: [out] return handle of the loaded TA
+ * @return_origin: [out] origin of return code after TEE processing
*/
struct tee_cmd_load_ta {
u32 low_addr;
u32 hi_addr;
u32 size;
u32 ta_handle;
+ u32 return_origin;
};
/**
diff --git a/drivers/tee/amdtee/call.c b/drivers/tee/amdtee/call.c
index e8cd9aaa3467..e9b63dcb3194 100644
--- a/drivers/tee/amdtee/call.c
+++ b/drivers/tee/amdtee/call.c
@@ -423,19 +423,23 @@ int handle_load_ta(void *data, u32 size, struct tee_ioctl_open_session_arg *arg)
if (ret) {
arg->ret_origin = TEEC_ORIGIN_COMMS;
arg->ret = TEEC_ERROR_COMMUNICATION;
- } else if (arg->ret == TEEC_SUCCESS) {
- ret = get_ta_refcount(load_cmd.ta_handle);
- if (!ret) {
- arg->ret_origin = TEEC_ORIGIN_COMMS;
- arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
-
- /* Unload the TA on error */
- unload_cmd.ta_handle = load_cmd.ta_handle;
- psp_tee_process_cmd(TEE_CMD_ID_UNLOAD_TA,
- (void *)&unload_cmd,
- sizeof(unload_cmd), &ret);
- } else {
- set_session_id(load_cmd.ta_handle, 0, &arg->session);
+ } else {
+ arg->ret_origin = load_cmd.return_origin;
+
+ if (arg->ret == TEEC_SUCCESS) {
+ ret = get_ta_refcount(load_cmd.ta_handle);
+ if (!ret) {
+ arg->ret_origin = TEEC_ORIGIN_COMMS;
+ arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+
+ /* Unload the TA on error */
+ unload_cmd.ta_handle = load_cmd.ta_handle;
+ psp_tee_process_cmd(TEE_CMD_ID_UNLOAD_TA,
+ (void *)&unload_cmd,
+ sizeof(unload_cmd), &ret);
+ } else {
+ set_session_id(load_cmd.ta_handle, 0, &arg->session);
+ }
}
}
mutex_unlock(&ta_refcount_mutex);
diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c
index 49702cb08f4f..3861ae06d902 100644
--- a/drivers/tee/optee/smc_abi.c
+++ b/drivers/tee/optee/smc_abi.c
@@ -1004,8 +1004,10 @@ static u32 get_async_notif_value(optee_invoke_fn *invoke_fn, bool *value_valid,
invoke_fn(OPTEE_SMC_GET_ASYNC_NOTIF_VALUE, 0, 0, 0, 0, 0, 0, 0, &res);
- if (res.a0)
+ if (res.a0) {
+ *value_valid = false;
return 0;
+ }
*value_valid = (res.a2 & OPTEE_SMC_ASYNC_NOTIF_VALUE_VALID);
*value_pending = (res.a2 & OPTEE_SMC_ASYNC_NOTIF_VALUE_PENDING);
return res.a1;
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 4cd7ab707315..19a4b33cb564 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -130,6 +130,14 @@ config THERMAL_DEFAULT_GOV_POWER_ALLOCATOR
system and device power allocation. This governor can only
operate on cooling devices that implement the power API.
+config THERMAL_DEFAULT_GOV_BANG_BANG
+ bool "bang_bang"
+ depends on THERMAL_GOV_BANG_BANG
+ help
+ Use the bang_bang governor as default. This throttles the
+ devices one step at a time, taking into account the trip
+ point hysteresis.
+
endchoice
config THERMAL_GOV_FAIR_SHARE
diff --git a/drivers/thermal/amlogic_thermal.c b/drivers/thermal/amlogic_thermal.c
index 3abc2dcef408..756b218880a7 100644
--- a/drivers/thermal/amlogic_thermal.c
+++ b/drivers/thermal/amlogic_thermal.c
@@ -282,8 +282,7 @@ static int amlogic_thermal_probe(struct platform_device *pdev)
return ret;
}
- if (devm_thermal_add_hwmon_sysfs(&pdev->dev, pdata->tzd))
- dev_warn(&pdev->dev, "Failed to add hwmon sysfs attributes\n");
+ devm_thermal_add_hwmon_sysfs(&pdev->dev, pdata->tzd);
ret = amlogic_thermal_initialize(pdata);
if (ret)
diff --git a/drivers/thermal/armada_thermal.c b/drivers/thermal/armada_thermal.c
index 0e8dfa6a7757..9f6dc4fc9112 100644
--- a/drivers/thermal/armada_thermal.c
+++ b/drivers/thermal/armada_thermal.c
@@ -231,7 +231,7 @@ static void armada380_init(struct platform_device *pdev,
regmap_write(priv->syscon, data->syscon_control0_off, reg);
}
-static void armada_ap806_init(struct platform_device *pdev,
+static void armada_ap80x_init(struct platform_device *pdev,
struct armada_thermal_priv *priv)
{
struct armada_thermal_data *data = priv->data;
@@ -614,7 +614,7 @@ static const struct armada_thermal_data armada380_data = {
};
static const struct armada_thermal_data armada_ap806_data = {
- .init = armada_ap806_init,
+ .init = armada_ap80x_init,
.is_valid_bit = BIT(16),
.temp_shift = 0,
.temp_mask = 0x3ff,
@@ -637,6 +637,30 @@ static const struct armada_thermal_data armada_ap806_data = {
.cpu_nr = 4,
};
+static const struct armada_thermal_data armada_ap807_data = {
+ .init = armada_ap80x_init,
+ .is_valid_bit = BIT(16),
+ .temp_shift = 0,
+ .temp_mask = 0x3ff,
+ .thresh_shift = 3,
+ .hyst_shift = 19,
+ .hyst_mask = 0x3,
+ .coef_b = -128900LL,
+ .coef_m = 394ULL,
+ .coef_div = 1,
+ .inverted = true,
+ .signed_sample = true,
+ .syscon_control0_off = 0x84,
+ .syscon_control1_off = 0x88,
+ .syscon_status_off = 0x8C,
+ .dfx_irq_cause_off = 0x108,
+ .dfx_irq_mask_off = 0x10C,
+ .dfx_overheat_irq = BIT(22),
+ .dfx_server_irq_mask_off = 0x104,
+ .dfx_server_irq_en = BIT(1),
+ .cpu_nr = 4,
+};
+
static const struct armada_thermal_data armada_cp110_data = {
.init = armada_cp110_init,
.is_valid_bit = BIT(10),
@@ -681,6 +705,10 @@ static const struct of_device_id armada_thermal_id_table[] = {
.data = &armada_ap806_data,
},
{
+ .compatible = "marvell,armada-ap807-thermal",
+ .data = &armada_ap807_data,
+ },
+ {
.compatible = "marvell,armada-cp110-thermal",
.data = &armada_cp110_data,
},
diff --git a/drivers/thermal/imx8mm_thermal.c b/drivers/thermal/imx8mm_thermal.c
index d8005e9ec992..d4b40869c7d7 100644
--- a/drivers/thermal/imx8mm_thermal.c
+++ b/drivers/thermal/imx8mm_thermal.c
@@ -343,8 +343,7 @@ static int imx8mm_tmu_probe(struct platform_device *pdev)
}
tmu->sensors[i].hw_id = i;
- if (devm_thermal_add_hwmon_sysfs(&pdev->dev, tmu->sensors[i].tzd))
- dev_warn(&pdev->dev, "failed to add hwmon sysfs attributes\n");
+ devm_thermal_add_hwmon_sysfs(&pdev->dev, tmu->sensors[i].tzd);
}
platform_set_drvdata(pdev, tmu);
diff --git a/drivers/thermal/imx_sc_thermal.c b/drivers/thermal/imx_sc_thermal.c
index 839bb9958f60..8d6b4ef23746 100644
--- a/drivers/thermal/imx_sc_thermal.c
+++ b/drivers/thermal/imx_sc_thermal.c
@@ -116,8 +116,7 @@ static int imx_sc_thermal_probe(struct platform_device *pdev)
return ret;
}
- if (devm_thermal_add_hwmon_sysfs(&pdev->dev, sensor->tzd))
- dev_warn(&pdev->dev, "failed to add hwmon sysfs attributes\n");
+ devm_thermal_add_hwmon_sysfs(&pdev->dev, sensor->tzd);
}
return 0;
diff --git a/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c b/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c
index 01b80331eab6..dc519a665c18 100644
--- a/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c
+++ b/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c
@@ -203,6 +203,151 @@ end:
}
EXPORT_SYMBOL(acpi_parse_art);
+/*
+ * acpi_parse_psvt - parse the Passive Table (PSVT) for passive cooling
+ *
+ * @handle: ACPI handle of the device which contains the PSVT
+ * @psvt_count: the number of valid entries resulting from parsing the PSVT
+ * @psvtp: pointer to array of psvt entries
+ *
+ */
+static int acpi_parse_psvt(acpi_handle handle, int *psvt_count, struct psvt **psvtp)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ int nr_bad_entries = 0, revision = 0;
+ union acpi_object *p;
+ acpi_status status;
+ int i, result = 0;
+ struct psvt *psvts;
+
+ if (!acpi_has_method(handle, "PSVT"))
+ return -ENODEV;
+
+ status = acpi_evaluate_object(handle, "PSVT", NULL, &buffer);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ p = buffer.pointer;
+ if (!p || (p->type != ACPI_TYPE_PACKAGE)) {
+ result = -EFAULT;
+ goto end;
+ }
+
+ /* first package is the revision number */
+ if (p->package.count > 0) {
+ union acpi_object *prev = &(p->package.elements[0]);
+
+ if (prev->type == ACPI_TYPE_INTEGER)
+ revision = (int)prev->integer.value;
+ } else {
+ result = -EFAULT;
+ goto end;
+ }
+
+ /* Support only version 2 */
+ if (revision != 2) {
+ result = -EFAULT;
+ goto end;
+ }
+
+ *psvt_count = p->package.count - 1;
+ if (!*psvt_count) {
+ result = -EFAULT;
+ goto end;
+ }
+
+ psvts = kcalloc(*psvt_count, sizeof(*psvts), GFP_KERNEL);
+ if (!psvts) {
+ result = -ENOMEM;
+ goto end;
+ }
+
+ /* Start index is 1 because the first package is the revision number */
+ for (i = 1; i < p->package.count; i++) {
+ struct acpi_buffer psvt_int_format = { sizeof("RRNNNNNNNNNN"), "RRNNNNNNNNNN" };
+ struct acpi_buffer psvt_str_format = { sizeof("RRNNNNNSNNNN"), "RRNNNNNSNNNN" };
+ union acpi_object *package = &(p->package.elements[i]);
+ struct psvt *psvt = &psvts[i - 1 - nr_bad_entries];
+ struct acpi_buffer *psvt_format = &psvt_int_format;
+ struct acpi_buffer element = { 0, NULL };
+ union acpi_object *knob;
+ struct acpi_device *res;
+ struct psvt *psvt_ptr;
+
+ element.length = ACPI_ALLOCATE_BUFFER;
+ element.pointer = NULL;
+
+ if (package->package.count >= ACPI_NR_PSVT_ELEMENTS) {
+ knob = &(package->package.elements[ACPI_PSVT_CONTROL_KNOB]);
+ } else {
+ nr_bad_entries++;
+ pr_info("PSVT package %d is invalid, ignored\n", i);
+ continue;
+ }
+
+ if (knob->type == ACPI_TYPE_STRING) {
+ psvt_format = &psvt_str_format;
+ if (knob->string.length > ACPI_LIMIT_STR_MAX_LEN - 1) {
+ pr_info("PSVT package %d limit string len exceeds max\n", i);
+ knob->string.length = ACPI_LIMIT_STR_MAX_LEN - 1;
+ }
+ }
+
+ status = acpi_extract_package(&(p->package.elements[i]), psvt_format, &element);
+ if (ACPI_FAILURE(status)) {
+ nr_bad_entries++;
+ pr_info("PSVT package %d is invalid, ignored\n", i);
+ continue;
+ }
+
+ psvt_ptr = (struct psvt *)element.pointer;
+
+ memcpy(psvt, psvt_ptr, sizeof(*psvt));
+
+ /* The limit element can be string or U64 */
+ psvt->control_knob_type = (u64)knob->type;
+
+ if (knob->type == ACPI_TYPE_STRING) {
+ memset(&psvt->limit, 0, sizeof(u64));
+ strncpy(psvt->limit.string, psvt_ptr->limit.str_ptr, knob->string.length);
+ } else {
+ psvt->limit.integer = psvt_ptr->limit.integer;
+ }
+
+ kfree(element.pointer);
+
+ res = acpi_fetch_acpi_dev(psvt->source);
+ if (!res) {
+ nr_bad_entries++;
+ pr_info("Failed to get source ACPI device\n");
+ continue;
+ }
+
+ res = acpi_fetch_acpi_dev(psvt->target);
+ if (!res) {
+ nr_bad_entries++;
+ pr_info("Failed to get target ACPI device\n");
+ continue;
+ }
+ }
+
+ /* don't count bad entries */
+ *psvt_count -= nr_bad_entries;
+
+ if (!*psvt_count) {
+ result = -EFAULT;
+ kfree(psvts);
+ goto end;
+ }
+
+ *psvtp = psvts;
+
+ return 0;
+
+end:
+ kfree(buffer.pointer);
+ return result;
+}
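The two format strings above drive acpi_extract_package(). Assuming ACPICA's standard format characters ('R' = local reference, 'N' = number, 'S' = string), the twelve elements map onto struct psvt roughly as sketched below; the element at index ACPI_PSVT_CONTROL_KNOB (7) is the limit, and its runtime type selects which of the two format strings is used:

	/*
	 * "RRNNNNNSNNNN" (limit is a string; "RRNNNNNNNNNN" extracts it as u64):
	 *   R R  source, target                 (acpi_handle references)
	 *   N... priority, sample_period, passive_temp, source_domain,
	 *        control_knob                    (u64)
	 *   S    limit                           (string form)
	 *   N... step_size, limit_coeff, unlimit_coeff, control_knob_type (u64)
	 */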
/* get device name from acpi handle */
static void get_single_name(acpi_handle handle, char *name)
@@ -289,6 +434,57 @@ free_trt:
return ret;
}
+static int fill_psvt(char __user *ubuf)
+{
+ int i, ret, count, psvt_len;
+ union psvt_object *psvt_user;
+ struct psvt *psvts;
+
+ ret = acpi_parse_psvt(acpi_thermal_rel_handle, &count, &psvts);
+ if (ret)
+ return ret;
+
+ psvt_len = count * sizeof(*psvt_user);
+
+ psvt_user = kzalloc(psvt_len, GFP_KERNEL);
+ if (!psvt_user) {
+ ret = -ENOMEM;
+ goto free_psvt;
+ }
+
+ /* now fill in user psvt data */
+ for (i = 0; i < count; i++) {
+ /* userspace psvt needs device name instead of acpi reference */
+ get_single_name(psvts[i].source, psvt_user[i].source_device);
+ get_single_name(psvts[i].target, psvt_user[i].target_device);
+
+ psvt_user[i].priority = psvts[i].priority;
+ psvt_user[i].sample_period = psvts[i].sample_period;
+ psvt_user[i].passive_temp = psvts[i].passive_temp;
+ psvt_user[i].source_domain = psvts[i].source_domain;
+ psvt_user[i].control_knob = psvts[i].control_knob;
+ psvt_user[i].step_size = psvts[i].step_size;
+ psvt_user[i].limit_coeff = psvts[i].limit_coeff;
+ psvt_user[i].unlimit_coeff = psvts[i].unlimit_coeff;
+ psvt_user[i].control_knob_type = psvts[i].control_knob_type;
+ if (psvt_user[i].control_knob_type == ACPI_TYPE_STRING)
+ strncpy(psvt_user[i].limit.string, psvts[i].limit.string,
+ ACPI_LIMIT_STR_MAX_LEN);
+ else
+ psvt_user[i].limit.integer = psvts[i].limit.integer;
+
+ }
+
+ if (copy_to_user(ubuf, psvt_user, psvt_len))
+ ret = -EFAULT;
+
+ kfree(psvt_user);
+
+free_psvt:
+ kfree(psvts);
+ return ret;
+}
+
static long acpi_thermal_rel_ioctl(struct file *f, unsigned int cmd,
unsigned long __arg)
{
@@ -298,6 +494,7 @@ static long acpi_thermal_rel_ioctl(struct file *f, unsigned int cmd,
char __user *arg = (void __user *)__arg;
struct trt *trts = NULL;
struct art *arts = NULL;
+ struct psvt *psvts;
switch (cmd) {
case ACPI_THERMAL_GET_TRT_COUNT:
@@ -336,6 +533,27 @@ static long acpi_thermal_rel_ioctl(struct file *f, unsigned int cmd,
case ACPI_THERMAL_GET_ART:
return fill_art(arg);
+ case ACPI_THERMAL_GET_PSVT_COUNT:
+ ret = acpi_parse_psvt(acpi_thermal_rel_handle, &count, &psvts);
+ if (!ret) {
+ kfree(psvts);
+ return put_user(count, (unsigned long __user *)__arg);
+ }
+ return ret;
+
+ case ACPI_THERMAL_GET_PSVT_LEN:
+ /* total length of the data retrieved (count * PSVT entry size) */
+ ret = acpi_parse_psvt(acpi_thermal_rel_handle, &count, &psvts);
+ length = count * sizeof(union psvt_object);
+ if (!ret) {
+ kfree(psvts);
+ return put_user(length, (unsigned long __user *)__arg);
+ }
+ return ret;
+
+ case ACPI_THERMAL_GET_PSVT:
+ return fill_psvt(arg);
+
default:
return -ENOTTY;
}
diff --git a/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.h b/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.h
index 78d942477035..ac376d8f9ee4 100644
--- a/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.h
+++ b/drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.h
@@ -14,6 +14,16 @@
#define ACPI_THERMAL_GET_TRT _IOR(ACPI_THERMAL_MAGIC, 5, unsigned long)
#define ACPI_THERMAL_GET_ART _IOR(ACPI_THERMAL_MAGIC, 6, unsigned long)
+/*
+ * ACPI_THERMAL_GET_PSVT_COUNT = Number of PSVT entries
+ * ACPI_THERMAL_GET_PSVT_LEN = Total return data size (PSVT count x each
+ * PSVT entry size)
+ * ACPI_THERMAL_GET_PSVT = Get the data as an array of psvt_objects
+ */
+#define ACPI_THERMAL_GET_PSVT_LEN _IOR(ACPI_THERMAL_MAGIC, 7, unsigned long)
+#define ACPI_THERMAL_GET_PSVT_COUNT _IOR(ACPI_THERMAL_MAGIC, 8, unsigned long)
+#define ACPI_THERMAL_GET_PSVT _IOR(ACPI_THERMAL_MAGIC, 9, unsigned long)
+
struct art {
acpi_handle source;
acpi_handle target;
@@ -43,6 +53,32 @@ struct trt {
u64 reserved4;
} __packed;
+#define ACPI_NR_PSVT_ELEMENTS 12
+#define ACPI_PSVT_CONTROL_KNOB 7
+#define ACPI_LIMIT_STR_MAX_LEN 8
+
+struct psvt {
+ acpi_handle source;
+ acpi_handle target;
+ u64 priority;
+ u64 sample_period;
+ u64 passive_temp;
+ u64 source_domain;
+ u64 control_knob;
+ union {
+ /* For limit_type = ACPI_TYPE_INTEGER */
+ u64 integer;
+ /* For limit_type = ACPI_TYPE_STRING */
+ char string[ACPI_LIMIT_STR_MAX_LEN];
+ char *str_ptr;
+ } limit;
+ u64 step_size;
+ u64 limit_coeff;
+ u64 unlimit_coeff;
+ /* Spec calls this field reserved, so we borrow it for type info */
+ u64 control_knob_type; /* ACPI_TYPE_STRING or ACPI_TYPE_INTEGER */
+} __packed;
+
#define ACPI_NR_ART_ELEMENTS 13
/* for userspace */
union art_object {
@@ -77,6 +113,27 @@ union trt_object {
u64 __data[8];
};
+union psvt_object {
+ struct {
+ char source_device[8];
+ char target_device[8];
+ u64 priority;
+ u64 sample_period;
+ u64 passive_temp;
+ u64 source_domain;
+ u64 control_knob;
+ union {
+ u64 integer;
+ char string[ACPI_LIMIT_STR_MAX_LEN];
+ } limit;
+ u64 step_size;
+ u64 limit_coeff;
+ u64 unlimit_coeff;
+ u64 control_knob_type;
+ };
+ u64 __data[ACPI_NR_PSVT_ELEMENTS];
+};
+
#ifdef __KERNEL__
int acpi_thermal_rel_misc_device_add(acpi_handle handle);
int acpi_thermal_rel_misc_device_remove(acpi_handle handle);
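A minimal userspace sketch of the three new PSVT ioctls, assuming the misc device is exposed as /dev/acpi_thermal_rel and that this header is includable from userspace (the __KERNEL__ guard above suggests it is); error handling is trimmed:

	#include <fcntl.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include "acpi_thermal_rel.h"	/* ioctl numbers and union psvt_object */

	int main(void)
	{
		unsigned long count = 0, len = 0;
		union psvt_object *objs;
		int fd = open("/dev/acpi_thermal_rel", O_RDONLY);	/* assumed node name */

		if (fd < 0)
			return 1;
		if (ioctl(fd, ACPI_THERMAL_GET_PSVT_COUNT, &count) == 0 &&
		    ioctl(fd, ACPI_THERMAL_GET_PSVT_LEN, &len) == 0 &&
		    (objs = malloc(len)) != NULL) {
			if (ioctl(fd, ACPI_THERMAL_GET_PSVT, objs) == 0)
				for (unsigned long i = 0; i < count; i++)
					printf("%.8s -> %.8s prio %llu\n",
					       objs[i].source_device,
					       objs[i].target_device,
					       (unsigned long long)objs[i].priority);
			free(objs);
		}
		close(fd);
		return 0;
	}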
diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
index 810231b59dcd..5e1164226ada 100644
--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
@@ -131,7 +131,7 @@ static ssize_t available_uuids_show(struct device *dev,
for (i = 0; i < INT3400_THERMAL_MAXIMUM_UUID; i++) {
if (priv->uuid_bitmap & (1 << i))
- length += sysfs_emit_at(buf, length, int3400_thermal_uuids[i]);
+ length += sysfs_emit_at(buf, length, "%s\n", int3400_thermal_uuids[i]);
}
return length;
@@ -149,7 +149,7 @@ static ssize_t current_uuid_show(struct device *dev,
for (i = 0; i <= INT3400_THERMAL_CRITICAL; i++) {
if (priv->os_uuid_mask & BIT(i))
- length += sysfs_emit_at(buf, length, int3400_thermal_uuids[i]);
+ length += sysfs_emit_at(buf, length, "%s\n", int3400_thermal_uuids[i]);
}
if (length)
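The two hunks above fix the same bug: the UUID string was passed in the format-string position (so any '%' in it would be interpreted) and no newline separated consecutive UUIDs. The fixed call shape is:

	length += sysfs_emit_at(buf, length, "%s\n", int3400_thermal_uuids[i]);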
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c
index a205221ec8df..013f1633f082 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c
@@ -15,8 +15,8 @@ static const struct rapl_mmio_regs rapl_mmio_default = {
.reg_unit = 0x5938,
.regs[RAPL_DOMAIN_PACKAGE] = { 0x59a0, 0x593c, 0x58f0, 0, 0x5930},
.regs[RAPL_DOMAIN_DRAM] = { 0x58e0, 0x58e8, 0x58ec, 0, 0},
- .limits[RAPL_DOMAIN_PACKAGE] = 2,
- .limits[RAPL_DOMAIN_DRAM] = 2,
+ .limits[RAPL_DOMAIN_PACKAGE] = BIT(POWER_LIMIT2),
+ .limits[RAPL_DOMAIN_DRAM] = BIT(POWER_LIMIT2),
};
static int rapl_mmio_cpu_online(unsigned int cpu)
@@ -27,9 +27,9 @@ static int rapl_mmio_cpu_online(unsigned int cpu)
if (topology_physical_package_id(cpu))
return 0;
- rp = rapl_find_package_domain(cpu, &rapl_mmio_priv);
+ rp = rapl_find_package_domain(cpu, &rapl_mmio_priv, true);
if (!rp) {
- rp = rapl_add_package(cpu, &rapl_mmio_priv);
+ rp = rapl_add_package(cpu, &rapl_mmio_priv, true);
if (IS_ERR(rp))
return PTR_ERR(rp);
}
@@ -42,7 +42,7 @@ static int rapl_mmio_cpu_down_prep(unsigned int cpu)
struct rapl_package *rp;
int lead_cpu;
- rp = rapl_find_package_domain(cpu, &rapl_mmio_priv);
+ rp = rapl_find_package_domain(cpu, &rapl_mmio_priv, true);
if (!rp)
return 0;
@@ -97,6 +97,7 @@ int proc_thermal_rapl_add(struct pci_dev *pdev, struct proc_thermal_device *proc
rapl_regs->regs[domain][reg];
rapl_mmio_priv.limits[domain] = rapl_regs->limits[domain];
}
+ rapl_mmio_priv.type = RAPL_IF_MMIO;
rapl_mmio_priv.reg_unit = (u64)proc_priv->mmio_base + rapl_regs->reg_unit;
rapl_mmio_priv.read_raw = rapl_mmio_read_raw;
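These changes assume the reworked RAPL core interface: .limits[] appears to be a bitmask of supported power limits now (hence BIT(POWER_LIMIT2) rather than the literal count 2), RAPL_IF_MMIO identifies the register interface, and the new boolean argument marks the first parameter as a CPU id:

	/* sketch, assuming the third parameter means "id is a CPU number" */
	rp = rapl_find_package_domain(cpu, &rapl_mmio_priv, true);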
diff --git a/drivers/thermal/intel/intel_soc_dts_iosf.c b/drivers/thermal/intel/intel_soc_dts_iosf.c
index f99dc7e4ae89..db97499f4f0a 100644
--- a/drivers/thermal/intel/intel_soc_dts_iosf.c
+++ b/drivers/thermal/intel/intel_soc_dts_iosf.c
@@ -398,7 +398,7 @@ struct intel_soc_dts_sensors *intel_soc_dts_iosf_init(
spin_lock_init(&sensors->intr_notify_lock);
mutex_init(&sensors->dts_update_lock);
sensors->intr_type = intr_type;
- sensors->tj_max = tj_max;
+ sensors->tj_max = tj_max * 1000;
if (intr_type == INTEL_SOC_DTS_INTERRUPT_NONE)
notification = false;
else
diff --git a/drivers/thermal/k3_bandgap.c b/drivers/thermal/k3_bandgap.c
index 791210458606..1c3e590157ec 100644
--- a/drivers/thermal/k3_bandgap.c
+++ b/drivers/thermal/k3_bandgap.c
@@ -222,8 +222,7 @@ static int k3_bandgap_probe(struct platform_device *pdev)
goto err_alloc;
}
- if (devm_thermal_add_hwmon_sysfs(dev, data[id].tzd))
- dev_warn(dev, "Failed to add hwmon sysfs attributes\n");
+ devm_thermal_add_hwmon_sysfs(dev, data[id].tzd);
}
platform_set_drvdata(pdev, bgp);
diff --git a/drivers/thermal/mediatek/auxadc_thermal.c b/drivers/thermal/mediatek/auxadc_thermal.c
index 0b5528804bbd..f59d36de20a0 100644
--- a/drivers/thermal/mediatek/auxadc_thermal.c
+++ b/drivers/thermal/mediatek/auxadc_thermal.c
@@ -1222,12 +1222,7 @@ static int mtk_thermal_probe(struct platform_device *pdev)
return -ENODEV;
}
- auxadc_base = devm_of_iomap(&pdev->dev, auxadc, 0, NULL);
- if (IS_ERR(auxadc_base)) {
- of_node_put(auxadc);
- return PTR_ERR(auxadc_base);
- }
-
+ auxadc_base = of_iomap(auxadc, 0);
auxadc_phys_base = of_get_phys_base(auxadc);
of_node_put(auxadc);
@@ -1243,12 +1238,7 @@ static int mtk_thermal_probe(struct platform_device *pdev)
return -ENODEV;
}
- apmixed_base = devm_of_iomap(&pdev->dev, apmixedsys, 0, NULL);
- if (IS_ERR(apmixed_base)) {
- of_node_put(apmixedsys);
- return PTR_ERR(apmixed_base);
- }
-
+ apmixed_base = of_iomap(apmixedsys, 0);
apmixed_phys_base = of_get_phys_base(apmixedsys);
of_node_put(apmixedsys);
diff --git a/drivers/thermal/mediatek/lvts_thermal.c b/drivers/thermal/mediatek/lvts_thermal.c
index d0a3f95b7884..b693fac2d677 100644
--- a/drivers/thermal/mediatek/lvts_thermal.c
+++ b/drivers/thermal/mediatek/lvts_thermal.c
@@ -19,6 +19,8 @@
#include <linux/thermal.h>
#include <dt-bindings/thermal/mediatek,lvts-thermal.h>
+#include "../thermal_hwmon.h"
+
#define LVTS_MONCTL0(__base) (__base + 0x0000)
#define LVTS_MONCTL1(__base) (__base + 0x0004)
#define LVTS_MONCTL2(__base) (__base + 0x0008)
@@ -996,6 +998,8 @@ static int lvts_ctrl_start(struct device *dev, struct lvts_ctrl *lvts_ctrl)
return PTR_ERR(tz);
}
+ devm_thermal_add_hwmon_sysfs(dev, tz);
+
/*
* The thermal zone pointer will be needed in the
* interrupt handler, we store it in the sensor
diff --git a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
index 5749149ae2e4..5ddc39b2be32 100644
--- a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
+++ b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
@@ -689,9 +689,7 @@ static int adc_tm5_register_tzd(struct adc_tm5_chip *adc_tm)
return PTR_ERR(tzd);
}
adc_tm->channels[i].tzd = tzd;
- if (devm_thermal_add_hwmon_sysfs(adc_tm->dev, tzd))
- dev_warn(adc_tm->dev,
- "Failed to add hwmon sysfs attributes\n");
+ devm_thermal_add_hwmon_sysfs(adc_tm->dev, tzd);
}
return 0;
diff --git a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
index 0f88e98428ac..0e8ebfcd84c5 100644
--- a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
+++ b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
@@ -411,22 +411,19 @@ static int qpnp_tm_probe(struct platform_device *pdev)
chip->base = res;
ret = qpnp_tm_read(chip, QPNP_TM_REG_TYPE, &type);
- if (ret < 0) {
- dev_err(&pdev->dev, "could not read type\n");
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "could not read type\n");
ret = qpnp_tm_read(chip, QPNP_TM_REG_SUBTYPE, &subtype);
- if (ret < 0) {
- dev_err(&pdev->dev, "could not read subtype\n");
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "could not read subtype\n");
ret = qpnp_tm_read(chip, QPNP_TM_REG_DIG_MAJOR, &dig_major);
- if (ret < 0) {
- dev_err(&pdev->dev, "could not read dig_major\n");
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "could not read dig_major\n");
if (type != QPNP_TM_TYPE || (subtype != QPNP_TM_SUBTYPE_GEN1
&& subtype != QPNP_TM_SUBTYPE_GEN2)) {
@@ -448,20 +445,15 @@ static int qpnp_tm_probe(struct platform_device *pdev)
*/
chip->tz_dev = devm_thermal_of_zone_register(
&pdev->dev, 0, chip, &qpnp_tm_sensor_ops);
- if (IS_ERR(chip->tz_dev)) {
- dev_err(&pdev->dev, "failed to register sensor\n");
- return PTR_ERR(chip->tz_dev);
- }
+ if (IS_ERR(chip->tz_dev))
+ return dev_err_probe(&pdev->dev, PTR_ERR(chip->tz_dev),
+ "failed to register sensor\n");
ret = qpnp_tm_init(chip);
- if (ret < 0) {
- dev_err(&pdev->dev, "init failed\n");
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "init failed\n");
- if (devm_thermal_add_hwmon_sysfs(&pdev->dev, chip->tz_dev))
- dev_warn(&pdev->dev,
- "Failed to add hwmon sysfs attributes\n");
+ devm_thermal_add_hwmon_sysfs(&pdev->dev, chip->tz_dev);
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, qpnp_tm_isr,
IRQF_ONESHOT, node->name, chip);
diff --git a/drivers/thermal/qcom/tsens-v0_1.c b/drivers/thermal/qcom/tsens-v0_1.c
index e89c6f39a3ae..a941b4241b0a 100644
--- a/drivers/thermal/qcom/tsens-v0_1.c
+++ b/drivers/thermal/qcom/tsens-v0_1.c
@@ -39,26 +39,6 @@ struct tsens_legacy_calibration_format tsens_8916_nvmem = {
},
};
-struct tsens_legacy_calibration_format tsens_8939_nvmem = {
- .base_len = 8,
- .base_shift = 2,
- .sp_len = 6,
- .mode = { 12, 0 },
- .invalid = { 12, 2 },
- .base = { { 0, 0 }, { 1, 24 } },
- .sp = {
- { { 12, 3 }, { 12, 9 } },
- { { 12, 15 }, { 12, 21 } },
- { { 12, 27 }, { 13, 1 } },
- { { 13, 7 }, { 13, 13 } },
- { { 13, 19 }, { 13, 25 } },
- { { 0, 8 }, { 0, 14 } },
- { { 0, 20 }, { 0, 26 } },
- { { 1, 0 }, { 1, 6 } },
- { { 1, 12 }, { 1, 18 } },
- },
-};
-
struct tsens_legacy_calibration_format tsens_8974_nvmem = {
.base_len = 8,
.base_shift = 2,
@@ -103,22 +83,6 @@ struct tsens_legacy_calibration_format tsens_8974_backup_nvmem = {
},
};
-struct tsens_legacy_calibration_format tsens_9607_nvmem = {
- .base_len = 8,
- .base_shift = 2,
- .sp_len = 6,
- .mode = { 2, 20 },
- .invalid = { 2, 22 },
- .base = { { 0, 0 }, { 2, 12 } },
- .sp = {
- { { 0, 8 }, { 0, 14 } },
- { { 0, 20 }, { 0, 26 } },
- { { 1, 0 }, { 1, 6 } },
- { { 1, 12 }, { 1, 18 } },
- { { 2, 0 }, { 2, 6 } },
- },
-};
-
static int calibrate_8916(struct tsens_priv *priv)
{
u32 p1[5], p2[5];
@@ -243,6 +207,39 @@ static int calibrate_8974(struct tsens_priv *priv)
return 0;
}
+static int __init init_8226(struct tsens_priv *priv)
+{
+ priv->sensor[0].slope = 2901;
+ priv->sensor[1].slope = 2846;
+ priv->sensor[2].slope = 3038;
+ priv->sensor[3].slope = 2955;
+ priv->sensor[4].slope = 2901;
+ priv->sensor[5].slope = 2846;
+
+ return init_common(priv);
+}
+
+static int __init init_8909(struct tsens_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_sensors; ++i)
+ priv->sensor[i].slope = 3000;
+
+ priv->sensor[0].p1_calib_offset = 0;
+ priv->sensor[0].p2_calib_offset = 0;
+ priv->sensor[1].p1_calib_offset = -10;
+ priv->sensor[1].p2_calib_offset = -6;
+ priv->sensor[2].p1_calib_offset = 0;
+ priv->sensor[2].p2_calib_offset = 0;
+ priv->sensor[3].p1_calib_offset = -9;
+ priv->sensor[3].p2_calib_offset = -9;
+ priv->sensor[4].p1_calib_offset = -8;
+ priv->sensor[4].p2_calib_offset = -10;
+
+ return init_common(priv);
+}
+
static int __init init_8939(struct tsens_priv *priv) {
priv->sensor[0].slope = 2911;
priv->sensor[1].slope = 2789;
@@ -258,7 +255,28 @@ static int __init init_8939(struct tsens_priv *priv) {
return init_common(priv);
}
-/* v0.1: 8916, 8939, 8974, 9607 */
+static int __init init_9607(struct tsens_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_sensors; ++i)
+ priv->sensor[i].slope = 3000;
+
+ priv->sensor[0].p1_calib_offset = 1;
+ priv->sensor[0].p2_calib_offset = 1;
+ priv->sensor[1].p1_calib_offset = -4;
+ priv->sensor[1].p2_calib_offset = -2;
+ priv->sensor[2].p1_calib_offset = 4;
+ priv->sensor[2].p2_calib_offset = 8;
+ priv->sensor[3].p1_calib_offset = -3;
+ priv->sensor[3].p2_calib_offset = -5;
+ priv->sensor[4].p1_calib_offset = -4;
+ priv->sensor[4].p2_calib_offset = -4;
+
+ return init_common(priv);
+}
+
+/* v0.1: 8226, 8909, 8916, 8939, 8974, 9607 */
static struct tsens_features tsens_v0_1_feat = {
.ver_major = VER_0_1,
@@ -313,6 +331,32 @@ static const struct tsens_ops ops_v0_1 = {
.get_temp = get_temp_common,
};
+static const struct tsens_ops ops_8226 = {
+ .init = init_8226,
+ .calibrate = tsens_calibrate_common,
+ .get_temp = get_temp_common,
+};
+
+struct tsens_plat_data data_8226 = {
+ .num_sensors = 6,
+ .ops = &ops_8226,
+ .feat = &tsens_v0_1_feat,
+ .fields = tsens_v0_1_regfields,
+};
+
+static const struct tsens_ops ops_8909 = {
+ .init = init_8909,
+ .calibrate = tsens_calibrate_common,
+ .get_temp = get_temp_common,
+};
+
+struct tsens_plat_data data_8909 = {
+ .num_sensors = 5,
+ .ops = &ops_8909,
+ .feat = &tsens_v0_1_feat,
+ .fields = tsens_v0_1_regfields,
+};
+
static const struct tsens_ops ops_8916 = {
.init = init_common,
.calibrate = calibrate_8916,
@@ -356,9 +400,15 @@ struct tsens_plat_data data_8974 = {
.fields = tsens_v0_1_regfields,
};
+static const struct tsens_ops ops_9607 = {
+ .init = init_9607,
+ .calibrate = tsens_calibrate_common,
+ .get_temp = get_temp_common,
+};
+
struct tsens_plat_data data_9607 = {
.num_sensors = 5,
- .ops = &ops_v0_1,
+ .ops = &ops_9607,
.feat = &tsens_v0_1_feat,
.fields = tsens_v0_1_regfields,
};
diff --git a/drivers/thermal/qcom/tsens-v1.c b/drivers/thermal/qcom/tsens-v1.c
index b822a426066d..51322430f1fe 100644
--- a/drivers/thermal/qcom/tsens-v1.c
+++ b/drivers/thermal/qcom/tsens-v1.c
@@ -42,28 +42,6 @@ struct tsens_legacy_calibration_format tsens_qcs404_nvmem = {
},
};
-struct tsens_legacy_calibration_format tsens_8976_nvmem = {
- .base_len = 8,
- .base_shift = 2,
- .sp_len = 6,
- .mode = { 4, 0 },
- .invalid = { 4, 2 },
- .base = { { 0, 0 }, { 2, 8 } },
- .sp = {
- { { 0, 8 }, { 0, 14 } },
- { { 0, 20 }, { 0, 26 } },
- { { 1, 0 }, { 1, 6 } },
- { { 1, 12 }, { 1, 18 } },
- { { 2, 8 }, { 2, 14 } },
- { { 2, 20 }, { 2, 26 } },
- { { 3, 0 }, { 3, 6 } },
- { { 3, 12 }, { 3, 18 } },
- { { 4, 2 }, { 4, 9 } },
- { { 4, 14 }, { 4, 21 } },
- { { 4, 26 }, { 5, 1 } },
- },
-};
-
static int calibrate_v1(struct tsens_priv *priv)
{
u32 p1[10], p2[10];
diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
index d3218127e617..98c356acfe98 100644
--- a/drivers/thermal/qcom/tsens.c
+++ b/drivers/thermal/qcom/tsens.c
@@ -134,10 +134,12 @@ int tsens_read_calibration(struct tsens_priv *priv, int shift, u32 *p1, u32 *p2,
p1[i] = p1[i] + (base1 << shift);
break;
case TWO_PT_CALIB:
+ case TWO_PT_CALIB_NO_OFFSET:
for (i = 0; i < priv->num_sensors; i++)
p2[i] = (p2[i] + base2) << shift;
fallthrough;
case ONE_PT_CALIB2:
+ case ONE_PT_CALIB2_NO_OFFSET:
for (i = 0; i < priv->num_sensors; i++)
p1[i] = (p1[i] + base1) << shift;
break;
@@ -149,6 +151,18 @@ int tsens_read_calibration(struct tsens_priv *priv, int shift, u32 *p1, u32 *p2,
}
}
+ /* Apply calibration offset workaround except for _NO_OFFSET modes */
+ switch (mode) {
+ case TWO_PT_CALIB:
+ for (i = 0; i < priv->num_sensors; i++)
+ p2[i] += priv->sensor[i].p2_calib_offset;
+ fallthrough;
+ case ONE_PT_CALIB2:
+ for (i = 0; i < priv->num_sensors; i++)
+ p1[i] += priv->sensor[i].p1_calib_offset;
+ break;
+ }
+
return mode;
}
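A worked example of the new offset workaround, with hypothetical values: in ONE_PT_CALIB2 mode with base1 = 0x50, shift = 2 and p1_calib_offset = -4 for sensor i, the effective code is

	p1[i] = ((p1[i] + 0x50) << 2) - 4;	/* *_NO_OFFSET modes skip the "- 4" */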
@@ -254,7 +268,7 @@ void compute_intercept_slope(struct tsens_priv *priv, u32 *p1,
if (!priv->sensor[i].slope)
priv->sensor[i].slope = SLOPE_DEFAULT;
- if (mode == TWO_PT_CALIB) {
+ if (mode == TWO_PT_CALIB || mode == TWO_PT_CALIB_NO_OFFSET) {
/*
* slope (m) = adc_code2 - adc_code1 (y2 - y1)/
* temp_120_degc - temp_30_degc (x2 - x1)
@@ -1096,6 +1110,12 @@ static const struct of_device_id tsens_table[] = {
.compatible = "qcom,mdm9607-tsens",
.data = &data_9607,
}, {
+ .compatible = "qcom,msm8226-tsens",
+ .data = &data_8226,
+ }, {
+ .compatible = "qcom,msm8909-tsens",
+ .data = &data_8909,
+ }, {
.compatible = "qcom,msm8916-tsens",
.data = &data_8916,
}, {
@@ -1189,9 +1209,7 @@ static int tsens_register(struct tsens_priv *priv)
if (priv->ops->enable)
priv->ops->enable(priv, i);
- if (devm_thermal_add_hwmon_sysfs(priv->dev, tzd))
- dev_warn(priv->dev,
- "Failed to add hwmon sysfs attributes\n");
+ devm_thermal_add_hwmon_sysfs(priv->dev, tzd);
}
/* VER_0 require to set MIN and MAX THRESH
diff --git a/drivers/thermal/qcom/tsens.h b/drivers/thermal/qcom/tsens.h
index dba9cd38f637..2805de1c6827 100644
--- a/drivers/thermal/qcom/tsens.h
+++ b/drivers/thermal/qcom/tsens.h
@@ -10,6 +10,8 @@
#define ONE_PT_CALIB 0x1
#define ONE_PT_CALIB2 0x2
#define TWO_PT_CALIB 0x3
+#define ONE_PT_CALIB2_NO_OFFSET 0x6
+#define TWO_PT_CALIB_NO_OFFSET 0x7
#define CAL_DEGC_PT1 30
#define CAL_DEGC_PT2 120
#define SLOPE_FACTOR 1000
@@ -57,6 +59,8 @@ struct tsens_sensor {
unsigned int hw_id;
int slope;
u32 status;
+ int p1_calib_offset;
+ int p2_calib_offset;
};
/**
@@ -635,7 +639,7 @@ int get_temp_common(const struct tsens_sensor *s, int *temp);
extern struct tsens_plat_data data_8960;
/* TSENS v0.1 targets */
-extern struct tsens_plat_data data_8916, data_8939, data_8974, data_9607;
+extern struct tsens_plat_data data_8226, data_8909, data_8916, data_8939, data_8974, data_9607;
/* TSENS v1 targets */
extern struct tsens_plat_data data_tsens_v1, data_8976, data_8956;
diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c
index e58756323457..ccc2eea7f9f5 100644
--- a/drivers/thermal/qoriq_thermal.c
+++ b/drivers/thermal/qoriq_thermal.c
@@ -31,7 +31,6 @@
#define TMR_DISABLE 0x0
#define TMR_ME 0x80000000
#define TMR_ALPF 0x0c000000
-#define TMR_MSITE_ALL GENMASK(15, 0)
#define REGS_TMTMIR 0x008 /* Temperature measurement interval Register */
#define TMTMIR_DEFAULT 0x0000000f
@@ -51,6 +50,7 @@
* Site Register
*/
#define TRITSR_V BIT(31)
+#define TRITSR_TP5 BIT(9)
#define REGS_V2_TMSAR(n) (0x304 + 16 * (n)) /* TMU monitoring
* site adjustment register
*/
@@ -105,6 +105,11 @@ static int tmu_get_temp(struct thermal_zone_device *tz, int *temp)
* within sensor range. TEMP is a 9-bit value representing
* temperature in kelvin.
*/
+
+ regmap_read(qdata->regmap, REGS_TMR, &val);
+ if (!(val & TMR_ME))
+ return -EAGAIN;
+
if (regmap_read_poll_timeout(qdata->regmap,
REGS_TRITSR(qsensor->id),
val,
@@ -113,10 +118,15 @@ static int tmu_get_temp(struct thermal_zone_device *tz, int *temp)
10 * USEC_PER_MSEC))
return -ENODATA;
- if (qdata->ver == TMU_VER1)
+ if (qdata->ver == TMU_VER1) {
*temp = (val & GENMASK(7, 0)) * MILLIDEGREE_PER_DEGREE;
- else
- *temp = kelvin_to_millicelsius(val & GENMASK(8, 0));
+ } else {
+ if (val & TRITSR_TP5)
+ *temp = milli_kelvin_to_millicelsius((val & GENMASK(8, 0)) *
+ MILLIDEGREE_PER_DEGREE + 500);
+ else
+ *temp = kelvin_to_millicelsius(val & GENMASK(8, 0));
+ }
return 0;
}
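TRITSR_TP5 marks a reading with an extra 0.5 K of resolution on TMU v2. A worked example with a hypothetical raw value of 300:

	/* TP5 clear: kelvin_to_millicelsius(300) = 300 * 1000 - 273150 = 26850 millicelsius */
	/* TP5 set: milli_kelvin_to_millicelsius(300 * 1000 + 500) = 300500 - 273150 = 27350 */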
@@ -128,15 +138,7 @@ static const struct thermal_zone_device_ops tmu_tz_ops = {
static int qoriq_tmu_register_tmu_zone(struct device *dev,
struct qoriq_tmu_data *qdata)
{
- int id;
-
- if (qdata->ver == TMU_VER1) {
- regmap_write(qdata->regmap, REGS_TMR,
- TMR_MSITE_ALL | TMR_ME | TMR_ALPF);
- } else {
- regmap_write(qdata->regmap, REGS_V2_TMSR, TMR_MSITE_ALL);
- regmap_write(qdata->regmap, REGS_TMR, TMR_ME | TMR_ALPF_V2);
- }
+ int id, sites = 0;
for (id = 0; id < SITES_MAX; id++) {
struct thermal_zone_device *tzd;
@@ -153,14 +155,24 @@ static int qoriq_tmu_register_tmu_zone(struct device *dev,
if (ret == -ENODEV)
continue;
- regmap_write(qdata->regmap, REGS_TMR, TMR_DISABLE);
return ret;
}
- if (devm_thermal_add_hwmon_sysfs(dev, tzd))
- dev_warn(dev,
- "Failed to add hwmon sysfs attributes\n");
+ if (qdata->ver == TMU_VER1)
+ sites |= 0x1 << (15 - id);
+ else
+ sites |= 0x1 << id;
+
+ devm_thermal_add_hwmon_sysfs(dev, tzd);
+ }
+ if (sites) {
+ if (qdata->ver == TMU_VER1) {
+ regmap_write(qdata->regmap, REGS_TMR, TMR_ME | TMR_ALPF | sites);
+ } else {
+ regmap_write(qdata->regmap, REGS_V2_TMSR, sites);
+ regmap_write(qdata->regmap, REGS_TMR, TMR_ME | TMR_ALPF_V2);
+ }
}
return 0;
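Monitoring is now enabled only after at least one zone registers, and the enabled-site mask is accumulated per sensor. The per-version bit layout, as read from the code above:

	u32 site_bit = (qdata->ver == TMU_VER1) ? BIT(15 - id) : BIT(id);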
@@ -208,8 +220,6 @@ static int qoriq_tmu_calibration(struct device *dev,
static void qoriq_tmu_init_device(struct qoriq_tmu_data *data)
{
- int i;
-
/* Disable interrupt, using polling instead */
regmap_write(data->regmap, REGS_TIER, TIER_DISABLE);
@@ -220,8 +230,6 @@ static void qoriq_tmu_init_device(struct qoriq_tmu_data *data)
} else {
regmap_write(data->regmap, REGS_V2_TMTMIR, TMTMIR_DEFAULT);
regmap_write(data->regmap, REGS_V2_TEUMR(0), TEUMR0_V2);
- for (i = 0; i < SITES_MAX; i++)
- regmap_write(data->regmap, REGS_V2_TMSAR(i), TMSARA_V2);
}
/* Disable monitoring */
@@ -230,7 +238,7 @@ static void qoriq_tmu_init_device(struct qoriq_tmu_data *data)
static const struct regmap_range qoriq_yes_ranges[] = {
regmap_reg_range(REGS_TMR, REGS_TSCFGR),
- regmap_reg_range(REGS_TTRnCR(0), REGS_TTRnCR(3)),
+ regmap_reg_range(REGS_TTRnCR(0), REGS_TTRnCR(15)),
regmap_reg_range(REGS_V2_TEUMR(0), REGS_V2_TEUMR(2)),
regmap_reg_range(REGS_V2_TMSAR(0), REGS_V2_TMSAR(15)),
regmap_reg_range(REGS_IPBRR(0), REGS_IPBRR(1)),
diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c
index 42a4724d3920..9029d01e029b 100644
--- a/drivers/thermal/rcar_gen3_thermal.c
+++ b/drivers/thermal/rcar_gen3_thermal.c
@@ -35,6 +35,12 @@
#define REG_GEN3_PTAT2 0x60
#define REG_GEN3_PTAT3 0x64
#define REG_GEN3_THSCP 0x68
+#define REG_GEN4_THSFMON00 0x180
+#define REG_GEN4_THSFMON01 0x184
+#define REG_GEN4_THSFMON02 0x188
+#define REG_GEN4_THSFMON15 0x1BC
+#define REG_GEN4_THSFMON16 0x1C0
+#define REG_GEN4_THSFMON17 0x1C4
/* IRQ{STR,MSK,EN} bits */
#define IRQ_TEMP1 BIT(0)
@@ -55,6 +61,7 @@
#define MCELSIUS(temp) ((temp) * 1000)
#define GEN3_FUSE_MASK 0xFFF
+#define GEN4_FUSE_MASK 0xFFF
#define TSC_MAX_NUM 5
@@ -66,6 +73,13 @@ struct equation_coefs {
int b2;
};
+struct rcar_gen3_thermal_priv;
+
+struct rcar_thermal_info {
+ int ths_tj_1;
+ void (*read_fuses)(struct rcar_gen3_thermal_priv *priv);
+};
+
struct rcar_gen3_thermal_tsc {
void __iomem *base;
struct thermal_zone_device *zone;
@@ -79,6 +93,7 @@ struct rcar_gen3_thermal_priv {
struct thermal_zone_device_ops ops;
unsigned int num_tscs;
int ptat[3];
+ const struct rcar_thermal_info *info;
};
static inline u32 rcar_gen3_thermal_read(struct rcar_gen3_thermal_tsc *tsc,
@@ -236,6 +251,62 @@ static irqreturn_t rcar_gen3_thermal_irq(int irq, void *data)
return IRQ_HANDLED;
}
+static void rcar_gen3_thermal_read_fuses_gen3(struct rcar_gen3_thermal_priv *priv)
+{
+ unsigned int i;
+
+ /*
+ * Set the pseudo calibration points with fused values.
+ * PTAT is shared between all TSCs but only fused for the first
+ * TSC while THCODEs are fused for each TSC.
+ */
+ priv->ptat[0] = rcar_gen3_thermal_read(priv->tscs[0], REG_GEN3_PTAT1) &
+ GEN3_FUSE_MASK;
+ priv->ptat[1] = rcar_gen3_thermal_read(priv->tscs[0], REG_GEN3_PTAT2) &
+ GEN3_FUSE_MASK;
+ priv->ptat[2] = rcar_gen3_thermal_read(priv->tscs[0], REG_GEN3_PTAT3) &
+ GEN3_FUSE_MASK;
+
+ for (i = 0; i < priv->num_tscs; i++) {
+ struct rcar_gen3_thermal_tsc *tsc = priv->tscs[i];
+
+ tsc->thcode[0] = rcar_gen3_thermal_read(tsc, REG_GEN3_THCODE1) &
+ GEN3_FUSE_MASK;
+ tsc->thcode[1] = rcar_gen3_thermal_read(tsc, REG_GEN3_THCODE2) &
+ GEN3_FUSE_MASK;
+ tsc->thcode[2] = rcar_gen3_thermal_read(tsc, REG_GEN3_THCODE3) &
+ GEN3_FUSE_MASK;
+ }
+}
+
+static void rcar_gen3_thermal_read_fuses_gen4(struct rcar_gen3_thermal_priv *priv)
+{
+ unsigned int i;
+
+ /*
+ * Set the pseudo calibration points with fused values.
+ * PTAT is shared between all TSCs but only fused for the first
+ * TSC while THCODEs are fused for each TSC.
+ */
+ priv->ptat[0] = rcar_gen3_thermal_read(priv->tscs[0], REG_GEN4_THSFMON16) &
+ GEN4_FUSE_MASK;
+ priv->ptat[1] = rcar_gen3_thermal_read(priv->tscs[0], REG_GEN4_THSFMON17) &
+ GEN4_FUSE_MASK;
+ priv->ptat[2] = rcar_gen3_thermal_read(priv->tscs[0], REG_GEN4_THSFMON15) &
+ GEN4_FUSE_MASK;
+
+ for (i = 0; i < priv->num_tscs; i++) {
+ struct rcar_gen3_thermal_tsc *tsc = priv->tscs[i];
+
+ tsc->thcode[0] = rcar_gen3_thermal_read(tsc, REG_GEN4_THSFMON01) &
+ GEN4_FUSE_MASK;
+ tsc->thcode[1] = rcar_gen3_thermal_read(tsc, REG_GEN4_THSFMON02) &
+ GEN4_FUSE_MASK;
+ tsc->thcode[2] = rcar_gen3_thermal_read(tsc, REG_GEN4_THSFMON00) &
+ GEN4_FUSE_MASK;
+ }
+}
+
static bool rcar_gen3_thermal_read_fuses(struct rcar_gen3_thermal_priv *priv)
{
unsigned int i;
@@ -243,7 +314,8 @@ static bool rcar_gen3_thermal_read_fuses(struct rcar_gen3_thermal_priv *priv)
/* If fuses are not set, fall back to pseudo values. */
thscp = rcar_gen3_thermal_read(priv->tscs[0], REG_GEN3_THSCP);
- if ((thscp & THSCP_COR_PARA_VLD) != THSCP_COR_PARA_VLD) {
+ if (!priv->info->read_fuses ||
+ (thscp & THSCP_COR_PARA_VLD) != THSCP_COR_PARA_VLD) {
/* Default THCODE values in case FUSEs are not set. */
static const int thcodes[TSC_MAX_NUM][3] = {
{ 3397, 2800, 2221 },
@@ -268,29 +340,7 @@ static bool rcar_gen3_thermal_read_fuses(struct rcar_gen3_thermal_priv *priv)
return false;
}
- /*
- * Set the pseudo calibration points with fused values.
- * PTAT is shared between all TSCs but only fused for the first
- * TSC while THCODEs are fused for each TSC.
- */
- priv->ptat[0] = rcar_gen3_thermal_read(priv->tscs[0], REG_GEN3_PTAT1) &
- GEN3_FUSE_MASK;
- priv->ptat[1] = rcar_gen3_thermal_read(priv->tscs[0], REG_GEN3_PTAT2) &
- GEN3_FUSE_MASK;
- priv->ptat[2] = rcar_gen3_thermal_read(priv->tscs[0], REG_GEN3_PTAT3) &
- GEN3_FUSE_MASK;
-
- for (i = 0; i < priv->num_tscs; i++) {
- struct rcar_gen3_thermal_tsc *tsc = priv->tscs[i];
-
- tsc->thcode[0] = rcar_gen3_thermal_read(tsc, REG_GEN3_THCODE1) &
- GEN3_FUSE_MASK;
- tsc->thcode[1] = rcar_gen3_thermal_read(tsc, REG_GEN3_THCODE2) &
- GEN3_FUSE_MASK;
- tsc->thcode[2] = rcar_gen3_thermal_read(tsc, REG_GEN3_THCODE3) &
- GEN3_FUSE_MASK;
- }
-
+ priv->info->read_fuses(priv);
return true;
}
@@ -318,52 +368,65 @@ static void rcar_gen3_thermal_init(struct rcar_gen3_thermal_priv *priv,
usleep_range(1000, 2000);
}
-static const int rcar_gen3_ths_tj_1 = 126;
-static const int rcar_gen3_ths_tj_1_m3_w = 116;
+static const struct rcar_thermal_info rcar_m3w_thermal_info = {
+ .ths_tj_1 = 116,
+ .read_fuses = rcar_gen3_thermal_read_fuses_gen3,
+};
+
+static const struct rcar_thermal_info rcar_gen3_thermal_info = {
+ .ths_tj_1 = 126,
+ .read_fuses = rcar_gen3_thermal_read_fuses_gen3,
+};
+
+static const struct rcar_thermal_info rcar_gen4_thermal_info = {
+ .ths_tj_1 = 126,
+ .read_fuses = rcar_gen3_thermal_read_fuses_gen4,
+};
+
static const struct of_device_id rcar_gen3_thermal_dt_ids[] = {
{
.compatible = "renesas,r8a774a1-thermal",
- .data = &rcar_gen3_ths_tj_1_m3_w,
+ .data = &rcar_m3w_thermal_info,
},
{
.compatible = "renesas,r8a774b1-thermal",
- .data = &rcar_gen3_ths_tj_1,
+ .data = &rcar_gen3_thermal_info,
},
{
.compatible = "renesas,r8a774e1-thermal",
- .data = &rcar_gen3_ths_tj_1,
+ .data = &rcar_gen3_thermal_info,
},
{
.compatible = "renesas,r8a7795-thermal",
- .data = &rcar_gen3_ths_tj_1,
+ .data = &rcar_gen3_thermal_info,
},
{
.compatible = "renesas,r8a7796-thermal",
- .data = &rcar_gen3_ths_tj_1_m3_w,
+ .data = &rcar_m3w_thermal_info,
},
{
.compatible = "renesas,r8a77961-thermal",
- .data = &rcar_gen3_ths_tj_1_m3_w,
+ .data = &rcar_m3w_thermal_info,
},
{
.compatible = "renesas,r8a77965-thermal",
- .data = &rcar_gen3_ths_tj_1,
+ .data = &rcar_gen3_thermal_info,
},
{
.compatible = "renesas,r8a77980-thermal",
- .data = &rcar_gen3_ths_tj_1,
+ .data = &rcar_gen3_thermal_info,
},
{
.compatible = "renesas,r8a779a0-thermal",
- .data = &rcar_gen3_ths_tj_1,
+ .data = &rcar_gen3_thermal_info,
},
{
.compatible = "renesas,r8a779f0-thermal",
- .data = &rcar_gen3_ths_tj_1,
+ .data = &rcar_gen4_thermal_info,
},
{
.compatible = "renesas,r8a779g0-thermal",
- .data = &rcar_gen3_ths_tj_1,
+ .data = &rcar_gen4_thermal_info,
},
{},
};
@@ -418,7 +481,6 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
{
struct rcar_gen3_thermal_priv *priv;
struct device *dev = &pdev->dev;
- const int *ths_tj_1 = of_device_get_match_data(dev);
struct resource *res;
struct thermal_zone_device *zone;
unsigned int i;
@@ -430,6 +492,7 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
priv->ops = rcar_gen3_tz_of_ops;
+ priv->info = of_device_get_match_data(dev);
platform_set_drvdata(pdev, priv);
if (rcar_gen3_thermal_request_irqs(priv, pdev))
@@ -469,7 +532,7 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
struct rcar_gen3_thermal_tsc *tsc = priv->tscs[i];
rcar_gen3_thermal_init(priv, tsc);
- rcar_gen3_thermal_calc_coefs(priv, tsc, *ths_tj_1);
+ rcar_gen3_thermal_calc_coefs(priv, tsc, priv->info->ths_tj_1);
zone = devm_thermal_of_zone_register(dev, i, tsc, &priv->ops);
if (IS_ERR(zone)) {
diff --git a/drivers/thermal/st/st_thermal.c b/drivers/thermal/st/st_thermal.c
index 2d3042098445..0d6249b36609 100644
--- a/drivers/thermal/st/st_thermal.c
+++ b/drivers/thermal/st/st_thermal.c
@@ -227,14 +227,12 @@ sensor_off:
}
EXPORT_SYMBOL_GPL(st_thermal_register);
-int st_thermal_unregister(struct platform_device *pdev)
+void st_thermal_unregister(struct platform_device *pdev)
{
struct st_thermal_sensor *sensor = platform_get_drvdata(pdev);
st_thermal_sensor_off(sensor);
thermal_zone_device_unregister(sensor->thermal_dev);
-
- return 0;
}
EXPORT_SYMBOL_GPL(st_thermal_unregister);
diff --git a/drivers/thermal/st/st_thermal.h b/drivers/thermal/st/st_thermal.h
index d661b2f2ef29..75a84e6ec6a7 100644
--- a/drivers/thermal/st/st_thermal.h
+++ b/drivers/thermal/st/st_thermal.h
@@ -94,7 +94,7 @@ struct st_thermal_sensor {
extern int st_thermal_register(struct platform_device *pdev,
const struct of_device_id *st_thermal_of_match);
-extern int st_thermal_unregister(struct platform_device *pdev);
+extern void st_thermal_unregister(struct platform_device *pdev);
extern const struct dev_pm_ops st_thermal_pm_ops;
#endif /* __STI_RESET_SYSCFG_H */
diff --git a/drivers/thermal/st/st_thermal_memmap.c b/drivers/thermal/st/st_thermal_memmap.c
index d68596c40be9..e8cfa83b724a 100644
--- a/drivers/thermal/st/st_thermal_memmap.c
+++ b/drivers/thermal/st/st_thermal_memmap.c
@@ -172,9 +172,9 @@ static int st_mmap_probe(struct platform_device *pdev)
return st_thermal_register(pdev, st_mmap_thermal_of_match);
}
-static int st_mmap_remove(struct platform_device *pdev)
+static void st_mmap_remove(struct platform_device *pdev)
{
- return st_thermal_unregister(pdev);
+ st_thermal_unregister(pdev);
}
static struct platform_driver st_mmap_thermal_driver = {
@@ -184,7 +184,7 @@ static struct platform_driver st_mmap_thermal_driver = {
.of_match_table = st_mmap_thermal_of_match,
},
.probe = st_mmap_probe,
- .remove = st_mmap_remove,
+ .remove_new = st_mmap_remove,
};
module_platform_driver(st_mmap_thermal_driver);
diff --git a/drivers/thermal/sun8i_thermal.c b/drivers/thermal/sun8i_thermal.c
index 793ddce72132..195f3c5d0b38 100644
--- a/drivers/thermal/sun8i_thermal.c
+++ b/drivers/thermal/sun8i_thermal.c
@@ -319,6 +319,11 @@ out:
return ret;
}
+static void sun8i_ths_reset_control_assert(void *data)
+{
+ reset_control_assert(data);
+}
+
static int sun8i_ths_resource_init(struct ths_device *tmdev)
{
struct device *dev = tmdev->dev;
@@ -339,47 +344,35 @@ static int sun8i_ths_resource_init(struct ths_device *tmdev)
if (IS_ERR(tmdev->reset))
return PTR_ERR(tmdev->reset);
- tmdev->bus_clk = devm_clk_get(&pdev->dev, "bus");
+ ret = reset_control_deassert(tmdev->reset);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, sun8i_ths_reset_control_assert,
+ tmdev->reset);
+ if (ret)
+ return ret;
+
+ tmdev->bus_clk = devm_clk_get_enabled(&pdev->dev, "bus");
if (IS_ERR(tmdev->bus_clk))
return PTR_ERR(tmdev->bus_clk);
}
if (tmdev->chip->has_mod_clk) {
- tmdev->mod_clk = devm_clk_get(&pdev->dev, "mod");
+ tmdev->mod_clk = devm_clk_get_enabled(&pdev->dev, "mod");
if (IS_ERR(tmdev->mod_clk))
return PTR_ERR(tmdev->mod_clk);
}
- ret = reset_control_deassert(tmdev->reset);
- if (ret)
- return ret;
-
- ret = clk_prepare_enable(tmdev->bus_clk);
- if (ret)
- goto assert_reset;
-
ret = clk_set_rate(tmdev->mod_clk, 24000000);
if (ret)
- goto bus_disable;
-
- ret = clk_prepare_enable(tmdev->mod_clk);
- if (ret)
- goto bus_disable;
+ return ret;
ret = sun8i_ths_calibrate(tmdev);
if (ret)
- goto mod_disable;
+ return ret;
return 0;
-
-mod_disable:
- clk_disable_unprepare(tmdev->mod_clk);
-bus_disable:
- clk_disable_unprepare(tmdev->bus_clk);
-assert_reset:
- reset_control_assert(tmdev->reset);
-
- return ret;
}
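This conversion relies on devres teardown running in reverse order of registration, which preserves the ordering of the now-removed sun8i_ths_remove():

	/*
	 * probe (registration order)        unbind (reverse order)
	 *  1. deassert reset + add action    3. assert reset
	 *  2. devm_clk_get_enabled("bus")    2. disable/unprepare bus clk
	 *  3. devm_clk_get_enabled("mod")    1. disable/unprepare mod clk
	 */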
static int sun8i_h3_thermal_init(struct ths_device *tmdev)
@@ -475,9 +468,7 @@ static int sun8i_ths_register(struct ths_device *tmdev)
if (IS_ERR(tmdev->sensor[i].tzd))
return PTR_ERR(tmdev->sensor[i].tzd);
- if (devm_thermal_add_hwmon_sysfs(tmdev->dev, tmdev->sensor[i].tzd))
- dev_warn(tmdev->dev,
- "Failed to add hwmon sysfs attributes\n");
+ devm_thermal_add_hwmon_sysfs(tmdev->dev, tmdev->sensor[i].tzd);
}
return 0;
@@ -530,17 +521,6 @@ static int sun8i_ths_probe(struct platform_device *pdev)
return 0;
}
-static int sun8i_ths_remove(struct platform_device *pdev)
-{
- struct ths_device *tmdev = platform_get_drvdata(pdev);
-
- clk_disable_unprepare(tmdev->mod_clk);
- clk_disable_unprepare(tmdev->bus_clk);
- reset_control_assert(tmdev->reset);
-
- return 0;
-}
-
static const struct ths_thermal_chip sun8i_a83t_ths = {
.sensor_num = 3,
.scale = 705,
@@ -642,7 +622,6 @@ MODULE_DEVICE_TABLE(of, of_ths_match);
static struct platform_driver ths_driver = {
.probe = sun8i_ths_probe,
- .remove = sun8i_ths_remove,
.driver = {
.name = "sun8i-thermal",
.of_match_table = of_ths_match,
diff --git a/drivers/thermal/tegra/tegra30-tsensor.c b/drivers/thermal/tegra/tegra30-tsensor.c
index cb584a5735ed..c243e9d76d3c 100644
--- a/drivers/thermal/tegra/tegra30-tsensor.c
+++ b/drivers/thermal/tegra/tegra30-tsensor.c
@@ -523,8 +523,7 @@ static int tegra_tsensor_register_channel(struct tegra_tsensor *ts,
return 0;
}
- if (devm_thermal_add_hwmon_sysfs(ts->dev, tsc->tzd))
- dev_warn(ts->dev, "failed to add hwmon sysfs attributes\n");
+ devm_thermal_add_hwmon_sysfs(ts->dev, tsc->tzd);
return 0;
}
diff --git a/drivers/thermal/thermal-generic-adc.c b/drivers/thermal/thermal-generic-adc.c
index 017b0ce52122..f4f1a04f8c0f 100644
--- a/drivers/thermal/thermal-generic-adc.c
+++ b/drivers/thermal/thermal-generic-adc.c
@@ -13,6 +13,8 @@
#include <linux/slab.h>
#include <linux/thermal.h>
+#include "thermal_hwmon.h"
+
struct gadc_thermal_info {
struct device *dev;
struct thermal_zone_device *tz_dev;
@@ -153,6 +155,8 @@ static int gadc_thermal_probe(struct platform_device *pdev)
return ret;
}
+ devm_thermal_add_hwmon_sysfs(&pdev->dev, gti->tz_dev);
+
return 0;
}
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index 3d4a787c6b28..17c1bbed734d 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -23,6 +23,8 @@
#define DEFAULT_THERMAL_GOVERNOR "user_space"
#elif defined(CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR)
#define DEFAULT_THERMAL_GOVERNOR "power_allocator"
+#elif defined(CONFIG_THERMAL_DEFAULT_GOV_BANG_BANG)
+#define DEFAULT_THERMAL_GOVERNOR "bang_bang"
#endif
/* Initial state of a cooling device during binding */
diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c
index fbe55509e307..c3ae44659b81 100644
--- a/drivers/thermal/thermal_hwmon.c
+++ b/drivers/thermal/thermal_hwmon.c
@@ -271,11 +271,14 @@ int devm_thermal_add_hwmon_sysfs(struct device *dev, struct thermal_zone_device
ptr = devres_alloc(devm_thermal_hwmon_release, sizeof(*ptr),
GFP_KERNEL);
- if (!ptr)
+ if (!ptr) {
+ dev_warn(dev, "Failed to allocate device resource data\n");
return -ENOMEM;
+ }
ret = thermal_add_hwmon_sysfs(tz);
if (ret) {
+ dev_warn(dev, "Failed to add hwmon sysfs attributes\n");
devres_free(ptr);
return ret;
}
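With the warnings emitted centrally here, the per-driver wrappers removed throughout this series collapse to a bare call:

	devm_thermal_add_hwmon_sysfs(dev, tzd);	/* failures are logged by the core */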
diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
index 6a5335931f4d..d414a4b7a94a 100644
--- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
+++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
@@ -182,8 +182,7 @@ int ti_thermal_expose_sensor(struct ti_bandgap *bgp, int id,
ti_bandgap_write_update_interval(bgp, data->sensor_id,
TI_BANDGAP_UPDATE_INTERVAL_MS);
- if (devm_thermal_add_hwmon_sysfs(bgp->dev, data->ti_thermal))
- dev_warn(bgp->dev, "failed to add hwmon sysfs attributes\n");
+ devm_thermal_add_hwmon_sysfs(bgp->dev, data->ti_thermal);
return 0;
}
diff --git a/drivers/thunderbolt/dma_test.c b/drivers/thunderbolt/dma_test.c
index 3bedecb236e0..14bb6dec6c4b 100644
--- a/drivers/thunderbolt/dma_test.c
+++ b/drivers/thunderbolt/dma_test.c
@@ -192,9 +192,9 @@ static int dma_test_start_rings(struct dma_test *dt)
}
ret = tb_xdomain_enable_paths(dt->xd, dt->tx_hopid,
- dt->tx_ring ? dt->tx_ring->hop : 0,
+ dt->tx_ring ? dt->tx_ring->hop : -1,
dt->rx_hopid,
- dt->rx_ring ? dt->rx_ring->hop : 0);
+ dt->rx_ring ? dt->rx_ring->hop : -1);
if (ret) {
dma_test_free_rings(dt);
return ret;
@@ -218,9 +218,9 @@ static void dma_test_stop_rings(struct dma_test *dt)
tb_ring_stop(dt->tx_ring);
ret = tb_xdomain_disable_paths(dt->xd, dt->tx_hopid,
- dt->tx_ring ? dt->tx_ring->hop : 0,
+ dt->tx_ring ? dt->tx_ring->hop : -1,
dt->rx_hopid,
- dt->rx_ring ? dt->rx_ring->hop : 0);
+ dt->rx_ring ? dt->rx_ring->hop : -1);
if (ret)
dev_warn(&dt->svc->dev, "failed to disable DMA paths\n");
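Hop 0 is a valid ring number, so 0 could not encode an absent ring; the assumption behind this fix is that the XDomain path API treats a negative HopID as "ring not used":

	dt->tx_ring ? dt->tx_ring->hop : -1	/* -1: no TX ring configured */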
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index d76e923fbc6a..e58beac44295 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -54,6 +54,26 @@ static int ring_interrupt_index(const struct tb_ring *ring)
return bit;
}
+static void nhi_mask_interrupt(struct tb_nhi *nhi, int mask, int ring)
+{
+ if (nhi->quirks & QUIRK_AUTO_CLEAR_INT) {
+ u32 val;
+
+ val = ioread32(nhi->iobase + REG_RING_INTERRUPT_BASE + ring);
+ iowrite32(val & ~mask, nhi->iobase + REG_RING_INTERRUPT_BASE + ring);
+ } else {
+ iowrite32(mask, nhi->iobase + REG_RING_INTERRUPT_MASK_CLEAR_BASE + ring);
+ }
+}
+
+static void nhi_clear_interrupt(struct tb_nhi *nhi, int ring)
+{
+ if (nhi->quirks & QUIRK_AUTO_CLEAR_INT)
+ ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + ring);
+ else
+ iowrite32(~0, nhi->iobase + REG_RING_INT_CLEAR + ring);
+}
+
/*
* ring_interrupt_active() - activate/deactivate interrupts for a single ring
*
@@ -61,8 +81,8 @@ static int ring_interrupt_index(const struct tb_ring *ring)
*/
static void ring_interrupt_active(struct tb_ring *ring, bool active)
{
- int reg = REG_RING_INTERRUPT_BASE +
- ring_interrupt_index(ring) / 32 * 4;
+ int index = ring_interrupt_index(ring) / 32 * 4;
+ int reg = REG_RING_INTERRUPT_BASE + index;
int interrupt_bit = ring_interrupt_index(ring) & 31;
int mask = 1 << interrupt_bit;
u32 old, new;
@@ -123,7 +143,11 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active)
"interrupt for %s %d is already %s\n",
RING_TYPE(ring), ring->hop,
active ? "enabled" : "disabled");
- iowrite32(new, ring->nhi->iobase + reg);
+
+ if (active)
+ iowrite32(new, ring->nhi->iobase + reg);
+ else
+ nhi_mask_interrupt(ring->nhi, mask, index);
}
/*
@@ -136,11 +160,11 @@ static void nhi_disable_interrupts(struct tb_nhi *nhi)
int i = 0;
/* disable interrupts */
for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++)
- iowrite32(0, nhi->iobase + REG_RING_INTERRUPT_BASE + 4 * i);
+ nhi_mask_interrupt(nhi, ~0, 4 * i);
/* clear interrupt status bits */
for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++)
- ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + 4 * i);
+ nhi_clear_interrupt(nhi, 4 * i);
}
/* ring helper methods */
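On controllers without QUIRK_AUTO_CLEAR_INT a dedicated mask-clear register (REG_RING_INTERRUPT_MASK_CLEAR_BASE, 0x38208) lets a single write mask a ring, avoiding the read-modify-write on REG_RING_INTERRUPT_BASE; with the quirk the old RMW path is kept. Typical call shape:

	nhi_mask_interrupt(ring->nhi, mask, index);	/* mask one ring's interrupt */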
diff --git a/drivers/thunderbolt/nhi_regs.h b/drivers/thunderbolt/nhi_regs.h
index faef165a919c..6ba295815477 100644
--- a/drivers/thunderbolt/nhi_regs.h
+++ b/drivers/thunderbolt/nhi_regs.h
@@ -93,6 +93,8 @@ struct ring_desc {
#define REG_RING_INTERRUPT_BASE 0x38200
#define RING_INTERRUPT_REG_COUNT(nhi) ((31 + 2 * nhi->hop_count) / 32)
+#define REG_RING_INTERRUPT_MASK_CLEAR_BASE 0x38208
+
#define REG_INT_THROTTLING_RATE 0x38c00
/* Interrupt Vector Allocation */
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 7bfbc9ca9ba4..c1af712ca728 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -737,6 +737,7 @@ static void tb_scan_port(struct tb_port *port)
{
struct tb_cm *tcm = tb_priv(port->sw->tb);
struct tb_port *upstream_port;
+ bool discovery = false;
struct tb_switch *sw;
int ret;
@@ -804,8 +805,10 @@ static void tb_scan_port(struct tb_port *port)
* tunnels and know which switches were authorized already by
* the boot firmware.
*/
- if (!tcm->hotplug_active)
+ if (!tcm->hotplug_active) {
dev_set_uevent_suppress(&sw->dev, true);
+ discovery = true;
+ }
/*
* At the moment Thunderbolt 2 and beyond (devices with LC) we
@@ -835,10 +838,14 @@ static void tb_scan_port(struct tb_port *port)
* CL0s and CL1 are enabled and supported together.
* Silently ignore CLx enabling in case CLx is not supported.
*/
- ret = tb_switch_enable_clx(sw, TB_CL1);
- if (ret && ret != -EOPNOTSUPP)
- tb_sw_warn(sw, "failed to enable %s on upstream port\n",
- tb_switch_clx_name(TB_CL1));
+ if (discovery) {
+ tb_sw_dbg(sw, "discovery, not touching CL states\n");
+ } else {
+ ret = tb_switch_enable_clx(sw, TB_CL1);
+ if (ret && ret != -EOPNOTSUPP)
+ tb_sw_warn(sw, "failed to enable %s on upstream port\n",
+ tb_switch_clx_name(TB_CL1));
+ }
if (tb_switch_is_clx_enabled(sw, TB_CL1))
/*
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index 9099ae73e78f..4f222673d651 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -526,7 +526,7 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
* Perform connection manager handshake between IN and OUT ports
* before capabilities exchange can take place.
*/
- ret = tb_dp_cm_handshake(in, out, 1500);
+ ret = tb_dp_cm_handshake(in, out, 3000);
if (ret)
return ret;
diff --git a/drivers/tty/serial/8250/8250_bcm7271.c b/drivers/tty/serial/8250/8250_bcm7271.c
index f801b1f5b46c..af0e1c070187 100644
--- a/drivers/tty/serial/8250/8250_bcm7271.c
+++ b/drivers/tty/serial/8250/8250_bcm7271.c
@@ -1012,7 +1012,7 @@ static int brcmuart_probe(struct platform_device *pdev)
of_property_read_u32(np, "clock-frequency", &clk_rate);
/* See if a Baud clock has been specified */
- baud_mux_clk = of_clk_get_by_name(np, "sw_baud");
+ baud_mux_clk = devm_clk_get(dev, "sw_baud");
if (IS_ERR(baud_mux_clk)) {
if (PTR_ERR(baud_mux_clk) == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
@@ -1032,7 +1032,7 @@ static int brcmuart_probe(struct platform_device *pdev)
if (clk_rate == 0) {
dev_err(dev, "clock-frequency or clk not defined\n");
ret = -EINVAL;
- goto release_dma;
+ goto err_clk_disable;
}
dev_dbg(dev, "DMA is %senabled\n", priv->dma_enabled ? "" : "not ");
@@ -1119,6 +1119,8 @@ err1:
serial8250_unregister_port(priv->line);
err:
brcmuart_free_bufs(dev, priv);
+err_clk_disable:
+ clk_disable_unprepare(baud_mux_clk);
release_dma:
if (priv->dma_enabled)
brcmuart_arbitration(priv, 0);
@@ -1133,6 +1135,7 @@ static int brcmuart_remove(struct platform_device *pdev)
hrtimer_cancel(&priv->hrt);
serial8250_unregister_port(priv->line);
brcmuart_free_bufs(&pdev->dev, priv);
+ clk_disable_unprepare(priv->baud_mux_clk);
if (priv->dma_enabled)
brcmuart_arbitration(priv, 0);
return 0;
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
index 64770c62bbec..b406cba10b0e 100644
--- a/drivers/tty/serial/8250/8250_exar.c
+++ b/drivers/tty/serial/8250/8250_exar.c
@@ -40,9 +40,13 @@
#define PCI_DEVICE_ID_COMMTECH_4224PCIE 0x0020
#define PCI_DEVICE_ID_COMMTECH_4228PCIE 0x0021
#define PCI_DEVICE_ID_COMMTECH_4222PCIE 0x0022
+
#define PCI_DEVICE_ID_EXAR_XR17V4358 0x4358
#define PCI_DEVICE_ID_EXAR_XR17V8358 0x8358
+#define PCI_SUBDEVICE_ID_USR_2980 0x0128
+#define PCI_SUBDEVICE_ID_USR_2981 0x0129
+
#define PCI_DEVICE_ID_SEALEVEL_710xC 0x1001
#define PCI_DEVICE_ID_SEALEVEL_720xC 0x1002
#define PCI_DEVICE_ID_SEALEVEL_740xC 0x1004
@@ -829,6 +833,15 @@ static const struct exar8250_board pbn_exar_XR17V8358 = {
(kernel_ulong_t)&bd \
}
+#define USR_DEVICE(devid, sdevid, bd) { \
+ PCI_DEVICE_SUB( \
+ PCI_VENDOR_ID_USR, \
+ PCI_DEVICE_ID_EXAR_##devid, \
+ PCI_VENDOR_ID_EXAR, \
+ PCI_SUBDEVICE_ID_USR_##sdevid), 0, 0, \
+ (kernel_ulong_t)&bd \
+ }
+
static const struct pci_device_id exar_pci_tbl[] = {
EXAR_DEVICE(ACCESSIO, COM_2S, pbn_exar_XR17C15x),
EXAR_DEVICE(ACCESSIO, COM_4S, pbn_exar_XR17C15x),
@@ -853,6 +866,10 @@ static const struct pci_device_id exar_pci_tbl[] = {
IBM_DEVICE(XR17C152, SATURN_SERIAL_ONE_PORT, pbn_exar_ibm_saturn),
+ /* USRobotics USR298x-OEM PCI Modems */
+ USR_DEVICE(XR17C152, 2980, pbn_exar_XR17C15x),
+ USR_DEVICE(XR17C152, 2981, pbn_exar_XR17C15x),
+
/* Exar Corp. XR17C15[248] Dual/Quad/Octal UART */
EXAR_DEVICE(EXAR, XR17C152, pbn_exar_XR17C15x),
EXAR_DEVICE(EXAR, XR17C154, pbn_exar_XR17C15x),
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index c55be6fda0ca..e80c4f6551a1 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1920,6 +1920,8 @@ pci_moxa_setup(struct serial_private *priv,
#define PCI_SUBDEVICE_ID_SIIG_DUAL_30 0x2530
#define PCI_VENDOR_ID_ADVANTECH 0x13fe
#define PCI_DEVICE_ID_INTEL_CE4100_UART 0x2e66
+#define PCI_DEVICE_ID_ADVANTECH_PCI1600 0x1600
+#define PCI_DEVICE_ID_ADVANTECH_PCI1600_1611 0x1611
#define PCI_DEVICE_ID_ADVANTECH_PCI3620 0x3620
#define PCI_DEVICE_ID_ADVANTECH_PCI3618 0x3618
#define PCI_DEVICE_ID_ADVANTECH_PCIf618 0xf618
@@ -4085,6 +4087,9 @@ static SIMPLE_DEV_PM_OPS(pciserial_pm_ops, pciserial_suspend_one,
pciserial_resume_one);
static const struct pci_device_id serial_pci_tbl[] = {
+ { PCI_VENDOR_ID_ADVANTECH, PCI_DEVICE_ID_ADVANTECH_PCI1600,
+ PCI_DEVICE_ID_ADVANTECH_PCI1600_1611, PCI_ANY_ID, 0, 0,
+ pbn_b0_4_921600 },
/* Advantech use PCI_DEVICE_ID_ADVANTECH_PCI3620 (0x3620) as 'PCI_SUBVENDOR_ID' */
{ PCI_VENDOR_ID_ADVANTECH, PCI_DEVICE_ID_ADVANTECH_PCI3620,
PCI_DEVICE_ID_ADVANTECH_PCI3620, 0x0001, 0, 0,
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index fe8d79c4ae95..c153ba3a018a 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -669,6 +669,7 @@ EXPORT_SYMBOL_GPL(serial8250_em485_supported);
/**
* serial8250_em485_config() - generic ->rs485_config() callback
* @port: uart port
+ * @termios: termios structure
* @rs485: rs485 settings
*
* Generic callback usable by 8250 uart drivers to activate rs485 settings
diff --git a/drivers/tty/serial/8250/8250_tegra.c b/drivers/tty/serial/8250/8250_tegra.c
index 2509e7f74ccf..89956bbf34d9 100644
--- a/drivers/tty/serial/8250/8250_tegra.c
+++ b/drivers/tty/serial/8250/8250_tegra.c
@@ -113,13 +113,15 @@ static int tegra_uart_probe(struct platform_device *pdev)
ret = serial8250_register_8250_port(&port8250);
if (ret < 0)
- goto err_clkdisable;
+ goto err_ctrl_assert;
platform_set_drvdata(pdev, uart);
uart->line = ret;
return 0;
+err_ctrl_assert:
+ reset_control_assert(uart->rst);
err_clkdisable:
clk_disable_unprepare(uart->clk);
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 398e5aac2e77..71a7a3e724ca 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -450,8 +450,8 @@ config SERIAL_SA1100
help
If you have a machine based on a SA1100/SA1110 StrongARM(R) CPU you
can enable its onboard serial port by enabling this option.
- Please read <file:Documentation/arm/sa1100/serial_uart.rst> for further
- info.
+ Please read <file:Documentation/arch/arm/sa1100/serial_uart.rst> for
+ further info.
config SERIAL_SA1100_CONSOLE
bool "Console on SA1100 serial port"
@@ -762,7 +762,7 @@ config SERIAL_PMACZILOG_CONSOLE
config SERIAL_CPM
tristate "CPM SCC/SMC serial port support"
- depends on CPM2 || CPM1 || (PPC32 && COMPILE_TEST)
+ depends on CPM2 || CPM1
select SERIAL_CORE
help
This driver supports the SCC and SMC serial ports on Motorola
diff --git a/drivers/tty/serial/arc_uart.c b/drivers/tty/serial/arc_uart.c
index 59e25f2b6632..4b2512eef577 100644
--- a/drivers/tty/serial/arc_uart.c
+++ b/drivers/tty/serial/arc_uart.c
@@ -606,10 +606,11 @@ static int arc_serial_probe(struct platform_device *pdev)
}
uart->baud = val;
- port->membase = of_iomap(np, 0);
- if (!port->membase)
+ port->membase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(port->membase)) {
/* No point in dev_err since the UART itself is hosed here */
- return -ENXIO;
+ return PTR_ERR(port->membase);
+ }
port->irq = irq_of_parse_and_map(np, 0);
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart.h b/drivers/tty/serial/cpm_uart/cpm_uart.h
index 0577618e78c0..46c03ed71c31 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart.h
+++ b/drivers/tty/serial/cpm_uart/cpm_uart.h
@@ -19,8 +19,6 @@ struct gpio_desc;
#include "cpm_uart_cpm2.h"
#elif defined(CONFIG_CPM1)
#include "cpm_uart_cpm1.h"
-#elif defined(CONFIG_COMPILE_TEST)
-#include "cpm_uart_cpm2.h"
#endif
#define SERIAL_CPM_MAJOR 204
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index c91916e13648..7fd30fcc10c6 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -310,7 +310,7 @@ static const struct lpuart_soc_data ls1021a_data = {
static const struct lpuart_soc_data ls1028a_data = {
.devtype = LS1028A_LPUART,
.iotype = UPIO_MEM32,
- .rx_watermark = 1,
+ .rx_watermark = 0,
};
static struct lpuart_soc_data imx7ulp_data = {
@@ -1495,34 +1495,36 @@ static void lpuart_break_ctl(struct uart_port *port, int break_state)
static void lpuart32_break_ctl(struct uart_port *port, int break_state)
{
- unsigned long temp, modem;
- struct tty_struct *tty;
- unsigned int cflag = 0;
-
- tty = tty_port_tty_get(&port->state->port);
- if (tty) {
- cflag = tty->termios.c_cflag;
- tty_kref_put(tty);
- }
+ unsigned long temp;
- temp = lpuart32_read(port, UARTCTRL) & ~UARTCTRL_SBK;
- modem = lpuart32_read(port, UARTMODIR);
+ temp = lpuart32_read(port, UARTCTRL);
+ /*
+ * The LPUART IP has two known bugs. First, CTS has higher priority than
+ * the break signal: a break sent through UARTCTRL_SBK may be impacted by
+ * the CTS input if HW flow control is enabled. This exists on all
+ * platforms we support in this driver.
+ * Second, the i.MX8QM LPUART may send an additional break character
+ * after SBK is cleared.
+ * To avoid both bugs, use the Transmit Data Inversion function to send
+ * the break signal instead of UARTCTRL_SBK.
+ */
if (break_state != 0) {
- temp |= UARTCTRL_SBK;
/*
- * LPUART CTS has higher priority than SBK, need to disable CTS before
- * asserting SBK to avoid any interference if flow control is enabled.
+ * Disable the transmitter to prevent any data from being sent out
+ * during break, then invert the TX line to send break.
*/
- if (cflag & CRTSCTS && modem & UARTMODIR_TXCTSE)
- lpuart32_write(port, modem & ~UARTMODIR_TXCTSE, UARTMODIR);
+ temp &= ~UARTCTRL_TE;
+ lpuart32_write(port, temp, UARTCTRL);
+ temp |= UARTCTRL_TXINV;
+ lpuart32_write(port, temp, UARTCTRL);
} else {
- /* Re-enable the CTS when break off. */
- if (cflag & CRTSCTS && !(modem & UARTMODIR_TXCTSE))
- lpuart32_write(port, modem | UARTMODIR_TXCTSE, UARTMODIR);
+ /* Disable the TXINV to turn off break and re-enable transmitter. */
+ temp &= ~UARTCTRL_TXINV;
+ lpuart32_write(port, temp, UARTCTRL);
+ temp |= UARTCTRL_TE;
+ lpuart32_write(port, temp, UARTCTRL);
}
-
- lpuart32_write(port, temp, UARTCTRL);
}
static void lpuart_setup_watermark(struct lpuart_port *sport)
diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c
index a58e9277dfad..f1387f1024db 100644
--- a/drivers/tty/serial/lantiq.c
+++ b/drivers/tty/serial/lantiq.c
@@ -250,6 +250,7 @@ lqasc_err_int(int irq, void *_port)
struct ltq_uart_port *ltq_port = to_ltq_uart_port(port);
spin_lock_irqsave(&ltq_port->lock, flags);
+ __raw_writel(ASC_IRNCR_EIR, port->membase + LTQ_ASC_IRNCR);
/* clear any pending interrupts */
asc_update_bits(0, ASCWHBSTATE_CLRPE | ASCWHBSTATE_CLRFE |
ASCWHBSTATE_CLRROE, port->membase + LTQ_ASC_WHBSTATE);
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index 08dc3e2a729c..8582479f0211 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -1664,19 +1664,18 @@ static int qcom_geni_serial_probe(struct platform_device *pdev)
uport->private_data = &port->private_data;
platform_set_drvdata(pdev, port);
- ret = uart_add_one_port(drv, uport);
- if (ret)
- return ret;
-
irq_set_status_flags(uport->irq, IRQ_NOAUTOEN);
ret = devm_request_irq(uport->dev, uport->irq, qcom_geni_serial_isr,
IRQF_TRIGGER_HIGH, port->name, uport);
if (ret) {
dev_err(uport->dev, "Failed to get IRQ ret %d\n", ret);
- uart_remove_one_port(drv, uport);
return ret;
}
+ ret = uart_add_one_port(drv, uport);
+ if (ret)
+ return ret;
+
/*
* Set pm_runtime status as ACTIVE so that wakeup_irq gets
* enabled/disabled from dev_pm_arm_wake_irq during system
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index c84be40fb8df..4737a8f92c2e 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -466,7 +466,7 @@ static const struct file_operations tty_fops = {
.llseek = no_llseek,
.read_iter = tty_read,
.write_iter = tty_write,
- .splice_read = generic_file_splice_read,
+ .splice_read = copy_splice_read,
.splice_write = iter_file_splice_write,
.poll = tty_poll,
.unlocked_ioctl = tty_ioctl,
@@ -481,7 +481,7 @@ static const struct file_operations console_fops = {
.llseek = no_llseek,
.read_iter = tty_read,
.write_iter = redirected_tty_write,
- .splice_read = generic_file_splice_read,
+ .splice_read = copy_splice_read,
.splice_write = iter_file_splice_write,
.poll = tty_poll,
.unlocked_ioctl = tty_ioctl,
diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c
index 498ba9c0ee93..829c4be66f3b 100644
--- a/drivers/tty/vt/vc_screen.c
+++ b/drivers/tty/vt/vc_screen.c
@@ -656,10 +656,17 @@ vcs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
}
}
- /* The vcs_size might have changed while we slept to grab
- * the user buffer, so recheck.
+ /* The vc might have been freed or vcs_size might have changed
+ * while we slept to grab the user buffer, so recheck.
* Return data written up to now on failure.
*/
+ vc = vcs_vc(inode, &viewed);
+ if (!vc) {
+ if (written)
+ break;
+ ret = -ENXIO;
+ goto unlock_out;
+ }
size = vcs_size(vc, attr, false);
if (size < 0) {
if (written)
diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
index 202ff71e1b58..51b3c6ae781d 100644
--- a/drivers/ufs/core/ufs-mcq.c
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -150,7 +150,8 @@ static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba)
u32 hba_maxq, rem, tot_queues;
struct Scsi_Host *host = hba->host;
- hba_maxq = FIELD_GET(MAX_QUEUE_SUP, hba->mcq_capabilities);
+ /* maxq is a 0-based value */
+ hba_maxq = FIELD_GET(MAX_QUEUE_SUP, hba->mcq_capabilities) + 1;
tot_queues = UFS_MCQ_NUM_DEV_CMD_QUEUES + read_queues + poll_queues +
rw_queues;
@@ -265,7 +266,7 @@ static int ufshcd_mcq_get_tag(struct ufs_hba *hba,
addr = (le64_to_cpu(cqe->command_desc_base_addr) & CQE_UCD_BA) -
hba->ucdl_dma_addr;
- return div_u64(addr, sizeof(struct utp_transfer_cmd_desc));
+ return div_u64(addr, ufshcd_get_ucd_size(hba));
}
static void ufshcd_mcq_process_cqe(struct ufs_hba *hba,
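
The "0 based" note above matters: the MAX_QUEUE_SUP register field stores (number of queues - 1), so a raw field value of N advertises N+1 queues. A standalone illustration of the extraction (the mask and shift here are illustrative stand-ins, not the real MAX_QUEUE_SUP layout):

#include <stdio.h>
#include <stdint.h>

/* Illustrative 8-bit field at bit 0; the real MAX_QUEUE_SUP mask differs. */
#define MAX_QUEUE_SUP_SHIFT	0
#define MAX_QUEUE_SUP_MASK	0xffu

static unsigned int max_queues(uint32_t mcq_capabilities)
{
	/* The field holds (queues - 1), so add the 1 back. */
	return ((mcq_capabilities >> MAX_QUEUE_SUP_SHIFT) & MAX_QUEUE_SUP_MASK) + 1;
}

int main(void)
{
	printf("%u\n", max_queues(0x1f));	/* field value 31 -> 32 queues */
	return 0;
}
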
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 45fd374fe56c..e7e79f515e14 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -2849,10 +2849,10 @@ static void ufshcd_map_queues(struct Scsi_Host *shost)
static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
{
struct utp_transfer_cmd_desc *cmd_descp = (void *)hba->ucdl_base_addr +
- i * sizeof_utp_transfer_cmd_desc(hba);
+ i * ufshcd_get_ucd_size(hba);
struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
- i * sizeof_utp_transfer_cmd_desc(hba);
+ i * ufshcd_get_ucd_size(hba);
u16 response_offset = offsetof(struct utp_transfer_cmd_desc,
response_upiu);
u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);
@@ -3761,7 +3761,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
size_t utmrdl_size, utrdl_size, ucdl_size;
/* Allocate memory for UTP command descriptors */
- ucdl_size = sizeof_utp_transfer_cmd_desc(hba) * hba->nutrs;
+ ucdl_size = ufshcd_get_ucd_size(hba) * hba->nutrs;
hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
ucdl_size,
&hba->ucdl_dma_addr,
@@ -3861,7 +3861,7 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba)
prdt_offset =
offsetof(struct utp_transfer_cmd_desc, prd_table);
- cmd_desc_size = sizeof_utp_transfer_cmd_desc(hba);
+ cmd_desc_size = ufshcd_get_ucd_size(hba);
cmd_desc_dma_addr = hba->ucdl_dma_addr;
for (i = 0; i < hba->nutrs; i++) {
@@ -8452,7 +8452,7 @@ static void ufshcd_release_sdb_queue(struct ufs_hba *hba, int nutrs)
{
size_t ucdl_size, utrdl_size;
- ucdl_size = sizeof(struct utp_transfer_cmd_desc) * nutrs;
+ ucdl_size = ufshcd_get_ucd_size(hba) * nutrs;
dmam_free_coherent(hba->dev, ucdl_size, hba->ucdl_base_addr,
hba->ucdl_dma_addr);
diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c
index ccfaebca6faa..1dcadef933e3 100644
--- a/drivers/usb/cdns3/cdns3-gadget.c
+++ b/drivers/usb/cdns3/cdns3-gadget.c
@@ -2097,6 +2097,19 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
else
priv_ep->trb_burst_size = 16;
+ /*
+ * In controller versions preceding DEV_VER_V2 (for example, i.MX8QM) the
+ * DMA has a bug: when trb_burst_size exceeds 16 and the address is not
+ * aligned to 128 bytes (the product of the 64-bit AXI width and the AXI
+ * maximum burst length of 16, i.e. 0xF+1 in dma_axi_ctrl0[3:0]), data is
+ * corrupted when a transfer crosses a 4K boundary. The corruption spans
+ * from position (4K - (address & 0x7F)) up to 4K.
+ *
+ * So force trb_burst_size to 16 on such platforms.
+ */
+ if (priv_dev->dev_ver < DEV_VER_V2)
+ priv_ep->trb_burst_size = 16;
+
mult = min_t(u8, mult, EP_CFG_MULT_MAX);
buffering = min_t(u8, buffering, EP_CFG_BUFFERING_MAX);
maxburst = min_t(u8, maxburst, EP_CFG_MAXBURST_MAX);
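
The corruption window described in that comment is easy to compute for a concrete address; a quick check of the (4K - (address & 0x7F)) arithmetic (plain C, address chosen for illustration):

#include <stdio.h>

int main(void)
{
	unsigned long addr = 0x1000040;	/* 64 bytes past a 128-byte boundary */
	unsigned long off = addr & 0x7f;

	/* Corrupted span within the 4K page: [4K - off, 4K) */
	printf("corrupt bytes [%lu, 4096): %lu bytes\n", 4096 - off, off);
	return 0;
}
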
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 4bb6d304eb4b..311007b1d904 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -1928,6 +1928,8 @@ static int usbtmc_ioctl_request(struct usbtmc_device_data *data,
if (request.req.wLength > USBTMC_BUFSIZE)
return -EMSGSIZE;
+ if (request.req.wLength == 0) /* Length-0 requests are never IN */
+ request.req.bRequestType &= ~USB_DIR_IN;
is_in = request.req.bRequestType & USB_DIR_IN;
diff --git a/drivers/usb/core/buffer.c b/drivers/usb/core/buffer.c
index fbb087b728dc..268ccbec88f9 100644
--- a/drivers/usb/core/buffer.c
+++ b/drivers/usb/core/buffer.c
@@ -172,3 +172,44 @@ void hcd_buffer_free(
}
dma_free_coherent(hcd->self.sysdev, size, addr, dma);
}
+
+void *hcd_buffer_alloc_pages(struct usb_hcd *hcd,
+ size_t size, gfp_t mem_flags, dma_addr_t *dma)
+{
+ if (size == 0)
+ return NULL;
+
+ if (hcd->localmem_pool)
+ return gen_pool_dma_alloc_align(hcd->localmem_pool,
+ size, dma, PAGE_SIZE);
+
+ /* some USB hosts just use PIO */
+ if (!hcd_uses_dma(hcd)) {
+ *dma = DMA_MAPPING_ERROR;
+ return (void *)__get_free_pages(mem_flags,
+ get_order(size));
+ }
+
+ return dma_alloc_coherent(hcd->self.sysdev,
+ size, dma, mem_flags);
+}
+
+void hcd_buffer_free_pages(struct usb_hcd *hcd,
+ size_t size, void *addr, dma_addr_t dma)
+{
+ if (!addr)
+ return;
+
+ if (hcd->localmem_pool) {
+ gen_pool_free(hcd->localmem_pool,
+ (unsigned long)addr, size);
+ return;
+ }
+
+ if (!hcd_uses_dma(hcd)) {
+ free_pages((unsigned long)addr, get_order(size));
+ return;
+ }
+
+ dma_free_coherent(hcd->self.sysdev, size, addr, dma);
+}
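
Callers are expected to pair the two helpers and use the returned DMA handle to decide how the buffer can be mapped; a minimal usage sketch (error handling trimmed, mirroring the devio.c conversion below):

static int example_map(struct usb_hcd *hcd, size_t size)
{
	dma_addr_t dma = DMA_MAPPING_ERROR;
	void *mem;

	mem = hcd_buffer_alloc_pages(hcd, size, GFP_USER | __GFP_NOWARN, &dma);
	if (!mem)
		return -ENOMEM;

	if (dma == DMA_MAPPING_ERROR) {
		/* PIO host: plain pages, map with remap_pfn_range() */
	} else {
		/* DMA-capable host: map with dma_mmap_coherent() */
	}

	hcd_buffer_free_pages(hcd, size, mem, dma);
	return 0;
}
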
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index e501a03d6c70..fcf68818e999 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -186,6 +186,7 @@ static int connected(struct usb_dev_state *ps)
static void dec_usb_memory_use_count(struct usb_memory *usbm, int *count)
{
struct usb_dev_state *ps = usbm->ps;
+ struct usb_hcd *hcd = bus_to_hcd(ps->dev->bus);
unsigned long flags;
spin_lock_irqsave(&ps->lock, flags);
@@ -194,8 +195,8 @@ static void dec_usb_memory_use_count(struct usb_memory *usbm, int *count)
list_del(&usbm->memlist);
spin_unlock_irqrestore(&ps->lock, flags);
- usb_free_coherent(ps->dev, usbm->size, usbm->mem,
- usbm->dma_handle);
+ hcd_buffer_free_pages(hcd, usbm->size,
+ usbm->mem, usbm->dma_handle);
usbfs_decrease_memory_usage(
usbm->size + sizeof(struct usb_memory));
kfree(usbm);
@@ -234,7 +235,7 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma)
size_t size = vma->vm_end - vma->vm_start;
void *mem;
unsigned long flags;
- dma_addr_t dma_handle;
+ dma_addr_t dma_handle = DMA_MAPPING_ERROR;
int ret;
ret = usbfs_increase_memory_usage(size + sizeof(struct usb_memory));
@@ -247,8 +248,8 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma)
goto error_decrease_mem;
}
- mem = usb_alloc_coherent(ps->dev, size, GFP_USER | __GFP_NOWARN,
- &dma_handle);
+ mem = hcd_buffer_alloc_pages(hcd,
+ size, GFP_USER | __GFP_NOWARN, &dma_handle);
if (!mem) {
ret = -ENOMEM;
goto error_free_usbm;
@@ -264,7 +265,14 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma)
usbm->vma_use_count = 1;
INIT_LIST_HEAD(&usbm->memlist);
- if (hcd->localmem_pool || !hcd_uses_dma(hcd)) {
+ /*
+ * When DMA is unavailable, hcd_buffer_alloc_pages() allocates
+ * normal pages and sets dma_handle to DMA_MAPPING_ERROR. Check for
+ * that case and use remap_pfn_range() to map normal pages, or
+ * dma_mmap_coherent() to map DMA pages, into user space.
+ */
+ if (dma_handle == DMA_MAPPING_ERROR) {
if (remap_pfn_range(vma, vma->vm_start,
virt_to_phys(usbm->mem) >> PAGE_SHIFT,
size, vma->vm_page_prot) < 0) {
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 0beaab932e7d..d68958e151a7 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1137,7 +1137,7 @@ static int dwc3_core_init(struct dwc3 *dwc)
dwc3_set_incr_burst_type(dwc);
- dwc3_phy_power_on(dwc);
+ ret = dwc3_phy_power_on(dwc);
if (ret)
goto err_exit_phy;
@@ -1929,6 +1929,11 @@ static int dwc3_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
+ /*
+ * HACK: Clear the driver data, which is currently accessed by parent
+ * glue drivers, before allowing the parent to suspend.
+ */
+ platform_set_drvdata(pdev, NULL);
pm_runtime_set_suspended(&pdev->dev);
dwc3_free_event_buffers(dwc);
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index d56457c02996..1f043c31a096 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -1116,6 +1116,7 @@ struct dwc3_scratchpad_array {
* @dis_metastability_quirk: set to disable metastability quirk.
* @dis_split_quirk: set to disable split boundary.
* @wakeup_configured: set if the device is configured for remote wakeup.
+ * @suspended: set to track suspend event due to U3/L2.
* @imod_interval: set the interrupt moderation interval in 250ns
* increments or 0 to disable.
* @max_cfg_eps: current max number of IN eps used across all USB configs.
@@ -1332,6 +1333,7 @@ struct dwc3 {
unsigned dis_split_quirk:1;
unsigned async_callbacks:1;
unsigned wakeup_configured:1;
+ unsigned suspended:1;
u16 imod_interval;
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
index e4a2560b9dc0..ebf03468fac4 100644
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@ -332,6 +332,11 @@ static int dwc3_lsp_show(struct seq_file *s, void *unused)
unsigned int current_mode;
unsigned long flags;
u32 reg;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dwc->dev);
+ if (ret < 0)
+ return ret;
spin_lock_irqsave(&dwc->lock, flags);
reg = dwc3_readl(dwc->regs, DWC3_GSTS);
@@ -350,6 +355,8 @@ static int dwc3_lsp_show(struct seq_file *s, void *unused)
}
spin_unlock_irqrestore(&dwc->lock, flags);
+ pm_runtime_put_sync(dwc->dev);
+
return 0;
}
@@ -395,6 +402,11 @@ static int dwc3_mode_show(struct seq_file *s, void *unused)
struct dwc3 *dwc = s->private;
unsigned long flags;
u32 reg;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dwc->dev);
+ if (ret < 0)
+ return ret;
spin_lock_irqsave(&dwc->lock, flags);
reg = dwc3_readl(dwc->regs, DWC3_GCTL);
@@ -414,6 +426,8 @@ static int dwc3_mode_show(struct seq_file *s, void *unused)
seq_printf(s, "UNKNOWN %08x\n", DWC3_GCTL_PRTCAP(reg));
}
+ pm_runtime_put_sync(dwc->dev);
+
return 0;
}
@@ -463,6 +477,11 @@ static int dwc3_testmode_show(struct seq_file *s, void *unused)
struct dwc3 *dwc = s->private;
unsigned long flags;
u32 reg;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dwc->dev);
+ if (ret < 0)
+ return ret;
spin_lock_irqsave(&dwc->lock, flags);
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
@@ -493,6 +512,8 @@ static int dwc3_testmode_show(struct seq_file *s, void *unused)
seq_printf(s, "UNKNOWN %d\n", reg);
}
+ pm_runtime_put_sync(dwc->dev);
+
return 0;
}
@@ -509,6 +530,7 @@ static ssize_t dwc3_testmode_write(struct file *file,
unsigned long flags;
u32 testmode = 0;
char buf[32];
+ int ret;
if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
return -EFAULT;
@@ -526,10 +548,16 @@ static ssize_t dwc3_testmode_write(struct file *file,
else
testmode = 0;
+ ret = pm_runtime_resume_and_get(dwc->dev);
+ if (ret < 0)
+ return ret;
+
spin_lock_irqsave(&dwc->lock, flags);
dwc3_gadget_set_test_mode(dwc, testmode);
spin_unlock_irqrestore(&dwc->lock, flags);
+ pm_runtime_put_sync(dwc->dev);
+
return count;
}
@@ -548,12 +576,18 @@ static int dwc3_link_state_show(struct seq_file *s, void *unused)
enum dwc3_link_state state;
u32 reg;
u8 speed;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dwc->dev);
+ if (ret < 0)
+ return ret;
spin_lock_irqsave(&dwc->lock, flags);
reg = dwc3_readl(dwc->regs, DWC3_GSTS);
if (DWC3_GSTS_CURMOD(reg) != DWC3_GSTS_CURMOD_DEVICE) {
seq_puts(s, "Not available\n");
spin_unlock_irqrestore(&dwc->lock, flags);
+ pm_runtime_put_sync(dwc->dev);
return 0;
}
@@ -566,6 +600,8 @@ static int dwc3_link_state_show(struct seq_file *s, void *unused)
dwc3_gadget_hs_link_string(state));
spin_unlock_irqrestore(&dwc->lock, flags);
+ pm_runtime_put_sync(dwc->dev);
+
return 0;
}
@@ -584,6 +620,7 @@ static ssize_t dwc3_link_state_write(struct file *file,
char buf[32];
u32 reg;
u8 speed;
+ int ret;
if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
return -EFAULT;
@@ -603,10 +640,15 @@ static ssize_t dwc3_link_state_write(struct file *file,
else
return -EINVAL;
+ ret = pm_runtime_resume_and_get(dwc->dev);
+ if (ret < 0)
+ return ret;
+
spin_lock_irqsave(&dwc->lock, flags);
reg = dwc3_readl(dwc->regs, DWC3_GSTS);
if (DWC3_GSTS_CURMOD(reg) != DWC3_GSTS_CURMOD_DEVICE) {
spin_unlock_irqrestore(&dwc->lock, flags);
+ pm_runtime_put_sync(dwc->dev);
return -EINVAL;
}
@@ -616,12 +658,15 @@ static ssize_t dwc3_link_state_write(struct file *file,
if (speed < DWC3_DSTS_SUPERSPEED &&
state != DWC3_LINK_STATE_RECOV) {
spin_unlock_irqrestore(&dwc->lock, flags);
+ pm_runtime_put_sync(dwc->dev);
return -EINVAL;
}
dwc3_gadget_set_link_state(dwc, state);
spin_unlock_irqrestore(&dwc->lock, flags);
+ pm_runtime_put_sync(dwc->dev);
+
return count;
}
@@ -645,6 +690,11 @@ static int dwc3_tx_fifo_size_show(struct seq_file *s, void *unused)
unsigned long flags;
u32 mdwidth;
u32 val;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dwc->dev);
+ if (ret < 0)
+ return ret;
spin_lock_irqsave(&dwc->lock, flags);
val = dwc3_core_fifo_space(dep, DWC3_TXFIFO);
@@ -657,6 +707,8 @@ static int dwc3_tx_fifo_size_show(struct seq_file *s, void *unused)
seq_printf(s, "%u\n", val);
spin_unlock_irqrestore(&dwc->lock, flags);
+ pm_runtime_put_sync(dwc->dev);
+
return 0;
}
@@ -667,6 +719,11 @@ static int dwc3_rx_fifo_size_show(struct seq_file *s, void *unused)
unsigned long flags;
u32 mdwidth;
u32 val;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dwc->dev);
+ if (ret < 0)
+ return ret;
spin_lock_irqsave(&dwc->lock, flags);
val = dwc3_core_fifo_space(dep, DWC3_RXFIFO);
@@ -679,6 +736,8 @@ static int dwc3_rx_fifo_size_show(struct seq_file *s, void *unused)
seq_printf(s, "%u\n", val);
spin_unlock_irqrestore(&dwc->lock, flags);
+ pm_runtime_put_sync(dwc->dev);
+
return 0;
}
@@ -688,12 +747,19 @@ static int dwc3_tx_request_queue_show(struct seq_file *s, void *unused)
struct dwc3 *dwc = dep->dwc;
unsigned long flags;
u32 val;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dwc->dev);
+ if (ret < 0)
+ return ret;
spin_lock_irqsave(&dwc->lock, flags);
val = dwc3_core_fifo_space(dep, DWC3_TXREQQ);
seq_printf(s, "%u\n", val);
spin_unlock_irqrestore(&dwc->lock, flags);
+ pm_runtime_put_sync(dwc->dev);
+
return 0;
}
@@ -703,12 +769,19 @@ static int dwc3_rx_request_queue_show(struct seq_file *s, void *unused)
struct dwc3 *dwc = dep->dwc;
unsigned long flags;
u32 val;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dwc->dev);
+ if (ret < 0)
+ return ret;
spin_lock_irqsave(&dwc->lock, flags);
val = dwc3_core_fifo_space(dep, DWC3_RXREQQ);
seq_printf(s, "%u\n", val);
spin_unlock_irqrestore(&dwc->lock, flags);
+ pm_runtime_put_sync(dwc->dev);
+
return 0;
}
@@ -718,12 +791,19 @@ static int dwc3_rx_info_queue_show(struct seq_file *s, void *unused)
struct dwc3 *dwc = dep->dwc;
unsigned long flags;
u32 val;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dwc->dev);
+ if (ret < 0)
+ return ret;
spin_lock_irqsave(&dwc->lock, flags);
val = dwc3_core_fifo_space(dep, DWC3_RXINFOQ);
seq_printf(s, "%u\n", val);
spin_unlock_irqrestore(&dwc->lock, flags);
+ pm_runtime_put_sync(dwc->dev);
+
return 0;
}
@@ -733,12 +813,19 @@ static int dwc3_descriptor_fetch_queue_show(struct seq_file *s, void *unused)
struct dwc3 *dwc = dep->dwc;
unsigned long flags;
u32 val;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dwc->dev);
+ if (ret < 0)
+ return ret;
spin_lock_irqsave(&dwc->lock, flags);
val = dwc3_core_fifo_space(dep, DWC3_DESCFETCHQ);
seq_printf(s, "%u\n", val);
spin_unlock_irqrestore(&dwc->lock, flags);
+ pm_runtime_put_sync(dwc->dev);
+
return 0;
}
@@ -748,12 +835,19 @@ static int dwc3_event_queue_show(struct seq_file *s, void *unused)
struct dwc3 *dwc = dep->dwc;
unsigned long flags;
u32 val;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dwc->dev);
+ if (ret < 0)
+ return ret;
spin_lock_irqsave(&dwc->lock, flags);
val = dwc3_core_fifo_space(dep, DWC3_EVENTQ);
seq_printf(s, "%u\n", val);
spin_unlock_irqrestore(&dwc->lock, flags);
+ pm_runtime_put_sync(dwc->dev);
+
return 0;
}
@@ -798,6 +892,11 @@ static int dwc3_trb_ring_show(struct seq_file *s, void *unused)
struct dwc3 *dwc = dep->dwc;
unsigned long flags;
int i;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dwc->dev);
+ if (ret < 0)
+ return ret;
spin_lock_irqsave(&dwc->lock, flags);
if (dep->number <= 1) {
@@ -827,6 +926,8 @@ static int dwc3_trb_ring_show(struct seq_file *s, void *unused)
out:
spin_unlock_irqrestore(&dwc->lock, flags);
+ pm_runtime_put_sync(dwc->dev);
+
return 0;
}
@@ -839,6 +940,11 @@ static int dwc3_ep_info_register_show(struct seq_file *s, void *unused)
u32 lower_32_bits;
u32 upper_32_bits;
u32 reg;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dwc->dev);
+ if (ret < 0)
+ return ret;
spin_lock_irqsave(&dwc->lock, flags);
reg = DWC3_GDBGLSPMUX_EPSELECT(dep->number);
@@ -851,6 +957,8 @@ static int dwc3_ep_info_register_show(struct seq_file *s, void *unused)
seq_printf(s, "0x%016llx\n", ep_info);
spin_unlock_irqrestore(&dwc->lock, flags);
+ pm_runtime_put_sync(dwc->dev);
+
return 0;
}
@@ -910,6 +1018,7 @@ void dwc3_debugfs_init(struct dwc3 *dwc)
dwc->regset->regs = dwc3_regs;
dwc->regset->nregs = ARRAY_SIZE(dwc3_regs);
dwc->regset->base = dwc->regs - DWC3_GLOBALS_REGS_START;
+ dwc->regset->dev = dwc->dev;
root = debugfs_create_dir(dev_name(dwc->dev), usb_debug_root);
dwc->debug_root = root;
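
Every debugfs show/write handler in this file now carries the same guard: resume the controller before touching registers, and drop the reference on every exit path. Condensed, the pattern is (a sketch with the register access elided):

static int dwc3_example_show(struct seq_file *s, void *unused)
{
	struct dwc3 *dwc = s->private;
	unsigned long flags;
	int ret;

	/* Wake the controller; bail out if it cannot be resumed. */
	ret = pm_runtime_resume_and_get(dwc->dev);
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&dwc->lock, flags);
	/* ... read registers, seq_printf() the result ... */
	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Must also be dropped on any early return taken after the get. */
	pm_runtime_put_sync(dwc->dev);

	return 0;
}
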
diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
index 959fc925ca7c..79b22abf9727 100644
--- a/drivers/usb/dwc3/dwc3-qcom.c
+++ b/drivers/usb/dwc3/dwc3-qcom.c
@@ -308,7 +308,16 @@ static void dwc3_qcom_interconnect_exit(struct dwc3_qcom *qcom)
/* Only usable in contexts where the role can not change. */
static bool dwc3_qcom_is_host(struct dwc3_qcom *qcom)
{
- struct dwc3 *dwc = platform_get_drvdata(qcom->dwc3);
+ struct dwc3 *dwc;
+
+ /*
+ * FIXME: Fix this layering violation.
+ */
+ dwc = platform_get_drvdata(qcom->dwc3);
+
+ /* Core driver may not have probed yet. */
+ if (!dwc)
+ return false;
return dwc->xhci;
}
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index c0ca4d12f95d..b78599dd705c 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -198,6 +198,7 @@ static void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep,
list_del(&req->list);
req->remaining = 0;
req->needs_extra_trb = false;
+ req->num_trbs = 0;
if (req->request.status == -EINPROGRESS)
req->request.status = status;
@@ -2440,6 +2441,7 @@ static int dwc3_gadget_func_wakeup(struct usb_gadget *g, int intf_id)
return -EINVAL;
}
dwc3_resume_gadget(dwc);
+ dwc->suspended = false;
dwc->link_state = DWC3_LINK_STATE_U0;
}
@@ -2699,6 +2701,21 @@ static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc)
return ret;
}
+static int dwc3_gadget_soft_connect(struct dwc3 *dwc)
+{
+ /*
+ * Section 4.1.9 of the Synopsys DWC_usb31 1.90a programming
+ * guide specifies that a reconnect after a device-initiated
+ * disconnect requires a core soft reset (DCTL.CSftRst) before
+ * enabling the run/stop bit.
+ */
+ dwc3_core_soft_reset(dwc);
+
+ dwc3_event_buffers_setup(dwc);
+ __dwc3_gadget_start(dwc);
+ return dwc3_gadget_run_stop(dwc, true);
+}
+
static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
{
struct dwc3 *dwc = gadget_to_dwc(g);
@@ -2737,21 +2754,10 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
synchronize_irq(dwc->irq_gadget);
- if (!is_on) {
+ if (!is_on)
ret = dwc3_gadget_soft_disconnect(dwc);
- } else {
- /*
- * In the Synopsys DWC_usb31 1.90a programming guide section
- * 4.1.9, it specifies that for a reconnect after a
- * device-initiated disconnect requires a core soft reset
- * (DCTL.CSftRst) before enabling the run/stop bit.
- */
- dwc3_core_soft_reset(dwc);
-
- dwc3_event_buffers_setup(dwc);
- __dwc3_gadget_start(dwc);
- ret = dwc3_gadget_run_stop(dwc, true);
- }
+ else
+ ret = dwc3_gadget_soft_connect(dwc);
pm_runtime_put(dwc->dev);
@@ -3938,6 +3944,8 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
{
int reg;
+ dwc->suspended = false;
+
dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RX_DET);
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
@@ -3962,6 +3970,8 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
{
u32 reg;
+ dwc->suspended = false;
+
/*
* Ideally, dwc3_reset_gadget() would trigger the function
* drivers to stop any active transfers through ep disable.
@@ -4180,6 +4190,8 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc, unsigned int evtinfo)
{
+ dwc->suspended = false;
+
/*
* TODO take core out of low power mode when that's
* implemented.
@@ -4277,6 +4289,7 @@ static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
if (dwc->gadget->wakeup_armed) {
dwc3_gadget_enable_linksts_evts(dwc, false);
dwc3_resume_gadget(dwc);
+ dwc->suspended = false;
}
break;
case DWC3_LINK_STATE_U1:
@@ -4303,8 +4316,10 @@ static void dwc3_gadget_suspend_interrupt(struct dwc3 *dwc,
{
enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
- if (dwc->link_state != next && next == DWC3_LINK_STATE_U3)
+ if (!dwc->suspended && next == DWC3_LINK_STATE_U3) {
+ dwc->suspended = true;
dwc3_suspend_gadget(dwc);
+ }
dwc->link_state = next;
}
@@ -4655,42 +4670,39 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
int dwc3_gadget_suspend(struct dwc3 *dwc)
{
unsigned long flags;
+ int ret;
if (!dwc->gadget_driver)
return 0;
- dwc3_gadget_run_stop(dwc, false);
+ ret = dwc3_gadget_soft_disconnect(dwc);
+ if (ret)
+ goto err;
spin_lock_irqsave(&dwc->lock, flags);
dwc3_disconnect_gadget(dwc);
- __dwc3_gadget_stop(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
return 0;
+
+err:
+ /*
+ * Attempt to reset the controller's state. Likely no
+ * communication can be established until the host
+ * performs a port reset.
+ */
+ if (dwc->softconnect)
+ dwc3_gadget_soft_connect(dwc);
+
+ return ret;
}
int dwc3_gadget_resume(struct dwc3 *dwc)
{
- int ret;
-
if (!dwc->gadget_driver || !dwc->softconnect)
return 0;
- ret = __dwc3_gadget_start(dwc);
- if (ret < 0)
- goto err0;
-
- ret = dwc3_gadget_run_stop(dwc, true);
- if (ret < 0)
- goto err1;
-
- return 0;
-
-err1:
- __dwc3_gadget_stop(dwc);
-
-err0:
- return ret;
+ return dwc3_gadget_soft_connect(dwc);
}
void dwc3_gadget_process_pending_events(struct dwc3 *dwc)
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index a13c946e0663..f41a385a5c42 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -3535,6 +3535,7 @@ static void ffs_func_unbind(struct usb_configuration *c,
/* Drain any pending AIO completions */
drain_workqueue(ffs->io_completion_wq);
+ ffs_event_add(ffs, FUNCTIONFS_UNBIND);
if (!--opts->refcnt)
functionfs_unbind(ffs);
@@ -3559,7 +3560,6 @@ static void ffs_func_unbind(struct usb_configuration *c,
func->function.ssp_descriptors = NULL;
func->interfaces_nums = NULL;
- ffs_event_add(ffs, FUNCTIONFS_UNBIND);
}
static struct usb_function *ffs_alloc(struct usb_function_instance *fi)
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 6956ad8ba8dd..a366abb45623 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -17,6 +17,7 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
+#include <linux/string_helpers.h>
#include <linux/usb/composite.h>
#include "u_ether.h"
@@ -965,6 +966,8 @@ int gether_get_host_addr_cdc(struct net_device *net, char *host_addr, int len)
dev = netdev_priv(net);
snprintf(host_addr, len, "%pm", dev->host_mac);
+ string_upper(host_addr, host_addr);
+
return strlen(host_addr);
}
EXPORT_SYMBOL_GPL(gether_get_host_addr_cdc);
diff --git a/drivers/usb/gadget/udc/amd5536udc_pci.c b/drivers/usb/gadget/udc/amd5536udc_pci.c
index c80f9bd51b75..a36913ae31f9 100644
--- a/drivers/usb/gadget/udc/amd5536udc_pci.c
+++ b/drivers/usb/gadget/udc/amd5536udc_pci.c
@@ -170,6 +170,9 @@ static int udc_pci_probe(
retval = -ENODEV;
goto err_probe;
}
+
+ udc = dev;
+
return 0;
err_probe:
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 4641153e9706..83fd1de14784 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -37,10 +37,14 @@ static const struct bus_type gadget_bus_type;
* @vbus: for udcs who care about vbus status, this value is real vbus status;
* for udcs who do not care about vbus status, this value is always true
* @started: the UDC's started state. True if the UDC had started.
- * @connect_lock: protects udc->vbus, udc->started, gadget->connect, gadget->deactivate related
- * functions. usb_gadget_connect_locked, usb_gadget_disconnect_locked,
- * usb_udc_connect_control_locked, usb_gadget_udc_start_locked, usb_gadget_udc_stop_locked are
- * called with this lock held.
+ * @allow_connect: Indicates whether the UDC is allowed to be pulled up.
+ * Set/cleared by gadget_(un)bind_driver() after the gadget driver is
+ * bound or unbound.
+ * @connect_lock: protects udc->started, gadget->connect,
+ * udc->allow_connect and gadget->deactivate. The routines
+ * usb_gadget_connect_locked(), usb_gadget_disconnect_locked(),
+ * usb_udc_connect_control_locked(), usb_gadget_udc_start_locked() and
+ * usb_gadget_udc_stop_locked() are called with this lock held.
*
* This represents the internal data structure which is used by the UDC-class
* to hold information about udc driver and gadget together.
@@ -52,6 +56,8 @@ struct usb_udc {
struct list_head list;
bool vbus;
bool started;
+ bool allow_connect;
+ struct work_struct vbus_work;
struct mutex connect_lock;
};
@@ -692,7 +698,6 @@ out:
}
EXPORT_SYMBOL_GPL(usb_gadget_vbus_disconnect);
-/* Internal version of usb_gadget_connect needs to be called with connect_lock held. */
static int usb_gadget_connect_locked(struct usb_gadget *gadget)
__must_hold(&gadget->udc->connect_lock)
{
@@ -703,15 +708,12 @@ static int usb_gadget_connect_locked(struct usb_gadget *gadget)
goto out;
}
- if (gadget->connected)
- goto out;
-
- if (gadget->deactivated || !gadget->udc->started) {
+ if (gadget->deactivated || !gadget->udc->allow_connect || !gadget->udc->started) {
/*
- * If gadget is deactivated we only save new state.
- * Gadget will be connected automatically after activation.
- *
- * udc first needs to be started before gadget can be pulled up.
+ * If the gadget isn't usable (because it is deactivated,
+ * unbound, or not yet started), we only save the new state.
+ * The gadget will be connected automatically when it is
+ * activated/bound/started.
*/
gadget->connected = true;
goto out;
@@ -749,7 +751,6 @@ int usb_gadget_connect(struct usb_gadget *gadget)
}
EXPORT_SYMBOL_GPL(usb_gadget_connect);
-/* Internal version of usb_gadget_disconnect needs to be called with connect_lock held. */
static int usb_gadget_disconnect_locked(struct usb_gadget *gadget)
__must_hold(&gadget->udc->connect_lock)
{
@@ -767,8 +768,6 @@ static int usb_gadget_disconnect_locked(struct usb_gadget *gadget)
/*
* If gadget is deactivated we only save new state.
* Gadget will stay disconnected after activation.
- *
- * udc should have been started before gadget being pulled down.
*/
gadget->connected = false;
goto out;
@@ -829,10 +828,10 @@ int usb_gadget_deactivate(struct usb_gadget *gadget)
{
int ret = 0;
+ mutex_lock(&gadget->udc->connect_lock);
if (gadget->deactivated)
- goto out;
+ goto unlock;
- mutex_lock(&gadget->udc->connect_lock);
if (gadget->connected) {
ret = usb_gadget_disconnect_locked(gadget);
if (ret)
@@ -848,7 +847,6 @@ int usb_gadget_deactivate(struct usb_gadget *gadget)
unlock:
mutex_unlock(&gadget->udc->connect_lock);
-out:
trace_usb_gadget_deactivate(gadget, ret);
return ret;
@@ -868,10 +866,10 @@ int usb_gadget_activate(struct usb_gadget *gadget)
{
int ret = 0;
+ mutex_lock(&gadget->udc->connect_lock);
if (!gadget->deactivated)
- goto out;
+ goto unlock;
- mutex_lock(&gadget->udc->connect_lock);
gadget->deactivated = false;
/*
@@ -882,7 +880,8 @@ int usb_gadget_activate(struct usb_gadget *gadget)
ret = usb_gadget_connect_locked(gadget);
mutex_unlock(&gadget->udc->connect_lock);
-out:
+unlock:
+ mutex_unlock(&gadget->udc->connect_lock);
trace_usb_gadget_activate(gadget, ret);
return ret;
@@ -1124,12 +1123,21 @@ EXPORT_SYMBOL_GPL(usb_gadget_set_state);
/* Acquire connect_lock before calling this function. */
static void usb_udc_connect_control_locked(struct usb_udc *udc) __must_hold(&udc->connect_lock)
{
- if (udc->vbus && udc->started)
+ if (udc->vbus)
usb_gadget_connect_locked(udc->gadget);
else
usb_gadget_disconnect_locked(udc->gadget);
}
+static void vbus_event_work(struct work_struct *work)
+{
+ struct usb_udc *udc = container_of(work, struct usb_udc, vbus_work);
+
+ mutex_lock(&udc->connect_lock);
+ usb_udc_connect_control_locked(udc);
+ mutex_unlock(&udc->connect_lock);
+}
+
/**
* usb_udc_vbus_handler - updates the udc core vbus status, and try to
* connect or disconnect gadget
@@ -1138,17 +1146,23 @@ static void usb_udc_connect_control_locked(struct usb_udc *udc) __must_hold(&udc
*
* The udc driver calls it when it wants to connect or disconnect gadget
* according to vbus status.
+ *
+ * This function can be invoked from interrupt context by the irq handlers
+ * of gadget drivers; however, usb_udc_connect_control() has to run in
+ * non-atomic context for two reasons:
+ * a. Some gadget driver implementations expect the ->pullup callback
+ * to be invoked in non-atomic context.
+ * b. usb_gadget_disconnect() acquires udc_lock, which is a mutex.
+ * Hence the invocation of usb_udc_connect_control() is offloaded to a
+ * workqueue.
*/
void usb_udc_vbus_handler(struct usb_gadget *gadget, bool status)
{
struct usb_udc *udc = gadget->udc;
- mutex_lock(&udc->connect_lock);
if (udc) {
udc->vbus = status;
- usb_udc_connect_control_locked(udc);
+ schedule_work(&udc->vbus_work);
}
- mutex_unlock(&udc->connect_lock);
}
EXPORT_SYMBOL_GPL(usb_udc_vbus_handler);
@@ -1381,6 +1395,7 @@ int usb_add_gadget(struct usb_gadget *gadget)
mutex_lock(&udc_lock);
list_add_tail(&udc->list, &udc_list);
mutex_unlock(&udc_lock);
+ INIT_WORK(&udc->vbus_work, vbus_event_work);
ret = device_add(&udc->dev);
if (ret)
@@ -1512,6 +1527,7 @@ void usb_del_gadget(struct usb_gadget *gadget)
flush_work(&gadget->work);
device_del(&gadget->dev);
ida_free(&gadget_id_numbers, gadget->id_number);
+ cancel_work_sync(&udc->vbus_work);
device_unregister(&udc->dev);
}
EXPORT_SYMBOL_GPL(usb_del_gadget);
@@ -1583,6 +1599,7 @@ static int gadget_bind_driver(struct device *dev)
goto err_start;
}
usb_gadget_enable_async_callbacks(udc);
+ udc->allow_connect = true;
usb_udc_connect_control_locked(udc);
mutex_unlock(&udc->connect_lock);
@@ -1615,6 +1632,8 @@ static void gadget_unbind_driver(struct device *dev)
kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
+ udc->allow_connect = false;
+ cancel_work_sync(&udc->vbus_work);
mutex_lock(&udc->connect_lock);
usb_gadget_disconnect_locked(gadget);
usb_gadget_disable_async_callbacks(udc);
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index aac8bc185afa..eb008e873361 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -2877,9 +2877,9 @@ static int renesas_usb3_probe(struct platform_device *pdev)
struct rzv2m_usb3drd *ddata = dev_get_drvdata(pdev->dev.parent);
usb3->drd_reg = ddata->reg;
- ret = devm_request_irq(ddata->dev, ddata->drd_irq,
+ ret = devm_request_irq(&pdev->dev, ddata->drd_irq,
renesas_usb3_otg_irq, 0,
- dev_name(ddata->dev), usb3);
+ dev_name(&pdev->dev), usb3);
if (ret < 0)
return ret;
}
diff --git a/drivers/usb/host/uhci-pci.c b/drivers/usb/host/uhci-pci.c
index 3592f757fe05..7bd2fddde770 100644
--- a/drivers/usb/host/uhci-pci.c
+++ b/drivers/usb/host/uhci-pci.c
@@ -119,11 +119,13 @@ static int uhci_pci_init(struct usb_hcd *hcd)
uhci->rh_numports = uhci_count_ports(hcd);
- /* Intel controllers report the OverCurrent bit active on.
- * VIA controllers report it active off, so we'll adjust the
- * bit value. (It's not standardized in the UHCI spec.)
+ /*
+ * Intel controllers report the OverCurrent bit active on. VIA
+ * and ZHAOXIN controllers report it active off, so we'll adjust
+ * the bit value. (It's not standardized in the UHCI spec.)
*/
- if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_VIA)
+ if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_VIA ||
+ to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_ZHAOXIN)
uhci->oc_low = 1;
/* HP's server management chip requires a longer port reset delay. */
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index ddb79f23fb3b..79b3691f373f 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/reset.h>
+#include <linux/suspend.h>
#include "xhci.h"
#include "xhci-trace.h"
@@ -387,7 +388,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
if (pdev->vendor == PCI_VENDOR_ID_AMD &&
pdev->device == PCI_DEVICE_ID_AMD_RENOIR_XHCI)
- xhci->quirks |= XHCI_BROKEN_D3COLD;
+ xhci->quirks |= XHCI_BROKEN_D3COLD_S2I;
if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
xhci->quirks |= XHCI_LPM_SUPPORT;
@@ -801,9 +802,16 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
* Systems with the TI redriver that loses port status change events
* need to have the registers polled during D3, so avoid D3cold.
*/
- if (xhci->quirks & (XHCI_COMP_MODE_QUIRK | XHCI_BROKEN_D3COLD))
+ if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
pci_d3cold_disable(pdev);
+#ifdef CONFIG_SUSPEND
+ /* d3cold is broken, but only when s2idle is used */
+ if (pm_suspend_target_state == PM_SUSPEND_TO_IDLE &&
+ xhci->quirks & (XHCI_BROKEN_D3COLD_S2I))
+ pci_d3cold_disable(pdev);
+#endif
+
if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
xhci_pme_quirk(hcd);
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 1ad12d5a4857..2bc82b3a2f98 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -276,6 +276,26 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
trace_xhci_inc_enq(ring);
}
+static int xhci_num_trbs_to(struct xhci_segment *start_seg, union xhci_trb *start,
+ struct xhci_segment *end_seg, union xhci_trb *end,
+ unsigned int num_segs)
+{
+ union xhci_trb *last_on_seg;
+ int num = 0;
+ int i = 0;
+
+ do {
+ if (start_seg == end_seg && end >= start)
+ return num + (end - start);
+ last_on_seg = &start_seg->trbs[TRBS_PER_SEGMENT - 1];
+ num += last_on_seg - start;
+ start_seg = start_seg->next;
+ start = start_seg->trbs;
+ } while (i++ <= num_segs);
+
+ return -EINVAL;
+}
+
/*
* Check to see if there's room to enqueue num_trbs on the ring and make sure
* enqueue pointer will not advance into dequeue segment. See rules above.
@@ -2140,6 +2160,7 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
u32 trb_comp_code)
{
struct xhci_ep_ctx *ep_ctx;
+ int trbs_freed;
ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index);
@@ -2209,9 +2230,15 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
}
/* Update ring dequeue pointer */
+ trbs_freed = xhci_num_trbs_to(ep_ring->deq_seg, ep_ring->dequeue,
+ td->last_trb_seg, td->last_trb,
+ ep_ring->num_segs);
+ if (trbs_freed < 0)
+ xhci_dbg(xhci, "Failed to count freed trbs at TD finish\n");
+ else
+ ep_ring->num_trbs_free += trbs_freed;
ep_ring->dequeue = td->last_trb;
ep_ring->deq_seg = td->last_trb_seg;
- ep_ring->num_trbs_free += td->num_trbs - 1;
inc_deq(xhci, ep_ring);
return xhci_td_cleanup(xhci, td, ep_ring, td->status);
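
xhci_num_trbs_to() walks the ring segment by segment, adding whole segments until start and end land in the same one; the trailing link TRB of each segment is not counted. The same walk on a toy two-segment ring (a standalone model, not the driver code):

#include <stdio.h>

#define TRBS_PER_SEGMENT 4

struct seg {
	int trbs[TRBS_PER_SEGMENT];
	struct seg *next;
};

static int num_trbs_to(struct seg *seg, int *start,
		       struct seg *end_seg, int *end, unsigned int num_segs)
{
	int num = 0;
	unsigned int i = 0;

	do {
		if (seg == end_seg && end >= start)
			return num + (int)(end - start);
		/* Count up to (not including) this segment's link TRB. */
		num += (int)(&seg->trbs[TRBS_PER_SEGMENT - 1] - start);
		seg = seg->next;
		start = seg->trbs;
	} while (i++ <= num_segs);

	return -1;	/* end never found: inconsistent ring */
}

int main(void)
{
	struct seg a, b;

	a.next = &b;
	b.next = &a;
	/* a[2] -> b[1]: one TRB before a's link TRB, one step into b. */
	printf("%d\n", num_trbs_to(&a, &a.trbs[2], &b, &b.trbs[1], 2));
	return 0;
}
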
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 08d721921b7b..6b690ec91ff3 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1901,7 +1901,7 @@ struct xhci_hcd {
#define XHCI_DISABLE_SPARSE BIT_ULL(38)
#define XHCI_SG_TRB_CACHE_SIZE_QUIRK BIT_ULL(39)
#define XHCI_NO_SOFT_RETRY BIT_ULL(40)
-#define XHCI_BROKEN_D3COLD BIT_ULL(41)
+#define XHCI_BROKEN_D3COLD_S2I BIT_ULL(41)
#define XHCI_EP_CTX_BROKEN_DCS BIT_ULL(42)
#define XHCI_SUSPEND_RESUME_CLKS BIT_ULL(43)
#define XHCI_RESET_TO_DEFAULT BIT_ULL(44)
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 644a55447fd7..fd42e3a0bd18 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -248,6 +248,8 @@ static void option_instat_callback(struct urb *urb);
#define QUECTEL_VENDOR_ID 0x2c7c
/* These Quectel products use Quectel's vendor ID */
#define QUECTEL_PRODUCT_EC21 0x0121
+#define QUECTEL_PRODUCT_EM061K_LTA 0x0123
+#define QUECTEL_PRODUCT_EM061K_LMS 0x0124
#define QUECTEL_PRODUCT_EC25 0x0125
#define QUECTEL_PRODUCT_EG91 0x0191
#define QUECTEL_PRODUCT_EG95 0x0195
@@ -266,6 +268,8 @@ static void option_instat_callback(struct urb *urb);
#define QUECTEL_PRODUCT_RM520N 0x0801
#define QUECTEL_PRODUCT_EC200U 0x0901
#define QUECTEL_PRODUCT_EC200S_CN 0x6002
+#define QUECTEL_PRODUCT_EM061K_LWW 0x6008
+#define QUECTEL_PRODUCT_EM061K_LCN 0x6009
#define QUECTEL_PRODUCT_EC200T 0x6026
#define QUECTEL_PRODUCT_RM500K 0x7001
@@ -1189,6 +1193,18 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0x00, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LMS, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LMS, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LMS, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LTA, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LTA, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LTA, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LWW, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LWW, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LWW, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 8931df5a85fd..c54e9805da53 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -406,22 +406,25 @@ static DEF_SCSI_QCMD(queuecommand)
***********************************************************************/
/* Command timeout and abort */
-static int command_abort(struct scsi_cmnd *srb)
+static int command_abort_matching(struct us_data *us, struct scsi_cmnd *srb_match)
{
- struct us_data *us = host_to_us(srb->device->host);
-
- usb_stor_dbg(us, "%s called\n", __func__);
-
/*
* us->srb together with the TIMED_OUT, RESETTING, and ABORTING
* bits are protected by the host lock.
*/
scsi_lock(us_to_host(us));
- /* Is this command still active? */
- if (us->srb != srb) {
+ /* Is there any active pending command to abort? */
+ if (!us->srb) {
scsi_unlock(us_to_host(us));
usb_stor_dbg(us, "-- nothing to abort\n");
+ return SUCCESS;
+ }
+
+ /* Does the command match the passed srb, if any? */
+ if (srb_match && us->srb != srb_match) {
+ scsi_unlock(us_to_host(us));
+ usb_stor_dbg(us, "-- pending command mismatch\n");
return FAILED;
}
@@ -444,6 +447,14 @@ static int command_abort(struct scsi_cmnd *srb)
return SUCCESS;
}
+static int command_abort(struct scsi_cmnd *srb)
+{
+ struct us_data *us = host_to_us(srb->device->host);
+
+ usb_stor_dbg(us, "%s called\n", __func__);
+ return command_abort_matching(us, srb);
+}
+
/*
* This invokes the transport reset mechanism to reset the state of the
* device
@@ -455,6 +466,9 @@ static int device_reset(struct scsi_cmnd *srb)
usb_stor_dbg(us, "%s called\n", __func__);
+ /* abort any pending command before reset */
+ command_abort_matching(us, NULL);
+
/* lock the device pointers and do the reset */
mutex_lock(&(us->dev_mutex));
result = us->transport_reset(us);
diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
index 8f3e884222ad..66de880b28d0 100644
--- a/drivers/usb/typec/altmodes/displayport.c
+++ b/drivers/usb/typec/altmodes/displayport.c
@@ -516,6 +516,10 @@ static ssize_t pin_assignment_show(struct device *dev,
mutex_unlock(&dp->lock);
+ /* get_current_pin_assignments can return 0 when no matching pin assignments are found */
+ if (len == 0)
+ len++;
+
buf[len - 1] = '\n';
return len;
}
diff --git a/drivers/usb/typec/pd.c b/drivers/usb/typec/pd.c
index 0bcde1ff4d39..8cc66e4467c4 100644
--- a/drivers/usb/typec/pd.c
+++ b/drivers/usb/typec/pd.c
@@ -95,7 +95,7 @@ peak_current_show(struct device *dev, struct device_attribute *attr, char *buf)
static ssize_t
fast_role_swap_current_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- return sysfs_emit(buf, "%u\n", to_pdo(dev)->pdo >> PDO_FIXED_FRS_CURR_SHIFT) & 3;
+ return sysfs_emit(buf, "%u\n", (to_pdo(dev)->pdo >> PDO_FIXED_FRS_CURR_SHIFT) & 3);
}
static DEVICE_ATTR_RO(fast_role_swap_current);
diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c
index 8b075ca82ef6..603dbd44deba 100644
--- a/drivers/usb/typec/tipd/core.c
+++ b/drivers/usb/typec/tipd/core.c
@@ -886,6 +886,9 @@ static void tps6598x_remove(struct i2c_client *client)
{
struct tps6598x *tps = i2c_get_clientdata(client);
+ if (!client->irq)
+ cancel_delayed_work_sync(&tps->wq_poll);
+
tps6598x_disconnect(tps, 0);
typec_unregister_port(tps->port);
usb_role_switch_put(tps->role_sw);
@@ -917,7 +920,7 @@ static int __maybe_unused tps6598x_resume(struct device *dev)
enable_irq(client->irq);
}
- if (client->irq)
+ if (!client->irq)
queue_delayed_work(system_power_efficient_wq, &tps->wq_poll,
msecs_to_jiffies(POLL_INTERVAL));
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index 2b472ec01dc4..b664ecbb798b 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -132,10 +132,8 @@ static int ucsi_exec_command(struct ucsi *ucsi, u64 cmd)
if (ret)
return ret;
- if (cci & UCSI_CCI_BUSY) {
- ucsi->ops->async_write(ucsi, UCSI_CANCEL, NULL, 0);
- return -EBUSY;
- }
+ if (cmd != UCSI_CANCEL && cci & UCSI_CCI_BUSY)
+ return ucsi_exec_command(ucsi, UCSI_CANCEL);
if (!(cci & UCSI_CCI_COMMAND_COMPLETE))
return -EIO;
@@ -149,6 +147,11 @@ static int ucsi_exec_command(struct ucsi *ucsi, u64 cmd)
return ucsi_read_error(ucsi);
}
+ if (cmd == UCSI_CANCEL && cci & UCSI_CCI_CANCEL_COMPLETE) {
+ ret = ucsi_acknowledge_command(ucsi);
+ return ret ? ret : -EBUSY;
+ }
+
return UCSI_CCI_LENGTH(cci);
}
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index e29e32b306ad..279ac6a558d2 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -3349,10 +3349,10 @@ static void mlx5_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *
mlx5_vdpa_remove_debugfs(ndev->debugfs);
ndev->debugfs = NULL;
unregister_link_notifier(ndev);
+ _vdpa_unregister_device(dev);
wq = mvdev->wq;
mvdev->wq = NULL;
destroy_workqueue(wq);
- _vdpa_unregister_device(dev);
mgtdev->ndev = NULL;
}
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index de97e38c3b82..5f5c21674fdc 100644
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -1685,6 +1685,9 @@ static bool vduse_validate_config(struct vduse_dev_config *config)
if (config->vq_num > 0xffff)
return false;
+ if (!config->name[0])
+ return false;
+
if (!device_is_allowed(config->device_id))
return false;
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 3d4dd9420c30..0d2f805468e1 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -860,6 +860,11 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
if (ret)
goto pin_unwind;
+ if (!pfn_valid(phys_pfn)) {
+ ret = -EINVAL;
+ goto pin_unwind;
+ }
+
ret = vfio_add_to_pfn_list(dma, iova, phys_pfn);
if (ret) {
if (put_pfn(phys_pfn, dma->prot) && do_accounting)
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 07181cd8d52e..ae2273196b0c 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -935,13 +935,18 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
err = sock->ops->sendmsg(sock, &msg, len);
if (unlikely(err < 0)) {
+ bool retry = err == -EAGAIN || err == -ENOMEM || err == -ENOBUFS;
+
if (zcopy_used) {
if (vq->heads[ubuf->desc].len == VHOST_DMA_IN_PROGRESS)
vhost_net_ubuf_put(ubufs);
- nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
- % UIO_MAXIOV;
+ if (retry)
+ nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
+ % UIO_MAXIOV;
+ else
+ vq->heads[ubuf->desc].len = VHOST_DMA_DONE_LEN;
}
- if (err == -EAGAIN || err == -ENOMEM || err == -ENOBUFS) {
+ if (retry) {
vhost_discard_vq_desc(vq, 1);
vhost_net_enable_vq(net, vq);
break;
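
The error path now distinguishes retryable from fatal send errors: -EAGAIN/-ENOMEM/-ENOBUFS rewind the descriptor and retry later, anything else completes the zerocopy slot and drops the packet. The classification in isolation (a trivial standalone check):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool tx_retryable(int err)
{
	return err == -EAGAIN || err == -ENOMEM || err == -ENOBUFS;
}

int main(void)
{
	printf("EAGAIN: %d, EPIPE: %d\n",
	       tx_retryable(-EAGAIN), tx_retryable(-EPIPE));
	return 0;
}
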
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index 8c1aefc865f0..bf77924d5b60 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -407,7 +407,10 @@ static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
{
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
+ struct vhost_dev *d = &v->vdev;
+ u64 actual_features;
u64 features;
+ int i;
/*
* It's not allowed to change the features after they have
@@ -422,6 +425,16 @@ static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
if (vdpa_set_features(vdpa, features))
return -EINVAL;
+ /* let the vqs know what has been configured */
+ actual_features = ops->get_driver_features(vdpa);
+ for (i = 0; i < d->nvqs; ++i) {
+ struct vhost_virtqueue *vq = d->vqs[i];
+
+ mutex_lock(&vq->mutex);
+ vq->acked_features = actual_features;
+ mutex_unlock(&vq->mutex);
+ }
+
return 0;
}
@@ -594,7 +607,14 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
if (r)
return r;
- vq->last_avail_idx = vq_state.split.avail_index;
+ if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
+ vq->last_avail_idx = vq_state.packed.last_avail_idx |
+ (vq_state.packed.last_avail_counter << 15);
+ vq->last_used_idx = vq_state.packed.last_used_idx |
+ (vq_state.packed.last_used_counter << 15);
+ } else {
+ vq->last_avail_idx = vq_state.split.avail_index;
+ }
break;
}
@@ -612,9 +632,15 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
break;
case VHOST_SET_VRING_BASE:
- vq_state.split.avail_index = vq->last_avail_idx;
- if (ops->set_vq_state(vdpa, idx, &vq_state))
- r = -EINVAL;
+ if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
+ vq_state.packed.last_avail_idx = vq->last_avail_idx & 0x7fff;
+ vq_state.packed.last_avail_counter = !!(vq->last_avail_idx & 0x8000);
+ vq_state.packed.last_used_idx = vq->last_used_idx & 0x7fff;
+ vq_state.packed.last_used_counter = !!(vq->last_used_idx & 0x8000);
+ } else {
+ vq_state.split.avail_index = vq->last_avail_idx;
+ }
+ r = ops->set_vq_state(vdpa, idx, &vq_state);
break;
case VHOST_SET_VRING_CALL:
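
For packed virtqueues the state fits one u16 per side: bits 0-14 hold the ring index and bit 15 the wrap counter, exactly the masks used above. Packing and unpacking in isolation (a plain C demo of the bit layout):

#include <stdio.h>
#include <stdint.h>

static uint16_t pack_state(uint16_t idx, int wrap)	/* idx < 0x8000 */
{
	return (idx & 0x7fff) | (wrap ? 0x8000 : 0);
}

int main(void)
{
	uint16_t v = pack_state(0x1234, 1);

	printf("idx=0x%04x wrap=%d\n", v & 0x7fff, !!(v & 0x8000));
	return 0;
}
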
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index a92af08e7864..60c9ebd629dd 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -235,7 +235,7 @@ void vhost_dev_flush(struct vhost_dev *dev)
{
struct vhost_flush_struct flush;
- if (dev->worker) {
+ if (dev->worker.vtsk) {
init_completion(&flush.wait_event);
vhost_work_init(&flush.work, vhost_flush_work);
@@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vhost_dev_flush);
void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
{
- if (!dev->worker)
+ if (!dev->worker.vtsk)
return;
if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
@@ -255,8 +255,8 @@ void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
* sure it was not in the list.
* test_and_set_bit() implies a memory barrier.
*/
- llist_add(&work->node, &dev->worker->work_list);
- wake_up_process(dev->worker->vtsk->task);
+ llist_add(&work->node, &dev->worker.work_list);
+ vhost_task_wake(dev->worker.vtsk);
}
}
EXPORT_SYMBOL_GPL(vhost_work_queue);
@@ -264,7 +264,7 @@ EXPORT_SYMBOL_GPL(vhost_work_queue);
/* A lockless hint for busy polling code to exit the loop */
bool vhost_has_work(struct vhost_dev *dev)
{
- return dev->worker && !llist_empty(&dev->worker->work_list);
+ return !llist_empty(&dev->worker.work_list);
}
EXPORT_SYMBOL_GPL(vhost_has_work);
@@ -333,31 +333,21 @@ static void vhost_vq_reset(struct vhost_dev *dev,
__vhost_vq_meta_reset(vq);
}
-static int vhost_worker(void *data)
+static bool vhost_worker(void *data)
{
struct vhost_worker *worker = data;
struct vhost_work *work, *work_next;
struct llist_node *node;
- for (;;) {
- /* mb paired w/ kthread_stop */
- set_current_state(TASK_INTERRUPTIBLE);
-
- if (vhost_task_should_stop(worker->vtsk)) {
- __set_current_state(TASK_RUNNING);
- break;
- }
-
- node = llist_del_all(&worker->work_list);
- if (!node)
- schedule();
+ node = llist_del_all(&worker->work_list);
+ if (node) {
+ __set_current_state(TASK_RUNNING);
node = llist_reverse_order(node);
/* make sure flag is seen after deletion */
smp_wmb();
llist_for_each_entry_safe(work, work_next, node, node) {
clear_bit(VHOST_WORK_QUEUED, &work->flags);
- __set_current_state(TASK_RUNNING);
kcov_remote_start_common(worker->kcov_handle);
work->fn(work);
kcov_remote_stop();
@@ -365,7 +355,7 @@ static int vhost_worker(void *data)
}
}
- return 0;
+ return !!node;
}
static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
@@ -468,7 +458,8 @@ void vhost_dev_init(struct vhost_dev *dev,
dev->umem = NULL;
dev->iotlb = NULL;
dev->mm = NULL;
- dev->worker = NULL;
+ memset(&dev->worker, 0, sizeof(dev->worker));
+ init_llist_head(&dev->worker.work_list);
dev->iov_limit = iov_limit;
dev->weight = weight;
dev->byte_weight = byte_weight;
@@ -542,47 +533,30 @@ static void vhost_detach_mm(struct vhost_dev *dev)
static void vhost_worker_free(struct vhost_dev *dev)
{
- struct vhost_worker *worker = dev->worker;
-
- if (!worker)
+ if (!dev->worker.vtsk)
return;
- dev->worker = NULL;
- WARN_ON(!llist_empty(&worker->work_list));
- vhost_task_stop(worker->vtsk);
- kfree(worker);
+ WARN_ON(!llist_empty(&dev->worker.work_list));
+ vhost_task_stop(dev->worker.vtsk);
+ dev->worker.kcov_handle = 0;
+ dev->worker.vtsk = NULL;
}
static int vhost_worker_create(struct vhost_dev *dev)
{
- struct vhost_worker *worker;
struct vhost_task *vtsk;
char name[TASK_COMM_LEN];
- int ret;
-
- worker = kzalloc(sizeof(*worker), GFP_KERNEL_ACCOUNT);
- if (!worker)
- return -ENOMEM;
- dev->worker = worker;
- worker->kcov_handle = kcov_common_handle();
- init_llist_head(&worker->work_list);
snprintf(name, sizeof(name), "vhost-%d", current->pid);
- vtsk = vhost_task_create(vhost_worker, worker, name);
- if (!vtsk) {
- ret = -ENOMEM;
- goto free_worker;
- }
+ vtsk = vhost_task_create(vhost_worker, &dev->worker, name);
+ if (!vtsk)
+ return -ENOMEM;
- worker->vtsk = vtsk;
+ dev->worker.kcov_handle = kcov_common_handle();
+ dev->worker.vtsk = vtsk;
vhost_task_start(vtsk);
return 0;
-
-free_worker:
- kfree(worker);
- dev->worker = NULL;
- return ret;
}
/* Caller should have device mutex */
@@ -1626,17 +1600,25 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg
r = -EFAULT;
break;
}
- if (s.num > 0xffff) {
- r = -EINVAL;
- break;
+ if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
+ vq->last_avail_idx = s.num & 0xffff;
+ vq->last_used_idx = (s.num >> 16) & 0xffff;
+ } else {
+ if (s.num > 0xffff) {
+ r = -EINVAL;
+ break;
+ }
+ vq->last_avail_idx = s.num;
}
- vq->last_avail_idx = s.num;
/* Forget the cached index value. */
vq->avail_idx = vq->last_avail_idx;
break;
case VHOST_GET_VRING_BASE:
s.index = idx;
- s.num = vq->last_avail_idx;
+ if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED))
+ s.num = (u32)vq->last_avail_idx | ((u32)vq->last_used_idx << 16);
+ else
+ s.num = vq->last_avail_idx;
if (copy_to_user(argp, &s, sizeof s))
r = -EFAULT;
break;
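
For packed rings, VHOST_GET/SET_VRING_BASE multiplex both indices through the single 32-bit s.num: avail state in bits 0-15 and used state in bits 16-31, each u16 carrying its wrap counter in its own bit 15. A quick round-trip check of that layout (standalone C, mirroring the masks above):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint16_t last_avail_idx = 0x8123;	/* wrap counter set, index 0x123 */
	uint16_t last_used_idx  = 0x0042;	/* wrap counter clear, index 0x42 */

	/* GET_VRING_BASE: fold both values into s.num. */
	uint32_t num = (uint32_t)last_avail_idx | ((uint32_t)last_used_idx << 16);

	/* SET_VRING_BASE: split them back out, unchanged. */
	assert((num & 0xffff) == last_avail_idx);
	assert(((num >> 16) & 0xffff) == last_used_idx);
	return 0;
}
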
@@ -2575,12 +2557,11 @@ EXPORT_SYMBOL_GPL(vhost_disable_notify);
/* Create a new message. */
struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type)
{
- struct vhost_msg_node *node = kmalloc(sizeof *node, GFP_KERNEL);
+ /* Make sure all padding within the structure is initialized. */
+ struct vhost_msg_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
if (!node)
return NULL;
- /* Make sure all padding within the structure is initialized. */
- memset(&node->msg, 0, sizeof node->msg);
node->vq = vq;
node->msg.type = type;
return node;
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 0308638cdeee..fc900be504b3 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -92,13 +92,17 @@ struct vhost_virtqueue {
/* The routine to call when the Guest pings us, or timeout. */
vhost_work_fn_t handle_kick;
- /* Last available index we saw. */
+ /* Last available index we saw.
+ * Values are limited to 0x7fff, and the high bit is used as
+ * a wrap counter when using VIRTIO_F_RING_PACKED. */
u16 last_avail_idx;
/* Caches available index value from user. */
u16 avail_idx;
- /* Last index we used. */
+ /* Last index we used.
+ * Values are limited to 0x7fff, and the high bit is used as
+ * a wrap counter when using VIRTIO_F_RING_PACKED. */
u16 last_used_idx;
/* Used flags */
@@ -154,7 +158,7 @@ struct vhost_dev {
struct vhost_virtqueue **vqs;
int nvqs;
struct eventfd_ctx *log_ctx;
- struct vhost_worker *worker;
+ struct vhost_worker worker;
struct vhost_iotlb *umem;
struct vhost_iotlb *iotlb;
spinlock_t iotlb_lock;
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 96e91570cdd3..0fdf5f46802c 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -124,7 +124,7 @@ config FB_PROVIDE_GET_FB_UNMAPPED_AREA
depends on FB
help
Allow generic frame-buffer to provide get_fb_unmapped_area
- function.
+ function to provide shareable character device support on nommu.
menuconfig FB_FOREIGN_ENDIAN
bool "Framebuffer foreign endianness support"
diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c
index 024d0ee4f04f..08d15e408413 100644
--- a/drivers/video/fbdev/arcfb.c
+++ b/drivers/video/fbdev/arcfb.c
@@ -590,7 +590,7 @@ err_fb_alloc:
return retval;
}
-static int arcfb_remove(struct platform_device *dev)
+static void arcfb_remove(struct platform_device *dev)
{
struct fb_info *info = platform_get_drvdata(dev);
@@ -601,12 +601,11 @@ static int arcfb_remove(struct platform_device *dev)
vfree((void __force *)info->screen_base);
framebuffer_release(info);
}
- return 0;
}
static struct platform_driver arcfb_driver = {
.probe = arcfb_probe,
- .remove = arcfb_remove,
+ .remove_new = arcfb_remove,
.driver = {
.name = "arcfb",
},
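
Several fbdev drivers below make the same conversion: the remove callback returns void and is registered through .remove_new, since the int previously returned was ignored by the platform core anyway. A minimal sketch of the converted shape (the driver name is made up):

    #include <linux/module.h>
    #include <linux/platform_device.h>

    static int examplefb_probe(struct platform_device *pdev)
    {
            return 0;
    }

    static void examplefb_remove(struct platform_device *pdev)
    {
            /* tear down only; no return value, so errors
             * cannot be reported from remove anymore */
    }

    static struct platform_driver examplefb_driver = {
            .probe      = examplefb_probe,
            .remove_new = examplefb_remove,
            .driver     = { .name = "examplefb" },
    };
    module_platform_driver(examplefb_driver);

    MODULE_LICENSE("GPL");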
diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
index b02e4e645035..cba2b113b28b 100644
--- a/drivers/video/fbdev/aty/atyfb_base.c
+++ b/drivers/video/fbdev/aty/atyfb_base.c
@@ -3498,11 +3498,6 @@ static int atyfb_setup_generic(struct pci_dev *pdev, struct fb_info *info,
if (ret)
goto atyfb_setup_generic_fail;
#endif
- if (!(aty_ld_le32(CRTC_GEN_CNTL, par) & CRTC_EXT_DISP_EN))
- par->clk_wr_offset = (inb(R_GENMO) & 0x0CU) >> 2;
- else
- par->clk_wr_offset = aty_ld_8(CLOCK_CNTL, par) & 0x03U;
-
	/* according to ATI, we should use clock 3 for accelerated mode */
par->clk_wr_offset = 3;
diff --git a/drivers/video/fbdev/au1100fb.c b/drivers/video/fbdev/au1100fb.c
index 519313b8bb00..648d6cac86e8 100644
--- a/drivers/video/fbdev/au1100fb.c
+++ b/drivers/video/fbdev/au1100fb.c
@@ -520,13 +520,10 @@ failed:
return -ENODEV;
}
-int au1100fb_drv_remove(struct platform_device *dev)
+void au1100fb_drv_remove(struct platform_device *dev)
{
struct au1100fb_device *fbdev = NULL;
- if (!dev)
- return -ENODEV;
-
fbdev = platform_get_drvdata(dev);
#if !defined(CONFIG_FRAMEBUFFER_CONSOLE) && defined(CONFIG_LOGO)
@@ -543,8 +540,6 @@ int au1100fb_drv_remove(struct platform_device *dev)
clk_disable_unprepare(fbdev->lcdclk);
clk_put(fbdev->lcdclk);
}
-
- return 0;
}
#ifdef CONFIG_PM
@@ -593,9 +588,9 @@ static struct platform_driver au1100fb_driver = {
.name = "au1100-lcd",
},
.probe = au1100fb_drv_probe,
- .remove = au1100fb_drv_remove,
+ .remove_new = au1100fb_drv_remove,
.suspend = au1100fb_drv_suspend,
- .resume = au1100fb_drv_resume,
+ .resume = au1100fb_drv_resume,
};
module_platform_driver(au1100fb_driver);
diff --git a/drivers/video/fbdev/au1200fb.c b/drivers/video/fbdev/au1200fb.c
index b6b22fa4a8a0..aed88ce45bf0 100644
--- a/drivers/video/fbdev/au1200fb.c
+++ b/drivers/video/fbdev/au1200fb.c
@@ -1765,7 +1765,7 @@ failed:
return ret;
}
-static int au1200fb_drv_remove(struct platform_device *dev)
+static void au1200fb_drv_remove(struct platform_device *dev)
{
struct au1200fb_platdata *pd = platform_get_drvdata(dev);
struct fb_info *fbi;
@@ -1788,8 +1788,6 @@ static int au1200fb_drv_remove(struct platform_device *dev)
}
free_irq(platform_get_irq(dev, 0), (void *)dev);
-
- return 0;
}
#ifdef CONFIG_PM
@@ -1840,7 +1838,7 @@ static struct platform_driver au1200fb_driver = {
.pm = AU1200FB_PMOPS,
},
.probe = au1200fb_drv_probe,
- .remove = au1200fb_drv_remove,
+ .remove_new = au1200fb_drv_remove,
};
module_platform_driver(au1200fb_driver);
diff --git a/drivers/video/fbdev/broadsheetfb.c b/drivers/video/fbdev/broadsheetfb.c
index 55e62dd96f9b..b518cacbf7cd 100644
--- a/drivers/video/fbdev/broadsheetfb.c
+++ b/drivers/video/fbdev/broadsheetfb.c
@@ -1193,7 +1193,7 @@ err:
}
-static int broadsheetfb_remove(struct platform_device *dev)
+static void broadsheetfb_remove(struct platform_device *dev)
{
struct fb_info *info = platform_get_drvdata(dev);
@@ -1209,12 +1209,11 @@ static int broadsheetfb_remove(struct platform_device *dev)
module_put(par->board->owner);
framebuffer_release(info);
}
- return 0;
}
static struct platform_driver broadsheetfb_driver = {
.probe = broadsheetfb_probe,
- .remove = broadsheetfb_remove,
+ .remove_new = broadsheetfb_remove,
.driver = {
.name = "broadsheetfb",
},
diff --git a/drivers/video/fbdev/bw2.c b/drivers/video/fbdev/bw2.c
index 9cbadcd18b25..025d663dc6fd 100644
--- a/drivers/video/fbdev/bw2.c
+++ b/drivers/video/fbdev/bw2.c
@@ -352,7 +352,7 @@ out_err:
return err;
}
-static int bw2_remove(struct platform_device *op)
+static void bw2_remove(struct platform_device *op)
{
struct fb_info *info = dev_get_drvdata(&op->dev);
struct bw2_par *par = info->par;
@@ -363,8 +363,6 @@ static int bw2_remove(struct platform_device *op)
of_iounmap(&op->resource[0], info->screen_base, info->fix.smem_len);
framebuffer_release(info);
-
- return 0;
}
static const struct of_device_id bw2_match[] = {
@@ -381,7 +379,7 @@ static struct platform_driver bw2_driver = {
.of_match_table = bw2_match,
},
.probe = bw2_probe,
- .remove = bw2_remove,
+ .remove_new = bw2_remove,
};
static int __init bw2_init(void)
diff --git a/drivers/video/fbdev/core/bitblit.c b/drivers/video/fbdev/core/bitblit.c
index f98e8f298bc1..8587c9da0670 100644
--- a/drivers/video/fbdev/core/bitblit.c
+++ b/drivers/video/fbdev/core/bitblit.c
@@ -247,6 +247,9 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, int mode,
cursor.set = 0;
+ if (!vc->vc_font.data)
+ return;
+
c = scr_readw((u16 *) vc->vc_pos);
attribute = get_attribute(info, c);
src = vc->vc_font.data + ((c & charmask) * (w * vc->vc_font.height));
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index e808dc86001c..28739f1cb5e7 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -1468,7 +1468,7 @@ __releases(&info->lock)
}
#if defined(CONFIG_FB_PROVIDE_GET_FB_UNMAPPED_AREA) && !defined(CONFIG_MMU)
-unsigned long get_fb_unmapped_area(struct file *filp,
+static unsigned long get_fb_unmapped_area(struct file *filp,
unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags)
{
diff --git a/drivers/video/fbdev/i810/i810_dvt.c b/drivers/video/fbdev/i810/i810_dvt.c
index b4b3670667ab..2082b5c92e8f 100644
--- a/drivers/video/fbdev/i810/i810_dvt.c
+++ b/drivers/video/fbdev/i810/i810_dvt.c
@@ -14,6 +14,7 @@
#include "i810_regs.h"
#include "i810.h"
+#include "i810_main.h"
struct mode_registers std_modes[] = {
/* 640x480 @ 60Hz */
@@ -276,7 +277,7 @@ void i810fb_fill_var_timings(struct fb_var_screeninfo *var)
var->upper_margin = total - (yres + var->lower_margin + var->vsync_len);
}
-u32 i810_get_watermark(struct fb_var_screeninfo *var,
+u32 i810_get_watermark(const struct fb_var_screeninfo *var,
struct i810fb_par *par)
{
struct mode_registers *params = &par->regs;
diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c
index 975dd682fae4..ee7d01ad1406 100644
--- a/drivers/video/fbdev/imsttfb.c
+++ b/drivers/video/fbdev/imsttfb.c
@@ -1452,9 +1452,13 @@ static int init_imstt(struct fb_info *info)
FBINFO_HWACCEL_FILLRECT |
FBINFO_HWACCEL_YPAN;
- fb_alloc_cmap(&info->cmap, 0, 0);
+ if (fb_alloc_cmap(&info->cmap, 0, 0)) {
+ framebuffer_release(info);
+ return -ENODEV;
+ }
if (register_framebuffer(info) < 0) {
+ fb_dealloc_cmap(&info->cmap);
framebuffer_release(info);
return -ENODEV;
}
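
The fix above makes init_imstt() check fb_alloc_cmap() and unwind in reverse allocation order when register_framebuffer() fails. A condensed sketch of that unwind ordering (the error labels are illustrative):

    static int example_init(struct fb_info *info)
    {
            if (fb_alloc_cmap(&info->cmap, 0, 0))
                    goto err_release;

            if (register_framebuffer(info) < 0)
                    goto err_cmap;

            return 0;

    err_cmap:
            fb_dealloc_cmap(&info->cmap);   /* undo the cmap allocation */
    err_release:
            framebuffer_release(info);
            return -ENODEV;
    }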
@@ -1531,8 +1535,10 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto error;
info->pseudo_palette = par->palette;
ret = init_imstt(info);
- if (!ret)
- pci_set_drvdata(pdev, info);
+ if (ret)
+ goto error;
+
+ pci_set_drvdata(pdev, info);
return ret;
error:
diff --git a/drivers/video/fbdev/matrox/matroxfb_maven.c b/drivers/video/fbdev/matrox/matroxfb_maven.c
index 727a10a59811..b15a8ad92ba7 100644
--- a/drivers/video/fbdev/matrox/matroxfb_maven.c
+++ b/drivers/video/fbdev/matrox/matroxfb_maven.c
@@ -1291,7 +1291,7 @@ static struct i2c_driver maven_driver={
.driver = {
.name = "maven",
},
- .probe_new = maven_probe,
+ .probe = maven_probe,
.remove = maven_remove,
.id_table = maven_id,
};
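
Both i2c drivers in this patch move from the transitional .probe_new member back to .probe, which now carries the single-argument signature. A minimal sketch (driver and function names invented):

    #include <linux/i2c.h>
    #include <linux/module.h>

    static int example_probe(struct i2c_client *client)
    {
            /* one-argument probe; no i2c_device_id parameter */
            return 0;
    }

    static struct i2c_driver example_driver = {
            .driver = { .name = "example" },
            .probe  = example_probe,
    };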
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c
index 1eaa35c27835..477789cff8e0 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c
@@ -491,7 +491,8 @@ static int tpo_td043_probe(struct spi_device *spi)
ddata->vcc_reg = devm_regulator_get(&spi->dev, "vcc");
if (IS_ERR(ddata->vcc_reg)) {
- r = dev_err_probe(&spi->dev, r, "failed to get LCD VCC regulator\n");
+ r = dev_err_probe(&spi->dev, PTR_ERR(ddata->vcc_reg),
+ "failed to get LCD VCC regulator\n");
goto err_regulator;
}
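
The regulator fix feeds PTR_ERR() of the just-returned handle to dev_err_probe() instead of the stale r. The general idiom, sketched with an invented supply name:

    reg = devm_regulator_get(dev, "vdd");
    if (IS_ERR(reg))
            /* logs unless the error is -EPROBE_DEFER, and
             * passes the encoded error back to the caller */
            return dev_err_probe(dev, PTR_ERR(reg),
                                 "failed to get vdd regulator\n");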
diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c
index 046b9990d27c..132d1a205011 100644
--- a/drivers/video/fbdev/ssd1307fb.c
+++ b/drivers/video/fbdev/ssd1307fb.c
@@ -844,7 +844,7 @@ static const struct i2c_device_id ssd1307fb_i2c_id[] = {
MODULE_DEVICE_TABLE(i2c, ssd1307fb_i2c_id);
static struct i2c_driver ssd1307fb_driver = {
- .probe_new = ssd1307fb_probe,
+ .probe = ssd1307fb_probe,
.remove = ssd1307fb_remove,
.id_table = ssd1307fb_i2c_id,
.driver = {
diff --git a/drivers/video/fbdev/stifb.c b/drivers/video/fbdev/stifb.c
index 14c9215284c5..686a234f3899 100644
--- a/drivers/video/fbdev/stifb.c
+++ b/drivers/video/fbdev/stifb.c
@@ -741,7 +741,7 @@ ngleClearOverlayPlanes(struct stifb_info *fb, int mask, int data)
packed_len = (fb->info.var.xres << 16) | fb->info.var.yres;
NGLE_SET_DSTXY(fb, packed_dst);
- /* Write zeroes to overlay planes */
+ /* Write zeroes to overlay planes */
NGLE_QUICK_SET_IMAGE_BITMAP_OP(fb,
IBOvals(RopSrc, MaskAddrOffset(0),
BitmapExtent08, StaticReg(0),
@@ -1297,14 +1297,14 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
break;
default:
#ifdef FALLBACK_TO_1BPP
- printk(KERN_WARNING
+ printk(KERN_WARNING
"stifb: Unsupported graphics card (id=0x%08x) "
"- now trying 1bpp mode instead\n",
fb->id);
bpp = 1; /* default to 1 bpp */
break;
#else
- printk(KERN_WARNING
+ printk(KERN_WARNING
"stifb: Unsupported graphics card (id=0x%08x) "
"- skipping.\n",
fb->id);
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index 216d49c9d47e..dabc30a09f96 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -27,6 +27,8 @@
#include <video/udlfb.h>
#include "edid.h"
+#define OUT_EP_NUM 1 /* The endpoint number we will use */
+
static const struct fb_fix_screeninfo dlfb_fix = {
.id = "udlfb",
.type = FB_TYPE_PACKED_PIXELS,
@@ -1541,24 +1543,16 @@ static const struct device_attribute fb_device_attrs[] = {
static int dlfb_select_std_channel(struct dlfb_data *dlfb)
{
int ret;
- void *buf;
static const u8 set_def_chn[] = {
0x57, 0xCD, 0xDC, 0xA7,
0x1C, 0x88, 0x5E, 0x15,
0x60, 0xFE, 0xC6, 0x97,
0x16, 0x3D, 0x47, 0xF2 };
- buf = kmemdup(set_def_chn, sizeof(set_def_chn), GFP_KERNEL);
-
- if (!buf)
- return -ENOMEM;
-
- ret = usb_control_msg(dlfb->udev, usb_sndctrlpipe(dlfb->udev, 0),
- NR_USB_REQUEST_CHANNEL,
+ ret = usb_control_msg_send(dlfb->udev, 0, NR_USB_REQUEST_CHANNEL,
(USB_DIR_OUT | USB_TYPE_VENDOR), 0, 0,
- buf, sizeof(set_def_chn), USB_CTRL_SET_TIMEOUT);
-
- kfree(buf);
+ &set_def_chn, sizeof(set_def_chn), USB_CTRL_SET_TIMEOUT,
+ GFP_KERNEL);
return ret;
}
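
usb_control_msg_send() bounce-buffers the caller's data internally, so the kmemdup()/kfree() dance above becomes unnecessary and const or on-stack data can be passed directly. Sketched usage (the request value is illustrative):

    static int send_vendor_cmd(struct usb_device *udev)
    {
            static const u8 cmd[4] = { 0x01, 0x02, 0x03, 0x04 };

            /* the helper copies 'cmd' itself; returns 0 on success
             * or a negative errno, never a partial-length count */
            return usb_control_msg_send(udev, 0, 0x12 /* bRequest */,
                                        USB_DIR_OUT | USB_TYPE_VENDOR, 0, 0,
                                        cmd, sizeof(cmd),
                                        USB_CTRL_SET_TIMEOUT, GFP_KERNEL);
    }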
@@ -1652,7 +1646,7 @@ static int dlfb_usb_probe(struct usb_interface *intf,
struct fb_info *info;
int retval;
struct usb_device *usbdev = interface_to_usbdev(intf);
- struct usb_endpoint_descriptor *out;
+ static u8 out_ep[] = {OUT_EP_NUM + USB_DIR_OUT, 0};
/* usb initialization */
dlfb = kzalloc(sizeof(*dlfb), GFP_KERNEL);
@@ -1666,9 +1660,9 @@ static int dlfb_usb_probe(struct usb_interface *intf,
dlfb->udev = usb_get_dev(usbdev);
usb_set_intfdata(intf, dlfb);
- retval = usb_find_common_endpoints(intf->cur_altsetting, NULL, &out, NULL, NULL);
- if (retval) {
- dev_err(&intf->dev, "Device should have at lease 1 bulk endpoint!\n");
+ if (!usb_check_bulk_endpoints(intf, out_ep)) {
+ dev_err(&intf->dev, "Invalid DisplayLink device!\n");
+ retval = -EINVAL;
goto error;
}
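
usb_check_bulk_endpoints() takes a zero-terminated array of endpoint addresses and returns true only if every listed endpoint exists on the interface as a bulk endpoint, pinning the device to the exact layout the driver hardcodes. A sketch of the check (the wrapper function is invented):

    static int example_check(struct usb_interface *intf)
    {
            /* zero-terminated list: bulk OUT endpoint 1 must exist */
            static const u8 wanted_eps[] = { USB_DIR_OUT | 1, 0 };

            if (!usb_check_bulk_endpoints(intf, wanted_eps))
                    return -EINVAL;   /* not the layout we expect */
            return 0;
    }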
@@ -1927,7 +1921,8 @@ retry:
}
/* urb->transfer_buffer_length set to actual before submit */
- usb_fill_bulk_urb(urb, dlfb->udev, usb_sndbulkpipe(dlfb->udev, 1),
+ usb_fill_bulk_urb(urb, dlfb->udev,
+ usb_sndbulkpipe(dlfb->udev, OUT_EP_NUM),
buf, size, dlfb_urb_completion, unode);
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
diff --git a/drivers/virt/coco/sev-guest/Kconfig b/drivers/virt/coco/sev-guest/Kconfig
index f9db0799ae67..da2d7ca531f0 100644
--- a/drivers/virt/coco/sev-guest/Kconfig
+++ b/drivers/virt/coco/sev-guest/Kconfig
@@ -2,6 +2,7 @@ config SEV_GUEST
tristate "AMD SEV Guest driver"
default m
depends on AMD_MEM_ENCRYPT
+ select CRYPTO
select CRYPTO_AEAD2
select CRYPTO_GCM
help
diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
index 1f5219e12cc3..7beaf2c41fbb 100644
--- a/drivers/xen/pvcalls-back.c
+++ b/drivers/xen/pvcalls-back.c
@@ -325,8 +325,10 @@ static struct sock_mapping *pvcalls_new_active_socket(
void *page;
map = kzalloc(sizeof(*map), GFP_KERNEL);
- if (map == NULL)
+ if (map == NULL) {
+ sock_release(sock);
return NULL;
+ }
map->fedata = fedata;
map->sock = sock;
@@ -418,10 +420,8 @@ static int pvcalls_back_connect(struct xenbus_device *dev,
req->u.connect.ref,
req->u.connect.evtchn,
sock);
- if (!map) {
+ if (!map)
ret = -EFAULT;
- sock_release(sock);
- }
out:
rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
@@ -561,7 +561,6 @@ static void __pvcalls_back_accept(struct work_struct *work)
sock);
if (!map) {
ret = -EFAULT;
- sock_release(sock);
goto out_error;
}
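
The pvcalls change gives pvcalls_new_active_socket() ownership of the socket even on failure, so callers no longer release it themselves and cannot double-release it. A condensed sketch of that ownership convention (names are illustrative):

    /* On failure the callee releases the resource it was handed;
     * callers must not touch sock after a NULL return. */
    static struct mapping *new_mapping(struct socket *sock)
    {
            struct mapping *map = kzalloc(sizeof(*map), GFP_KERNEL);

            if (!map) {
                    sock_release(sock);   /* callee consumes sock */
                    return NULL;
            }
            map->sock = sock;
            return map;
    }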