Diffstat (limited to 'drivers')
-rw-r--r-- drivers/acpi/cppc_acpi.c | 2
-rw-r--r-- drivers/acpi/hmat/hmat.c | 2
-rw-r--r-- drivers/acpi/nfit/core.c | 2
-rw-r--r-- drivers/acpi/processor_driver.c | 9
-rw-r--r-- drivers/acpi/processor_perflib.c | 40
-rw-r--r-- drivers/acpi/processor_thermal.c | 42
-rw-r--r-- drivers/acpi/sleep.c | 13
-rw-r--r-- drivers/amba/bus.c | 14
-rw-r--r-- drivers/android/binder.c | 11
-rw-r--r-- drivers/android/binder_alloc.c | 8
-rw-r--r-- drivers/android/binder_internal.h | 2
-rw-r--r-- drivers/ata/ahci.c | 4
-rw-r--r-- drivers/ata/libahci_platform.c | 38
-rw-r--r-- drivers/ata/libata-scsi.c | 21
-rw-r--r-- drivers/atm/firestream.c | 2
-rw-r--r-- drivers/base/core.c | 3
-rw-r--r-- drivers/base/memory.c | 3
-rw-r--r-- drivers/base/platform.c | 46
-rw-r--r-- drivers/base/power/qos.c | 70
-rw-r--r-- drivers/block/loop.c | 10
-rw-r--r-- drivers/block/nbd.c | 43
-rw-r--r-- drivers/block/null_blk_zoned.c | 3
-rw-r--r-- drivers/block/rbd.c | 9
-rw-r--r-- drivers/block/zram/zram_drv.c | 5
-rw-r--r-- drivers/bluetooth/Kconfig | 11
-rw-r--r-- drivers/bluetooth/Makefile | 1
-rw-r--r-- drivers/bluetooth/btintel.c | 45
-rw-r--r-- drivers/bluetooth/btintel.h | 5
-rw-r--r-- drivers/bluetooth/btrtl.c | 2
-rw-r--r-- drivers/bluetooth/btusb.c | 54
-rw-r--r-- drivers/bluetooth/btwilink.c | 337
-rw-r--r-- drivers/bluetooth/hci_bcm.c | 2
-rw-r--r-- drivers/bluetooth/hci_ll.c | 39
-rw-r--r-- drivers/bluetooth/hci_nokia.c | 2
-rw-r--r-- drivers/bluetooth/hci_qca.c | 135
-rw-r--r-- drivers/bus/fsl-mc/dprc-driver.c | 6
-rw-r--r-- drivers/bus/fsl-mc/dprc.c | 53
-rw-r--r-- drivers/bus/fsl-mc/fsl-mc-bus.c | 43
-rw-r--r-- drivers/bus/fsl-mc/fsl-mc-private.h | 42
-rw-r--r-- drivers/bus/ti-sysc.c | 18
-rw-r--r-- drivers/char/random.c | 2
-rw-r--r-- drivers/clk/ti/clk-7xx.c | 6
-rw-r--r-- drivers/clocksource/timer-of.c | 4
-rw-r--r-- drivers/cpufreq/cpufreq.c | 72
-rw-r--r-- drivers/cpufreq/intel_pstate.c | 30
-rw-r--r-- drivers/cpufreq/ppc_cbe_cpufreq_pmi.c | 15
-rw-r--r-- drivers/cpuidle/cpuidle-haltpoll.c | 4
-rw-r--r-- drivers/crypto/chelsio/Kconfig | 2
-rw-r--r-- drivers/crypto/chelsio/chtls/chtls.h | 5
-rw-r--r-- drivers/crypto/chelsio/chtls/chtls_cm.c | 2
-rw-r--r-- drivers/crypto/chelsio/chtls/chtls_io.c | 2
-rw-r--r-- drivers/crypto/chelsio/chtls/chtls_main.c | 20
-rw-r--r-- drivers/dma-buf/dma-resv.c | 2
-rw-r--r-- drivers/dma/imx-sdma.c | 8
-rw-r--r-- drivers/dma/qcom/bam_dma.c | 19
-rw-r--r-- drivers/dma/sprd-dma.c | 27
-rw-r--r-- drivers/dma/tegra210-adma.c | 7
-rw-r--r-- drivers/dma/ti/cppi41.c | 21
-rw-r--r-- drivers/dma/xilinx/xilinx_dma.c | 10
-rw-r--r-- drivers/edac/ghes_edac.c | 4
-rw-r--r-- drivers/firmware/broadcom/Kconfig | 8
-rw-r--r-- drivers/firmware/broadcom/Makefile | 1
-rw-r--r-- drivers/firmware/broadcom/tee_bnxt_fw.c | 279
-rw-r--r-- drivers/firmware/dmi_scan.c | 2
-rw-r--r-- drivers/firmware/efi/Kconfig | 1
-rw-r--r-- drivers/firmware/efi/cper.c | 2
-rw-r--r-- drivers/firmware/efi/efi.c | 5
-rw-r--r-- drivers/firmware/efi/libstub/Makefile | 1
-rw-r--r-- drivers/firmware/efi/libstub/arm32-stub.c | 16
-rw-r--r-- drivers/firmware/efi/libstub/efi-stub-helper.c | 24
-rw-r--r-- drivers/firmware/efi/rci2-table.c | 2
-rw-r--r-- drivers/firmware/efi/test/efi_test.c | 8
-rw-r--r-- drivers/firmware/efi/tpm.c | 27
-rw-r--r-- drivers/firmware/google/vpd_decode.c | 2
-rw-r--r-- drivers/gpio/gpio-eic-sprd.c | 7
-rw-r--r-- drivers/gpio/gpio-intel-mid.c | 9
-rw-r--r-- drivers/gpio/gpio-lynxpoint.c | 10
-rw-r--r-- drivers/gpio/gpio-max77620.c | 6
-rw-r--r-- drivers/gpio/gpio-merrifield.c | 9
-rw-r--r-- drivers/gpio/gpiolib-of.c | 2
-rw-r--r-- drivers/gpio/gpiolib.c | 49
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/Makefile | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c | 34
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 21
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 38
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 37
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 20
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 35
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 18
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nv.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/soc15.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 31
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 33
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/calcs/Makefile | 19
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 9
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c | 24
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c | 22
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/Makefile | 19
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn21/Makefile | 17
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/Makefile | 19
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c | 13
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dsc/Makefile | 19
-rw-r--r-- drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 2
-rw-r--r-- drivers/gpu/drm/amd/powerplay/arcturus_ppt.c | 2
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 4
-rw-r--r-- drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h | 1
-rw-r--r-- drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 4
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c | 2
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c | 2
-rw-r--r-- drivers/gpu/drm/amd/powerplay/vega20_ppt.c | 2
-rw-r--r-- drivers/gpu/drm/arm/display/komeda/komeda_kms.c | 3
-rw-r--r-- drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c | 4
-rw-r--r-- drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c | 7
-rw-r--r-- drivers/gpu/drm/arm/malidp_mw.c | 4
-rw-r--r-- drivers/gpu/drm/bridge/tc358767.c | 7
-rw-r--r-- drivers/gpu/drm/drm_atomic.c | 13
-rw-r--r-- drivers/gpu/drm/drm_edid.c | 3
-rw-r--r-- drivers/gpu/drm/drm_writeback.c | 23
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_dump.c | 4
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c | 6
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_mmu.c | 17
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bios.c | 22
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display.c | 38
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display.h | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp.c | 184
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp.h | 6
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_mst.c | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 15
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dpll_mgr.h | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_sprite.c | 5
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_mman.c | 19
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object.h | 6
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 3
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_pm.c | 3
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 1
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine.h | 14
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 16
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_lrc.c | 164
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_reset.c | 12
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_reset.h | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ringbuffer.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_workarounds.c | 3
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.c | 5
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 3
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.h | 6
-rw-r--r-- drivers/gpu/drm/i915/i915_request.c | 69
-rw-r--r-- drivers/gpu/drm/i915/i915_request.h | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_pch.c | 1
-rw-r--r-- drivers/gpu/drm/i915/intel_pch.h | 1
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_gem.c | 6
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi_host.c | 6
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/dss.c | 2
-rw-r--r-- drivers/gpu/drm/panel/panel-lg-lb035q02.c | 9
-rw-r--r-- drivers/gpu/drm/panel/panel-nec-nl8048hl11.c | 9
-rw-r--r-- drivers/gpu/drm/panel/panel-sony-acx565akm.c | 9
-rw-r--r-- drivers/gpu/drm/panel/panel-tpo-td028ttec1.c | 3
-rw-r--r-- drivers/gpu/drm/panel/panel-tpo-td043mtea1.c | 9
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_drv.c | 2
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_gpu.c | 3
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_job.c | 16
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_mmu.c | 15
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_perfcnt.c | 1
-rw-r--r-- drivers/gpu/drm/radeon/radeon_drv.c | 12
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_writeback.c | 4
-rw-r--r-- drivers/gpu/drm/scheduler/sched_main.c | 19
-rw-r--r-- drivers/gpu/drm/tilcdc/tilcdc_tfp410.c | 1
-rw-r--r-- drivers/gpu/drm/tiny/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo.c | 9
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo_vm.c | 16
-rw-r--r-- drivers/gpu/drm/v3d/v3d_gem.c | 5
-rw-r--r-- drivers/gpu/drm/vc4/vc4_txp.c | 5
-rw-r--r-- drivers/gpu/drm/xen/xen_drm_front.c | 12
-rw-r--r-- drivers/hid/hid-axff.c | 11
-rw-r--r-- drivers/hid/hid-core.c | 7
-rw-r--r-- drivers/hid/hid-dr.c | 12
-rw-r--r-- drivers/hid/hid-emsff.c | 12
-rw-r--r-- drivers/hid/hid-gaff.c | 12
-rw-r--r-- drivers/hid/hid-google-hammer.c | 4
-rw-r--r-- drivers/hid/hid-holtekff.c | 12
-rw-r--r-- drivers/hid/hid-hyperv.c | 56
-rw-r--r-- drivers/hid/hid-ids.h | 2
-rw-r--r-- drivers/hid/hid-lg2ff.c | 12
-rw-r--r-- drivers/hid/hid-lg3ff.c | 11
-rw-r--r-- drivers/hid/hid-lg4ff.c | 11
-rw-r--r-- drivers/hid/hid-lgff.c | 11
-rw-r--r-- drivers/hid/hid-logitech-hidpp.c | 248
-rw-r--r-- drivers/hid/hid-microsoft.c | 12
-rw-r--r-- drivers/hid/hid-prodikeys.c | 4
-rw-r--r-- drivers/hid/hid-sony.c | 12
-rw-r--r-- drivers/hid/hid-tmff.c | 12
-rw-r--r-- drivers/hid/hid-zpff.c | 12
-rw-r--r-- drivers/hid/i2c-hid/i2c-hid-core.c | 118
-rw-r--r-- drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c | 19
-rw-r--r-- drivers/hid/intel-ish-hid/ishtp/client-buffers.c | 2
-rw-r--r-- drivers/hv/vmbus_drv.c | 6
-rw-r--r-- drivers/hwmon/nct7904.c | 33
-rw-r--r-- drivers/i2c/busses/i2c-aspeed.c | 54
-rw-r--r-- drivers/i2c/busses/i2c-mt65xx.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-stm32f7.c | 21
-rw-r--r-- drivers/iio/accel/adxl372.c | 22
-rw-r--r-- drivers/iio/accel/bmc150-accel-core.c | 2
-rw-r--r-- drivers/iio/adc/ad799x.c | 4
-rw-r--r-- drivers/iio/adc/axp288_adc.c | 32
-rw-r--r-- drivers/iio/adc/hx711.c | 10
-rw-r--r-- drivers/iio/adc/meson_saradc.c | 10
-rw-r--r-- drivers/iio/adc/stm32-adc-core.c | 70
-rw-r--r-- drivers/iio/adc/stm32-adc-core.h | 137
-rw-r--r-- drivers/iio/adc/stm32-adc.c | 109
-rw-r--r-- drivers/iio/imu/adis_buffer.c | 10
-rw-r--r-- drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h | 2
-rw-r--r-- drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c | 28
-rw-r--r-- drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c | 15
-rw-r--r-- drivers/iio/light/Kconfig | 1
-rw-r--r-- drivers/iio/light/opt3001.c | 6
-rw-r--r-- drivers/iio/light/vcnl4000.c | 14
-rw-r--r-- drivers/infiniband/core/cm.c | 3
-rw-r--r-- drivers/infiniband/core/cma.c | 3
-rw-r--r-- drivers/infiniband/core/core_priv.h | 1
-rw-r--r-- drivers/infiniband/core/device.c | 11
-rw-r--r-- drivers/infiniband/core/iwcm.c | 52
-rw-r--r-- drivers/infiniband/core/netlink.c | 107
-rw-r--r-- drivers/infiniband/core/nldev.c | 14
-rw-r--r-- drivers/infiniband/core/security.c | 2
-rw-r--r-- drivers/infiniband/core/umem_odp.c | 6
-rw-r--r-- drivers/infiniband/core/uverbs.h | 2
-rw-r--r-- drivers/infiniband/core/verbs.c | 9
-rw-r--r-- drivers/infiniband/hw/cxgb4/cm.c | 30
-rw-r--r-- drivers/infiniband/hw/cxgb4/device.c | 7
-rw-r--r-- drivers/infiniband/hw/cxgb4/mem.c | 28
-rw-r--r-- drivers/infiniband/hw/cxgb4/qp.c | 10
-rw-r--r-- drivers/infiniband/hw/hfi1/sdma.c | 10
-rw-r--r-- drivers/infiniband/hw/hfi1/tid_rdma.c | 5
-rw-r--r-- drivers/infiniband/hw/hfi1/verbs.c | 10
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 6
-rw-r--r-- drivers/infiniband/hw/i40iw/i40iw_verbs.c | 4
-rw-r--r-- drivers/infiniband/hw/mlx5/devx.c | 58
-rw-r--r-- drivers/infiniband/hw/mlx5/mlx5_ib.h | 3
-rw-r--r-- drivers/infiniband/hw/mlx5/mr.c | 68
-rw-r--r-- drivers/infiniband/hw/mlx5/odp.c | 58
-rw-r--r-- drivers/infiniband/hw/mlx5/qp.c | 8
-rw-r--r-- drivers/infiniband/hw/qedr/main.c | 2
-rw-r--r-- drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c | 2
-rw-r--r-- drivers/infiniband/sw/siw/siw_qp.c | 17
-rw-r--r-- drivers/infiniband/sw/siw/siw_verbs.c | 2
-rw-r--r-- drivers/input/misc/da9063_onkey.c | 5
-rw-r--r-- drivers/input/misc/soc_button_array.c | 17
-rw-r--r-- drivers/input/mouse/elantech.c | 55
-rw-r--r-- drivers/input/rmi4/rmi_driver.c | 6
-rw-r--r-- drivers/input/touchscreen/goodix.c | 58
-rw-r--r-- drivers/input/touchscreen/st1232.c | 6
-rw-r--r-- drivers/iommu/amd_iommu.c | 12
-rw-r--r-- drivers/iommu/amd_iommu_quirks.c | 13
-rw-r--r-- drivers/iommu/amd_iommu_types.h | 4
-rw-r--r-- drivers/iommu/arm-smmu.c | 1
-rw-r--r-- drivers/iommu/intel-iommu.c | 12
-rw-r--r-- drivers/iommu/io-pgtable-arm.c | 58
-rw-r--r-- drivers/iommu/ipmmu-vmsa.c | 7
-rw-r--r-- drivers/iommu/rockchip-iommu.c | 19
-rw-r--r-- drivers/irqchip/irq-al-fic.c | 12
-rw-r--r-- drivers/irqchip/irq-atmel-aic5.c | 10
-rw-r--r-- drivers/irqchip/irq-gic-v3-its.c | 21
-rw-r--r-- drivers/irqchip/irq-gic-v3.c | 2
-rw-r--r-- drivers/irqchip/irq-sifive-plic.c | 33
-rw-r--r-- drivers/isdn/capi/capi.c | 2
-rw-r--r-- drivers/isdn/hardware/mISDN/hfcsusb.h | 4
-rw-r--r-- drivers/isdn/hardware/mISDN/mISDNisar.c | 1
-rw-r--r-- drivers/macintosh/windfarm_cpufreq_clamp.c | 38
-rw-r--r-- drivers/md/dm-cache-target.c | 28
-rw-r--r-- drivers/md/dm-clone-target.c | 4
-rw-r--r-- drivers/md/dm-snap.c | 94
-rw-r--r-- drivers/md/raid0.c | 2
-rw-r--r-- drivers/media/usb/stkwebcam/stk-webcam.c | 3
-rw-r--r-- drivers/memstick/host/jmb38x_ms.c | 2
-rw-r--r-- drivers/mfd/mt6397-core.c | 64
-rw-r--r-- drivers/misc/fastrpc.c | 1
-rw-r--r-- drivers/misc/mei/bus-fixup.c | 14
-rw-r--r-- drivers/misc/mei/hw-me-regs.h | 3
-rw-r--r-- drivers/misc/mei/hw-me.c | 21
-rw-r--r-- drivers/misc/mei/hw-me.h | 8
-rw-r--r-- drivers/misc/mei/mei_dev.h | 4
-rw-r--r-- drivers/misc/mei/pci-me.c | 13
-rw-r--r-- drivers/mmc/host/cqhci.c | 3
-rw-r--r-- drivers/mmc/host/mxs-mmc.c | 7
-rw-r--r-- drivers/mmc/host/renesas_sdhi_core.c | 31
-rw-r--r-- drivers/mmc/host/sdhci-iproc.c | 1
-rw-r--r-- drivers/mmc/host/sdhci-omap.c | 2
-rw-r--r-- drivers/mmc/host/sh_mmcif.c | 6
-rw-r--r-- drivers/mtd/nand/raw/au1550nd.c | 5
-rw-r--r-- drivers/mtd/spi-nor/spi-nor.c | 2
-rw-r--r-- drivers/net/bonding/bond_alb.c | 2
-rw-r--r-- drivers/net/bonding/bond_main.c | 107
-rw-r--r-- drivers/net/caif/Kconfig | 16
-rw-r--r-- drivers/net/dsa/Kconfig | 1
-rw-r--r-- drivers/net/dsa/b53/b53_common.c | 74
-rw-r--r-- drivers/net/dsa/b53/b53_priv.h | 8
-rw-r--r-- drivers/net/dsa/bcm_sf2.c | 73
-rw-r--r-- drivers/net/dsa/bcm_sf2.h | 3
-rw-r--r-- drivers/net/dsa/bcm_sf2_cfp.c | 6
-rw-r--r-- drivers/net/dsa/dsa_loop.c | 5
-rw-r--r-- drivers/net/dsa/lan9303-core.c | 4
-rw-r--r-- drivers/net/dsa/lantiq_gswip.c | 4
-rw-r--r-- drivers/net/dsa/microchip/ksz8795.c | 4
-rw-r--r-- drivers/net/dsa/microchip/ksz8795_spi.c | 7
-rw-r--r-- drivers/net/dsa/microchip/ksz9477_i2c.c | 7
-rw-r--r-- drivers/net/dsa/microchip/ksz9477_reg.h | 4
-rw-r--r-- drivers/net/dsa/microchip/ksz9477_spi.c | 6
-rw-r--r-- drivers/net/dsa/microchip/ksz_common.c | 14
-rw-r--r-- drivers/net/dsa/microchip/ksz_common.h | 20
-rw-r--r-- drivers/net/dsa/mt7530.c | 23
-rw-r--r-- drivers/net/dsa/mv88e6060.c | 4
-rw-r--r-- drivers/net/dsa/mv88e6xxx/chip.c | 434
-rw-r--r-- drivers/net/dsa/mv88e6xxx/chip.h | 10
-rw-r--r-- drivers/net/dsa/mv88e6xxx/global1.h | 4
-rw-r--r-- drivers/net/dsa/mv88e6xxx/global1_atu.c | 37
-rw-r--r-- drivers/net/dsa/mv88e6xxx/global2.c | 13
-rw-r--r-- drivers/net/dsa/mv88e6xxx/global2.h | 24
-rw-r--r-- drivers/net/dsa/qca8k.c | 16
-rw-r--r-- drivers/net/dsa/realtek-smi-core.c | 5
-rw-r--r-- drivers/net/dsa/rtl8366.c | 11
-rw-r--r-- drivers/net/dsa/rtl8366rb.c | 16
-rw-r--r-- drivers/net/dsa/sja1105/Kconfig | 4
-rw-r--r-- drivers/net/dsa/sja1105/sja1105.h | 39
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_clocking.c | 65
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_dynamic_config.c | 12
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_dynamic_config.h | 4
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_ethtool.c | 16
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_main.c | 350
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_ptp.c | 548
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_ptp.h | 95
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_spi.c | 257
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_static_config.h | 4
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_tas.h | 4
-rw-r--r-- drivers/net/dsa/vitesse-vsc73xx-core.c | 5
-rw-r--r-- drivers/net/ethernet/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/Makefile | 1
-rw-r--r-- drivers/net/ethernet/altera/altera_tse_main.c | 6
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_ethtool.c | 35
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_netdev.c | 158
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_netdev.h | 14
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/Makefile | 3
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_cfg.h | 4
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c | 35
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_filters.c | 17
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_hw.h | 48
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_main.c | 107
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 130
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_nic.h | 16
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c | 5
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_phy.c | 147
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_phy.h | 32
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_ptp.c | 1392
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_ptp.h | 140
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 60
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_ring.h | 7
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | 350
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h | 9
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c | 113
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h | 65
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h | 242
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c | 7
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h | 174
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c | 99
-rw-r--r-- drivers/net/ethernet/arc/emac_arc.c | 15
-rw-r--r-- drivers/net/ethernet/arc/emac_rockchip.c | 10
-rw-r--r-- drivers/net/ethernet/atheros/ag71xx.c | 11
-rw-r--r-- drivers/net/ethernet/aurora/nb8800.c | 4
-rw-r--r-- drivers/net/ethernet/aurora/nb8800.h | 2
-rw-r--r-- drivers/net/ethernet/broadcom/Kconfig | 4
-rw-r--r-- drivers/net/ethernet/broadcom/b44.c | 3
-rw-r--r-- drivers/net/ethernet/broadcom/bcmsysport.c | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 3
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h | 132
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 16
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 70
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 18
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 121
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h | 3
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 37
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 416
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h | 20
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | 10
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h | 3
-rw-r--r-- drivers/net/ethernet/broadcom/cnic.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 64
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.h | 3
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmmii.c | 125
-rw-r--r-- drivers/net/ethernet/cadence/macb_main.c | 19
-rw-r--r-- drivers/net/ethernet/calxeda/xgmac.c | 2
-rw-r--r-- drivers/net/ethernet/cavium/common/cavium_ptp.h | 2
-rw-r--r-- drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 12
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 105
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c | 37
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/l2t.c | 8
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/sge.c | 8
-rw-r--r-- drivers/net/ethernet/cortina/gemini.h | 2
-rw-r--r-- drivers/net/ethernet/faraday/ftgmac100.c | 81
-rw-r--r-- drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 370
-rw-r--r-- drivers/net/ethernet/freescale/dpaa/dpaa_eth.h | 12
-rw-r--r-- drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c | 6
-rw-r--r-- drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c | 68
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/Makefile | 2
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 207
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h | 11
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c | 27
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c | 302
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h | 32
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h | 2
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h | 62
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpmac.c | 149
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpmac.h | 144
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpni.h | 5
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h | 2
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dprtc.h | 2
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_pf.c | 4
-rw-r--r-- drivers/net/ethernet/freescale/fec_main.c | 10
-rw-r--r-- drivers/net/ethernet/freescale/fec_ptp.c | 4
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman.c | 6
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_port.c | 17
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_port.h | 2
-rw-r--r-- drivers/net/ethernet/freescale/fman/mac.c | 6
-rw-r--r-- drivers/net/ethernet/freescale/gianfar.c | 7
-rw-r--r-- drivers/net/ethernet/google/gve/gve_rx.c | 2
-rw-r--r-- drivers/net/ethernet/google/gve/gve_tx.c | 24
-rw-r--r-- drivers/net/ethernet/hisilicon/hip04_eth.c | 23
-rw-r--r-- drivers/net/ethernet/hisilicon/hix5hd2_gmac.c | 5
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | 4
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h | 5
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hnae3.c | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hnae3.h | 29
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c | 93
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 588
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 36
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 45
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | 7
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 39
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c | 188
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 568
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 23
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 100
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 58
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h | 8
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c | 10
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 142
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 4
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c | 30
-rw-r--r-- drivers/net/ethernet/hisilicon/hns_mdio.c | 6
-rw-r--r-- drivers/net/ethernet/i825xx/lasi_82596.c | 4
-rw-r--r-- drivers/net/ethernet/i825xx/lib82596.c | 4
-rw-r--r-- drivers/net/ethernet/i825xx/sni_82596.c | 4
-rw-r--r-- drivers/net/ethernet/ibm/emac/core.c | 5
-rw-r--r-- drivers/net/ethernet/ibm/emac/core.h | 2
-rw-r--r-- drivers/net/ethernet/ibm/emac/zmii.c | 3
-rw-r--r-- drivers/net/ethernet/ibm/emac/zmii.h | 3
-rw-r--r-- drivers/net/ethernet/ibm/ibmvnic.c | 8
-rw-r--r-- drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 7
-rw-r--r-- drivers/net/ethernet/intel/e1000e/ethtool.c | 4
-rw-r--r-- drivers/net/ethernet/intel/e1000e/hw.h | 12
-rw-r--r-- drivers/net/ethernet/intel/e1000e/ich8lan.c | 7
-rw-r--r-- drivers/net/ethernet/intel/e1000e/netdev.c | 273
-rw-r--r-- drivers/net/ethernet/intel/e1000e/ptp.c | 2
-rw-r--r-- drivers/net/ethernet/intel/e1000e/regs.h | 4
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k.h | 3
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_iov.c | 48
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_main.c | 2
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 1
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 3
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_tlv.h | 6
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_type.h | 1
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e.h | 1
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_adminq.c | 71
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 8
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_common.c | 118
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_dcb.c | 4
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_dcb.h | 3
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_devids.h | 2
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 41
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_main.c | 32
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_nvm.c | 61
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_prototype.h | 36
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 12
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_type.h | 1
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 93
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 3
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_xsk.c | 5
-rw-r--r-- drivers/net/ethernet/intel/ice/Makefile | 3
-rw-r--r-- drivers/net/ethernet/intel/ice/ice.h | 59
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_base.c | 857
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_base.h | 31
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_dcb_lib.h | 1
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ethtool.c | 65
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_lib.c | 984
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_lib.h | 49
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_main.c | 355
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_txrx.c | 573
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_txrx.h | 140
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_txrx_lib.c | 273
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_txrx_lib.h | 59
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 1
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_xsk.c | 1181
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_xsk.h | 72
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_82575.c | 2
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_82575.h | 1
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_main.c | 31
-rw-r--r-- drivers/net/ethernet/intel/igc/igc.h | 1
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_defines.h | 8
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_hw.h | 1
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_mac.c | 104
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_mac.h | 2
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_main.c | 229
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 8
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 28
-rw-r--r-- drivers/net/ethernet/marvell/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/marvell/mv643xx_eth.c | 7
-rw-r--r-- drivers/net/ethernet/marvell/mvneta.c | 625
-rw-r--r-- drivers/net/ethernet/marvell/mvneta_bm.h | 32
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 30
-rw-r--r-- drivers/net/ethernet/marvell/pxa168_eth.c | 4
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_path.c | 6
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 9
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_sgmii.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/main.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 42
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/Makefile | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en.h | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h | 13
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h | 29
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 190
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 13
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c | 15
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 16
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 165
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 41
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 13
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c | 22
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c | 61
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 89
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/health.c | 14
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lag.c | 65
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lag.h | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c | 78
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/main.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/mr.c | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/sriov.c | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_crc32.c | 98
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c | 65
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c | 23
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h | 20
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/wq.c | 38
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/wq.h | 25
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core.c | 57
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core.h | 18
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core_env.c | 23
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c | 66
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core_thermal.c | 40
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/i2c.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/minimal.c | 34
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/pci.c | 18
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/pci_hw.h | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/port.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/reg.h | 77
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/resources.h | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 497
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 33
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c | 57
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 59
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c | 21
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/switchib.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/switchx2.c | 4
-rw-r--r-- drivers/net/ethernet/mscc/ocelot.c | 11
-rw-r--r-- drivers/net/ethernet/mscc/ocelot_board.c | 26
-rw-r--r-- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 2
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 3
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_repr.c | 18
-rw-r--r-- drivers/net/ethernet/ni/nixge.c | 5
-rw-r--r-- drivers/net/ethernet/nxp/lpc_eth.c | 31
-rw-r--r-- drivers/net/ethernet/pensando/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic.h | 4
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_dev.c | 60
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_dev.h | 12
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_devlink.c | 9
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_ethtool.c | 128
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_if.h | 196
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_lif.c | 43
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_lif.h | 15
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_main.c | 26
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_stats.c | 29
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_txrx.c | 290
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_int.h | 4
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_main.c | 27
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_sriov.c | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_sriov.h | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_filter.c | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qla3xxx.c | 1
-rw-r--r-- drivers/net/ethernet/realtek/r8169_main.c | 136
-rw-r--r-- drivers/net/ethernet/renesas/ravb_main.c | 4
-rw-r--r-- drivers/net/ethernet/renesas/sh_eth.c | 7
-rw-r--r-- drivers/net/ethernet/rocker/rocker_main.c | 9
-rw-r--r-- drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c | 5
-rw-r--r-- drivers/net/ethernet/sfc/ef10.c | 14
-rw-r--r-- drivers/net/ethernet/sfc/efx.c | 269
-rw-r--r-- drivers/net/ethernet/sfc/efx.h | 3
-rw-r--r-- drivers/net/ethernet/sfc/ethtool.c | 25
-rw-r--r-- drivers/net/ethernet/sfc/net_driver.h | 64
-rw-r--r-- drivers/net/ethernet/sfc/rx.c | 149
-rw-r--r-- drivers/net/ethernet/sfc/tx.c | 92
-rw-r--r-- drivers/net/ethernet/sgi/ioc3-eth.c | 62
-rw-r--r-- drivers/net/ethernet/socionext/netsec.c | 39
-rw-r--r-- drivers/net/ethernet/socionext/sni_ave.c | 6
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c | 10
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c | 5
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c | 36
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c | 5
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c | 9
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c | 6
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c | 10
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c | 8
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4.h | 22
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 134
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c | 29
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h | 3
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac5.c | 1
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h | 3
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c | 26
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c | 6
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/hwif.h | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 65
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 6
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c | 129
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c | 2
-rw-r--r-- drivers/net/ethernet/ti/cpsw.c | 5
-rw-r--r-- drivers/net/ethernet/ti/cpsw_priv.h | 2
-rw-r--r-- drivers/net/ethernet/ti/davinci_cpdma.c | 2
-rw-r--r-- drivers/net/ethernet/ti/netcp_ethss.c | 5
-rw-r--r-- drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 6
-rw-r--r-- drivers/net/fjes/fjes_main.c | 15
-rw-r--r-- drivers/net/hamradio/bpqether.c | 22
-rw-r--r-- drivers/net/hyperv/hyperv_net.h | 1
-rw-r--r-- drivers/net/hyperv/netvsc_drv.c | 26
-rw-r--r-- drivers/net/hyperv/rndis_filter.c | 8
-rw-r--r-- drivers/net/ieee802154/atusb.c | 3
-rw-r--r-- drivers/net/ieee802154/ca8210.c | 2
-rw-r--r-- drivers/net/ieee802154/mcr20a.c | 2
-rw-r--r-- drivers/net/ipvlan/ipvlan_main.c | 6
-rw-r--r-- drivers/net/macsec.c | 18
-rw-r--r-- drivers/net/macvlan.c | 19
-rw-r--r-- drivers/net/netdevsim/Makefile | 2
-rw-r--r-- drivers/net/netdevsim/bus.c | 1
-rw-r--r-- drivers/net/netdevsim/dev.c | 372
-rw-r--r-- drivers/net/netdevsim/fib.c | 175
-rw-r--r-- drivers/net/netdevsim/health.c | 325
-rw-r--r-- drivers/net/netdevsim/netdev.c | 10
-rw-r--r-- drivers/net/netdevsim/netdevsim.h | 33
-rw-r--r-- drivers/net/phy/at803x.c | 102
-rw-r--r-- drivers/net/phy/bcm7xxx.c | 1
-rw-r--r-- drivers/net/phy/broadcom.c | 89
-rw-r--r-- drivers/net/phy/dp83867.c | 152
-rw-r--r-- drivers/net/phy/marvell.c | 255
-rw-r--r-- drivers/net/phy/mdio_device.c | 2
-rw-r--r-- drivers/net/phy/micrel.c | 42
-rw-r--r-- drivers/net/phy/mscc.c | 14
-rw-r--r-- drivers/net/phy/phy-c45.c | 2
-rw-r--r-- drivers/net/phy/phy-core.c | 36
-rw-r--r-- drivers/net/phy/phy.c | 8
-rw-r--r-- drivers/net/phy/phy_device.c | 215
-rw-r--r-- drivers/net/phy/phylink.c | 46
-rw-r--r-- drivers/net/phy/sfp-bus.c | 65
-rw-r--r-- drivers/net/phy/smsc.c | 1
-rw-r--r-- drivers/net/ppp/ppp_generic.c | 2
-rw-r--r-- drivers/net/ppp/pptp.c | 4
-rw-r--r-- drivers/net/team/team.c | 21
-rw-r--r-- drivers/net/tun.c | 23
-rw-r--r-- drivers/net/usb/ax88179_178a.c | 34
-rw-r--r-- drivers/net/usb/cdc_ether.c | 14
-rw-r--r-- drivers/net/usb/hso.c | 25
-rw-r--r-- drivers/net/usb/lan78xx.c | 20
-rw-r--r-- drivers/net/usb/qmi_wwan.c | 2
-rw-r--r-- drivers/net/usb/r8152.c | 1281
-rw-r--r-- drivers/net/usb/sr9800.c | 2
-rw-r--r-- drivers/net/virtio_net.c | 2
-rw-r--r-- drivers/net/vrf.c | 9
-rw-r--r-- drivers/net/vxlan.c | 67
-rw-r--r-- drivers/net/wimax/i2400m/debugfs.c | 6
-rw-r--r-- drivers/net/wimax/i2400m/op-rfkill.c | 2
-rw-r--r-- drivers/net/wimax/i2400m/usb.c | 2
-rw-r--r-- drivers/net/wireless/ath/ath9k/ar9003_hw.c | 2
-rw-r--r-- drivers/net/wireless/ath/ath9k/htc_drv_main.c | 2
-rw-r--r-- drivers/net/wireless/ath/ath9k/main.c | 2
-rw-r--r-- drivers/net/wireless/ath/carl9170/main.c | 3
-rw-r--r-- drivers/net/wireless/ath/wcn36xx/main.c | 5
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c | 3
-rw-r--r-- drivers/net/wireless/intel/iwlegacy/4965-mac.c | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/dvm/tx.c | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/fw/acpi.c | 10
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/fw/api/scan.h | 22
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/fw/dbg.c | 1
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/fw/file.h | 3
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-csr.h | 1
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-fh.h | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-io.h | 12
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-prph.h | 5
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 43
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 9
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 7
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/scan.c | 40
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 145
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c | 36
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 295
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c | 25
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 25
-rw-r--r-- drivers/net/wireless/intersil/hostap/hostap_hw.c | 25
-rw-r--r-- drivers/net/wireless/mac80211_hwsim.c | 43
-rw-r--r-- drivers/net/wireless/marvell/mwl8k.c | 2
-rw-r--r-- drivers/net/wireless/mediatek/mt76/Makefile | 2
-rw-r--r-- drivers/net/wireless/mediatek/mt76/dma.c | 6
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76.h | 6
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7603/main.c | 3
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/main.c | 3
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x02_util.c | 3
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x2/pci.c | 2
-rw-r--r-- drivers/net/wireless/mediatek/mt76/pci.c | 46
-rw-r--r-- drivers/net/wireless/mediatek/mt7601u/main.c | 3
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2800lib.c | 4
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2x00.h | 1
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2x00debug.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/base.c | 3
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/pci.c | 3
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/ps.c | 6
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtw88/mac80211.c | 3
-rw-r--r-- drivers/net/wireless/rsi/rsi_91x_mac80211.c | 3
-rw-r--r-- drivers/net/wireless/ti/wlcore/spi.c | 2
-rw-r--r-- drivers/net/wireless/virt_wifi.c | 54
-rw-r--r-- drivers/net/xen-netback/interface.c | 115
-rw-r--r-- drivers/net/xen-netfront.c | 17
-rw-r--r-- drivers/nfc/nfcmrvl/i2c.c | 1
-rw-r--r-- drivers/nfc/pn533/Kconfig | 11
-rw-r--r-- drivers/nfc/pn533/Makefile | 2
-rw-r--r-- drivers/nfc/pn533/i2c.c | 32
-rw-r--r-- drivers/nfc/pn533/pn533.c | 281
-rw-r--r-- drivers/nfc/pn533/pn533.h | 40
-rw-r--r-- drivers/nfc/pn533/uart.c | 323
-rw-r--r-- drivers/nfc/pn533/usb.c | 19
-rw-r--r-- drivers/nfc/s3fwrn5/i2c.c | 1
-rw-r--r-- drivers/nvme/host/core.c | 226
-rw-r--r-- drivers/nvme/host/fabrics.h | 3
-rw-r--r-- drivers/nvme/host/multipath.c | 9
-rw-r--r-- drivers/nvme/host/nvme.h | 7
-rw-r--r-- drivers/nvme/host/pci.c | 103
-rw-r--r-- drivers/nvme/host/rdma.c | 27
-rw-r--r-- drivers/nvme/host/tcp.c | 17
-rw-r--r-- drivers/nvme/target/io-cmd-bdev.c | 16
-rw-r--r-- drivers/nvme/target/loop.c | 4
-rw-r--r-- drivers/nvme/target/tcp.c | 12
-rw-r--r-- drivers/of/of_mdio.c | 4
-rw-r--r-- drivers/of/of_net.c | 16
-rw-r--r-- drivers/of/of_reserved_mem.c | 4
-rw-r--r-- drivers/of/unittest.c | 1
-rw-r--r-- drivers/opp/core.c | 16
-rw-r--r-- drivers/opp/of.c | 9
-rw-r--r-- drivers/parisc/sba_iommu.c | 8
-rw-r--r-- drivers/pci/pci.c | 24
-rw-r--r-- drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c | 124
-rw-r--r-- drivers/pinctrl/aspeed/pinmux-aspeed.h | 3
-rw-r--r-- drivers/pinctrl/bcm/pinctrl-iproc-gpio.c | 12
-rw-r--r-- drivers/pinctrl/bcm/pinctrl-ns2-mux.c | 4
-rw-r--r-- drivers/pinctrl/berlin/pinctrl-as370.c | 2
-rw-r--r-- drivers/pinctrl/intel/pinctrl-cherryview.c | 4
-rw-r--r-- drivers/pinctrl/intel/pinctrl-intel.c | 27
-rw-r--r-- drivers/pinctrl/mvebu/pinctrl-armada-37xx.c | 26
-rw-r--r-- drivers/pinctrl/pinctrl-stmfx.c | 2
-rw-r--r-- drivers/platform/x86/classmate-laptop.c | 12
-rw-r--r-- drivers/platform/x86/i2c-multi-instantiate.c | 1
-rw-r--r-- drivers/platform/x86/intel_punit_ipc.c | 3
-rw-r--r-- drivers/ptp/Kconfig | 16
-rw-r--r-- drivers/ptp/Makefile | 1
-rw-r--r-- drivers/ptp/idt8a340_reg.h | 659
-rw-r--r-- drivers/ptp/ptp_clockmatrix.c | 1425
-rw-r--r-- drivers/ptp/ptp_clockmatrix.h | 104
-rw-r--r-- drivers/ptp/ptp_dte.c | 4
-rw-r--r-- drivers/ptp/ptp_qoriq.c | 3
-rw-r--r-- drivers/regulator/core.c | 13
-rw-r--r-- drivers/regulator/da9062-regulator.c | 118
-rw-r--r-- drivers/regulator/fixed.c | 5
-rw-r--r-- drivers/regulator/lochnagar-regulator.c | 1
-rw-r--r-- drivers/regulator/of_regulator.c | 27
-rw-r--r-- drivers/regulator/pfuze100-regulator.c | 8
-rw-r--r-- drivers/regulator/qcom-rpmh-regulator.c | 4
-rw-r--r-- drivers/regulator/ti-abb-regulator.c | 26
-rw-r--r-- drivers/s390/block/dasd_eckd.c | 81
-rw-r--r-- drivers/s390/cio/cio.h | 1
-rw-r--r-- drivers/s390/cio/css.c | 7
-rw-r--r-- drivers/s390/cio/device.c | 2
-rw-r--r-- drivers/s390/cio/qdio.h | 1
-rw-r--r-- drivers/s390/cio/qdio_main.c | 31
-rw-r--r-- drivers/s390/cio/qdio_setup.c | 2
-rw-r--r-- drivers/s390/crypto/zcrypt_api.c | 3
-rw-r--r-- drivers/s390/net/qeth_core.h | 9
-rw-r--r-- drivers/s390/net/qeth_core_main.c | 111
-rw-r--r-- drivers/s390/net/qeth_core_mpc.h | 4
-rw-r--r-- drivers/s390/net/qeth_l2_main.c | 44
-rw-r--r-- drivers/s390/net/qeth_l3.h | 24
-rw-r--r-- drivers/s390/net/qeth_l3_main.c | 41
-rw-r--r-- drivers/s390/scsi/zfcp_fsf.c | 16
-rw-r--r-- drivers/scsi/Kconfig | 2
-rw-r--r-- drivers/scsi/bnx2fc/bnx2fc_io.c | 29
-rw-r--r-- drivers/scsi/ch.c | 1
-rw-r--r-- drivers/scsi/device_handler/scsi_dh_alua.c | 21
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas_main.c | 6
-rw-r--r-- drivers/scsi/hpsa.c | 4
-rw-r--r-- drivers/scsi/lpfc/lpfc_init.c | 2
-rw-r--r-- drivers/scsi/lpfc/lpfc_scsi.c | 2
-rw-r--r-- drivers/scsi/megaraid.c | 4
-rw-r--r-- drivers/scsi/qedf/qedf_main.c | 2
-rw-r--r-- drivers/scsi/qla2xxx/qla_attr.c | 2
-rw-r--r-- drivers/scsi/qla2xxx/qla_def.h | 4
-rw-r--r-- drivers/scsi/qla2xxx/qla_gs.c | 3
-rw-r--r-- drivers/scsi/qla2xxx/qla_init.c | 109
-rw-r--r-- drivers/scsi/qla2xxx/qla_iocb.c | 7
-rw-r--r-- drivers/scsi/qla2xxx/qla_isr.c | 2
-rw-r--r-- drivers/scsi/qla2xxx/qla_mbx.c | 25
-rw-r--r-- drivers/scsi/qla2xxx/qla_mid.c | 32
-rw-r--r-- drivers/scsi/qla2xxx/qla_os.c | 22
-rw-r--r-- drivers/scsi/qla2xxx/qla_target.c | 26
-rw-r--r-- drivers/scsi/scsi_error.c | 3
-rw-r--r-- drivers/scsi/scsi_lib.c | 4
-rw-r--r-- drivers/scsi/scsi_sysfs.c | 11
-rw-r--r-- drivers/scsi/sd.c | 3
-rw-r--r-- drivers/scsi/sni_53c710.c | 4
-rw-r--r-- drivers/scsi/storvsc_drv.c | 3
-rw-r--r-- drivers/scsi/ufs/ufshcd.c | 3
-rw-r--r-- drivers/soc/fsl/qbman/qman.c | 7
-rw-r--r-- drivers/soc/imx/soc-imx-scu.c | 2
-rw-r--r-- drivers/spi/spi.c | 127
-rw-r--r-- drivers/staging/Kconfig | 2
-rw-r--r-- drivers/staging/Makefile | 1
-rw-r--r-- drivers/staging/exfat/Kconfig | 3
-rw-r--r-- drivers/staging/exfat/Makefile | 2
-rw-r--r-- drivers/staging/exfat/exfat.h | 2
-rw-r--r-- drivers/staging/exfat/exfat_blkdev.c | 2
-rw-r--r-- drivers/staging/exfat/exfat_cache.c | 2
-rw-r--r-- drivers/staging/exfat/exfat_core.c | 2
-rw-r--r-- drivers/staging/exfat/exfat_nls.c | 2
-rw-r--r-- drivers/staging/exfat/exfat_super.c | 7
-rw-r--r-- drivers/staging/exfat/exfat_upcase.c | 2
-rw-r--r-- drivers/staging/fbtft/Kconfig | 12
-rw-r--r-- drivers/staging/fbtft/Makefile | 4
-rw-r--r-- drivers/staging/fbtft/fbtft-core.c | 7
-rw-r--r-- drivers/staging/fbtft/fbtft_device.c | 1261
-rw-r--r-- drivers/staging/fbtft/flexfb.c | 851
-rw-r--r-- drivers/staging/hp/Kconfig (renamed from drivers/net/ethernet/hp/Kconfig) | 0
-rw-r--r-- drivers/staging/hp/Makefile (renamed from drivers/net/ethernet/hp/Makefile) | 0
-rw-r--r-- drivers/staging/hp/hp100.c (renamed from drivers/net/ethernet/hp/hp100.c) | 0
-rw-r--r-- drivers/staging/hp/hp100.h (renamed from drivers/net/ethernet/hp/hp100.h) | 0
-rw-r--r-- drivers/staging/octeon/ethernet-tx.c | 15
-rw-r--r-- drivers/staging/octeon/octeon-stubs.h | 2
-rw-r--r-- drivers/staging/rtl8188eu/hal/hal8188e_rate_adaptive.c | 2
-rw-r--r-- drivers/staging/rtl8188eu/os_dep/usb_intf.c | 6
-rw-r--r-- drivers/staging/speakup/sysfs-driver-speakup | 369
-rw-r--r-- drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c | 4
-rw-r--r-- drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c | 1
-rw-r--r-- drivers/staging/vt6655/device_main.c | 4
-rw-r--r-- drivers/staging/wlan-ng/cfg80211.c | 6
-rw-r--r-- drivers/target/target_core_device.c | 21
-rw-r--r-- drivers/thermal/cpu_cooling.c | 14
-rw-r--r-- drivers/tty/n_hdlc.c | 5
-rw-r--r-- drivers/tty/serial/8250/8250_men_mcb.c | 8
-rw-r--r-- drivers/tty/serial/8250/8250_omap.c | 5
-rw-r--r-- drivers/tty/serial/Kconfig | 1
-rw-r--r-- drivers/tty/serial/fsl_linflexuart.c | 21
-rw-r--r-- drivers/tty/serial/fsl_lpuart.c | 2
-rw-r--r-- drivers/tty/serial/imx.c | 4
-rw-r--r-- drivers/tty/serial/owl-uart.c | 2
-rw-r--r-- drivers/tty/serial/rda-uart.c | 2
-rw-r--r-- drivers/tty/serial/serial_core.c | 2
-rw-r--r-- drivers/tty/serial/serial_mctrl_gpio.c | 3
-rw-r--r-- drivers/tty/serial/sh-sci.c | 8
-rw-r--r-- drivers/tty/serial/uartlite.c | 3
-rw-r--r-- drivers/tty/serial/xilinx_uartps.c | 8
-rw-r--r-- drivers/usb/cdns3/cdns3-pci-wrap.c | 3
-rw-r--r-- drivers/usb/cdns3/core.c | 22
-rw-r--r-- drivers/usb/cdns3/ep0.c | 12
-rw-r--r-- drivers/usb/cdns3/gadget.c | 43
-rw-r--r-- drivers/usb/class/usblp.c | 12
-rw-r--r-- drivers/usb/dwc3/drd.c | 7
-rw-r--r-- drivers/usb/dwc3/gadget.c | 7
-rw-r--r-- drivers/usb/dwc3/host.c | 7
-rw-r--r-- drivers/usb/gadget/udc/Kconfig | 2
-rw-r--r-- drivers/usb/gadget/udc/dummy_hcd.c | 3
-rw-r--r-- drivers/usb/gadget/udc/lpc32xx_udc.c | 10
-rw-r--r-- drivers/usb/host/xhci-ext-caps.c | 1
-rw-r--r-- drivers/usb/host/xhci-ring.c | 4
-rw-r--r-- drivers/usb/host/xhci.c | 78
-rw-r--r-- drivers/usb/image/microtek.c | 4
-rw-r--r-- drivers/usb/misc/Kconfig | 10
-rw-r--r-- drivers/usb/misc/Makefile | 1
-rw-r--r-- drivers/usb/misc/adutux.c | 24
-rw-r--r-- drivers/usb/misc/chaoskey.c | 5
-rw-r--r-- drivers/usb/misc/iowarrior.c | 48
-rw-r--r-- drivers/usb/misc/ldusb.c | 47
-rw-r--r-- drivers/usb/misc/legousbtower.c | 63
-rw-r--r-- drivers/usb/misc/rio500.c | 554
-rw-r--r-- drivers/usb/misc/rio500_usb.h | 20
-rw-r--r-- drivers/usb/misc/usblcd.c | 60
-rw-r--r-- drivers/usb/misc/yurex.c | 18
-rw-r--r-- drivers/usb/renesas_usbhs/common.h | 1
-rw-r--r-- drivers/usb/renesas_usbhs/fifo.c | 2
-rw-r--r-- drivers/usb/renesas_usbhs/fifo.h | 1
-rw-r--r-- drivers/usb/renesas_usbhs/mod_gadget.c | 18
-rw-r--r-- drivers/usb/renesas_usbhs/pipe.c | 15
-rw-r--r-- drivers/usb/renesas_usbhs/pipe.h | 1
-rw-r--r-- drivers/usb/serial/ftdi_sio.c | 3
-rw-r--r-- drivers/usb/serial/ftdi_sio_ids.h | 9
-rw-r--r-- drivers/usb/serial/keyspan.c | 4
-rw-r--r-- drivers/usb/serial/option.c | 11
-rw-r--r-- drivers/usb/serial/ti_usb_3410_5052.c | 12
-rw-r--r-- drivers/usb/serial/usb-serial.c | 5
-rw-r--r-- drivers/usb/typec/tcpm/tcpm.c | 14
-rw-r--r-- drivers/usb/typec/ucsi/displayport.c | 2
-rw-r--r-- drivers/usb/typec/ucsi/ucsi_ccg.c | 42
-rw-r--r-- drivers/usb/usb-skeleton.c | 19
-rw-r--r-- drivers/usb/usbip/vhci_hcd.c | 4
-rw-r--r-- drivers/vfio/vfio_iommu_type1.c | 2
-rw-r--r-- drivers/vhost/test.c | 2
-rw-r--r-- drivers/vhost/vringh.c | 8
-rw-r--r-- drivers/video/logo/Makefile | 21
-rw-r--r-- drivers/virt/vboxguest/vboxguest_utils.c | 3
-rw-r--r-- drivers/virtio/virtio_ring.c | 7
-rw-r--r-- drivers/w1/slaves/Kconfig | 1
-rw-r--r-- drivers/xen/balloon.c | 24
-rw-r--r-- drivers/xen/efi.c | 84
-rw-r--r-- drivers/xen/gntdev.c | 13
-rw-r--r-- drivers/xen/grant-table.c | 3
-rw-r--r-- drivers/xen/pvcalls-back.c | 2
-rw-r--r-- drivers/xen/xenbus/xenbus_dev_frontend.c | 20
991 files changed, 28561 insertions, 12757 deletions
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 3b2525908dd8..a1a858ad4d18 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -905,8 +905,8 @@ void acpi_cppc_processor_exit(struct acpi_processor *pr)
pcc_data[pcc_ss_id]->refcount--;
if (!pcc_data[pcc_ss_id]->refcount) {
pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel);
- pcc_data[pcc_ss_id]->pcc_channel_acquired = 0;
kfree(pcc_data[pcc_ss_id]);
+ pcc_data[pcc_ss_id] = NULL;
}
}
}
diff --git a/drivers/acpi/hmat/hmat.c b/drivers/acpi/hmat/hmat.c
index 8f9a28a870b0..8b0de8a3c647 100644
--- a/drivers/acpi/hmat/hmat.c
+++ b/drivers/acpi/hmat/hmat.c
@@ -403,7 +403,7 @@ static int __init hmat_parse_proximity_domain(union acpi_subtable_headers *heade
pr_info("HMAT: Memory Flags:%04x Processor Domain:%d Memory Domain:%d\n",
p->flags, p->processor_PD, p->memory_PD);
- if (p->flags & ACPI_HMAT_MEMORY_PD_VALID) {
+ if (p->flags & ACPI_HMAT_MEMORY_PD_VALID && hmat_revision == 1) {
target = find_mem_target(p->memory_PD);
if (!target) {
pr_debug("HMAT: Memory Domain missing from SRAT\n");
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 1413324982f0..14e68f202f81 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -1322,7 +1322,7 @@ static ssize_t scrub_show(struct device *dev,
nfit_device_lock(dev);
nd_desc = dev_get_drvdata(dev);
if (!nd_desc) {
- device_unlock(dev);
+ nfit_device_unlock(dev);
return rc;
}
acpi_desc = to_acpi_desc(nd_desc);
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 08da9c29f1e9..62114a03a51a 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -290,14 +290,13 @@ static int acpi_processor_notifier(struct notifier_block *nb,
unsigned long event, void *data)
{
struct cpufreq_policy *policy = data;
- int cpu = policy->cpu;
if (event == CPUFREQ_CREATE_POLICY) {
- acpi_thermal_cpufreq_init(cpu);
- acpi_processor_ppc_init(cpu);
+ acpi_thermal_cpufreq_init(policy);
+ acpi_processor_ppc_init(policy);
} else if (event == CPUFREQ_REMOVE_POLICY) {
- acpi_processor_ppc_exit(cpu);
- acpi_thermal_cpufreq_exit(cpu);
+ acpi_processor_ppc_exit(policy);
+ acpi_thermal_cpufreq_exit(policy);
}
return 0;
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 2261713d1aec..5909e8fa4013 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -81,10 +81,10 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
pr->performance_platform_limit = (int)ppc;
if (ppc >= pr->performance->state_count ||
- unlikely(!dev_pm_qos_request_active(&pr->perflib_req)))
+ unlikely(!freq_qos_request_active(&pr->perflib_req)))
return 0;
- ret = dev_pm_qos_update_request(&pr->perflib_req,
+ ret = freq_qos_update_request(&pr->perflib_req,
pr->performance->states[ppc].core_frequency * 1000);
if (ret < 0) {
pr_warn("Failed to update perflib freq constraint: CPU%d (%d)\n",
@@ -157,26 +157,36 @@ void acpi_processor_ignore_ppc_init(void)
ignore_ppc = 0;
}
-void acpi_processor_ppc_init(int cpu)
+void acpi_processor_ppc_init(struct cpufreq_policy *policy)
{
- struct acpi_processor *pr = per_cpu(processors, cpu);
- int ret;
+ unsigned int cpu;
- ret = dev_pm_qos_add_request(get_cpu_device(cpu),
- &pr->perflib_req, DEV_PM_QOS_MAX_FREQUENCY,
- INT_MAX);
- if (ret < 0) {
- pr_err("Failed to add freq constraint for CPU%d (%d)\n", cpu,
- ret);
- return;
+ for_each_cpu(cpu, policy->related_cpus) {
+ struct acpi_processor *pr = per_cpu(processors, cpu);
+ int ret;
+
+ if (!pr)
+ continue;
+
+ ret = freq_qos_add_request(&policy->constraints,
+ &pr->perflib_req,
+ FREQ_QOS_MAX, INT_MAX);
+ if (ret < 0)
+ pr_err("Failed to add freq constraint for CPU%d (%d)\n",
+ cpu, ret);
}
}
-void acpi_processor_ppc_exit(int cpu)
+void acpi_processor_ppc_exit(struct cpufreq_policy *policy)
{
- struct acpi_processor *pr = per_cpu(processors, cpu);
+ unsigned int cpu;
- dev_pm_qos_remove_request(&pr->perflib_req);
+ for_each_cpu(cpu, policy->related_cpus) {
+ struct acpi_processor *pr = per_cpu(processors, cpu);
+
+ if (pr)
+ freq_qos_remove_request(&pr->perflib_req);
+ }
}
static int acpi_processor_get_performance_control(struct acpi_processor *pr)
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index ec2638f1df4f..41feb88ee92d 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -105,7 +105,7 @@ static int cpufreq_set_cur_state(unsigned int cpu, int state)
pr = per_cpu(processors, i);
- if (unlikely(!dev_pm_qos_request_active(&pr->thermal_req)))
+ if (unlikely(!freq_qos_request_active(&pr->thermal_req)))
continue;
policy = cpufreq_cpu_get(i);
@@ -116,7 +116,7 @@ static int cpufreq_set_cur_state(unsigned int cpu, int state)
cpufreq_cpu_put(policy);
- ret = dev_pm_qos_update_request(&pr->thermal_req, max_freq);
+ ret = freq_qos_update_request(&pr->thermal_req, max_freq);
if (ret < 0) {
pr_warn("Failed to update thermal freq constraint: CPU%d (%d)\n",
pr->id, ret);
@@ -125,26 +125,36 @@ static int cpufreq_set_cur_state(unsigned int cpu, int state)
return 0;
}
-void acpi_thermal_cpufreq_init(int cpu)
+void acpi_thermal_cpufreq_init(struct cpufreq_policy *policy)
{
- struct acpi_processor *pr = per_cpu(processors, cpu);
- int ret;
-
- ret = dev_pm_qos_add_request(get_cpu_device(cpu),
- &pr->thermal_req, DEV_PM_QOS_MAX_FREQUENCY,
- INT_MAX);
- if (ret < 0) {
- pr_err("Failed to add freq constraint for CPU%d (%d)\n", cpu,
- ret);
- return;
+ unsigned int cpu;
+
+ for_each_cpu(cpu, policy->related_cpus) {
+ struct acpi_processor *pr = per_cpu(processors, cpu);
+ int ret;
+
+ if (!pr)
+ continue;
+
+ ret = freq_qos_add_request(&policy->constraints,
+ &pr->thermal_req,
+ FREQ_QOS_MAX, INT_MAX);
+ if (ret < 0)
+ pr_err("Failed to add freq constraint for CPU%d (%d)\n",
+ cpu, ret);
}
}
-void acpi_thermal_cpufreq_exit(int cpu)
+void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy)
{
- struct acpi_processor *pr = per_cpu(processors, cpu);
+ unsigned int cpu;
+
+ for_each_cpu(cpu, policy->related_cpus) {
+ struct acpi_processor *pr = per_cpu(processors, cpu);
- dev_pm_qos_remove_request(&pr->thermal_req);
+ if (pr)
+ freq_qos_remove_request(&pr->thermal_req);
+ }
}
#else /* ! CONFIG_CPU_FREQ */
static int cpufreq_get_max_state(unsigned int cpu)
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 9fa77d72ef27..2af937a8b1c5 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -362,19 +362,6 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
},
},
/*
- * https://bugzilla.kernel.org/show_bug.cgi?id=196907
- * Some Dell XPS13 9360 cannot do suspend-to-idle using the Low Power
- * S0 Idle firmware interface.
- */
- {
- .callback = init_default_s3,
- .ident = "Dell XPS13 9360",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9360"),
- },
- },
- /*
* ThinkPad X1 Tablet(2016) cannot do suspend-to-idle using
* the Low Power S0 Idle firmware interface (see
* https://bugzilla.kernel.org/show_bug.cgi?id=199057).
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index f39f075abff9..fe1523664816 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -409,9 +409,11 @@ static int amba_device_try_add(struct amba_device *dev, struct resource *parent)
*/
rstc = of_reset_control_array_get_optional_shared(dev->dev.of_node);
if (IS_ERR(rstc)) {
- if (PTR_ERR(rstc) != -EPROBE_DEFER)
- dev_err(&dev->dev, "Can't get amba reset!\n");
- return PTR_ERR(rstc);
+ ret = PTR_ERR(rstc);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&dev->dev, "can't get reset: %d\n",
+ ret);
+ goto err_reset;
}
reset_control_deassert(rstc);
reset_control_put(rstc);
@@ -472,6 +474,12 @@ static int amba_device_try_add(struct amba_device *dev, struct resource *parent)
release_resource(&dev->res);
err_out:
return ret;
+
+ err_reset:
+ amba_put_disable_pclk(dev);
+ iounmap(tmp);
+ dev_pm_domain_detach(&dev->dev, true);
+ goto err_release;
}
/*
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index c0a491277aca..265d9dd46a5e 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -57,6 +57,7 @@
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
+#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
@@ -66,6 +67,7 @@
#include <linux/task_work.h>
#include <uapi/linux/android/binder.h>
+#include <uapi/linux/android/binderfs.h>
#include <asm/cacheflush.h>
@@ -95,10 +97,6 @@ DEFINE_SHOW_ATTRIBUTE(proc);
#define SZ_1K 0x400
#endif
-#ifndef SZ_4M
-#define SZ_4M 0x400000
-#endif
-
#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
enum {
@@ -2876,7 +2874,7 @@ static void binder_transaction(struct binder_proc *proc,
e->target_handle = tr->target.handle;
e->data_size = tr->data_size;
e->offsets_size = tr->offsets_size;
- e->context_name = proc->context->name;
+ strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
if (reply) {
binder_inner_proc_lock(proc);
@@ -5175,9 +5173,6 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
if (proc->tsk != current->group_leader)
return -EINVAL;
- if ((vma->vm_end - vma->vm_start) > SZ_4M)
- vma->vm_end = vma->vm_start + SZ_4M;
-
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
"%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
__func__, proc->pid, vma->vm_start, vma->vm_end,
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 6d79a1b0d446..eb76a823fbb2 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -22,6 +22,7 @@
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
+#include <linux/sizes.h>
#include "binder_alloc.h"
#include "binder_trace.h"
@@ -156,7 +157,7 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked(
}
/**
- * binder_alloc_buffer_lookup() - get buffer given user ptr
+ * binder_alloc_prepare_to_free() - get buffer given user ptr
* @alloc: binder_alloc for this proc
* @user_ptr: User pointer to buffer data
*
@@ -689,7 +690,9 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
alloc->buffer = (void __user *)vma->vm_start;
mutex_unlock(&binder_alloc_mmap_lock);
- alloc->pages = kcalloc((vma->vm_end - vma->vm_start) / PAGE_SIZE,
+ alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
+ SZ_4M);
+ alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
sizeof(alloc->pages[0]),
GFP_KERNEL);
if (alloc->pages == NULL) {
@@ -697,7 +700,6 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
failure_string = "alloc page array";
goto err_alloc_pages_failed;
}
- alloc->buffer_size = vma->vm_end - vma->vm_start;
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
if (!buffer) {
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
index bd47f7f72075..ae991097d14d 100644
--- a/drivers/android/binder_internal.h
+++ b/drivers/android/binder_internal.h
@@ -130,7 +130,7 @@ struct binder_transaction_log_entry {
int return_error_line;
uint32_t return_error;
uint32_t return_error_param;
- const char *context_name;
+ char context_name[BINDERFS_MAX_NAME + 1];
};
struct binder_transaction_log {
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index dd92faf197d5..05c2b32dcc4d 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1600,7 +1600,9 @@ static void ahci_intel_pcs_quirk(struct pci_dev *pdev, struct ahci_host_priv *hp
*/
if (!id || id->vendor != PCI_VENDOR_ID_INTEL)
return;
- if (((enum board_ids) id->driver_data) < board_ahci_pcs7)
+
+ /* Skip applying the quirk on Denverton and beyond */
+ if (((enum board_ids) id->driver_data) >= board_ahci_pcs7)
return;
/*
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index e742780950de..8befce036af8 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -153,17 +153,13 @@ int ahci_platform_enable_regulators(struct ahci_host_priv *hpriv)
{
int rc, i;
- if (hpriv->ahci_regulator) {
- rc = regulator_enable(hpriv->ahci_regulator);
- if (rc)
- return rc;
- }
+ rc = regulator_enable(hpriv->ahci_regulator);
+ if (rc)
+ return rc;
- if (hpriv->phy_regulator) {
- rc = regulator_enable(hpriv->phy_regulator);
- if (rc)
- goto disable_ahci_pwrs;
- }
+ rc = regulator_enable(hpriv->phy_regulator);
+ if (rc)
+ goto disable_ahci_pwrs;
for (i = 0; i < hpriv->nports; i++) {
if (!hpriv->target_pwrs[i])
@@ -181,11 +177,9 @@ disable_target_pwrs:
if (hpriv->target_pwrs[i])
regulator_disable(hpriv->target_pwrs[i]);
- if (hpriv->phy_regulator)
- regulator_disable(hpriv->phy_regulator);
+ regulator_disable(hpriv->phy_regulator);
disable_ahci_pwrs:
- if (hpriv->ahci_regulator)
- regulator_disable(hpriv->ahci_regulator);
+ regulator_disable(hpriv->ahci_regulator);
return rc;
}
EXPORT_SYMBOL_GPL(ahci_platform_enable_regulators);
@@ -207,10 +201,8 @@ void ahci_platform_disable_regulators(struct ahci_host_priv *hpriv)
regulator_disable(hpriv->target_pwrs[i]);
}
- if (hpriv->ahci_regulator)
- regulator_disable(hpriv->ahci_regulator);
- if (hpriv->phy_regulator)
- regulator_disable(hpriv->phy_regulator);
+ regulator_disable(hpriv->ahci_regulator);
+ regulator_disable(hpriv->phy_regulator);
}
EXPORT_SYMBOL_GPL(ahci_platform_disable_regulators);
/**
@@ -359,7 +351,7 @@ static int ahci_platform_get_regulator(struct ahci_host_priv *hpriv, u32 port,
struct regulator *target_pwr;
int rc = 0;
- target_pwr = regulator_get_optional(dev, "target");
+ target_pwr = regulator_get(dev, "target");
if (!IS_ERR(target_pwr))
hpriv->target_pwrs[port] = target_pwr;
@@ -436,16 +428,14 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
hpriv->clks[i] = clk;
}
- hpriv->ahci_regulator = devm_regulator_get_optional(dev, "ahci");
+ hpriv->ahci_regulator = devm_regulator_get(dev, "ahci");
if (IS_ERR(hpriv->ahci_regulator)) {
rc = PTR_ERR(hpriv->ahci_regulator);
- if (rc == -EPROBE_DEFER)
+ if (rc != 0)
goto err_out;
- rc = 0;
- hpriv->ahci_regulator = NULL;
}
- hpriv->phy_regulator = devm_regulator_get_optional(dev, "phy");
+ hpriv->phy_regulator = devm_regulator_get(dev, "phy");
if (IS_ERR(hpriv->phy_regulator)) {
rc = PTR_ERR(hpriv->phy_regulator);
if (rc == -EPROBE_DEFER)
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 76d0f9de767b..58e09ffe8b9c 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -4791,27 +4791,6 @@ void ata_scsi_hotplug(struct work_struct *work)
return;
}
- /*
- * XXX - UGLY HACK
- *
- * The block layer suspend/resume path is fundamentally broken due
- * to freezable kthreads and workqueue and may deadlock if a block
- * device gets removed while resume is in progress. I don't know
- * what the solution is short of removing freezable kthreads and
- * workqueues altogether.
- *
- * The following is an ugly hack to avoid kicking off device
- * removal while freezer is active. This is a joke but does avoid
- * this particular deadlock scenario.
- *
- * https://bugzilla.kernel.org/show_bug.cgi?id=62801
- * http://marc.info/?l=linux-kernel&m=138695698516487
- */
-#ifdef CONFIG_FREEZER
- while (pm_freezing)
- msleep(10);
-#endif
-
DPRINTK("ENTER\n");
mutex_lock(&ap->scsi_scan_mutex);
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index 2bbab0230aeb..aad00d2b28f5 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -1070,7 +1070,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
RC_FLAGS_BFPS_BFP * bfp |
RC_FLAGS_RXBM_PSB, 0, 0);
break;
- };
+ }
if (IS_FS50 (dev)) {
submit_command (dev, &dev->hp_txq,
QE_CMD_REG_WR | QE_CMD_IMM_INQ,
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 2db62d98e395..7bd9cd366d41 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -9,6 +9,7 @@
*/
#include <linux/acpi.h>
+#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fwnode.h>
@@ -3179,6 +3180,8 @@ void device_shutdown(void)
wait_for_device_probe();
device_block_probing();
+ cpufreq_suspend();
+
spin_lock(&devices_kset->list_lock);
/*
* Walk the devices list backward, shutting down each in turn.
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 6bea4f3f8040..55907c27075b 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -540,6 +540,9 @@ static ssize_t soft_offline_page_store(struct device *dev,
pfn >>= PAGE_SHIFT;
if (!pfn_valid(pfn))
return -ENXIO;
+ /* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */
+ if (!pfn_to_online_page(pfn))
+ return -EIO;
ret = soft_offline_page(pfn_to_page(pfn), 0);
return ret == 0 ? count : ret;
}
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index b6c6c7d97d5b..b230beb6ccb4 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -241,12 +241,8 @@ struct resource *platform_get_resource_byname(struct platform_device *dev,
}
EXPORT_SYMBOL_GPL(platform_get_resource_byname);
-/**
- * platform_get_irq_byname - get an IRQ for a device by name
- * @dev: platform device
- * @name: IRQ name
- */
-int platform_get_irq_byname(struct platform_device *dev, const char *name)
+static int __platform_get_irq_byname(struct platform_device *dev,
+ const char *name)
{
struct resource *r;
@@ -262,12 +258,48 @@ int platform_get_irq_byname(struct platform_device *dev, const char *name)
if (r)
return r->start;
- dev_err(&dev->dev, "IRQ %s not found\n", name);
return -ENXIO;
}
+
+/**
+ * platform_get_irq_byname - get an IRQ for a device by name
+ * @dev: platform device
+ * @name: IRQ name
+ *
+ * Get an IRQ like platform_get_irq(), but by name rather than by index.
+ *
+ * Return: IRQ number on success, negative error number on failure.
+ */
+int platform_get_irq_byname(struct platform_device *dev, const char *name)
+{
+ int ret;
+
+ ret = __platform_get_irq_byname(dev, name);
+ if (ret < 0 && ret != -EPROBE_DEFER)
+ dev_err(&dev->dev, "IRQ %s not found\n", name);
+
+ return ret;
+}
EXPORT_SYMBOL_GPL(platform_get_irq_byname);
/**
+ * platform_get_irq_byname_optional - get an optional IRQ for a device by name
+ * @dev: platform device
+ * @name: IRQ name
+ *
+ * Get an optional IRQ by name like platform_get_irq_byname(), except that it
+ * does not print an error message if the IRQ cannot be obtained.
+ *
+ * Return: IRQ number on success, negative error number on failure.
+ */
+int platform_get_irq_byname_optional(struct platform_device *dev,
+ const char *name)
+{
+ return __platform_get_irq_byname(dev, name);
+}
+EXPORT_SYMBOL_GPL(platform_get_irq_byname_optional);
+
+/**
 * platform_add_devices - add a number of platform devices
* @devs: array of platform devices to add
* @num: number of platform devices in array
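The new platform_get_irq_byname_optional() behaves like platform_get_irq_byname() but stays silent when the IRQ is absent, so drivers with a genuinely optional interrupt line no longer log spurious errors. A hedged usage sketch; the "main" and "wakeup" names are illustrative, not taken from this patch:

    /* Sketch: the required IRQ logs on failure, the optional one is quiet. */
    irq = platform_get_irq_byname(pdev, "main");
    if (irq < 0)
            return irq;

    wakeirq = platform_get_irq_byname_optional(pdev, "wakeup");
    if (wakeirq < 0)
            wakeirq = 0;    /* absent: carry on without wakeup support */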
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 6c90fd7e2ff8..350dcafd751f 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -115,20 +115,10 @@ s32 dev_pm_qos_read_value(struct device *dev, enum dev_pm_qos_req_type type)
spin_lock_irqsave(&dev->power.lock, flags);
- switch (type) {
- case DEV_PM_QOS_RESUME_LATENCY:
+ if (type == DEV_PM_QOS_RESUME_LATENCY) {
ret = IS_ERR_OR_NULL(qos) ? PM_QOS_RESUME_LATENCY_NO_CONSTRAINT
: pm_qos_read_value(&qos->resume_latency);
- break;
- case DEV_PM_QOS_MIN_FREQUENCY:
- ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE
- : pm_qos_read_value(&qos->min_frequency);
- break;
- case DEV_PM_QOS_MAX_FREQUENCY:
- ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE
- : pm_qos_read_value(&qos->max_frequency);
- break;
- default:
+ } else {
WARN_ON(1);
ret = 0;
}
@@ -169,14 +159,6 @@ static int apply_constraint(struct dev_pm_qos_request *req,
req->dev->power.set_latency_tolerance(req->dev, value);
}
break;
- case DEV_PM_QOS_MIN_FREQUENCY:
- ret = pm_qos_update_target(&qos->min_frequency,
- &req->data.pnode, action, value);
- break;
- case DEV_PM_QOS_MAX_FREQUENCY:
- ret = pm_qos_update_target(&qos->max_frequency,
- &req->data.pnode, action, value);
- break;
case DEV_PM_QOS_FLAGS:
ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
action, value);
@@ -227,24 +209,6 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
c->type = PM_QOS_MIN;
- c = &qos->min_frequency;
- plist_head_init(&c->list);
- c->target_value = PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE;
- c->default_value = PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE;
- c->no_constraint_value = PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE;
- c->type = PM_QOS_MAX;
- c->notifiers = ++n;
- BLOCKING_INIT_NOTIFIER_HEAD(n);
-
- c = &qos->max_frequency;
- plist_head_init(&c->list);
- c->target_value = PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
- c->default_value = PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
- c->no_constraint_value = PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
- c->type = PM_QOS_MIN;
- c->notifiers = ++n;
- BLOCKING_INIT_NOTIFIER_HEAD(n);
-
INIT_LIST_HEAD(&qos->flags.list);
spin_lock_irq(&dev->power.lock);
@@ -305,18 +269,6 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
memset(req, 0, sizeof(*req));
}
- c = &qos->min_frequency;
- plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
- apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE);
- memset(req, 0, sizeof(*req));
- }
-
- c = &qos->max_frequency;
- plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
- apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
- memset(req, 0, sizeof(*req));
- }
-
f = &qos->flags;
list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
@@ -428,8 +380,6 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
switch(req->type) {
case DEV_PM_QOS_RESUME_LATENCY:
case DEV_PM_QOS_LATENCY_TOLERANCE:
- case DEV_PM_QOS_MIN_FREQUENCY:
- case DEV_PM_QOS_MAX_FREQUENCY:
curr_value = req->data.pnode.prio;
break;
case DEV_PM_QOS_FLAGS:
@@ -557,14 +507,6 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier,
ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
notifier);
break;
- case DEV_PM_QOS_MIN_FREQUENCY:
- ret = blocking_notifier_chain_register(dev->power.qos->min_frequency.notifiers,
- notifier);
- break;
- case DEV_PM_QOS_MAX_FREQUENCY:
- ret = blocking_notifier_chain_register(dev->power.qos->max_frequency.notifiers,
- notifier);
- break;
default:
WARN_ON(1);
ret = -EINVAL;
@@ -604,14 +546,6 @@ int dev_pm_qos_remove_notifier(struct device *dev,
ret = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
notifier);
break;
- case DEV_PM_QOS_MIN_FREQUENCY:
- ret = blocking_notifier_chain_unregister(dev->power.qos->min_frequency.notifiers,
- notifier);
- break;
- case DEV_PM_QOS_MAX_FREQUENCY:
- ret = blocking_notifier_chain_unregister(dev->power.qos->max_frequency.notifiers,
- notifier);
- break;
default:
WARN_ON(1);
ret = -EINVAL;
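With DEV_PM_QOS_MIN_FREQUENCY and DEV_PM_QOS_MAX_FREQUENCY gone, frequency notifiers move to the freq_qos side as well. A sketch under the assumption that freq_qos_add_notifier() and freq_qos_remove_notifier() are available from include/linux/pm_qos.h, as introduced alongside this series:

    static int max_freq_cb(struct notifier_block *nb, unsigned long freq,
                           void *data)
    {
            pr_info("effective max is now %lu kHz\n", freq);
            return NOTIFY_OK;
    }

    static struct notifier_block max_freq_nb = {
            .notifier_call = max_freq_cb,
    };

    /* policy obtained elsewhere, e.g. via cpufreq_cpu_get() */
    freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MAX, &max_freq_nb);
    /* ... */
    freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MAX, &max_freq_nb);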
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 1410fa893653..f6f77eaa7217 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -994,6 +994,16 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
blk_queue_write_cache(lo->lo_queue, true, false);
+ if (io_is_direct(lo->lo_backing_file) && inode->i_sb->s_bdev) {
+ /* In case of direct I/O, match underlying block size */
+ unsigned short bsize = bdev_logical_block_size(
+ inode->i_sb->s_bdev);
+
+ blk_queue_logical_block_size(lo->lo_queue, bsize);
+ blk_queue_physical_block_size(lo->lo_queue, bsize);
+ blk_queue_io_min(lo->lo_queue, bsize);
+ }
+
loop_update_rotational(lo);
loop_update_dio(lo);
set_capacity(lo->lo_disk, size);
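With direct I/O over a block device, the loop queue now advertises the backing device's logical block size instead of the 512-byte default. A hypothetical userspace check, assuming /dev/loop0 was set up with direct I/O on a 4K-sector backing device:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>

    int main(void)
    {
            int lbs = 0;
            int fd = open("/dev/loop0", O_RDONLY);

            if (fd < 0)
                    return 1;
            /* BLKSSZGET reports the queue's logical block size. */
            if (ioctl(fd, BLKSSZGET, &lbs) == 0)
                    printf("logical block size: %d\n", lbs); /* e.g. 4096 */
            close(fd);
            return 0;
    }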
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index ac07e8c94c79..a94ee45440b3 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -248,8 +248,8 @@ static void nbd_put(struct nbd_device *nbd)
if (refcount_dec_and_mutex_lock(&nbd->refs,
&nbd_index_mutex)) {
idr_remove(&nbd_index_idr, nbd->index);
- mutex_unlock(&nbd_index_mutex);
nbd_dev_remove(nbd);
+ mutex_unlock(&nbd_index_mutex);
}
}
@@ -385,17 +385,16 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
struct nbd_device *nbd = cmd->nbd;
struct nbd_config *config;
+ if (!mutex_trylock(&cmd->lock))
+ return BLK_EH_RESET_TIMER;
+
if (!refcount_inc_not_zero(&nbd->config_refs)) {
cmd->status = BLK_STS_TIMEOUT;
+ mutex_unlock(&cmd->lock);
goto done;
}
config = nbd->config;
- if (!mutex_trylock(&cmd->lock)) {
- nbd_config_put(nbd);
- return BLK_EH_RESET_TIMER;
- }
-
if (config->num_connections > 1) {
dev_err_ratelimited(nbd_to_dev(nbd),
"Connection timed out, retrying (%d/%d alive)\n",
@@ -711,6 +710,12 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
ret = -ENOENT;
goto out;
}
+ if (cmd->status != BLK_STS_OK) {
+ dev_err(disk_to_dev(nbd->disk), "Command already handled %p\n",
+ req);
+ ret = -ENOENT;
+ goto out;
+ }
if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) {
dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n",
req);
@@ -792,7 +797,10 @@ static bool nbd_clear_req(struct request *req, void *data, bool reserved)
{
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
+ mutex_lock(&cmd->lock);
cmd->status = BLK_STS_IOERR;
+ mutex_unlock(&cmd->lock);
+
blk_mq_complete_request(req);
return true;
}
@@ -972,6 +980,25 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
return ret;
}
+static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd,
+ int *err)
+{
+ struct socket *sock;
+
+ *err = 0;
+ sock = sockfd_lookup(fd, err);
+ if (!sock)
+ return NULL;
+
+ if (sock->ops->shutdown == sock_no_shutdown) {
+ dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
+ *err = -EINVAL;
+ return NULL;
+ }
+
+ return sock;
+}
+
static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
bool netlink)
{
@@ -981,7 +1008,7 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
struct nbd_sock *nsock;
int err;
- sock = sockfd_lookup(arg, &err);
+ sock = nbd_get_socket(nbd, arg, &err);
if (!sock)
return err;
@@ -1033,7 +1060,7 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
int i;
int err;
- sock = sockfd_lookup(arg, &err);
+ sock = nbd_get_socket(nbd, arg, &err);
if (!sock)
return err;
diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c
index eabc116832a7..3d7fdea872f8 100644
--- a/drivers/block/null_blk_zoned.c
+++ b/drivers/block/null_blk_zoned.c
@@ -142,8 +142,7 @@ static blk_status_t null_zone_reset(struct nullb_cmd *cmd, sector_t sector)
zone->wp = zone->start;
break;
default:
- cmd->error = BLK_STS_NOTSUPP;
- break;
+ return BLK_STS_NOTSUPP;
}
return BLK_STS_OK;
}
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 7c4350c0fb77..39136675dae5 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -6639,10 +6639,13 @@ static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
ret = wait_for_completion_killable_timeout(&rbd_dev->acquire_wait,
ceph_timeout_jiffies(rbd_dev->opts->lock_timeout));
- if (ret > 0)
+ if (ret > 0) {
ret = rbd_dev->acquire_err;
- else if (!ret)
- ret = -ETIMEDOUT;
+ } else {
+ cancel_delayed_work_sync(&rbd_dev->lock_dwork);
+ if (!ret)
+ ret = -ETIMEDOUT;
+ }
if (ret) {
rbd_warn(rbd_dev, "failed to acquire exclusive lock: %ld", ret);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index d58a359a6622..4285e75e52c3 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -413,13 +413,14 @@ static void reset_bdev(struct zram *zram)
static ssize_t backing_dev_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct file *file;
struct zram *zram = dev_to_zram(dev);
- struct file *file = zram->backing_dev;
char *p;
ssize_t ret;
down_read(&zram->init_lock);
- if (!zram->backing_dev) {
+ file = zram->backing_dev;
+ if (!file) {
memcpy(buf, "none\n", 5);
up_read(&zram->init_lock);
return 5;
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index aae665a3a254..f7aa2dc1ff85 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -380,17 +380,6 @@ config BT_ATH3K
Say Y here to compile support for "Atheros firmware download driver"
into the kernel or say M to compile it as module (ath3k).
-config BT_WILINK
- tristate "Texas Instruments WiLink7 driver"
- depends on TI_ST
- help
- This enables the Bluetooth driver for Texas Instrument's BT/FM/GPS
- combo devices. This makes use of shared transport line discipline
- core driver to communicate with the BT core of the combo chip.
-
- Say Y here to compile support for Texas Instrument's WiLink7 driver
- into the kernel or say M to compile it as module (btwilink).
-
config BT_MTKSDIO
tristate "MediaTek HCI SDIO driver"
depends on MMC
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index 34887b9b3a85..1a58a3ae142c 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -19,7 +19,6 @@ obj-$(CONFIG_BT_INTEL) += btintel.o
obj-$(CONFIG_BT_ATH3K) += ath3k.o
obj-$(CONFIG_BT_MRVL) += btmrvl.o
obj-$(CONFIG_BT_MRVL_SDIO) += btmrvl_sdio.o
-obj-$(CONFIG_BT_WILINK) += btwilink.o
obj-$(CONFIG_BT_MTKSDIO) += btmtksdio.o
obj-$(CONFIG_BT_MTKUART) += btmtkuart.o
obj-$(CONFIG_BT_QCOMSMD) += btqcomsmd.o
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index bb99c8653aab..62e781a18bf0 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -709,6 +709,51 @@ done:
}
EXPORT_SYMBOL_GPL(btintel_download_firmware);
+void btintel_reset_to_bootloader(struct hci_dev *hdev)
+{
+ struct intel_reset params;
+ struct sk_buff *skb;
+
+ /* Send the Intel Reset command. This results in a
+ * re-enumeration of the BT controller.
+ *
+ * Intel Reset parameter description:
+ * reset_type : 0x00 (Soft reset),
+ * 0x01 (Hard reset)
+ * patch_enable : 0x00 (Do not enable),
+ * 0x01 (Enable)
+ * ddc_reload : 0x00 (Do not reload),
+ * 0x01 (Reload)
+ * boot_option : 0x00 (Current image),
+ * 0x01 (Specified boot address)
+ * boot_param : Boot address
+ */
+ params.reset_type = 0x01;
+ params.patch_enable = 0x01;
+ params.ddc_reload = 0x01;
+ params.boot_option = 0x00;
+ params.boot_param = cpu_to_le32(0x00000000);
+
+ skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(params),
+ &params, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "FW download error recovery failed (%ld)",
+ PTR_ERR(skb));
+ return;
+ }
+ bt_dev_info(hdev, "Intel reset sent to retry FW download");
+ kfree_skb(skb);
+
+ /* Current Intel BT controllers (ThP/JfP) hold the USB reset
+ * lines for 2ms when they receive Intel Reset in bootloader mode,
+ * whereas upcoming Intel BT controllers will hold USB reset for
+ * 150ms. To keep the delay generic, 150ms is chosen here.
+ */
+ msleep(150);
+}
+EXPORT_SYMBOL_GPL(btintel_reset_to_bootloader);
+
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth support for Intel devices ver " VERSION);
MODULE_VERSION(VERSION);
diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h
index 3d846190f2bf..a69ea8a87b9b 100644
--- a/drivers/bluetooth/btintel.h
+++ b/drivers/bluetooth/btintel.h
@@ -87,6 +87,7 @@ int btintel_read_boot_params(struct hci_dev *hdev,
struct intel_boot_params *params);
int btintel_download_firmware(struct hci_dev *dev, const struct firmware *fw,
u32 *boot_param);
+void btintel_reset_to_bootloader(struct hci_dev *hdev);
#else
static inline int btintel_check_bdaddr(struct hci_dev *hdev)
@@ -181,4 +182,8 @@ static inline int btintel_download_firmware(struct hci_dev *dev,
{
return -EOPNOTSUPP;
}
+
+static inline void btintel_reset_to_bootloader(struct hci_dev *hdev)
+{
+}
#endif
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index bf3c02be6930..ae9a2047f242 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -418,7 +418,7 @@ static int rtl_download_firmware(struct hci_dev *hdev,
if (IS_ERR(skb)) {
rtl_dev_err(hdev, "download fw command failed (%ld)",
PTR_ERR(skb));
- ret = -PTR_ERR(skb);
+ ret = PTR_ERR(skb);
goto out;
}
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index a9c35ebb30f8..04a139e7793f 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -2182,8 +2182,11 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
* loaded.
*/
err = btintel_read_version(hdev, &ver);
- if (err)
+ if (err) {
+ bt_dev_err(hdev, "Intel Read version failed (%d)", err);
+ btintel_reset_to_bootloader(hdev);
return err;
+ }
/* The hardware platform number has a fixed value of 0x37 and
* for now only accept this single value.
@@ -2326,9 +2329,13 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
/* Start firmware downloading and get boot parameter */
err = btintel_download_firmware(hdev, fw, &boot_param);
- if (err < 0)
+ if (err < 0) {
+ /* When FW download fails, send Intel Reset to retry
+ * FW download.
+ */
+ btintel_reset_to_bootloader(hdev);
goto done;
-
+ }
set_bit(BTUSB_FIRMWARE_LOADED, &data->flags);
bt_dev_info(hdev, "Waiting for firmware download to complete");
@@ -2355,6 +2362,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
if (err) {
bt_dev_err(hdev, "Firmware loading timeout");
err = -ETIMEDOUT;
+ btintel_reset_to_bootloader(hdev);
goto done;
}
@@ -2381,8 +2389,11 @@ done:
set_bit(BTUSB_BOOTING, &data->flags);
err = btintel_send_intel_reset(hdev, boot_param);
- if (err)
+ if (err) {
+ bt_dev_err(hdev, "Intel Soft Reset failed (%d)", err);
+ btintel_reset_to_bootloader(hdev);
return err;
+ }
/* The bootloader will not indicate when the device is ready. This
* is done by the operational firmware sending bootup notification.
@@ -2404,6 +2415,7 @@ done:
if (err) {
bt_dev_err(hdev, "Device boot timeout");
+ btintel_reset_to_bootloader(hdev);
return -ETIMEDOUT;
}
@@ -2432,6 +2444,13 @@ done:
*/
btintel_set_event_mask(hdev, false);
+ /* Read the Intel version information after loading the FW */
+ err = btintel_read_version(hdev, &ver);
+ if (err)
+ return err;
+
+ btintel_version_info(hdev, &ver);
+
return 0;
}
@@ -2489,8 +2508,6 @@ static int btusb_shutdown_intel_new(struct hci_dev *hdev)
return 0;
}
-#ifdef CONFIG_BT_HCIBTUSB_MTK
-
#define FIRMWARE_MT7663 "mediatek/mt7663pr2h.bin"
#define FIRMWARE_MT7668 "mediatek/mt7668pr2h.bin"
@@ -3051,7 +3068,6 @@ static int btusb_mtk_shutdown(struct hci_dev *hdev)
MODULE_FIRMWARE(FIRMWARE_MT7663);
MODULE_FIRMWARE(FIRMWARE_MT7668);
-#endif
#ifdef CONFIG_PM
/* Configure an out-of-band gpio as wake-up pin, if specified in device tree */
@@ -3411,7 +3427,6 @@ static int btusb_setup_qca(struct hci_dev *hdev)
return 0;
}
-#ifdef CONFIG_BT_HCIBTUSB_BCM
static inline int __set_diag_interface(struct hci_dev *hdev)
{
struct btusb_data *data = hci_get_drvdata(hdev);
@@ -3498,7 +3513,6 @@ static int btusb_bcm_set_diag(struct hci_dev *hdev, bool enable)
return submit_or_queue_tx_urb(hdev, urb);
}
-#endif
#ifdef CONFIG_PM
static irqreturn_t btusb_oob_wake_handler(int irq, void *priv)
@@ -3724,8 +3738,8 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info & BTUSB_BCM92035)
hdev->setup = btusb_setup_bcm92035;
-#ifdef CONFIG_BT_HCIBTUSB_BCM
- if (id->driver_info & BTUSB_BCM_PATCHRAM) {
+ if (IS_ENABLED(CONFIG_BT_HCIBTUSB_BCM) &&
+ (id->driver_info & BTUSB_BCM_PATCHRAM)) {
hdev->manufacturer = 15;
hdev->setup = btbcm_setup_patchram;
hdev->set_diag = btusb_bcm_set_diag;
@@ -3735,7 +3749,8 @@ static int btusb_probe(struct usb_interface *intf,
data->diag = usb_ifnum_to_if(data->udev, ifnum_base + 2);
}
- if (id->driver_info & BTUSB_BCM_APPLE) {
+ if (IS_ENABLED(CONFIG_BT_HCIBTUSB_BCM) &&
+ (id->driver_info & BTUSB_BCM_APPLE)) {
hdev->manufacturer = 15;
hdev->setup = btbcm_setup_apple;
hdev->set_diag = btusb_bcm_set_diag;
@@ -3743,7 +3758,6 @@ static int btusb_probe(struct usb_interface *intf,
/* Broadcom LM_DIAG Interface numbers are hardcoded */
data->diag = usb_ifnum_to_if(data->udev, ifnum_base + 2);
}
-#endif
if (id->driver_info & BTUSB_INTEL) {
hdev->manufacturer = 2;
@@ -3774,14 +3788,13 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info & BTUSB_MARVELL)
hdev->set_bdaddr = btusb_set_bdaddr_marvell;
-#ifdef CONFIG_BT_HCIBTUSB_MTK
- if (id->driver_info & BTUSB_MEDIATEK) {
+ if (IS_ENABLED(CONFIG_BT_HCIBTUSB_MTK) &&
+ (id->driver_info & BTUSB_MEDIATEK)) {
hdev->setup = btusb_mtk_setup;
hdev->shutdown = btusb_mtk_shutdown;
hdev->manufacturer = 70;
set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
}
-#endif
if (id->driver_info & BTUSB_SWAVE) {
set_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks);
@@ -3807,8 +3820,8 @@ static int btusb_probe(struct usb_interface *intf,
btusb_check_needs_reset_resume(intf);
}
-#ifdef CONFIG_BT_HCIBTUSB_RTL
- if (id->driver_info & BTUSB_REALTEK) {
+ if (IS_ENABLED(CONFIG_BT_HCIBTUSB_RTL) &&
+ (id->driver_info & BTUSB_REALTEK)) {
hdev->setup = btrtl_setup_realtek;
hdev->shutdown = btrtl_shutdown_realtek;
hdev->cmd_timeout = btusb_rtl_cmd_timeout;
@@ -3819,7 +3832,6 @@ static int btusb_probe(struct usb_interface *intf,
*/
set_bit(BTUSB_WAKEUP_DISABLE, &data->flags);
}
-#endif
if (id->driver_info & BTUSB_AMP) {
/* AMP controllers do not support SCO packets */
@@ -3887,15 +3899,13 @@ static int btusb_probe(struct usb_interface *intf,
goto out_free_dev;
}
-#ifdef CONFIG_BT_HCIBTUSB_BCM
- if (data->diag) {
+ if (IS_ENABLED(CONFIG_BT_HCIBTUSB_BCM) && data->diag) {
if (!usb_driver_claim_interface(&btusb_driver,
data->diag, data))
__set_diag_interface(hdev);
else
data->diag = NULL;
}
-#endif
if (enable_autosuspend)
usb_enable_autosuspend(data->udev);
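The #ifdef CONFIG_BT_HCIBTUSB_* blocks above become IS_ENABLED() checks, which keep the vendor-specific paths visible to the compiler (so they stay build-tested in every configuration) while dead-code elimination still drops them when the option is off. The pattern in miniature; CONFIG_FOO, BTUSB_FOO and foo_setup are placeholder names:

    /* The branch body must compile in all configurations; the compiler
     * removes it entirely when CONFIG_FOO=n. */
    if (IS_ENABLED(CONFIG_FOO) && (id->driver_info & BTUSB_FOO))
            hdev->setup = foo_setup;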
diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
deleted file mode 100644
index e55f06e4270f..000000000000
--- a/drivers/bluetooth/btwilink.c
+++ /dev/null
@@ -1,337 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Texas Instrument's Bluetooth Driver For Shared Transport.
- *
- * Bluetooth Driver acts as interface between HCI core and
- * TI Shared Transport Layer.
- *
- * Copyright (C) 2009-2010 Texas Instruments
- * Author: Raja Mani <raja_mani@ti.com>
- * Pavan Savoy <pavan_savoy@ti.com>
- */
-
-#include <linux/platform_device.h>
-#include <net/bluetooth/bluetooth.h>
-#include <net/bluetooth/hci_core.h>
-#include <net/bluetooth/hci.h>
-
-#include <linux/ti_wilink_st.h>
-#include <linux/module.h>
-
-/* Bluetooth Driver Version */
-#define VERSION "1.0"
-#define MAX_BT_CHNL_IDS 3
-
-/* Number of seconds to wait for registration completion
- * when ST returns PENDING status.
- */
-#define BT_REGISTER_TIMEOUT 6000 /* 6 sec */
-
-/**
- * struct ti_st - driver operation structure
- * @hdev: hci device pointer which binds to bt driver
- * @reg_status: ST registration callback status
- * @st_write: write function provided by the ST driver
- * to be used by the driver during send_frame.
- * @wait_reg_completion - completion sync between ti_st_open
- * and st_reg_completion_cb.
- */
-struct ti_st {
- struct hci_dev *hdev;
- int reg_status;
- long (*st_write) (struct sk_buff *);
- struct completion wait_reg_completion;
-};
-
-/* Increments HCI counters based on pocket ID (cmd,acl,sco) */
-static inline void ti_st_tx_complete(struct ti_st *hst, int pkt_type)
-{
- struct hci_dev *hdev = hst->hdev;
-
- /* Update HCI stat counters */
- switch (pkt_type) {
- case HCI_COMMAND_PKT:
- hdev->stat.cmd_tx++;
- break;
-
- case HCI_ACLDATA_PKT:
- hdev->stat.acl_tx++;
- break;
-
- case HCI_SCODATA_PKT:
- hdev->stat.sco_tx++;
- break;
- }
-}
-
-/* ------- Interfaces to Shared Transport ------ */
-
-/* Called by ST layer to indicate protocol registration completion
- * status.ti_st_open() function will wait for signal from this
- * API when st_register() function returns ST_PENDING.
- */
-static void st_reg_completion_cb(void *priv_data, int data)
-{
- struct ti_st *lhst = priv_data;
-
- /* Save registration status for use in ti_st_open() */
- lhst->reg_status = data;
- /* complete the wait in ti_st_open() */
- complete(&lhst->wait_reg_completion);
-}
-
-/* Called by Shared Transport layer when receive data is available */
-static long st_receive(void *priv_data, struct sk_buff *skb)
-{
- struct ti_st *lhst = priv_data;
- int err;
-
- if (!skb)
- return -EFAULT;
-
- if (!lhst) {
- kfree_skb(skb);
- return -EFAULT;
- }
-
- /* Forward skb to HCI core layer */
- err = hci_recv_frame(lhst->hdev, skb);
- if (err < 0) {
- BT_ERR("Unable to push skb to HCI core(%d)", err);
- return err;
- }
-
- lhst->hdev->stat.byte_rx += skb->len;
-
- return 0;
-}
-
-/* ------- Interfaces to HCI layer ------ */
-/* protocol structure registered with shared transport */
-static struct st_proto_s ti_st_proto[MAX_BT_CHNL_IDS] = {
- {
- .chnl_id = HCI_EVENT_PKT, /* HCI Events */
- .hdr_len = sizeof(struct hci_event_hdr),
- .offset_len_in_hdr = offsetof(struct hci_event_hdr, plen),
- .len_size = 1, /* sizeof(plen) in struct hci_event_hdr */
- .reserve = 8,
- },
- {
- .chnl_id = HCI_ACLDATA_PKT, /* ACL */
- .hdr_len = sizeof(struct hci_acl_hdr),
- .offset_len_in_hdr = offsetof(struct hci_acl_hdr, dlen),
- .len_size = 2, /* sizeof(dlen) in struct hci_acl_hdr */
- .reserve = 8,
- },
- {
- .chnl_id = HCI_SCODATA_PKT, /* SCO */
- .hdr_len = sizeof(struct hci_sco_hdr),
- .offset_len_in_hdr = offsetof(struct hci_sco_hdr, dlen),
- .len_size = 1, /* sizeof(dlen) in struct hci_sco_hdr */
- .reserve = 8,
- },
-};
-
-/* Called from HCI core to initialize the device */
-static int ti_st_open(struct hci_dev *hdev)
-{
- unsigned long timeleft;
- struct ti_st *hst;
- int err, i;
-
- BT_DBG("%s %p", hdev->name, hdev);
-
- /* provide contexts for callbacks from ST */
- hst = hci_get_drvdata(hdev);
-
- for (i = 0; i < MAX_BT_CHNL_IDS; i++) {
- ti_st_proto[i].priv_data = hst;
- ti_st_proto[i].max_frame_size = HCI_MAX_FRAME_SIZE;
- ti_st_proto[i].recv = st_receive;
- ti_st_proto[i].reg_complete_cb = st_reg_completion_cb;
-
- /* Prepare wait-for-completion handler */
- init_completion(&hst->wait_reg_completion);
- /* Reset ST registration callback status flag,
- * this value will be updated in
- * st_reg_completion_cb()
- * function whenever it called from ST driver.
- */
- hst->reg_status = -EINPROGRESS;
-
- err = st_register(&ti_st_proto[i]);
- if (!err)
- goto done;
-
- if (err != -EINPROGRESS) {
- BT_ERR("st_register failed %d", err);
- return err;
- }
-
- /* ST is busy with either protocol
- * registration or firmware download.
- */
- BT_DBG("waiting for registration "
- "completion signal from ST");
- timeleft = wait_for_completion_timeout
- (&hst->wait_reg_completion,
- msecs_to_jiffies(BT_REGISTER_TIMEOUT));
- if (!timeleft) {
- BT_ERR("Timeout(%d sec),didn't get reg "
- "completion signal from ST",
- BT_REGISTER_TIMEOUT / 1000);
- return -ETIMEDOUT;
- }
-
- /* Is ST registration callback
- * called with ERROR status?
- */
- if (hst->reg_status != 0) {
- BT_ERR("ST registration completed with invalid "
- "status %d", hst->reg_status);
- return -EAGAIN;
- }
-
-done:
- hst->st_write = ti_st_proto[i].write;
- if (!hst->st_write) {
- BT_ERR("undefined ST write function");
- for (i = 0; i < MAX_BT_CHNL_IDS; i++) {
- /* Undo registration with ST */
- err = st_unregister(&ti_st_proto[i]);
- if (err)
- BT_ERR("st_unregister() failed with "
- "error %d", err);
- hst->st_write = NULL;
- }
- return -EIO;
- }
- }
- return 0;
-}
-
-/* Close device */
-static int ti_st_close(struct hci_dev *hdev)
-{
- int err, i;
- struct ti_st *hst = hci_get_drvdata(hdev);
-
- for (i = MAX_BT_CHNL_IDS-1; i >= 0; i--) {
- err = st_unregister(&ti_st_proto[i]);
- if (err)
- BT_ERR("st_unregister(%d) failed with error %d",
- ti_st_proto[i].chnl_id, err);
- }
-
- hst->st_write = NULL;
-
- return err;
-}
-
-static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
-{
- struct ti_st *hst;
- long len;
- int pkt_type;
-
- hst = hci_get_drvdata(hdev);
-
- /* Prepend skb with frame type */
- memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
-
- BT_DBG("%s: type %d len %d", hdev->name, hci_skb_pkt_type(skb),
- skb->len);
-
- /* Insert skb to shared transport layer's transmit queue.
- * Freeing skb memory is taken care in shared transport layer,
- * so don't free skb memory here.
- */
- pkt_type = hci_skb_pkt_type(skb);
- len = hst->st_write(skb);
- if (len < 0) {
- BT_ERR("ST write failed (%ld)", len);
- /* Try Again, would only fail if UART has gone bad */
- return -EAGAIN;
- }
-
- /* ST accepted our skb. So, Go ahead and do rest */
- hdev->stat.byte_tx += len;
- ti_st_tx_complete(hst, pkt_type);
-
- return 0;
-}
-
-static int bt_ti_probe(struct platform_device *pdev)
-{
- struct ti_st *hst;
- struct hci_dev *hdev;
- int err;
-
- hst = devm_kzalloc(&pdev->dev, sizeof(struct ti_st), GFP_KERNEL);
- if (!hst)
- return -ENOMEM;
-
- /* Expose "hciX" device to user space */
- hdev = hci_alloc_dev();
- if (!hdev)
- return -ENOMEM;
-
- BT_DBG("hdev %p", hdev);
-
- hst->hdev = hdev;
- hdev->bus = HCI_UART;
- hci_set_drvdata(hdev, hst);
- hdev->open = ti_st_open;
- hdev->close = ti_st_close;
- hdev->flush = NULL;
- hdev->send = ti_st_send_frame;
-
- err = hci_register_dev(hdev);
- if (err < 0) {
- BT_ERR("Can't register HCI device error %d", err);
- hci_free_dev(hdev);
- return err;
- }
-
- BT_DBG("HCI device registered (hdev %p)", hdev);
-
- dev_set_drvdata(&pdev->dev, hst);
- return 0;
-}
-
-static int bt_ti_remove(struct platform_device *pdev)
-{
- struct hci_dev *hdev;
- struct ti_st *hst = dev_get_drvdata(&pdev->dev);
-
- if (!hst)
- return -EFAULT;
-
- BT_DBG("%s", hst->hdev->name);
-
- hdev = hst->hdev;
- ti_st_close(hdev);
- hci_unregister_dev(hdev);
-
- hci_free_dev(hdev);
-
- dev_set_drvdata(&pdev->dev, NULL);
- return 0;
-}
-
-static struct platform_driver btwilink_driver = {
- .probe = bt_ti_probe,
- .remove = bt_ti_remove,
- .driver = {
- .name = "btwilink",
- },
-};
-
-module_platform_driver(btwilink_driver);
-
-/* ------ Module Info ------ */
-
-MODULE_AUTHOR("Raja Mani <raja_mani@ti.com>");
-MODULE_DESCRIPTION("Bluetooth Driver for TI Shared Transport" VERSION);
-MODULE_VERSION(VERSION);
-MODULE_LICENSE("GPL");
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 7646636f2d18..0f73f6a686cb 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -445,9 +445,11 @@ static int bcm_open(struct hci_uart *hu)
out:
if (bcm->dev) {
+ hci_uart_set_flow_control(hu, true);
hu->init_speed = bcm->dev->init_speed;
hu->oper_speed = bcm->dev->oper_speed;
err = bcm_gpio_set_power(bcm->dev, true);
+ hci_uart_set_flow_control(hu, false);
if (err)
goto err_unset_hu;
}
diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
index 285706618f8a..d9a4c6c691e0 100644
--- a/drivers/bluetooth/hci_ll.c
+++ b/drivers/bluetooth/hci_ll.c
@@ -621,13 +621,6 @@ static int ll_setup(struct hci_uart *hu)
serdev_device_set_flow_control(serdev, true);
- if (hu->oper_speed)
- speed = hu->oper_speed;
- else if (hu->proto->oper_speed)
- speed = hu->proto->oper_speed;
- else
- speed = 0;
-
do {
/* Reset the Bluetooth device */
gpiod_set_value_cansleep(lldev->enable_gpio, 0);
@@ -639,20 +632,6 @@ static int ll_setup(struct hci_uart *hu)
return err;
}
- if (speed) {
- __le32 speed_le = cpu_to_le32(speed);
- struct sk_buff *skb;
-
- skb = __hci_cmd_sync(hu->hdev,
- HCI_VS_UPDATE_UART_HCI_BAUDRATE,
- sizeof(speed_le), &speed_le,
- HCI_INIT_TIMEOUT);
- if (!IS_ERR(skb)) {
- kfree_skb(skb);
- serdev_device_set_baudrate(serdev, speed);
- }
- }
-
err = download_firmware(lldev);
if (!err)
break;
@@ -677,7 +656,25 @@ static int ll_setup(struct hci_uart *hu)
}
/* Operational speed if any */
+ if (hu->oper_speed)
+ speed = hu->oper_speed;
+ else if (hu->proto->oper_speed)
+ speed = hu->proto->oper_speed;
+ else
+ speed = 0;
+
+ if (speed) {
+ __le32 speed_le = cpu_to_le32(speed);
+ struct sk_buff *skb;
+ skb = __hci_cmd_sync(hu->hdev, HCI_VS_UPDATE_UART_HCI_BAUDRATE,
+ sizeof(speed_le), &speed_le,
+ HCI_INIT_TIMEOUT);
+ if (!IS_ERR(skb)) {
+ kfree_skb(skb);
+ serdev_device_set_baudrate(serdev, speed);
+ }
+ }
return 0;
}
diff --git a/drivers/bluetooth/hci_nokia.c b/drivers/bluetooth/hci_nokia.c
index 6463350b7977..05f7f6de6863 100644
--- a/drivers/bluetooth/hci_nokia.c
+++ b/drivers/bluetooth/hci_nokia.c
@@ -520,7 +520,7 @@ static int nokia_enqueue(struct hci_uart *hu, struct sk_buff *skb)
err = skb_pad(skb, 1);
if (err)
return err;
- skb_put_u8(skb, 0x00);
+ skb_put(skb, 1);
}
skb_queue_tail(&btdev->txq, skb);
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index e3164c200eac..c591a8ba9d93 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -130,8 +130,6 @@ enum qca_speed_type {
*/
struct qca_vreg {
const char *name;
- unsigned int min_uV;
- unsigned int max_uV;
unsigned int load_uA;
};
@@ -146,8 +144,8 @@ struct qca_vreg_data {
*/
struct qca_power {
struct device *dev;
- const struct qca_vreg_data *vreg_data;
struct regulator_bulk_data *vreg_bulk;
+ int num_vregs;
bool vregs_on;
};
@@ -162,7 +160,8 @@ struct qca_serdev {
const char *firmware_name;
};
-static int qca_power_setup(struct hci_uart *hu, bool on);
+static int qca_regulator_enable(struct qca_serdev *qcadev);
+static void qca_regulator_disable(struct qca_serdev *qcadev);
static void qca_power_shutdown(struct hci_uart *hu);
static int qca_power_off(struct hci_dev *hdev);
@@ -518,7 +517,7 @@ static int qca_open(struct hci_uart *hu)
} else {
hu->init_speed = qcadev->init_speed;
hu->oper_speed = qcadev->oper_speed;
- ret = qca_power_setup(hu, true);
+ ret = qca_regulator_enable(qcadev);
if (ret) {
destroy_workqueue(qca->workqueue);
kfree_skb(qca->rx_skb);
@@ -1188,7 +1187,7 @@ static int qca_wcn3990_init(struct hci_uart *hu)
qcadev = serdev_device_get_drvdata(hu->serdev);
if (!qcadev->bt_power->vregs_on) {
serdev_device_close(hu->serdev);
- ret = qca_power_setup(hu, true);
+ ret = qca_regulator_enable(qcadev);
if (ret)
return ret;
@@ -1332,10 +1331,10 @@ static const struct hci_uart_proto qca_proto = {
static const struct qca_vreg_data qca_soc_data_wcn3990 = {
.soc_type = QCA_WCN3990,
.vregs = (struct qca_vreg []) {
- { "vddio", 1800000, 1900000, 15000 },
- { "vddxo", 1800000, 1900000, 80000 },
- { "vddrf", 1300000, 1350000, 300000 },
- { "vddch0", 3300000, 3400000, 450000 },
+ { "vddio", 15000 },
+ { "vddxo", 80000 },
+ { "vddrf", 300000 },
+ { "vddch0", 450000 },
},
.num_vregs = 4,
};
@@ -1343,19 +1342,22 @@ static const struct qca_vreg_data qca_soc_data_wcn3990 = {
static const struct qca_vreg_data qca_soc_data_wcn3998 = {
.soc_type = QCA_WCN3998,
.vregs = (struct qca_vreg []) {
- { "vddio", 1800000, 1900000, 10000 },
- { "vddxo", 1800000, 1900000, 80000 },
- { "vddrf", 1300000, 1352000, 300000 },
- { "vddch0", 3300000, 3300000, 450000 },
+ { "vddio", 10000 },
+ { "vddxo", 80000 },
+ { "vddrf", 300000 },
+ { "vddch0", 450000 },
},
.num_vregs = 4,
};
static void qca_power_shutdown(struct hci_uart *hu)
{
+ struct qca_serdev *qcadev;
struct qca_data *qca = hu->priv;
unsigned long flags;
+ qcadev = serdev_device_get_drvdata(hu->serdev);
+
/* From this point we go into power off state. But serial port is
* still open, stop queueing the IBS data and flush all the buffered
* data in skb's.
@@ -1367,7 +1369,7 @@ static void qca_power_shutdown(struct hci_uart *hu)
host_set_baudrate(hu, 2400);
qca_send_power_pulse(hu, false);
- qca_power_setup(hu, false);
+ qca_regulator_disable(qcadev);
}
static int qca_power_off(struct hci_dev *hdev)
@@ -1383,97 +1385,71 @@ static int qca_power_off(struct hci_dev *hdev)
return 0;
}
-static int qca_enable_regulator(struct qca_vreg vregs,
- struct regulator *regulator)
+static int qca_regulator_enable(struct qca_serdev *qcadev)
{
+ struct qca_power *power = qcadev->bt_power;
int ret;
- ret = regulator_set_voltage(regulator, vregs.min_uV,
- vregs.max_uV);
- if (ret)
- return ret;
+ /* Already enabled */
+ if (power->vregs_on)
+ return 0;
- if (vregs.load_uA)
- ret = regulator_set_load(regulator,
- vregs.load_uA);
+ BT_DBG("enabling %d regulators", power->num_vregs);
+ ret = regulator_bulk_enable(power->num_vregs, power->vreg_bulk);
if (ret)
return ret;
- return regulator_enable(regulator);
-
-}
-
-static void qca_disable_regulator(struct qca_vreg vregs,
- struct regulator *regulator)
-{
- regulator_disable(regulator);
- regulator_set_voltage(regulator, 0, vregs.max_uV);
- if (vregs.load_uA)
- regulator_set_load(regulator, 0);
+ power->vregs_on = true;
+ return 0;
}
-static int qca_power_setup(struct hci_uart *hu, bool on)
+static void qca_regulator_disable(struct qca_serdev *qcadev)
{
- struct qca_vreg *vregs;
- struct regulator_bulk_data *vreg_bulk;
- struct qca_serdev *qcadev;
- int i, num_vregs, ret = 0;
+ struct qca_power *power;
- qcadev = serdev_device_get_drvdata(hu->serdev);
- if (!qcadev || !qcadev->bt_power || !qcadev->bt_power->vreg_data ||
- !qcadev->bt_power->vreg_bulk)
- return -EINVAL;
-
- vregs = qcadev->bt_power->vreg_data->vregs;
- vreg_bulk = qcadev->bt_power->vreg_bulk;
- num_vregs = qcadev->bt_power->vreg_data->num_vregs;
- BT_DBG("on: %d", on);
- if (on && !qcadev->bt_power->vregs_on) {
- for (i = 0; i < num_vregs; i++) {
- ret = qca_enable_regulator(vregs[i],
- vreg_bulk[i].consumer);
- if (ret)
- break;
- }
+ if (!qcadev)
+ return;
- if (ret) {
- BT_ERR("failed to enable regulator:%s", vregs[i].name);
- /* turn off regulators which are enabled */
- for (i = i - 1; i >= 0; i--)
- qca_disable_regulator(vregs[i],
- vreg_bulk[i].consumer);
- } else {
- qcadev->bt_power->vregs_on = true;
- }
- } else if (!on && qcadev->bt_power->vregs_on) {
- /* turn off regulator in reverse order */
- i = qcadev->bt_power->vreg_data->num_vregs - 1;
- for ( ; i >= 0; i--)
- qca_disable_regulator(vregs[i], vreg_bulk[i].consumer);
+ power = qcadev->bt_power;
- qcadev->bt_power->vregs_on = false;
- }
+ /* Already disabled? */
+ if (!power->vregs_on)
+ return;
- return ret;
+ regulator_bulk_disable(power->num_vregs, power->vreg_bulk);
+ power->vregs_on = false;
}
static int qca_init_regulators(struct qca_power *qca,
const struct qca_vreg *vregs, size_t num_vregs)
{
+ struct regulator_bulk_data *bulk;
+ int ret;
int i;
- qca->vreg_bulk = devm_kcalloc(qca->dev, num_vregs,
- sizeof(struct regulator_bulk_data),
- GFP_KERNEL);
- if (!qca->vreg_bulk)
+ bulk = devm_kcalloc(qca->dev, num_vregs, sizeof(*bulk), GFP_KERNEL);
+ if (!bulk)
return -ENOMEM;
for (i = 0; i < num_vregs; i++)
- qca->vreg_bulk[i].supply = vregs[i].name;
+ bulk[i].supply = vregs[i].name;
+
+ ret = devm_regulator_bulk_get(qca->dev, num_vregs, bulk);
+ if (ret < 0)
+ return ret;
- return devm_regulator_bulk_get(qca->dev, num_vregs, qca->vreg_bulk);
+ for (i = 0; i < num_vregs; i++) {
+ ret = regulator_set_load(bulk[i].consumer, vregs[i].load_uA);
+ if (ret)
+ return ret;
+ }
+
+ qca->vreg_bulk = bulk;
+ qca->num_vregs = num_vregs;
+
+ return 0;
}
static int qca_serdev_probe(struct serdev_device *serdev)
@@ -1500,7 +1476,6 @@ static int qca_serdev_probe(struct serdev_device *serdev)
return -ENOMEM;
qcadev->bt_power->dev = &serdev->dev;
- qcadev->bt_power->vreg_data = data;
err = qca_init_regulators(qcadev->bt_power, data->vregs,
data->num_vregs);
if (err) {
diff --git a/drivers/bus/fsl-mc/dprc-driver.c b/drivers/bus/fsl-mc/dprc-driver.c
index 52c7e15143d6..c8b1c3842c1a 100644
--- a/drivers/bus/fsl-mc/dprc-driver.c
+++ b/drivers/bus/fsl-mc/dprc-driver.c
@@ -104,10 +104,8 @@ static int __fsl_mc_device_match(struct device *dev, void *data)
return fsl_mc_device_match(mc_dev, obj_desc);
}
-static struct fsl_mc_device *fsl_mc_device_lookup(struct fsl_mc_obj_desc
- *obj_desc,
- struct fsl_mc_device
- *mc_bus_dev)
+struct fsl_mc_device *fsl_mc_device_lookup(struct fsl_mc_obj_desc *obj_desc,
+ struct fsl_mc_device *mc_bus_dev)
{
struct device *dev;
diff --git a/drivers/bus/fsl-mc/dprc.c b/drivers/bus/fsl-mc/dprc.c
index 0fe3f52ae0de..602f030d84eb 100644
--- a/drivers/bus/fsl-mc/dprc.c
+++ b/drivers/bus/fsl-mc/dprc.c
@@ -554,3 +554,56 @@ int dprc_get_container_id(struct fsl_mc_io *mc_io,
return 0;
}
+
+/**
+ * dprc_get_connection() - Get connected endpoint and link status if connection
+ * exists.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @endpoint1: Endpoint 1 configuration parameters
+ * @endpoint2: Returned endpoint 2 configuration parameters
+ * @state: Returned link state:
+ * 1 - link is up;
+ * 0 - link is down;
+ * -1 - no connection (endpoint2 information is irrelevant)
+ *
+ * Return: '0' on success; -ENOTCONN if the connection does not exist.
+ */
+int dprc_get_connection(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dprc_endpoint *endpoint1,
+ struct dprc_endpoint *endpoint2,
+ int *state)
+{
+ struct dprc_cmd_get_connection *cmd_params;
+ struct dprc_rsp_get_connection *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err, i;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_CONNECTION,
+ cmd_flags,
+ token);
+ cmd_params = (struct dprc_cmd_get_connection *)cmd.params;
+ cmd_params->ep1_id = cpu_to_le32(endpoint1->id);
+ cmd_params->ep1_interface_id = cpu_to_le16(endpoint1->if_id);
+ for (i = 0; i < 16; i++)
+ cmd_params->ep1_type[i] = endpoint1->type[i];
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return -ENOTCONN;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dprc_rsp_get_connection *)cmd.params;
+ endpoint2->id = le32_to_cpu(rsp_params->ep2_id);
+ endpoint2->if_id = le16_to_cpu(rsp_params->ep2_interface_id);
+ *state = le32_to_cpu(rsp_params->state);
+ for (i = 0; i < 16; i++)
+ endpoint2->type[i] = rsp_params->ep2_type[i];
+
+ return 0;
+}
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index 5c9bf2e06552..a07cc19becdb 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -166,42 +166,52 @@ EXPORT_SYMBOL_GPL(fsl_mc_bus_type);
struct device_type fsl_mc_bus_dprc_type = {
.name = "fsl_mc_bus_dprc"
};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dprc_type);
struct device_type fsl_mc_bus_dpni_type = {
.name = "fsl_mc_bus_dpni"
};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpni_type);
struct device_type fsl_mc_bus_dpio_type = {
.name = "fsl_mc_bus_dpio"
};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpio_type);
struct device_type fsl_mc_bus_dpsw_type = {
.name = "fsl_mc_bus_dpsw"
};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpsw_type);
struct device_type fsl_mc_bus_dpbp_type = {
.name = "fsl_mc_bus_dpbp"
};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpbp_type);
struct device_type fsl_mc_bus_dpcon_type = {
.name = "fsl_mc_bus_dpcon"
};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpcon_type);
struct device_type fsl_mc_bus_dpmcp_type = {
.name = "fsl_mc_bus_dpmcp"
};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpmcp_type);
struct device_type fsl_mc_bus_dpmac_type = {
.name = "fsl_mc_bus_dpmac"
};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpmac_type);
struct device_type fsl_mc_bus_dprtc_type = {
.name = "fsl_mc_bus_dprtc"
};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dprtc_type);
struct device_type fsl_mc_bus_dpseci_type = {
.name = "fsl_mc_bus_dpseci"
};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpseci_type);
static struct device_type *fsl_mc_get_device_type(const char *type)
{
@@ -702,6 +712,39 @@ void fsl_mc_device_remove(struct fsl_mc_device *mc_dev)
}
EXPORT_SYMBOL_GPL(fsl_mc_device_remove);
+struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev)
+{
+ struct fsl_mc_device *mc_bus_dev, *endpoint;
+ struct fsl_mc_obj_desc endpoint_desc = { 0 };
+ struct dprc_endpoint endpoint1 = { 0 };
+ struct dprc_endpoint endpoint2 = { 0 };
+ int state, err;
+
+ mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
+ strcpy(endpoint1.type, mc_dev->obj_desc.type);
+ endpoint1.id = mc_dev->obj_desc.id;
+
+ err = dprc_get_connection(mc_bus_dev->mc_io, 0,
+ mc_bus_dev->mc_handle,
+ &endpoint1, &endpoint2,
+ &state);
+
+ if (err == -ENOTCONN || state == -1)
+ return ERR_PTR(-ENOTCONN);
+
+ if (err < 0) {
+ dev_err(&mc_bus_dev->dev, "dprc_get_connection() = %d\n", err);
+ return ERR_PTR(err);
+ }
+
+ strcpy(endpoint_desc.type, endpoint2.type);
+ endpoint_desc.id = endpoint2.id;
+ endpoint = fsl_mc_device_lookup(&endpoint_desc, mc_bus_dev);
+
+ return endpoint;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_get_endpoint);
+
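fsl_mc_get_endpoint() resolves the device at the other end of a DPAA2 link, returning ERR_PTR(-ENOTCONN) when nothing is connected and NULL when the peer object is not instantiated. A hedged consumer sketch; deferring probe on a missing peer is an illustrative policy choice, not mandated by this patch:

    struct fsl_mc_device *peer;

    peer = fsl_mc_get_endpoint(mc_dev);
    if (PTR_ERR(peer) == -ENOTCONN)
            return -ENODEV;         /* link permanently absent */
    if (IS_ERR_OR_NULL(peer))
            return peer ? PTR_ERR(peer) : -EPROBE_DEFER;

    dev_info(&mc_dev->dev, "connected to %s\n", dev_name(&peer->dev));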
static int parse_mc_ranges(struct device *dev,
int *paddr_cells,
int *mc_addr_cells,
diff --git a/drivers/bus/fsl-mc/fsl-mc-private.h b/drivers/bus/fsl-mc/fsl-mc-private.h
index 020fcc04ec8b..21ca8c756ee7 100644
--- a/drivers/bus/fsl-mc/fsl-mc-private.h
+++ b/drivers/bus/fsl-mc/fsl-mc-private.h
@@ -105,6 +105,8 @@ int dpmcp_reset(struct fsl_mc_io *mc_io,
#define DPRC_CMDID_GET_OBJ_REG_V2 DPRC_CMD_V2(0x15E)
#define DPRC_CMDID_SET_OBJ_IRQ DPRC_CMD(0x15F)
+#define DPRC_CMDID_GET_CONNECTION DPRC_CMD(0x16C)
+
struct dprc_cmd_open {
__le32 container_id;
};
@@ -228,6 +230,22 @@ struct dprc_cmd_set_obj_irq {
u8 obj_type[16];
};
+struct dprc_cmd_get_connection {
+ __le32 ep1_id;
+ __le16 ep1_interface_id;
+ u8 pad[2];
+ u8 ep1_type[16];
+};
+
+struct dprc_rsp_get_connection {
+ __le64 pad[3];
+ __le32 ep2_id;
+ __le16 ep2_interface_id;
+ __le16 pad1;
+ u8 ep2_type[16];
+ __le32 state;
+};
+
/*
* DPRC API for managing and querying DPAA resources
*/
@@ -392,6 +410,27 @@ int dprc_get_container_id(struct fsl_mc_io *mc_io,
u32 cmd_flags,
int *container_id);
+/**
+ * struct dprc_endpoint - Endpoint description for link connect/disconnect
+ * operations
+ * @type: Endpoint object type: NULL terminated string
+ * @id: Endpoint object ID
+ * @if_id: Interface ID; should be set for endpoints with multiple
+ * interfaces ("dpsw", "dpdmux"); for others, always set to 0
+ */
+struct dprc_endpoint {
+ char type[16];
+ int id;
+ u16 if_id;
+};
+
+int dprc_get_connection(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dprc_endpoint *endpoint1,
+ struct dprc_endpoint *endpoint2,
+ int *state);
+
/*
* Data Path Buffer Pool (DPBP) API
*/
@@ -574,4 +613,7 @@ void fsl_destroy_mc_io(struct fsl_mc_io *mc_io);
bool fsl_mc_is_root_dprc(struct device *dev);
+struct fsl_mc_device *fsl_mc_device_lookup(struct fsl_mc_obj_desc *obj_desc,
+ struct fsl_mc_device *mc_bus_dev);
+
#endif /* _FSL_MC_PRIVATE_H_ */
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index ad50efb470aa..2b6670daf7fc 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -74,6 +74,7 @@ static const char * const clock_names[SYSC_MAX_CLOCKS] = {
* @clk_disable_quirk: module specific clock disable quirk
* @reset_done_quirk: module specific reset done quirk
* @module_enable_quirk: module specific enable quirk
+ * @module_disable_quirk: module specific disable quirk
*/
struct sysc {
struct device *dev;
@@ -100,6 +101,7 @@ struct sysc {
void (*clk_disable_quirk)(struct sysc *sysc);
void (*reset_done_quirk)(struct sysc *sysc);
void (*module_enable_quirk)(struct sysc *sysc);
+ void (*module_disable_quirk)(struct sysc *sysc);
};
static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np,
@@ -959,6 +961,9 @@ static int sysc_disable_module(struct device *dev)
if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV)
return 0;
+ if (ddata->module_disable_quirk)
+ ddata->module_disable_quirk(ddata);
+
regbits = ddata->cap->regbits;
reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
@@ -1248,6 +1253,9 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_MODULE_QUIRK_SGX),
SYSC_QUIRK("wdt", 0, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
SYSC_MODULE_QUIRK_WDT),
+ /* Watchdog on am3 and am4 */
+ SYSC_QUIRK("wdt", 0x44e35000, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
+ SYSC_MODULE_QUIRK_WDT | SYSC_QUIRK_SWSUP_SIDLE),
#ifdef DEBUG
SYSC_QUIRK("adc", 0, 0, 0x10, -1, 0x47300001, 0xffffffff, 0),
@@ -1440,14 +1448,14 @@ static void sysc_reset_done_quirk_wdt(struct sysc *ddata)
!(val & 0x10), 100,
MAX_MODULE_SOFTRESET_WAIT);
if (error)
- dev_warn(ddata->dev, "wdt disable spr failed\n");
+ dev_warn(ddata->dev, "wdt disable step1 failed\n");
- sysc_write(ddata, wps, 0x5555);
+ sysc_write(ddata, spr, 0x5555);
error = readl_poll_timeout(ddata->module_va + wps, val,
!(val & 0x10), 100,
MAX_MODULE_SOFTRESET_WAIT);
if (error)
- dev_warn(ddata->dev, "wdt disable wps failed\n");
+ dev_warn(ddata->dev, "wdt disable step2 failed\n");
}
static void sysc_init_module_quirks(struct sysc *ddata)
@@ -1471,8 +1479,10 @@ static void sysc_init_module_quirks(struct sysc *ddata)
if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_SGX)
ddata->module_enable_quirk = sysc_module_enable_quirk_sgx;
- if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_WDT)
+ if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_WDT) {
ddata->reset_done_quirk = sysc_reset_done_quirk_wdt;
+ ddata->module_disable_quirk = sysc_reset_done_quirk_wdt;
+ }
}
static int sysc_clockdomain_init(struct sysc *ddata)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index c2f7de9dc543..de434feb873a 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -2520,4 +2520,4 @@ void add_bootloader_randomness(const void *buf, unsigned int size)
else
add_device_randomness(buf, size);
}
-EXPORT_SYMBOL_GPL(add_bootloader_randomness);
\ No newline at end of file
+EXPORT_SYMBOL_GPL(add_bootloader_randomness);
diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c
index b57fe09b428b..9dd6185a4b4e 100644
--- a/drivers/clk/ti/clk-7xx.c
+++ b/drivers/clk/ti/clk-7xx.c
@@ -683,7 +683,7 @@ static const struct omap_clkctrl_reg_data dra7_l4per2_clkctrl_regs[] __initconst
{ DRA7_L4PER2_MCASP2_CLKCTRL, dra7_mcasp2_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:0154:22" },
{ DRA7_L4PER2_MCASP3_CLKCTRL, dra7_mcasp3_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:015c:22" },
{ DRA7_L4PER2_MCASP5_CLKCTRL, dra7_mcasp5_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:016c:22" },
- { DRA7_L4PER2_MCASP8_CLKCTRL, dra7_mcasp8_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:0184:24" },
+ { DRA7_L4PER2_MCASP8_CLKCTRL, dra7_mcasp8_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:0184:22" },
{ DRA7_L4PER2_MCASP4_CLKCTRL, dra7_mcasp4_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:018c:22" },
{ DRA7_L4PER2_UART7_CLKCTRL, dra7_uart7_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:01c4:24" },
{ DRA7_L4PER2_UART8_CLKCTRL, dra7_uart8_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:01d4:24" },
@@ -828,8 +828,8 @@ static struct ti_dt_clk dra7xx_clks[] = {
DT_CLK(NULL, "mcasp6_aux_gfclk_mux", "l4per2-clkctrl:01f8:22"),
DT_CLK(NULL, "mcasp7_ahclkx_mux", "l4per2-clkctrl:01fc:24"),
DT_CLK(NULL, "mcasp7_aux_gfclk_mux", "l4per2-clkctrl:01fc:22"),
- DT_CLK(NULL, "mcasp8_ahclkx_mux", "l4per2-clkctrl:0184:22"),
- DT_CLK(NULL, "mcasp8_aux_gfclk_mux", "l4per2-clkctrl:0184:24"),
+ DT_CLK(NULL, "mcasp8_ahclkx_mux", "l4per2-clkctrl:0184:24"),
+ DT_CLK(NULL, "mcasp8_aux_gfclk_mux", "l4per2-clkctrl:0184:22"),
DT_CLK(NULL, "mmc1_clk32k", "l3init-clkctrl:0008:8"),
DT_CLK(NULL, "mmc1_fclk_div", "l3init-clkctrl:0008:25"),
DT_CLK(NULL, "mmc1_fclk_mux", "l3init-clkctrl:0008:24"),
diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c
index d8c2bd4391d0..11ff701ff4bb 100644
--- a/drivers/clocksource/timer-of.c
+++ b/drivers/clocksource/timer-of.c
@@ -25,7 +25,9 @@ static __init void timer_of_irq_exit(struct of_timer_irq *of_irq)
struct clock_event_device *clkevt = &to->clkevt;
- of_irq->percpu ? free_percpu_irq(of_irq->irq, clkevt) :
+ if (of_irq->percpu)
+ free_percpu_irq(of_irq->irq, clkevt);
+ else
free_irq(of_irq->irq, clkevt);
}
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index c52d6fa32aac..48a224a6b178 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -720,7 +720,7 @@ static ssize_t store_##file_name \
if (ret != 1) \
return -EINVAL; \
\
- ret = dev_pm_qos_update_request(policy->object##_freq_req, val);\
+ ret = freq_qos_update_request(policy->object##_freq_req, val);\
return ret >= 0 ? count : ret; \
}
@@ -1202,19 +1202,21 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
goto err_free_real_cpus;
}
+ freq_constraints_init(&policy->constraints);
+
policy->nb_min.notifier_call = cpufreq_notifier_min;
policy->nb_max.notifier_call = cpufreq_notifier_max;
- ret = dev_pm_qos_add_notifier(dev, &policy->nb_min,
- DEV_PM_QOS_MIN_FREQUENCY);
+ ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MIN,
+ &policy->nb_min);
if (ret) {
dev_err(dev, "Failed to register MIN QoS notifier: %d (%*pbl)\n",
ret, cpumask_pr_args(policy->cpus));
goto err_kobj_remove;
}
- ret = dev_pm_qos_add_notifier(dev, &policy->nb_max,
- DEV_PM_QOS_MAX_FREQUENCY);
+ ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MAX,
+ &policy->nb_max);
if (ret) {
dev_err(dev, "Failed to register MAX QoS notifier: %d (%*pbl)\n",
ret, cpumask_pr_args(policy->cpus));
@@ -1232,8 +1234,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
return policy;
err_min_qos_notifier:
- dev_pm_qos_remove_notifier(dev, &policy->nb_min,
- DEV_PM_QOS_MIN_FREQUENCY);
+ freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
+ &policy->nb_min);
err_kobj_remove:
cpufreq_policy_put_kobj(policy);
err_free_real_cpus:
@@ -1250,7 +1252,6 @@ err_free_policy:
static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
- struct device *dev = get_cpu_device(policy->cpu);
unsigned long flags;
int cpu;
@@ -1262,10 +1263,13 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
per_cpu(cpufreq_cpu_data, cpu) = NULL;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
- dev_pm_qos_remove_notifier(dev, &policy->nb_max,
- DEV_PM_QOS_MAX_FREQUENCY);
- dev_pm_qos_remove_notifier(dev, &policy->nb_min,
- DEV_PM_QOS_MIN_FREQUENCY);
+ freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MAX,
+ &policy->nb_max);
+ freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
+ &policy->nb_min);
+
+ /* Cancel any pending policy->update work before freeing the policy. */
+ cancel_work_sync(&policy->update);
if (policy->max_freq_req) {
/*
@@ -1274,10 +1278,10 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
*/
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_REMOVE_POLICY, policy);
- dev_pm_qos_remove_request(policy->max_freq_req);
+ freq_qos_remove_request(policy->max_freq_req);
}
- dev_pm_qos_remove_request(policy->min_freq_req);
+ freq_qos_remove_request(policy->min_freq_req);
kfree(policy->min_freq_req);
cpufreq_policy_put_kobj(policy);
@@ -1357,8 +1361,6 @@ static int cpufreq_online(unsigned int cpu)
cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
if (new_policy) {
- struct device *dev = get_cpu_device(cpu);
-
for_each_cpu(j, policy->related_cpus) {
per_cpu(cpufreq_cpu_data, j) = policy;
add_cpu_dev_symlink(policy, j);
@@ -1369,36 +1371,31 @@ static int cpufreq_online(unsigned int cpu)
if (!policy->min_freq_req)
goto out_destroy_policy;
- ret = dev_pm_qos_add_request(dev, policy->min_freq_req,
- DEV_PM_QOS_MIN_FREQUENCY,
- policy->min);
+ ret = freq_qos_add_request(&policy->constraints,
+ policy->min_freq_req, FREQ_QOS_MIN,
+ policy->min);
if (ret < 0) {
/*
- * So we don't call dev_pm_qos_remove_request() for an
+ * So we don't call freq_qos_remove_request() for an
* uninitialized request.
*/
kfree(policy->min_freq_req);
policy->min_freq_req = NULL;
-
- dev_err(dev, "Failed to add min-freq constraint (%d)\n",
- ret);
goto out_destroy_policy;
}
/*
* This must be initialized right here to avoid calling
- * dev_pm_qos_remove_request() on uninitialized request in case
+ * freq_qos_remove_request() on uninitialized request in case
* of errors.
*/
policy->max_freq_req = policy->min_freq_req + 1;
- ret = dev_pm_qos_add_request(dev, policy->max_freq_req,
- DEV_PM_QOS_MAX_FREQUENCY,
- policy->max);
+ ret = freq_qos_add_request(&policy->constraints,
+ policy->max_freq_req, FREQ_QOS_MAX,
+ policy->max);
if (ret < 0) {
policy->max_freq_req = NULL;
- dev_err(dev, "Failed to add max-freq constraint (%d)\n",
- ret);
goto out_destroy_policy;
}
@@ -2374,7 +2371,6 @@ int cpufreq_set_policy(struct cpufreq_policy *policy,
struct cpufreq_policy *new_policy)
{
struct cpufreq_governor *old_gov;
- struct device *cpu_dev = get_cpu_device(policy->cpu);
int ret;
pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
@@ -2386,8 +2382,8 @@ int cpufreq_set_policy(struct cpufreq_policy *policy,
* PM QoS framework collects all the requests from users and provide us
* the final aggregated value here.
*/
- new_policy->min = dev_pm_qos_read_value(cpu_dev, DEV_PM_QOS_MIN_FREQUENCY);
- new_policy->max = dev_pm_qos_read_value(cpu_dev, DEV_PM_QOS_MAX_FREQUENCY);
+ new_policy->min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
+ new_policy->max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);
/* verify the cpu speed can be set within this limit */
ret = cpufreq_driver->verify(new_policy);
@@ -2518,7 +2514,7 @@ static int cpufreq_boost_set_sw(int state)
break;
}
- ret = dev_pm_qos_update_request(policy->max_freq_req, policy->max);
+ ret = freq_qos_update_request(policy->max_freq_req, policy->max);
if (ret < 0)
break;
}
@@ -2737,14 +2733,6 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
-/*
- * Stop cpufreq at shutdown to make sure it isn't holding any locks
- * or mutexes when secondary CPUs are halted.
- */
-static struct syscore_ops cpufreq_syscore_ops = {
- .shutdown = cpufreq_suspend,
-};
-
struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);
@@ -2756,8 +2744,6 @@ static int __init cpufreq_core_init(void)
cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
BUG_ON(!cpufreq_global_kobject);
- register_syscore_ops(&cpufreq_syscore_ops);
-
return 0;
}
module_param(off, int, 0444);
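The cpufreq hunks above move from the per-device dev_pm_qos_* calls to the frequency-specific freq_qos_* interface, where each policy carries its own struct freq_constraints and every user files a request against it. The aggregation rule is that the effective minimum is the largest of all FREQ_QOS_MIN requests and the effective maximum is the smallest of all FREQ_QOS_MAX requests. A minimal standalone sketch of that rule (all demo_* names are invented for illustration; the real implementation lives in kernel/power/qos.c):

    #include <stdio.h>

    /* Stand-in for the freq_qos aggregation semantics: the highest floor
     * wins for MIN requests, the lowest ceiling wins for MAX requests. */
    enum demo_qos_type { DEMO_QOS_MIN, DEMO_QOS_MAX };

    static int demo_aggregate(enum demo_qos_type type, const int *reqs, int n)
    {
    	int i, val = reqs[0];

    	for (i = 1; i < n; i++) {
    		if (type == DEMO_QOS_MIN)
    			val = reqs[i] > val ? reqs[i] : val; /* highest floor */
    		else
    			val = reqs[i] < val ? reqs[i] : val; /* lowest ceiling */
    	}
    	return val;
    }

    int main(void)
    {
    	int min_reqs[] = { 800000, 1200000 };  /* kHz floors from two users */
    	int max_reqs[] = { 3600000, 2400000 }; /* kHz ceilings */

    	printf("effective min: %d kHz\n", demo_aggregate(DEMO_QOS_MIN, min_reqs, 2));
    	printf("effective max: %d kHz\n", demo_aggregate(DEMO_QOS_MAX, max_reqs, 2));
    	return 0;
    }

With floors of 800 MHz and 1.2 GHz filed, the policy minimum becomes 1.2 GHz; the converse applies to the ceiling, which is what freq_qos_read_value() hands back to cpufreq_set_policy() above.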
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 9f02de9a1b47..53a51c169451 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1088,10 +1088,10 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
static struct cpufreq_driver intel_pstate;
-static void update_qos_request(enum dev_pm_qos_req_type type)
+static void update_qos_request(enum freq_qos_req_type type)
{
int max_state, turbo_max, freq, i, perf_pct;
- struct dev_pm_qos_request *req;
+ struct freq_qos_request *req;
struct cpufreq_policy *policy;
for_each_possible_cpu(i) {
@@ -1112,7 +1112,7 @@ static void update_qos_request(enum dev_pm_qos_req_type type)
else
turbo_max = cpu->pstate.turbo_pstate;
- if (type == DEV_PM_QOS_MIN_FREQUENCY) {
+ if (type == FREQ_QOS_MIN) {
perf_pct = global.min_perf_pct;
} else {
req++;
@@ -1122,7 +1122,7 @@ static void update_qos_request(enum dev_pm_qos_req_type type)
freq = DIV_ROUND_UP(turbo_max * perf_pct, 100);
freq *= cpu->pstate.scaling;
- if (dev_pm_qos_update_request(req, freq) < 0)
+ if (freq_qos_update_request(req, freq) < 0)
pr_warn("Failed to update freq constraint: CPU%d\n", i);
}
}
@@ -1153,7 +1153,7 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
if (intel_pstate_driver == &intel_pstate)
intel_pstate_update_policies();
else
- update_qos_request(DEV_PM_QOS_MAX_FREQUENCY);
+ update_qos_request(FREQ_QOS_MAX);
mutex_unlock(&intel_pstate_driver_lock);
@@ -1187,7 +1187,7 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
if (intel_pstate_driver == &intel_pstate)
intel_pstate_update_policies();
else
- update_qos_request(DEV_PM_QOS_MIN_FREQUENCY);
+ update_qos_request(FREQ_QOS_MIN);
mutex_unlock(&intel_pstate_driver_lock);
@@ -2381,7 +2381,7 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
int max_state, turbo_max, min_freq, max_freq, ret;
- struct dev_pm_qos_request *req;
+ struct freq_qos_request *req;
struct cpudata *cpu;
struct device *dev;
@@ -2416,15 +2416,15 @@ static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
max_freq = DIV_ROUND_UP(turbo_max * global.max_perf_pct, 100);
max_freq *= cpu->pstate.scaling;
- ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_MIN_FREQUENCY,
- min_freq);
+ ret = freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MIN,
+ min_freq);
if (ret < 0) {
dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret);
goto free_req;
}
- ret = dev_pm_qos_add_request(dev, req + 1, DEV_PM_QOS_MAX_FREQUENCY,
- max_freq);
+ ret = freq_qos_add_request(&policy->constraints, req + 1, FREQ_QOS_MAX,
+ max_freq);
if (ret < 0) {
dev_err(dev, "Failed to add max-freq constraint (%d)\n", ret);
goto remove_min_req;
@@ -2435,7 +2435,7 @@ static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
return 0;
remove_min_req:
- dev_pm_qos_remove_request(req);
+ freq_qos_remove_request(req);
free_req:
kfree(req);
pstate_exit:
@@ -2446,12 +2446,12 @@ pstate_exit:
static int intel_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
- struct dev_pm_qos_request *req;
+ struct freq_qos_request *req;
req = policy->driver_data;
- dev_pm_qos_remove_request(req + 1);
- dev_pm_qos_remove_request(req);
+ freq_qos_remove_request(req + 1);
+ freq_qos_remove_request(req);
kfree(req);
return intel_pstate_cpu_exit(policy);
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
index bc9dd30395c4..037fe23bc6ed 100644
--- a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
+++ b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
@@ -65,7 +65,7 @@ EXPORT_SYMBOL_GPL(cbe_cpufreq_set_pmode_pmi);
static void cbe_cpufreq_handle_pmi(pmi_message_t pmi_msg)
{
struct cpufreq_policy *policy;
- struct dev_pm_qos_request *req;
+ struct freq_qos_request *req;
u8 node, slow_mode;
int cpu, ret;
@@ -86,7 +86,7 @@ static void cbe_cpufreq_handle_pmi(pmi_message_t pmi_msg)
req = policy->driver_data;
- ret = dev_pm_qos_update_request(req,
+ ret = freq_qos_update_request(req,
policy->freq_table[slow_mode].frequency);
if (ret < 0)
pr_warn("Failed to update freq constraint: %d\n", ret);
@@ -103,7 +103,7 @@ static struct pmi_handler cbe_pmi_handler = {
void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy)
{
- struct dev_pm_qos_request *req;
+ struct freq_qos_request *req;
int ret;
if (!cbe_cpufreq_has_pmi)
@@ -113,9 +113,8 @@ void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy)
if (!req)
return;
- ret = dev_pm_qos_add_request(get_cpu_device(policy->cpu), req,
- DEV_PM_QOS_MAX_FREQUENCY,
- policy->freq_table[0].frequency);
+ ret = freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MAX,
+ policy->freq_table[0].frequency);
if (ret < 0) {
pr_err("Failed to add freq constraint (%d)\n", ret);
kfree(req);
@@ -128,10 +127,10 @@ EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_policy_init);
void cbe_cpufreq_pmi_policy_exit(struct cpufreq_policy *policy)
{
- struct dev_pm_qos_request *req = policy->driver_data;
+ struct freq_qos_request *req = policy->driver_data;
if (cbe_cpufreq_has_pmi) {
- dev_pm_qos_remove_request(req);
+ freq_qos_remove_request(req);
kfree(req);
}
}
diff --git a/drivers/cpuidle/cpuidle-haltpoll.c b/drivers/cpuidle/cpuidle-haltpoll.c
index 932390b028f1..b0ce9bc78113 100644
--- a/drivers/cpuidle/cpuidle-haltpoll.c
+++ b/drivers/cpuidle/cpuidle-haltpoll.c
@@ -95,6 +95,10 @@ static int __init haltpoll_init(void)
int ret;
struct cpuidle_driver *drv = &haltpoll_driver;
+ /* Do not load haltpoll if idle= is passed */
+ if (boot_option_idle_override != IDLE_NO_OVERRIDE)
+ return -ENODEV;
+
cpuidle_poll_state_init(drv);
if (!kvm_para_available() ||
diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig
index 250150560e68..91e424378217 100644
--- a/drivers/crypto/chelsio/Kconfig
+++ b/drivers/crypto/chelsio/Kconfig
@@ -35,7 +35,7 @@ config CHELSIO_IPSEC_INLINE
config CRYPTO_DEV_CHELSIO_TLS
tristate "Chelsio Crypto Inline TLS Driver"
depends on CHELSIO_T4
- depends on TLS
+ depends on TLS_TOE
select CRYPTO_DEV_CHELSIO
---help---
Support Chelsio Inline TLS with Chelsio crypto accelerator.
diff --git a/drivers/crypto/chelsio/chtls/chtls.h b/drivers/crypto/chelsio/chtls/chtls.h
index 025c831d0899..d2bc655ab931 100644
--- a/drivers/crypto/chelsio/chtls/chtls.h
+++ b/drivers/crypto/chelsio/chtls/chtls.h
@@ -21,6 +21,7 @@
#include <crypto/internal/hash.h>
#include <linux/tls.h>
#include <net/tls.h>
+#include <net/tls_toe.h>
#include "t4fw_api.h"
#include "t4_msg.h"
@@ -118,7 +119,7 @@ struct tls_scmd {
};
struct chtls_dev {
- struct tls_device tlsdev;
+ struct tls_toe_device tlsdev;
struct list_head list;
struct cxgb4_lld_info *lldi;
struct pci_dev *pdev;
@@ -362,7 +363,7 @@ enum {
#define TCP_PAGE(sk) (sk->sk_frag.page)
#define TCP_OFF(sk) (sk->sk_frag.offset)
-static inline struct chtls_dev *to_chtls_dev(struct tls_device *tlsdev)
+static inline struct chtls_dev *to_chtls_dev(struct tls_toe_device *tlsdev)
{
return container_of(tlsdev, struct chtls_dev, tlsdev);
}
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
index 774d991d7cca..aca75237bbcf 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
@@ -1297,7 +1297,7 @@ static void make_established(struct sock *sk, u32 snd_isn, unsigned int opt)
tp->write_seq = snd_isn;
tp->snd_nxt = snd_isn;
tp->snd_una = snd_isn;
- inet_sk(sk)->inet_id = tp->write_seq ^ jiffies;
+ inet_sk(sk)->inet_id = prandom_u32();
assign_rxopt(sk, opt);
if (tp->rcv_wnd > (RCV_BUFSIZ_M << 10))
diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c
index 0891ab829b1b..98bc5a4cd5e7 100644
--- a/drivers/crypto/chelsio/chtls/chtls_io.c
+++ b/drivers/crypto/chelsio/chtls/chtls_io.c
@@ -1702,7 +1702,7 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
return peekmsg(sk, msg, len, nonblock, flags);
if (sk_can_busy_loop(sk) &&
- skb_queue_empty(&sk->sk_receive_queue) &&
+ skb_queue_empty_lockless(&sk->sk_receive_queue) &&
sk->sk_state == TCP_ESTABLISHED)
sk_busy_loop(sk, nonblock);
diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c
index e6df5b95ed47..18996935d8ba 100644
--- a/drivers/crypto/chelsio/chtls/chtls_main.c
+++ b/drivers/crypto/chelsio/chtls/chtls_main.c
@@ -124,7 +124,7 @@ static void chtls_stop_listen(struct chtls_dev *cdev, struct sock *sk)
mutex_unlock(&notify_mutex);
}
-static int chtls_inline_feature(struct tls_device *dev)
+static int chtls_inline_feature(struct tls_toe_device *dev)
{
struct net_device *netdev;
struct chtls_dev *cdev;
@@ -140,7 +140,7 @@ static int chtls_inline_feature(struct tls_device *dev)
return 0;
}
-static int chtls_create_hash(struct tls_device *dev, struct sock *sk)
+static int chtls_create_hash(struct tls_toe_device *dev, struct sock *sk)
{
struct chtls_dev *cdev = to_chtls_dev(dev);
@@ -149,7 +149,7 @@ static int chtls_create_hash(struct tls_device *dev, struct sock *sk)
return 0;
}
-static void chtls_destroy_hash(struct tls_device *dev, struct sock *sk)
+static void chtls_destroy_hash(struct tls_toe_device *dev, struct sock *sk)
{
struct chtls_dev *cdev = to_chtls_dev(dev);
@@ -161,7 +161,7 @@ static void chtls_free_uld(struct chtls_dev *cdev)
{
int i;
- tls_unregister_device(&cdev->tlsdev);
+ tls_toe_unregister_device(&cdev->tlsdev);
kvfree(cdev->kmap.addr);
idr_destroy(&cdev->hwtid_idr);
for (i = 0; i < (1 << RSPQ_HASH_BITS); i++)
@@ -173,27 +173,27 @@ static void chtls_free_uld(struct chtls_dev *cdev)
static inline void chtls_dev_release(struct kref *kref)
{
+ struct tls_toe_device *dev;
struct chtls_dev *cdev;
- struct tls_device *dev;
- dev = container_of(kref, struct tls_device, kref);
+ dev = container_of(kref, struct tls_toe_device, kref);
cdev = to_chtls_dev(dev);
chtls_free_uld(cdev);
}
static void chtls_register_dev(struct chtls_dev *cdev)
{
- struct tls_device *tlsdev = &cdev->tlsdev;
+ struct tls_toe_device *tlsdev = &cdev->tlsdev;
- strlcpy(tlsdev->name, "chtls", TLS_DEVICE_NAME_MAX);
+ strlcpy(tlsdev->name, "chtls", TLS_TOE_DEVICE_NAME_MAX);
strlcat(tlsdev->name, cdev->lldi->ports[0]->name,
- TLS_DEVICE_NAME_MAX);
+ TLS_TOE_DEVICE_NAME_MAX);
tlsdev->feature = chtls_inline_feature;
tlsdev->hash = chtls_create_hash;
tlsdev->unhash = chtls_destroy_hash;
tlsdev->release = chtls_dev_release;
kref_init(&tlsdev->kref);
- tls_register_device(tlsdev);
+ tls_toe_register_device(tlsdev);
cdev->cdev_state = CHTLS_CDEV_STATE_UP;
}
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 42a8f3f11681..709002515550 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -471,7 +471,7 @@ unlock:
if (pfence_excl)
*pfence_excl = fence_excl;
else if (fence_excl)
- shared[++shared_count] = fence_excl;
+ shared[shared_count++] = fence_excl;
if (!shared_count) {
kfree(shared);
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 9ba74ab7e912..c27e206a764c 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1707,6 +1707,14 @@ static void sdma_add_scripts(struct sdma_engine *sdma,
if (!sdma->script_number)
sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
+ if (sdma->script_number > sizeof(struct sdma_script_start_addrs)
+ / sizeof(s32)) {
+ dev_err(sdma->dev,
+ "SDMA script number %d not match with firmware.\n",
+ sdma->script_number);
+ return;
+ }
+
for (i = 0; i < sdma->script_number; i++)
if (addr_arr[i] > 0)
saddr_arr[i] = addr_arr[i];
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index 8e90a405939d..ef73f65224b1 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -694,6 +694,25 @@ static int bam_dma_terminate_all(struct dma_chan *chan)
/* remove all transactions, including active transaction */
spin_lock_irqsave(&bchan->vc.lock, flag);
+ /*
+ * If we have transactions queued, then some might be committed to the
+ * hardware in the desc fifo. The only way to reset the desc fifo is
+ * to do a hardware reset (either by pipe or the entire block).
+ * bam_chan_init_hw() will trigger a pipe reset, and also reinit the
+ * pipe. If the pipe is left disabled (default state after pipe reset)
+ * and is accessed by a connected hardware engine, a fatal error in
+ * the BAM will occur. There is a small window where this could happen
+ * with bam_chan_init_hw(), but it is assumed that the caller has
+ * stopped activity on any attached hardware engine. Make sure to do
+ * this first so that the BAM hardware doesn't cause memory corruption
+ * by accessing freed resources.
+ */
+ if (!list_empty(&bchan->desc_list)) {
+ async_desc = list_first_entry(&bchan->desc_list,
+ struct bam_async_desc, desc_node);
+ bam_chan_init_hw(bchan, async_desc->dir);
+ }
+
list_for_each_entry_safe(async_desc, tmp,
&bchan->desc_list, desc_node) {
list_add(&async_desc->vd.node, &bchan->vc.desc_issued);
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index 525dc7338fe3..8546ad034720 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -134,6 +134,10 @@
#define SPRD_DMA_SRC_TRSF_STEP_OFFSET 0
#define SPRD_DMA_TRSF_STEP_MASK GENMASK(15, 0)
+/* SPRD DMA_SRC_BLK_STEP register definition */
+#define SPRD_DMA_LLIST_HIGH_MASK GENMASK(31, 28)
+#define SPRD_DMA_LLIST_HIGH_SHIFT 28
+
/* define DMA channel mode & trigger mode mask */
#define SPRD_DMA_CHN_MODE_MASK GENMASK(7, 0)
#define SPRD_DMA_TRG_MODE_MASK GENMASK(7, 0)
@@ -208,6 +212,7 @@ struct sprd_dma_dev {
struct sprd_dma_chn channels[0];
};
+static void sprd_dma_free_desc(struct virt_dma_desc *vd);
static bool sprd_dma_filter_fn(struct dma_chan *chan, void *param);
static struct of_dma_filter_info sprd_dma_info = {
.filter_fn = sprd_dma_filter_fn,
@@ -609,12 +614,19 @@ static int sprd_dma_alloc_chan_resources(struct dma_chan *chan)
static void sprd_dma_free_chan_resources(struct dma_chan *chan)
{
struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
+ struct virt_dma_desc *cur_vd = NULL;
unsigned long flags;
spin_lock_irqsave(&schan->vc.lock, flags);
+ if (schan->cur_desc)
+ cur_vd = &schan->cur_desc->vd;
+
sprd_dma_stop(schan);
spin_unlock_irqrestore(&schan->vc.lock, flags);
+ if (cur_vd)
+ sprd_dma_free_desc(cur_vd);
+
vchan_free_chan_resources(&schan->vc);
pm_runtime_put(chan->device->dev);
}
@@ -717,6 +729,7 @@ static int sprd_dma_fill_desc(struct dma_chan *chan,
u32 int_mode = flags & SPRD_DMA_INT_MASK;
int src_datawidth, dst_datawidth, src_step, dst_step;
u32 temp, fix_mode = 0, fix_en = 0;
+ phys_addr_t llist_ptr;
if (dir == DMA_MEM_TO_DEV) {
src_step = sprd_dma_get_step(slave_cfg->src_addr_width);
@@ -814,13 +827,16 @@ static int sprd_dma_fill_desc(struct dma_chan *chan,
* Set the link-list pointer to point to the next link-list
* configuration's physical address.
*/
- hw->llist_ptr = schan->linklist.phy_addr + temp;
+ llist_ptr = schan->linklist.phy_addr + temp;
+ hw->llist_ptr = lower_32_bits(llist_ptr);
+ hw->src_blk_step = (upper_32_bits(llist_ptr) << SPRD_DMA_LLIST_HIGH_SHIFT) &
+ SPRD_DMA_LLIST_HIGH_MASK;
} else {
hw->llist_ptr = 0;
+ hw->src_blk_step = 0;
}
hw->frg_step = 0;
- hw->src_blk_step = 0;
hw->des_blk_step = 0;
return 0;
}
@@ -1023,15 +1039,22 @@ static int sprd_dma_resume(struct dma_chan *chan)
static int sprd_dma_terminate_all(struct dma_chan *chan)
{
struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
+ struct virt_dma_desc *cur_vd = NULL;
unsigned long flags;
LIST_HEAD(head);
spin_lock_irqsave(&schan->vc.lock, flags);
+ if (schan->cur_desc)
+ cur_vd = &schan->cur_desc->vd;
+
sprd_dma_stop(schan);
vchan_get_all_descriptors(&schan->vc, &head);
spin_unlock_irqrestore(&schan->vc.lock, flags);
+ if (cur_vd)
+ sprd_dma_free_desc(cur_vd);
+
vchan_dma_desc_free_list(&schan->vc, &head);
return 0;
}
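The sprd-dma hunk widens the link-list pointer beyond 32 bits: the low word goes into the LLIST_PTR register, and bits [35:32] are packed into SRC_BLK_STEP[31:28] via SPRD_DMA_LLIST_HIGH_SHIFT/MASK. A standalone sketch of that split, with an example address chosen purely for illustration:

    #include <stdint.h>
    #include <stdio.h>

    /* Split a 36-bit link-list address: low 32 bits go to LLIST_PTR,
     * the high 4 bits land in SRC_BLK_STEP[31:28]. */
    #define LLIST_HIGH_SHIFT 28
    #define LLIST_HIGH_MASK  (0xFu << LLIST_HIGH_SHIFT)

    int main(void)
    {
    	uint64_t llist_ptr = 0x234567890ULL;      /* example 36-bit address */
    	unsigned lo = (unsigned)llist_ptr;        /* -> LLIST_PTR register */
    	unsigned hi = ((unsigned)(llist_ptr >> 32) << LLIST_HIGH_SHIFT)
    		      & LLIST_HIGH_MASK;          /* -> SRC_BLK_STEP register */

    	printf("LLIST_PTR=0x%08x SRC_BLK_STEP(high)=0x%08x\n", lo, hi);
    	return 0;
    }

This is why the hunk can no longer unconditionally zero hw->src_blk_step: on link-list transfers it now carries part of the address.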
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index 5f8adf5c1f20..6e1268552f74 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -40,6 +40,7 @@
#define ADMA_CH_CONFIG_MAX_BURST_SIZE 16
#define ADMA_CH_CONFIG_WEIGHT_FOR_WRR(val) ((val) & 0xf)
#define ADMA_CH_CONFIG_MAX_BUFS 8
+#define TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(reqs) ((reqs) << 4)
#define ADMA_CH_FIFO_CTRL 0x2c
#define TEGRA210_ADMA_CH_FIFO_CTRL_TXSIZE(val) (((val) & 0xf) << 8)
@@ -77,6 +78,7 @@ struct tegra_adma;
* @ch_req_tx_shift: Register offset for AHUB transmit channel select.
* @ch_req_rx_shift: Register offset for AHUB receive channel select.
* @ch_base_offset: Register offset of DMA channel registers.
+ * @has_outstanding_reqs: True if the DMA channel can have outstanding requests.
* @ch_fifo_ctrl: Default value for channel FIFO CTRL register.
* @ch_req_mask: Mask for Tx or Rx channel select.
* @ch_req_max: Maximum number of Tx or Rx channels available.
@@ -95,6 +97,7 @@ struct tegra_adma_chip_data {
unsigned int ch_req_max;
unsigned int ch_reg_size;
unsigned int nr_channels;
+ bool has_outstanding_reqs;
};
/*
@@ -594,6 +597,8 @@ static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc,
ADMA_CH_CTRL_FLOWCTRL_EN;
ch_regs->config |= cdata->adma_get_burst_config(burst_size);
ch_regs->config |= ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1);
+ if (cdata->has_outstanding_reqs)
+ ch_regs->config |= TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(8);
ch_regs->fifo_ctrl = cdata->ch_fifo_ctrl;
ch_regs->tc = desc->period_len & ADMA_CH_TC_COUNT_MASK;
@@ -778,6 +783,7 @@ static const struct tegra_adma_chip_data tegra210_chip_data = {
.ch_req_tx_shift = 28,
.ch_req_rx_shift = 24,
.ch_base_offset = 0,
+ .has_outstanding_reqs = false,
.ch_fifo_ctrl = TEGRA210_FIFO_CTRL_DEFAULT,
.ch_req_mask = 0xf,
.ch_req_max = 10,
@@ -792,6 +798,7 @@ static const struct tegra_adma_chip_data tegra186_chip_data = {
.ch_req_tx_shift = 27,
.ch_req_rx_shift = 22,
.ch_base_offset = 0x10000,
+ .has_outstanding_reqs = true,
.ch_fifo_ctrl = TEGRA186_FIFO_CTRL_DEFAULT,
.ch_req_mask = 0x1f,
.ch_req_max = 20,
diff --git a/drivers/dma/ti/cppi41.c b/drivers/dma/ti/cppi41.c
index 2f946f55076c..8c2f7ebe998c 100644
--- a/drivers/dma/ti/cppi41.c
+++ b/drivers/dma/ti/cppi41.c
@@ -586,9 +586,22 @@ static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg(
enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
struct cppi41_channel *c = to_cpp41_chan(chan);
+ struct dma_async_tx_descriptor *txd = NULL;
+ struct cppi41_dd *cdd = c->cdd;
struct cppi41_desc *d;
struct scatterlist *sg;
unsigned int i;
+ int error;
+
+ error = pm_runtime_get(cdd->ddev.dev);
+ if (error < 0) {
+ pm_runtime_put_noidle(cdd->ddev.dev);
+
+ return NULL;
+ }
+
+ if (cdd->is_suspended)
+ goto err_out_not_ready;
d = c->desc;
for_each_sg(sgl, sg, sg_len, i) {
@@ -611,7 +624,13 @@ static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg(
d++;
}
- return &c->txd;
+ txd = &c->txd;
+
+err_out_not_ready:
+ pm_runtime_mark_last_busy(cdd->ddev.dev);
+ pm_runtime_put_autosuspend(cdd->ddev.dev);
+
+ return txd;
}
static void cppi41_compute_td_desc(struct cppi41_desc *d)
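The cppi41 hunk follows the usual runtime-PM discipline: a pm_runtime_get() that fails must still be balanced with pm_runtime_put_noidle(), and the success and not-ready paths both exit through a single put. A toy model of that balancing rule (the demo_* stubs stand in for the real PM calls and only track the usage count):

    #include <stdio.h>

    /* Stubs that model only the balancing rule: every get is matched by
     * exactly one put, and a failed get is balanced with put_noidle. */
    static int usage_count;

    static int demo_pm_get(void)         { usage_count++; return 0; }
    static void demo_pm_put_noidle(void) { usage_count--; }
    static void demo_pm_put(void)        { usage_count--; }

    static void *demo_prep(void)
    {
    	void *desc = NULL;

    	if (demo_pm_get() < 0) {
    		demo_pm_put_noidle();	/* balance the failed get */
    		return NULL;
    	}

    	/* ... build the descriptor; if suspended, fall through with NULL */
    	desc = (void *)&usage_count;	/* placeholder success value */

    	demo_pm_put();			/* single exit keeps get/put balanced */
    	return desc;
    }

    int main(void)
    {
    	demo_prep();
    	printf("usage_count=%d (must be 0)\n", usage_count);
    	return 0;
    }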
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index e7dc3c4dc8e0..5d56f1e4d332 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -68,6 +68,9 @@
#define XILINX_DMA_DMACR_CIRC_EN BIT(1)
#define XILINX_DMA_DMACR_RUNSTOP BIT(0)
#define XILINX_DMA_DMACR_FSYNCSRC_MASK GENMASK(6, 5)
+#define XILINX_DMA_DMACR_DELAY_MASK GENMASK(31, 24)
+#define XILINX_DMA_DMACR_FRAME_COUNT_MASK GENMASK(23, 16)
+#define XILINX_DMA_DMACR_MASTER_MASK GENMASK(11, 8)
#define XILINX_DMA_REG_DMASR 0x0004
#define XILINX_DMA_DMASR_EOL_LATE_ERR BIT(15)
@@ -1354,7 +1357,8 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
node);
hw = &segment->hw;
- xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr);
+ xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR,
+ xilinx_prep_dma_addr_t(hw->buf_addr));
/* Start the transfer */
dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
@@ -2117,8 +2121,10 @@ int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
chan->config.gen_lock = cfg->gen_lock;
chan->config.master = cfg->master;
+ dmacr &= ~XILINX_DMA_DMACR_GENLOCK_EN;
if (cfg->gen_lock && chan->genlock) {
dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
+ dmacr &= ~XILINX_DMA_DMACR_MASTER_MASK;
dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
}
@@ -2134,11 +2140,13 @@ int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
chan->config.delay = cfg->delay;
if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
+ dmacr &= ~XILINX_DMA_DMACR_FRAME_COUNT_MASK;
dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
chan->config.coalesc = cfg->coalesc;
}
if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
+ dmacr &= ~XILINX_DMA_DMACR_DELAY_MASK;
dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
chan->config.delay = cfg->delay;
}
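Both xilinx_vdma_channel_set_config() fixes above are the same read-modify-write pattern: clear the field's mask before OR-ing in the new value, otherwise bits from a previous configuration leak into the register. A standalone sketch (the field layout here is illustrative, not the real DMACR map):

    #include <stdint.h>
    #include <stdio.h>

    /* Update one register field: drop the stale value under the mask,
     * then install the new shifted value. */
    #define FIELD_SHIFT 16
    #define FIELD_MASK  (0xFFu << FIELD_SHIFT)

    static uint32_t set_field(uint32_t reg, uint32_t val)
    {
    	reg &= ~FIELD_MASK;                       /* clear old field bits */
    	reg |= (val << FIELD_SHIFT) & FIELD_MASK; /* OR in the new value */
    	return reg;
    }

    int main(void)
    {
    	uint32_t dmacr = 0x00FF0001; /* field currently holds 0xFF */

    	dmacr = set_field(dmacr, 0x12);
    	printf("dmacr=0x%08x\n", (unsigned)dmacr); /* 0x00120001 */
    	return 0;
    }

Without the clearing step, the OR would produce 0x00FF0001 | 0x00120000 = 0x00FF0001 again, which is exactly the stale-bits bug the hunk removes.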
diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c
index d413a0bdc9ad..0bb62857ffb2 100644
--- a/drivers/edac/ghes_edac.c
+++ b/drivers/edac/ghes_edac.c
@@ -553,7 +553,11 @@ void ghes_edac_unregister(struct ghes *ghes)
if (!ghes_pvt)
return;
+ if (atomic_dec_return(&ghes_init))
+ return;
+
mci = ghes_pvt->mci;
+ ghes_pvt = NULL;
edac_mc_del_mc(mci->pdev);
edac_mc_free(mci);
}
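The ghes_edac change makes unregistration symmetric with the registration refcount: only the caller that drops ghes_init back to zero tears down the shared memory controller. A small sketch of that counting rule (demo-only names):

    #include <stdio.h>

    /* Refcounted register/unregister: teardown runs only when the last
     * user unregisters, mirroring the atomic_dec_return() test above. */
    static int ghes_refs;

    static void demo_register(void)   { ghes_refs++; }

    static void demo_unregister(void)
    {
    	if (--ghes_refs)	/* another GHES instance still registered */
    		return;
    	printf("tearing down shared MC state\n");
    }

    int main(void)
    {
    	demo_register();
    	demo_register();
    	demo_unregister();	/* no teardown yet */
    	demo_unregister();	/* teardown happens here */
    	return 0;
    }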
diff --git a/drivers/firmware/broadcom/Kconfig b/drivers/firmware/broadcom/Kconfig
index d03ed8e43ad7..8e3d355a637a 100644
--- a/drivers/firmware/broadcom/Kconfig
+++ b/drivers/firmware/broadcom/Kconfig
@@ -22,3 +22,11 @@ config BCM47XX_SPROM
In case of SoC devices SPROM content is stored on a flash used by
bootloader firmware CFE. This driver provides method to ssb and bcma
drivers to read SPROM on SoC.
+
+config TEE_BNXT_FW
+ tristate "Broadcom BNXT firmware manager"
+ depends on (ARCH_BCM_IPROC && OPTEE) || (COMPILE_TEST && TEE)
+ default ARCH_BCM_IPROC
+ help
+ This module helps manage firmware on the Broadcom BNXT device. The
+ module registers on the TEE bus and invokes calls to manage the
+ firmware on the BNXT device.
diff --git a/drivers/firmware/broadcom/Makefile b/drivers/firmware/broadcom/Makefile
index 72c7fdc20c77..17c5061c47a7 100644
--- a/drivers/firmware/broadcom/Makefile
+++ b/drivers/firmware/broadcom/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_BCM47XX_NVRAM) += bcm47xx_nvram.o
obj-$(CONFIG_BCM47XX_SPROM) += bcm47xx_sprom.o
+obj-$(CONFIG_TEE_BNXT_FW) += tee_bnxt_fw.o
diff --git a/drivers/firmware/broadcom/tee_bnxt_fw.c b/drivers/firmware/broadcom/tee_bnxt_fw.c
new file mode 100644
index 000000000000..5b7ef89eb701
--- /dev/null
+++ b/drivers/firmware/broadcom/tee_bnxt_fw.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 Broadcom.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/tee_drv.h>
+#include <linux/uuid.h>
+
+#include <linux/firmware/broadcom/tee_bnxt_fw.h>
+
+#define MAX_SHM_MEM_SZ SZ_4M
+
+#define MAX_TEE_PARAM_ARRY_MEMB 4
+
+enum ta_cmd {
+ /*
+ * TA_CMD_BNXT_FASTBOOT - boot bnxt device by copying f/w into sram
+ *
+ * param[0] unused
+ * param[1] unused
+ * param[2] unused
+ * param[3] unused
+ *
+ * Result:
+ * TEE_SUCCESS - Invoke command success
+ * TEE_ERROR_ITEM_NOT_FOUND - Corrupt f/w image found in memory
+ */
+ TA_CMD_BNXT_FASTBOOT = 0,
+
+ /*
+ * TA_CMD_BNXT_COPY_COREDUMP - copy the core dump into shm
+ *
+ * param[0] (inout memref) - Coredump buffer memory reference
+ * param[1] (in value) - value.a: offset to copy data from
+ * value.b: size of data to be copied
+ * param[2] unused
+ * param[3] unused
+ *
+ * Result:
+ * TEE_SUCCESS - Invoke command success
+ * TEE_ERROR_BAD_PARAMETERS - Incorrect input param
+ * TEE_ERROR_ITEM_NOT_FOUND - Corrupt core dump
+ */
+ TA_CMD_BNXT_COPY_COREDUMP = 3,
+};
+
+/**
+ * struct tee_bnxt_fw_private - OP-TEE bnxt private data
+ * @dev: OP-TEE based bnxt device.
+ * @ctx: OP-TEE context handler.
+ * @session_id: TA session identifier.
+ * @fw_shm_pool: Shared memory pool used for firmware and coredump data.
+ */
+struct tee_bnxt_fw_private {
+ struct device *dev;
+ struct tee_context *ctx;
+ u32 session_id;
+ struct tee_shm *fw_shm_pool;
+};
+
+static struct tee_bnxt_fw_private pvt_data;
+
+static void prepare_args(int cmd,
+ struct tee_ioctl_invoke_arg *arg,
+ struct tee_param *param)
+{
+ memset(arg, 0, sizeof(*arg));
+ memset(param, 0, MAX_TEE_PARAM_ARRY_MEMB * sizeof(*param));
+
+ arg->func = cmd;
+ arg->session = pvt_data.session_id;
+ arg->num_params = MAX_TEE_PARAM_ARRY_MEMB;
+
+ /* Fill invoke cmd params */
+ switch (cmd) {
+ case TA_CMD_BNXT_COPY_COREDUMP:
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT;
+ param[0].u.memref.shm = pvt_data.fw_shm_pool;
+ param[0].u.memref.size = MAX_SHM_MEM_SZ;
+ param[0].u.memref.shm_offs = 0;
+ param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
+ break;
+ case TA_CMD_BNXT_FASTBOOT:
+ default:
+ /* Nothing to do */
+ break;
+ }
+}
+
+/**
+ * tee_bnxt_fw_load() - Load the bnxt firmware
+ *
+ * Uses an OP-TEE call to start a secure boot process.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+int tee_bnxt_fw_load(void)
+{
+ int ret = 0;
+ struct tee_ioctl_invoke_arg arg;
+ struct tee_param param[MAX_TEE_PARAM_ARRY_MEMB];
+
+ if (!pvt_data.ctx)
+ return -ENODEV;
+
+ prepare_args(TA_CMD_BNXT_FASTBOOT, &arg, param);
+
+ ret = tee_client_invoke_func(pvt_data.ctx, &arg, param);
+ if (ret < 0 || arg.ret != 0) {
+ dev_err(pvt_data.dev,
+ "TA_CMD_BNXT_FASTBOOT invoke failed TEE err: %x, ret:%x\n",
+ arg.ret, ret);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(tee_bnxt_fw_load);
+
+/**
+ * tee_bnxt_copy_coredump() - Copy coredump from the allocated memory
+ * @buf: destination buffer where the core dump is copied into
+ * @offset: offset from the base address of the core dump area
+ * @size: size of the dump
+ *
+ * Uses an OP-TEE call to copy the coredump.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+int tee_bnxt_copy_coredump(void *buf, u32 offset, u32 size)
+{
+ struct tee_ioctl_invoke_arg arg;
+ struct tee_param param[MAX_TEE_PARAM_ARRY_MEMB];
+ void *core_data;
+ u32 rbytes = size;
+ u32 nbytes = 0;
+ int ret = 0;
+
+ if (!pvt_data.ctx)
+ return -ENODEV;
+
+ prepare_args(TA_CMD_BNXT_COPY_COREDUMP, &arg, param);
+
+ while (rbytes) {
+ nbytes = min_t(u32, rbytes, param[0].u.memref.size);
+
+ /* Fill additional invoke cmd params */
+ param[1].u.value.a = offset;
+ param[1].u.value.b = nbytes;
+
+ ret = tee_client_invoke_func(pvt_data.ctx, &arg, param);
+ if (ret < 0 || arg.ret != 0) {
+ dev_err(pvt_data.dev,
+ "TA_CMD_BNXT_COPY_COREDUMP invoke failed TEE err: %x, ret:%x\n",
+ arg.ret, ret);
+ return -EINVAL;
+ }
+
+ core_data = tee_shm_get_va(pvt_data.fw_shm_pool, 0);
+ if (IS_ERR(core_data)) {
+ dev_err(pvt_data.dev, "tee_shm_get_va failed\n");
+ return PTR_ERR(core_data);
+ }
+
+ memcpy(buf, core_data, nbytes);
+
+ rbytes -= nbytes;
+ buf += nbytes;
+ offset += nbytes;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(tee_bnxt_copy_coredump);
+
+static int optee_ctx_match(struct tee_ioctl_version_data *ver, const void *data)
+{
+ return (ver->impl_id == TEE_IMPL_ID_OPTEE);
+}
+
+static int tee_bnxt_fw_probe(struct device *dev)
+{
+ struct tee_client_device *bnxt_device = to_tee_client_device(dev);
+ int ret, err = -ENODEV;
+ struct tee_ioctl_open_session_arg sess_arg;
+ struct tee_shm *fw_shm_pool;
+
+ memset(&sess_arg, 0, sizeof(sess_arg));
+
+ /* Open context with TEE driver */
+ pvt_data.ctx = tee_client_open_context(NULL, optee_ctx_match, NULL,
+ NULL);
+ if (IS_ERR(pvt_data.ctx))
+ return -ENODEV;
+
+ /* Open session with Bnxt load Trusted App */
+ memcpy(sess_arg.uuid, bnxt_device->id.uuid.b, TEE_IOCTL_UUID_LEN);
+ sess_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC;
+ sess_arg.num_params = 0;
+
+ ret = tee_client_open_session(pvt_data.ctx, &sess_arg, NULL);
+ if (ret < 0 || sess_arg.ret != 0) {
+ dev_err(dev, "tee_client_open_session failed, err: %x\n",
+ sess_arg.ret);
+ err = -EINVAL;
+ goto out_ctx;
+ }
+ pvt_data.session_id = sess_arg.session;
+
+ pvt_data.dev = dev;
+
+ fw_shm_pool = tee_shm_alloc(pvt_data.ctx, MAX_SHM_MEM_SZ,
+ TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
+ if (IS_ERR(fw_shm_pool)) {
+ dev_err(pvt_data.dev, "tee_shm_alloc failed\n");
+ err = PTR_ERR(fw_shm_pool);
+ goto out_sess;
+ }
+
+ pvt_data.fw_shm_pool = fw_shm_pool;
+
+ return 0;
+
+out_sess:
+ tee_client_close_session(pvt_data.ctx, pvt_data.session_id);
+out_ctx:
+ tee_client_close_context(pvt_data.ctx);
+
+ return err;
+}
+
+static int tee_bnxt_fw_remove(struct device *dev)
+{
+ tee_shm_free(pvt_data.fw_shm_pool);
+ tee_client_close_session(pvt_data.ctx, pvt_data.session_id);
+ tee_client_close_context(pvt_data.ctx);
+ pvt_data.ctx = NULL;
+
+ return 0;
+}
+
+static const struct tee_client_device_id tee_bnxt_fw_id_table[] = {
+ {UUID_INIT(0x6272636D, 0x2019, 0x0716,
+ 0x42, 0x43, 0x4D, 0x5F, 0x53, 0x43, 0x48, 0x49)},
+ {}
+};
+
+MODULE_DEVICE_TABLE(tee, tee_bnxt_fw_id_table);
+
+static struct tee_client_driver tee_bnxt_fw_driver = {
+ .id_table = tee_bnxt_fw_id_table,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .bus = &tee_bus_type,
+ .probe = tee_bnxt_fw_probe,
+ .remove = tee_bnxt_fw_remove,
+ },
+};
+
+static int __init tee_bnxt_fw_mod_init(void)
+{
+ return driver_register(&tee_bnxt_fw_driver.driver);
+}
+
+static void __exit tee_bnxt_fw_mod_exit(void)
+{
+ driver_unregister(&tee_bnxt_fw_driver.driver);
+}
+
+module_init(tee_bnxt_fw_mod_init);
+module_exit(tee_bnxt_fw_mod_exit);
+
+MODULE_AUTHOR("Vikas Gupta <vikas.gupta@broadcom.com>");
+MODULE_DESCRIPTION("Broadcom bnxt firmware manager");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 35ed56b9c34f..1e21fc3e9851 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -408,7 +408,7 @@ static void __init save_mem_devices(const struct dmi_header *dm, void *v)
bytes = ~0ull;
else if (size & 0x8000)
bytes = (u64)(size & 0x7fff) << 10;
- else if (size != 0x7fff)
+ else if (size != 0x7fff || dm->length < 0x20)
bytes = (u64)size << 20;
else
bytes = (u64)get_unaligned((u32 *)&d[0x1C]) << 20;
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 178ee8106828..b248870a9806 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -182,6 +182,7 @@ config RESET_ATTACK_MITIGATION
config EFI_RCI2_TABLE
bool "EFI Runtime Configuration Interface Table Version 2 Support"
+ depends on X86 || COMPILE_TEST
help
Displays the content of the Runtime Configuration Interface
Table version 2 on Dell EMC PowerEdge systems as a binary
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index addf0749dd8b..b1af0de2e100 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -381,7 +381,7 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
printk("%s""vendor_id: 0x%04x, device_id: 0x%04x\n", pfx,
pcie->device_id.vendor_id, pcie->device_id.device_id);
p = pcie->device_id.class_code;
- printk("%s""class_code: %02x%02x%02x\n", pfx, p[0], p[1], p[2]);
+ printk("%s""class_code: %02x%02x%02x\n", pfx, p[2], p[1], p[0]);
}
if (pcie->validation_bits & CPER_PCIE_VALID_SERIAL_NUMBER)
printk("%s""serial number: 0x%04x, 0x%04x\n", pfx,
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 8d3e778e988b..e98bbf8e56d9 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -267,6 +267,9 @@ static __init int efivar_ssdt_load(void)
void *data;
int ret;
+ if (!efivar_ssdt[0])
+ return 0;
+
ret = efivar_init(efivar_ssdt_iter, &entries, true, &entries);
list_for_each_entry_safe(entry, aux, &entries, list) {
@@ -551,7 +554,7 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
sizeof(*seed) + size);
if (seed != NULL) {
pr_notice("seeding entropy pool\n");
- add_device_randomness(seed->bits, seed->size);
+ add_bootloader_randomness(seed->bits, seed->size);
early_memunmap(seed, sizeof(*seed) + size);
} else {
pr_err("Could not map UEFI random seed!\n");
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index 0460c7581220..ee0661ddb25b 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -52,6 +52,7 @@ lib-$(CONFIG_EFI_ARMSTUB) += arm-stub.o fdt.o string.o random.o \
lib-$(CONFIG_ARM) += arm32-stub.o
lib-$(CONFIG_ARM64) += arm64-stub.o
+CFLAGS_arm32-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
CFLAGS_arm64-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
#
diff --git a/drivers/firmware/efi/libstub/arm32-stub.c b/drivers/firmware/efi/libstub/arm32-stub.c
index e8f7aefb6813..41213bf5fcf5 100644
--- a/drivers/firmware/efi/libstub/arm32-stub.c
+++ b/drivers/firmware/efi/libstub/arm32-stub.c
@@ -195,6 +195,7 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
unsigned long dram_base,
efi_loaded_image_t *image)
{
+ unsigned long kernel_base;
efi_status_t status;
/*
@@ -204,9 +205,18 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
* loaded. These assumptions are made by the decompressor,
* before any memory map is available.
*/
- dram_base = round_up(dram_base, SZ_128M);
+ kernel_base = round_up(dram_base, SZ_128M);
- status = reserve_kernel_base(sys_table, dram_base, reserve_addr,
+ /*
+ * Note that some platforms (notably, the Raspberry Pi 2) put
+ * spin-tables and other pieces of firmware at the base of RAM,
+ * abusing the fact that the window of TEXT_OFFSET bytes at the
+ * base of the kernel image is only partially used at the moment
+ * (up to 5 pages are used for the swapper page tables).
+ */
+ kernel_base += TEXT_OFFSET - 5 * PAGE_SIZE;
+
+ status = reserve_kernel_base(sys_table, kernel_base, reserve_addr,
reserve_size);
if (status != EFI_SUCCESS) {
pr_efi_err(sys_table, "Unable to allocate memory for uncompressed kernel.\n");
@@ -220,7 +230,7 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
*image_size = image->image_size;
status = efi_relocate_kernel(sys_table, image_addr, *image_size,
*image_size,
- dram_base + MAX_UNCOMP_KERNEL_SIZE, 0);
+ kernel_base + MAX_UNCOMP_KERNEL_SIZE, 0, 0);
if (status != EFI_SUCCESS) {
pr_efi_err(sys_table, "Failed to relocate kernel.\n");
efi_free(sys_table, *reserve_size, *reserve_addr);
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index 3caae7f2cf56..35dbc2791c97 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -260,11 +260,11 @@ fail:
}
/*
- * Allocate at the lowest possible address.
+ * Allocate at the lowest possible address that is not below 'min'.
*/
-efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
- unsigned long size, unsigned long align,
- unsigned long *addr)
+efi_status_t efi_low_alloc_above(efi_system_table_t *sys_table_arg,
+ unsigned long size, unsigned long align,
+ unsigned long *addr, unsigned long min)
{
unsigned long map_size, desc_size, buff_size;
efi_memory_desc_t *map;
@@ -311,13 +311,8 @@ efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
start = desc->phys_addr;
end = start + desc->num_pages * EFI_PAGE_SIZE;
- /*
- * Don't allocate at 0x0. It will confuse code that
- * checks pointers against NULL. Skip the first 8
- * bytes so we start at a nice even number.
- */
- if (start == 0x0)
- start += 8;
+ if (start < min)
+ start = min;
start = round_up(start, align);
if ((start + size) > end)
@@ -698,7 +693,8 @@ efi_status_t efi_relocate_kernel(efi_system_table_t *sys_table_arg,
unsigned long image_size,
unsigned long alloc_size,
unsigned long preferred_addr,
- unsigned long alignment)
+ unsigned long alignment,
+ unsigned long min_addr)
{
unsigned long cur_image_addr;
unsigned long new_addr = 0;
@@ -731,8 +727,8 @@ efi_status_t efi_relocate_kernel(efi_system_table_t *sys_table_arg,
* possible.
*/
if (status != EFI_SUCCESS) {
- status = efi_low_alloc(sys_table_arg, alloc_size, alignment,
- &new_addr);
+ status = efi_low_alloc_above(sys_table_arg, alloc_size,
+ alignment, &new_addr, min_addr);
}
if (status != EFI_SUCCESS) {
pr_efi_err(sys_table_arg, "Failed to allocate usable memory for kernel.\n");
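efi_low_alloc_above() generalizes the old "skip address 0" special case: every candidate region's start is clamped up to 'min' and then rounded to the allocation alignment before checking fit. A standalone sketch of that clamp-and-round step:

    #include <stdint.h>
    #include <stdio.h>

    /* Clamp a region start to the floor 'min', align it, and test fit,
     * as done per memory-map descriptor in the hunk above. */
    static uint64_t round_up_p2(uint64_t x, uint64_t align)
    {
    	return (x + align - 1) & ~(align - 1); /* align: power of two */
    }

    static int region_fits(uint64_t start, uint64_t end, uint64_t size,
    		       uint64_t align, uint64_t min, uint64_t *addr)
    {
    	if (start < min)
    		start = min;		/* never allocate below the floor */
    	start = round_up_p2(start, align);
    	if (start + size > end)
    		return 0;
    	*addr = start;
    	return 1;
    }

    int main(void)
    {
    	uint64_t addr;

    	if (region_fits(0x0, 0x100000, 0x8000, 0x1000, 0x1000, &addr))
    		printf("allocated at 0x%llx\n", (unsigned long long)addr);
    	return 0;
    }

Passing min = 8 reproduces the old NULL-avoidance behavior, while the ARM stub can now pass a much larger floor such as the relocated kernel base.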
diff --git a/drivers/firmware/efi/rci2-table.c b/drivers/firmware/efi/rci2-table.c
index 3e290f96620a..76b0c354a027 100644
--- a/drivers/firmware/efi/rci2-table.c
+++ b/drivers/firmware/efi/rci2-table.c
@@ -76,7 +76,7 @@ static u16 checksum(void)
return chksum;
}
-int __init efi_rci2_sysfs_init(void)
+static int __init efi_rci2_sysfs_init(void)
{
struct kobject *tables_kobj;
int ret = -ENOMEM;
diff --git a/drivers/firmware/efi/test/efi_test.c b/drivers/firmware/efi/test/efi_test.c
index 877745c3aaf2..7baf48c01e72 100644
--- a/drivers/firmware/efi/test/efi_test.c
+++ b/drivers/firmware/efi/test/efi_test.c
@@ -14,6 +14,7 @@
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/efi.h>
+#include <linux/security.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
@@ -717,6 +718,13 @@ static long efi_test_ioctl(struct file *file, unsigned int cmd,
static int efi_test_open(struct inode *inode, struct file *file)
{
+ int ret = security_locked_down(LOCKDOWN_EFI_TEST);
+
+ if (ret)
+ return ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
/*
* nothing special to do here
* We do accept multiple open files at the same time as we
diff --git a/drivers/firmware/efi/tpm.c b/drivers/firmware/efi/tpm.c
index 1d3f5ca3eaaf..31f9f0e369b9 100644
--- a/drivers/firmware/efi/tpm.c
+++ b/drivers/firmware/efi/tpm.c
@@ -40,7 +40,7 @@ int __init efi_tpm_eventlog_init(void)
{
struct linux_efi_tpm_eventlog *log_tbl;
struct efi_tcg2_final_events_table *final_tbl;
- unsigned int tbl_size;
+ int tbl_size;
int ret = 0;
if (efi.tpm_log == EFI_INVALID_TABLE_ADDR) {
@@ -75,16 +75,29 @@ int __init efi_tpm_eventlog_init(void)
goto out;
}
- tbl_size = tpm2_calc_event_log_size((void *)efi.tpm_final_log
- + sizeof(final_tbl->version)
- + sizeof(final_tbl->nr_events),
- final_tbl->nr_events,
- log_tbl->log);
+ tbl_size = 0;
+ if (final_tbl->nr_events != 0) {
+ void *events = (void *)efi.tpm_final_log
+ + sizeof(final_tbl->version)
+ + sizeof(final_tbl->nr_events);
+
+ tbl_size = tpm2_calc_event_log_size(events,
+ final_tbl->nr_events,
+ log_tbl->log);
+ }
+
+ if (tbl_size < 0) {
+ pr_err(FW_BUG "Failed to parse event in TPM Final Events Log\n");
+ ret = -EINVAL;
+ goto out_calc;
+ }
+
memblock_reserve((unsigned long)final_tbl,
tbl_size + sizeof(*final_tbl));
- early_memunmap(final_tbl, sizeof(*final_tbl));
efi_tpm_final_log_size = tbl_size;
+out_calc:
+ early_memunmap(final_tbl, sizeof(*final_tbl));
out:
early_memunmap(log_tbl, sizeof(*log_tbl));
return ret;
diff --git a/drivers/firmware/google/vpd_decode.c b/drivers/firmware/google/vpd_decode.c
index dda525c0f968..5c6f2a74f104 100644
--- a/drivers/firmware/google/vpd_decode.c
+++ b/drivers/firmware/google/vpd_decode.c
@@ -52,7 +52,7 @@ static int vpd_decode_entry(const u32 max_len, const u8 *input_buf,
if (max_len - consumed < *entry_len)
return VPD_FAIL;
- consumed += decoded_len;
+ consumed += *entry_len;
*_consumed = consumed;
return VPD_OK;
}
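The vpd_decode fix advances the cursor by the full entry length (header plus key plus value) rather than only the decoded header bytes, which previously let the key/value payload be re-parsed as fresh entries. A sketch of the accounting (sizes are examples):

    #include <stdio.h>

    /* A VPD entry spans a decoded header plus its key and value payload,
     * so the cursor must advance by the whole entry, not the header. */
    int main(void)
    {
    	unsigned int consumed = 0;
    	unsigned int header_len = 3;             /* type + encoded lengths */
    	unsigned int key_len = 8, value_len = 32;
    	unsigned int entry_len = header_len + key_len + value_len;

    	consumed += entry_len;   /* correct: skip the whole entry */
    	/* consumed += header_len would re-read key/value as a new entry */

    	printf("cursor now at %u\n", consumed);
    	return 0;
    }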
diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
index fe7a73f52329..bb287f35cf40 100644
--- a/drivers/gpio/gpio-eic-sprd.c
+++ b/drivers/gpio/gpio-eic-sprd.c
@@ -530,11 +530,12 @@ static void sprd_eic_handle_one_type(struct gpio_chip *chip)
}
for_each_set_bit(n, &reg, SPRD_EIC_PER_BANK_NR) {
- girq = irq_find_mapping(chip->irq.domain,
- bank * SPRD_EIC_PER_BANK_NR + n);
+ u32 offset = bank * SPRD_EIC_PER_BANK_NR + n;
+
+ girq = irq_find_mapping(chip->irq.domain, offset);
generic_handle_irq(girq);
- sprd_eic_toggle_trigger(chip, girq, n);
+ sprd_eic_toggle_trigger(chip, girq, offset);
}
}
}
diff --git a/drivers/gpio/gpio-intel-mid.c b/drivers/gpio/gpio-intel-mid.c
index 4d835f9089df..86a10c808ef6 100644
--- a/drivers/gpio/gpio-intel-mid.c
+++ b/drivers/gpio/gpio-intel-mid.c
@@ -293,8 +293,9 @@ static void intel_mid_irq_handler(struct irq_desc *desc)
chip->irq_eoi(data);
}
-static void intel_mid_irq_init_hw(struct intel_mid_gpio *priv)
+static int intel_mid_irq_init_hw(struct gpio_chip *chip)
{
+ struct intel_mid_gpio *priv = gpiochip_get_data(chip);
void __iomem *reg;
unsigned base;
@@ -309,6 +310,8 @@ static void intel_mid_irq_init_hw(struct intel_mid_gpio *priv)
reg = gpio_reg(&priv->chip, base, GEDR);
writel(~0, reg);
}
+
+ return 0;
}
static int __maybe_unused intel_gpio_runtime_idle(struct device *dev)
@@ -372,6 +375,7 @@ static int intel_gpio_probe(struct pci_dev *pdev,
girq = &priv->chip.irq;
girq->chip = &intel_mid_irqchip;
+ girq->init_hw = intel_mid_irq_init_hw;
girq->parent_handler = intel_mid_irq_handler;
girq->num_parents = 1;
girq->parents = devm_kcalloc(&pdev->dev, girq->num_parents,
@@ -384,9 +388,8 @@ static int intel_gpio_probe(struct pci_dev *pdev,
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_simple_irq;
- intel_mid_irq_init_hw(priv);
-
pci_set_drvdata(pdev, priv);
+
retval = devm_gpiochip_add_data(&pdev->dev, &priv->chip, priv);
if (retval) {
dev_err(&pdev->dev, "gpiochip_add error %d\n", retval);
diff --git a/drivers/gpio/gpio-lynxpoint.c b/drivers/gpio/gpio-lynxpoint.c
index 6bb9741ad036..e9e47c0d5be7 100644
--- a/drivers/gpio/gpio-lynxpoint.c
+++ b/drivers/gpio/gpio-lynxpoint.c
@@ -294,8 +294,9 @@ static struct irq_chip lp_irqchip = {
.flags = IRQCHIP_SKIP_SET_WAKE,
};
-static void lp_gpio_irq_init_hw(struct lp_gpio *lg)
+static int lp_gpio_irq_init_hw(struct gpio_chip *chip)
{
+ struct lp_gpio *lg = gpiochip_get_data(chip);
unsigned long reg;
unsigned base;
@@ -307,6 +308,8 @@ static void lp_gpio_irq_init_hw(struct lp_gpio *lg)
reg = lp_gpio_reg(&lg->chip, base, LP_INT_STAT);
outl(0xffffffff, reg);
}
+
+ return 0;
}
static int lp_gpio_probe(struct platform_device *pdev)
@@ -364,6 +367,7 @@ static int lp_gpio_probe(struct platform_device *pdev)
girq = &gc->irq;
girq->chip = &lp_irqchip;
+ girq->init_hw = lp_gpio_irq_init_hw;
girq->parent_handler = lp_gpio_irq_handler;
girq->num_parents = 1;
girq->parents = devm_kcalloc(&pdev->dev, girq->num_parents,
@@ -373,9 +377,7 @@ static int lp_gpio_probe(struct platform_device *pdev)
return -ENOMEM;
girq->parents[0] = (unsigned)irq_rc->start;
girq->default_type = IRQ_TYPE_NONE;
- girq->handler = handle_simple_irq;
-
- lp_gpio_irq_init_hw(lg);
+ girq->handler = handle_bad_irq;
}
ret = devm_gpiochip_add_data(dev, gc, lg);
diff --git a/drivers/gpio/gpio-max77620.c b/drivers/gpio/gpio-max77620.c
index 47d05e357e61..faf86ea9c51a 100644
--- a/drivers/gpio/gpio-max77620.c
+++ b/drivers/gpio/gpio-max77620.c
@@ -192,13 +192,13 @@ static int max77620_gpio_set_debounce(struct max77620_gpio *mgpio,
case 0:
val = MAX77620_CNFG_GPIO_DBNC_None;
break;
- case 1 ... 8:
+ case 1000 ... 8000:
val = MAX77620_CNFG_GPIO_DBNC_8ms;
break;
- case 9 ... 16:
+ case 9000 ... 16000:
val = MAX77620_CNFG_GPIO_DBNC_16ms;
break;
- case 17 ... 32:
+ case 17000 ... 32000:
val = MAX77620_CNFG_GPIO_DBNC_32ms;
break;
default:
diff --git a/drivers/gpio/gpio-merrifield.c b/drivers/gpio/gpio-merrifield.c
index 4f27ddfe1e2f..2f1e9da81c1e 100644
--- a/drivers/gpio/gpio-merrifield.c
+++ b/drivers/gpio/gpio-merrifield.c
@@ -362,8 +362,9 @@ static void mrfld_irq_handler(struct irq_desc *desc)
chained_irq_exit(irqchip, desc);
}
-static void mrfld_irq_init_hw(struct mrfld_gpio *priv)
+static int mrfld_irq_init_hw(struct gpio_chip *chip)
{
+ struct mrfld_gpio *priv = gpiochip_get_data(chip);
void __iomem *reg;
unsigned int base;
@@ -375,6 +376,8 @@ static void mrfld_irq_init_hw(struct mrfld_gpio *priv)
reg = gpio_reg(&priv->chip, base, GFER);
writel(0, reg);
}
+
+ return 0;
}
static const char *mrfld_gpio_get_pinctrl_dev_name(struct mrfld_gpio *priv)
@@ -447,6 +450,7 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
girq = &priv->chip.irq;
girq->chip = &mrfld_irqchip;
+ girq->init_hw = mrfld_irq_init_hw;
girq->parent_handler = mrfld_irq_handler;
girq->num_parents = 1;
girq->parents = devm_kcalloc(&pdev->dev, girq->num_parents,
@@ -455,11 +459,10 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
if (!girq->parents)
return -ENOMEM;
girq->parents[0] = pdev->irq;
+ girq->first = irq_base;
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_bad_irq;
- mrfld_irq_init_hw(priv);
-
pci_set_drvdata(pdev, priv);
retval = devm_gpiochip_add_data(&pdev->dev, &priv->chip, priv);
if (retval) {
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 1eea2c6c2e1d..80ea49f570f4 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -317,7 +317,7 @@ struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
transitory = flags & OF_GPIO_TRANSITORY;
ret = gpiod_request(desc, label);
- if (ret == -EBUSY && (flags & GPIOD_FLAGS_BIT_NONEXCLUSIVE))
+ if (ret == -EBUSY && (dflags & GPIOD_FLAGS_BIT_NONEXCLUSIVE))
return desc;
if (ret)
return ERR_PTR(ret);
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index bdbc1649eafa..104ed299d5ea 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -86,6 +86,7 @@ static int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
struct lock_class_key *lock_key,
struct lock_class_key *request_key);
static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip);
+static int gpiochip_irqchip_init_hw(struct gpio_chip *gpiochip);
static int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gpiochip);
static void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gpiochip);
@@ -1406,6 +1407,10 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
machine_gpiochip_add(chip);
+ ret = gpiochip_irqchip_init_hw(chip);
+ if (ret)
+ goto err_remove_acpi_chip;
+
ret = gpiochip_irqchip_init_valid_mask(chip);
if (ret)
goto err_remove_acpi_chip;
@@ -1622,6 +1627,16 @@ static struct gpio_chip *find_chip_by_name(const char *name)
* The following is irqchip helper code for gpiochips.
*/
+static int gpiochip_irqchip_init_hw(struct gpio_chip *gc)
+{
+ struct gpio_irq_chip *girq = &gc->irq;
+
+ if (!girq->init_hw)
+ return 0;
+
+ return girq->init_hw(gc);
+}
+
static int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gc)
{
struct gpio_irq_chip *girq = &gc->irq;
@@ -2446,8 +2461,13 @@ static inline int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
{
return 0;
}
-
static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip) {}
+
+static inline int gpiochip_irqchip_init_hw(struct gpio_chip *gpiochip)
+{
+ return 0;
+}
+
static inline int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gpiochip)
{
return 0;
@@ -3070,8 +3090,10 @@ int gpiod_direction_output(struct gpio_desc *desc, int value)
if (!ret)
goto set_output_value;
/* Emulate open drain by not actively driving the line high */
- if (value)
- return gpiod_direction_input(desc);
+ if (value) {
+ ret = gpiod_direction_input(desc);
+ goto set_output_flag;
+ }
}
else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) {
ret = gpio_set_config(gc, gpio_chip_hwgpio(desc),
@@ -3079,8 +3101,10 @@ int gpiod_direction_output(struct gpio_desc *desc, int value)
if (!ret)
goto set_output_value;
/* Emulate open source by not actively driving the line low */
- if (!value)
- return gpiod_direction_input(desc);
+ if (!value) {
+ ret = gpiod_direction_input(desc);
+ goto set_output_flag;
+ }
} else {
gpio_set_config(gc, gpio_chip_hwgpio(desc),
PIN_CONFIG_DRIVE_PUSH_PULL);
@@ -3088,6 +3112,17 @@ int gpiod_direction_output(struct gpio_desc *desc, int value)
set_output_value:
return gpiod_direction_output_raw_commit(desc, value);
+
+set_output_flag:
+ /*
+ * When emulating open-source or open-drain functionalities by not
+ * actively driving the line (setting mode to input) we still need to
+ * set the IS_OUT flag or otherwise we won't be able to set the line
+ * value anymore.
+ */
+ if (ret == 0)
+ set_bit(FLAG_IS_OUT, &desc->flags);
+ return ret;
}
EXPORT_SYMBOL_GPL(gpiod_direction_output);
@@ -3448,8 +3483,6 @@ static void gpio_set_open_drain_value_commit(struct gpio_desc *desc, bool value)
if (value) {
ret = chip->direction_input(chip, offset);
- if (!ret)
- clear_bit(FLAG_IS_OUT, &desc->flags);
} else {
ret = chip->direction_output(chip, offset, 0);
if (!ret)
@@ -3479,8 +3512,6 @@ static void gpio_set_open_source_value_commit(struct gpio_desc *desc, bool value
set_bit(FLAG_IS_OUT, &desc->flags);
} else {
ret = chip->direction_input(chip, offset);
- if (!ret)
- clear_bit(FLAG_IS_OUT, &desc->flags);
}
trace_gpio_direction(desc_to_gpio(desc), !value, ret);
if (ret < 0)
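Note: the gpiolib hunks above add an optional init_hw() callback to struct gpio_irq_chip, invoked from gpiochip_add_data_with_key() before the IRQ valid mask is built; this is why the gpio-merrifield hunk above drops its direct mrfld_irq_init_hw() call from probe. A minimal driver-side sketch (the foo_* names and registers are hypothetical):

    static int foo_gpio_irq_init_hw(struct gpio_chip *chip)
    {
            struct foo_gpio *priv = gpiochip_get_data(chip);

            /* Mask and ack all interrupts before the chip is registered. */
            writel(0, priv->base + FOO_IRQ_ENABLE);
            writel(~0U, priv->base + FOO_IRQ_STATUS);
            return 0;
    }

    /* ... in probe, before registering the chip: */
    priv->chip.irq.init_hw = foo_gpio_irq_init_hw;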
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 42e2c1f57152..00962a659009 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -54,7 +54,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \
amdgpu_gmc.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
- amdgpu_vm_sdma.o amdgpu_pmu.o amdgpu_discovery.o amdgpu_ras_eeprom.o smu_v11_0_i2c.o
+ amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o smu_v11_0_i2c.o
amdgpu-$(CONFIG_PERF_EVENTS) += amdgpu_pmu.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index eba42c752bca..82155ac3288a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -189,7 +189,7 @@ static int acp_hw_init(void *handle)
u32 val = 0;
u32 count = 0;
struct device *dev;
- struct i2s_platform_data *i2s_pdata;
+ struct i2s_platform_data *i2s_pdata = NULL;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -231,20 +231,21 @@ static int acp_hw_init(void *handle)
adev->acp.acp_cell = kcalloc(ACP_DEVS, sizeof(struct mfd_cell),
GFP_KERNEL);
- if (adev->acp.acp_cell == NULL)
- return -ENOMEM;
+ if (adev->acp.acp_cell == NULL) {
+ r = -ENOMEM;
+ goto failure;
+ }
adev->acp.acp_res = kcalloc(5, sizeof(struct resource), GFP_KERNEL);
if (adev->acp.acp_res == NULL) {
- kfree(adev->acp.acp_cell);
- return -ENOMEM;
+ r = -ENOMEM;
+ goto failure;
}
i2s_pdata = kcalloc(3, sizeof(struct i2s_platform_data), GFP_KERNEL);
if (i2s_pdata == NULL) {
- kfree(adev->acp.acp_res);
- kfree(adev->acp.acp_cell);
- return -ENOMEM;
+ r = -ENOMEM;
+ goto failure;
}
switch (adev->asic_type) {
@@ -341,14 +342,14 @@ static int acp_hw_init(void *handle)
r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell,
ACP_DEVS);
if (r)
- return r;
+ goto failure;
for (i = 0; i < ACP_DEVS ; i++) {
dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev);
if (r) {
dev_err(dev, "Failed to add dev to genpd\n");
- return r;
+ goto failure;
}
}
@@ -367,7 +368,8 @@ static int acp_hw_init(void *handle)
break;
if (--count == 0) {
dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
- return -ETIMEDOUT;
+ r = -ETIMEDOUT;
+ goto failure;
}
udelay(100);
}
@@ -384,7 +386,8 @@ static int acp_hw_init(void *handle)
break;
if (--count == 0) {
dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
- return -ETIMEDOUT;
+ r = -ETIMEDOUT;
+ goto failure;
}
udelay(100);
}
@@ -393,6 +396,13 @@ static int acp_hw_init(void *handle)
val &= ~ACP_SOFT_RESET__SoftResetAud_MASK;
cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
return 0;
+
+failure:
+ kfree(i2s_pdata);
+ kfree(adev->acp.acp_res);
+ kfree(adev->acp.acp_cell);
+ kfree(adev->acp.acp_genpd);
+ return r;
}
/**
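Note: the acp_hw_init() rework above replaces the per-site kfree() chains with one failure label; i2s_pdata is NULL-initialized in the first hunk precisely so the unconditional kfree() at the label is safe even before it is allocated. A generic sketch of the idiom (struct and helper names are illustrative):

    static int example_init(struct ctx *c)
    {
            int r;

            c->cells = kcalloc(NUM_CELLS, sizeof(*c->cells), GFP_KERNEL);
            if (!c->cells) {
                    r = -ENOMEM;
                    goto failure;
            }

            c->res = kcalloc(NUM_RES, sizeof(*c->res), GFP_KERNEL);
            if (!c->res) {
                    r = -ENOMEM;
                    goto failure;
            }

            r = register_everything(c);
            if (r)
                    goto failure;

            return 0;

    failure:
            /* kfree(NULL) is a no-op, so partial initialization is fine. */
            kfree(c->res);
            kfree(c->cells);
            return r;
    }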
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 7bcf86c61999..85b0515c0fdc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -140,7 +140,12 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
return 0;
error_free:
- while (i--) {
+ for (i = 0; i < last_entry; ++i) {
+ struct amdgpu_bo *bo = ttm_to_amdgpu_bo(array[i].tv.bo);
+
+ amdgpu_bo_unref(&bo);
+ }
+ for (i = first_userptr; i < num_entries; ++i) {
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(array[i].tv.bo);
amdgpu_bo_unref(&bo);
@@ -270,7 +275,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
r = amdgpu_bo_create_list_entry_array(&args->in, &info);
if (r)
- goto error_free;
+ return r;
switch (args->in.operation) {
case AMDGPU_BO_LIST_OP_CREATE:
@@ -283,8 +288,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
r = idr_alloc(&fpriv->bo_list_handles, list, 1, 0, GFP_KERNEL);
mutex_unlock(&fpriv->bo_list_lock);
if (r < 0) {
- amdgpu_bo_list_put(list);
- return r;
+ goto error_put_list;
}
handle = r;
@@ -306,9 +310,8 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
mutex_unlock(&fpriv->bo_list_lock);
if (IS_ERR(old)) {
- amdgpu_bo_list_put(list);
r = PTR_ERR(old);
- goto error_free;
+ goto error_put_list;
}
amdgpu_bo_list_put(old);
@@ -325,8 +328,10 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
return 0;
+error_put_list:
+ amdgpu_bo_list_put(list);
+
error_free:
- if (info)
- kvfree(info);
+ kvfree(info);
return r;
}
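Note: two details in the amdgpu_bo_list changes. The unwind loop is split because the entry array is populated as two disjoint ranges ([0, last_entry) for regular BOs, [first_userptr, num_entries) for userptr BOs), so the old while (i--) from the failure point could miss entries recorded in the other range. And the ioctl now funnels both put-paths through stacked labels, where error_put_list deliberately falls through into error_free; kvfree(), like kfree(), accepts NULL, hence the dropped guard:

    error_put_list:
            amdgpu_bo_list_put(list);
            /* fall through: also free the entry info */
    error_free:
            kvfree(info);
            return r;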
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 2e53feed40e2..82823d9a8ba8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -536,7 +536,6 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
list_for_each_entry(lobj, validated, tv.head) {
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo);
- bool binding_userptr = false;
struct mm_struct *usermm;
usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
@@ -553,7 +552,6 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
lobj->user_pages);
- binding_userptr = true;
}
if (p->evictable == lobj)
@@ -563,10 +561,8 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
if (r)
return r;
- if (binding_userptr) {
- kvfree(lobj->user_pages);
- lobj->user_pages = NULL;
- }
+ kvfree(lobj->user_pages);
+ lobj->user_pages = NULL;
}
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 264677ab248a..2a00a36106b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -81,9 +81,10 @@
* - 3.32.0 - Add syncobj timeline support to AMDGPU_CS.
* - 3.33.0 - Fixes for GDS ENOMEM failures in AMDGPU_CS.
* - 3.34.0 - Non-DC can flip correctly between buffers with different pitches
+ * - 3.35.0 - Add drm_amdgpu_info_device::tcc_disabled_mask
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 34
+#define KMS_DRIVER_MINOR 35
#define KMS_DRIVER_PATCHLEVEL 0
#define AMDGPU_MAX_TIMEOUT_PARAM_LENTH 256
@@ -1047,6 +1048,41 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
return -ENODEV;
}
+#ifdef CONFIG_DRM_AMDGPU_SI
+ if (!amdgpu_si_support) {
+ switch (flags & AMD_ASIC_MASK) {
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ case CHIP_OLAND:
+ case CHIP_HAINAN:
+ dev_info(&pdev->dev,
+ "SI support provided by radeon.\n");
+ dev_info(&pdev->dev,
+ "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n"
+ );
+ return -ENODEV;
+ }
+ }
+#endif
+#ifdef CONFIG_DRM_AMDGPU_CIK
+ if (!amdgpu_cik_support) {
+ switch (flags & AMD_ASIC_MASK) {
+ case CHIP_KAVERI:
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ dev_info(&pdev->dev,
+ "CIK support provided by radeon.\n");
+ dev_info(&pdev->dev,
+ "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n"
+ );
+ return -ENODEV;
+ }
+ }
+#endif
+
/* Get rid of things like offb */
ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "amdgpudrmfb");
if (ret)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 554a59b3c4a6..6ee4021910e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -165,6 +165,7 @@ struct amdgpu_gfx_config {
uint32_t num_sc_per_sh;
uint32_t num_packer_per_sc;
uint32_t pa_sc_tile_steering_override;
+ uint64_t tcc_disabled_mask;
};
struct amdgpu_cu_info {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 9d76e0923a5a..96b2a31ccfed 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -218,7 +218,7 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
struct dma_fence *fence = NULL, *finished;
struct amdgpu_job *job;
- int r;
+ int r = 0;
job = to_amdgpu_job(sched_job);
finished = &job->base.s_fence->finished;
@@ -243,6 +243,8 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
job->fence = dma_fence_get(fence);
amdgpu_job_free_resources(job);
+
+ fence = r ? ERR_PTR(r) : fence;
return fence;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index f6147528be64..d55f5baa83d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -144,41 +144,6 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
struct amdgpu_device *adev;
int r, acpi_status;
-#ifdef CONFIG_DRM_AMDGPU_SI
- if (!amdgpu_si_support) {
- switch (flags & AMD_ASIC_MASK) {
- case CHIP_TAHITI:
- case CHIP_PITCAIRN:
- case CHIP_VERDE:
- case CHIP_OLAND:
- case CHIP_HAINAN:
- dev_info(dev->dev,
- "SI support provided by radeon.\n");
- dev_info(dev->dev,
- "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n"
- );
- return -ENODEV;
- }
- }
-#endif
-#ifdef CONFIG_DRM_AMDGPU_CIK
- if (!amdgpu_cik_support) {
- switch (flags & AMD_ASIC_MASK) {
- case CHIP_KAVERI:
- case CHIP_BONAIRE:
- case CHIP_HAWAII:
- case CHIP_KABINI:
- case CHIP_MULLINS:
- dev_info(dev->dev,
- "CIK support provided by radeon.\n");
- dev_info(dev->dev,
- "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n"
- );
- return -ENODEV;
- }
- }
-#endif
-
adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
if (adev == NULL) {
return -ENOMEM;
@@ -787,6 +752,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
dev_info.pa_sc_tile_steering_override =
adev->gfx.config.pa_sc_tile_steering_override;
+ dev_info.tcc_disabled_mask = adev->gfx.config.tcc_disabled_mask;
+
return copy_to_user(out, &dev_info,
min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 1fead0e8b890..7289e1b4fb60 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -453,7 +453,8 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
.interruptible = (bp->type != ttm_bo_type_kernel),
.no_wait_gpu = false,
.resv = bp->resv,
- .flags = TTM_OPT_FLAG_ALLOW_RES_EVICT
+ .flags = bp->type != ttm_bo_type_kernel ?
+ TTM_OPT_FLAG_ALLOW_RES_EVICT : 0
};
struct amdgpu_bo *bo;
unsigned long page_align, size = bp->size;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index b70b3c45bb29..65044b1b3d4c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -429,13 +429,14 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
* Open up a stream for HW test
*/
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+ struct amdgpu_bo *bo,
struct dma_fence **fence)
{
const unsigned ib_size_dw = 1024;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
- uint64_t dummy;
+ uint64_t addr;
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -444,7 +445,7 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
ib = &job->ibs[0];
- dummy = ib->gpu_addr + 1024;
+ addr = amdgpu_bo_gpu_offset(bo);
/* stitch together an VCE create msg */
ib->length_dw = 0;
@@ -476,8 +477,8 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
ib->ptr[ib->length_dw++] = 0x00000014; /* len */
ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
- ib->ptr[ib->length_dw++] = dummy;
+ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+ ib->ptr[ib->length_dw++] = addr;
ib->ptr[ib->length_dw++] = 0x00000001;
for (i = ib->length_dw; i < ib_size_dw; ++i)
@@ -1110,13 +1111,20 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct dma_fence *fence = NULL;
+ struct amdgpu_bo *bo = NULL;
long r;
/* skip vce ring1/2 ib test for now, since it's not reliable */
if (ring != &ring->adev->vce.ring[0])
return 0;
- r = amdgpu_vce_get_create_msg(ring, 1, NULL);
+ r = amdgpu_bo_create_reserved(ring->adev, 512, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &bo, NULL, NULL);
+ if (r)
+ return r;
+
+ r = amdgpu_vce_get_create_msg(ring, 1, bo, NULL);
if (r)
goto error;
@@ -1132,5 +1140,7 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
error:
dma_fence_put(fence);
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
return r;
}
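Note: this pattern repeats below for the VCN and UVD v6/v7 encode ring tests. Instead of pointing the stream's feedback buffer at a dummy address 1 KiB past the IB (dummy = ib->gpu_addr + 1024), the tests allocate a real, reserved VRAM buffer and hand its GPU offset to the create/destroy messages. The lifecycle, condensed from the hunks:

    struct amdgpu_bo *bo = NULL;
    long r;

    r = amdgpu_bo_create_reserved(ring->adev, 512, PAGE_SIZE,
                                  AMDGPU_GEM_DOMAIN_VRAM,
                                  &bo, NULL, NULL);
    if (r)
            return r;

    /* ... submit the test IBs, which reference amdgpu_bo_gpu_offset(bo) ... */

    amdgpu_bo_unreserve(bo);    /* _create_reserved() returns the BO reserved */
    amdgpu_bo_unref(&bo);       /* drop the reference and free it */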
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index 30ea54dd9117..e802f7d9db0a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -59,6 +59,7 @@ int amdgpu_vce_entity_init(struct amdgpu_device *adev);
int amdgpu_vce_suspend(struct amdgpu_device *adev);
int amdgpu_vce_resume(struct amdgpu_device *adev);
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+ struct amdgpu_bo *bo,
struct dma_fence **fence);
int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
bool direct, struct dma_fence **fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 7a6beb2e7c4e..3199e4a5ff12 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -569,13 +569,14 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
}
static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct dma_fence **fence)
+ struct amdgpu_bo *bo,
+ struct dma_fence **fence)
{
const unsigned ib_size_dw = 16;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
- uint64_t dummy;
+ uint64_t addr;
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -583,14 +584,14 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
return r;
ib = &job->ibs[0];
- dummy = ib->gpu_addr + 1024;
+ addr = amdgpu_bo_gpu_offset(bo);
ib->length_dw = 0;
ib->ptr[ib->length_dw++] = 0x00000018;
ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
ib->ptr[ib->length_dw++] = handle;
- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
- ib->ptr[ib->length_dw++] = dummy;
+ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+ ib->ptr[ib->length_dw++] = addr;
ib->ptr[ib->length_dw++] = 0x0000000b;
ib->ptr[ib->length_dw++] = 0x00000014;
@@ -621,13 +622,14 @@ err:
}
static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct dma_fence **fence)
+ struct amdgpu_bo *bo,
+ struct dma_fence **fence)
{
const unsigned ib_size_dw = 16;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
- uint64_t dummy;
+ uint64_t addr;
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -635,14 +637,14 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
return r;
ib = &job->ibs[0];
- dummy = ib->gpu_addr + 1024;
+ addr = amdgpu_bo_gpu_offset(bo);
ib->length_dw = 0;
ib->ptr[ib->length_dw++] = 0x00000018;
ib->ptr[ib->length_dw++] = 0x00000001;
ib->ptr[ib->length_dw++] = handle;
- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
- ib->ptr[ib->length_dw++] = dummy;
+ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+ ib->ptr[ib->length_dw++] = addr;
ib->ptr[ib->length_dw++] = 0x0000000b;
ib->ptr[ib->length_dw++] = 0x00000014;
@@ -675,13 +677,20 @@ err:
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct dma_fence *fence = NULL;
+ struct amdgpu_bo *bo = NULL;
long r;
- r = amdgpu_vcn_enc_get_create_msg(ring, 1, NULL);
+ r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &bo, NULL, NULL);
+ if (r)
+ return r;
+
+ r = amdgpu_vcn_enc_get_create_msg(ring, 1, bo, NULL);
if (r)
goto error;
- r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &fence);
+ r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, bo, &fence);
if (r)
goto error;
@@ -693,6 +702,8 @@ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
error:
dma_fence_put(fence);
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index e2fb141ff2e5..5251352f5922 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -603,14 +603,12 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
struct ttm_bo_global *glob = adev->mman.bdev.glob;
struct amdgpu_vm_bo_base *bo_base;
-#if 0
if (vm->bulk_moveable) {
spin_lock(&glob->lru_lock);
ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
spin_unlock(&glob->lru_lock);
return;
}
-#endif
memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 638c821611ab..8dfc775626a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -93,7 +93,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1[] =
{
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x00400014),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_CPF_CLK_CTRL, 0xfcff8fff, 0xf8000100),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xc0000000, 0xc0000100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xcd000000, 0x0d000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQ_CLK_CTRL, 0x60000ff0, 0x60000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQG_CLK_CTRL, 0x40000000, 0x40000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xffff8fff, 0xffff8100),
@@ -140,7 +140,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_1[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x003c0014),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_GS_NGG_CLK_CTRL, 0xffff8fff, 0xffff8100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_IA_CLK_CTRL, 0xffff0fff, 0xffff0100),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xc0000000, 0xc0000100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xcd000000, 0x0d000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQ_CLK_CTRL, 0xf8ff0fff, 0x60000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQG_CLK_CTRL, 0x40000ff0, 0x40000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xffff8fff, 0xffff8100),
@@ -179,7 +179,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0x003e001f, 0x003c0014),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_GS_NGG_CLK_CTRL, 0xffff8fff, 0xffff8100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_IA_CLK_CTRL, 0xffff0fff, 0xffff0100),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xff7f0fff, 0xc0000100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xff7f0fff, 0x0d000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQ_CLK_CTRL, 0xffffcfff, 0x60000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQG_CLK_CTRL, 0xffff0fff, 0x40000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xffff8fff, 0xffff8100),
@@ -1691,6 +1691,17 @@ static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev)
}
}
+static void gfx_v10_0_get_tcc_info(struct amdgpu_device *adev)
+{
+ /* TCCs are global (not instanced). */
+ uint32_t tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE) |
+ RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE);
+
+ adev->gfx.config.tcc_disabled_mask =
+ REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, TCC_DISABLE) |
+ (REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, HI_TCC_DISABLE) << 16);
+}
+
static void gfx_v10_0_constants_init(struct amdgpu_device *adev)
{
u32 tmp;
@@ -1702,6 +1713,7 @@ static void gfx_v10_0_constants_init(struct amdgpu_device *adev)
gfx_v10_0_setup_rb(adev);
gfx_v10_0_get_cu_info(adev, &adev->gfx.cu_info);
+ gfx_v10_0_get_tcc_info(adev);
adev->gfx.config.pa_sc_tile_steering_override =
gfx_v10_0_init_pa_sc_tile_steering_override(adev);
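Note: gfx_v10_0_get_tcc_info() packs the two 16-bit disable fields (low and high halves) into the new tcc_disabled_mask, which the amdgpu_kms.c hunk earlier exposes through drm_amdgpu_info_device under the 3.35 interface bump. A hypothetical consumer that only needs the number of live TCCs could use the kernel's popcount helper:

    /* num_tccs is assumed known from elsewhere; hweight64() counts set bits. */
    unsigned int active_tccs = num_tccs -
            hweight64(adev->gfx.config.tcc_disabled_mask);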
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
index 8b789f750b72..db10640a3b2f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
@@ -151,6 +151,15 @@ static void gfxhub_v2_0_init_cache_regs(struct amdgpu_device *adev)
WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL2, tmp);
tmp = mmGCVM_L2_CNTL3_DEFAULT;
+ if (adev->gmc.translate_further) {
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, BANK_SELECT, 12);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3,
+ L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
+ } else {
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, BANK_SELECT, 9);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3,
+ L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
+ }
WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL3, tmp);
tmp = mmGCVM_L2_CNTL4_DEFAULT;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 241a4e57cf4a..354e6200ca9a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -309,6 +309,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
job->vm_needs_flush = true;
+ job->ibs->ptr[job->ibs->length_dw++] = ring->funcs->nop;
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
r = amdgpu_job_submit(job, &adev->mman.entity,
AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
index 3542c203c3c8..b39bea6f54e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
@@ -137,6 +137,15 @@ static void mmhub_v2_0_init_cache_regs(struct amdgpu_device *adev)
WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL2, tmp);
tmp = mmMMVM_L2_CNTL3_DEFAULT;
+ if (adev->gmc.translate_further) {
+ tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 12);
+ tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3,
+ L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
+ } else {
+ tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 9);
+ tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3,
+ L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
+ }
WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL3, tmp);
tmp = mmMMVM_L2_CNTL4_DEFAULT;
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 85393a99a848..de9b995b65b1 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -317,10 +317,12 @@ static int nv_asic_reset(struct amdgpu_device *adev)
struct smu_context *smu = &adev->smu;
if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
- amdgpu_inc_vram_lost(adev);
+ if (!adev->in_suspend)
+ amdgpu_inc_vram_lost(adev);
ret = smu_baco_reset(smu);
} else {
- amdgpu_inc_vram_lost(adev);
+ if (!adev->in_suspend)
+ amdgpu_inc_vram_lost(adev);
ret = nv_asic_mode1_reset(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 78452cf0115d..4554e72c8378 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -254,6 +254,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4_3[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000)
};
static u32 sdma_v4_0_get_reg_offset(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index fa2f70ce2e2b..f6e81680dd7e 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -1129,7 +1129,7 @@ static void sdma_v5_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
amdgpu_ring_write(ring, seq); /* reference */
- amdgpu_ring_write(ring, 0xfffffff); /* mask */
+ amdgpu_ring_write(ring, 0xffffffff); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
}
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index f70658a536a9..f8ab80c8801b 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -558,12 +558,14 @@ static int soc15_asic_reset(struct amdgpu_device *adev)
{
switch (soc15_asic_reset_method(adev)) {
case AMD_RESET_METHOD_BACO:
- amdgpu_inc_vram_lost(adev);
+ if (!adev->in_suspend)
+ amdgpu_inc_vram_lost(adev);
return soc15_asic_baco_reset(adev);
case AMD_RESET_METHOD_MODE2:
return soc15_mode2_reset(adev);
default:
- amdgpu_inc_vram_lost(adev);
+ if (!adev->in_suspend)
+ amdgpu_inc_vram_lost(adev);
return soc15_asic_mode1_reset(adev);
}
}
@@ -771,8 +773,6 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
amdgpu_device_ip_block_add(adev, &dm_ip_block);
-#else
-# warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
#endif
amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
break;
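Note: both nv.c (above) and soc15.c skip amdgpu_inc_vram_lost() when the reset runs from the suspend path: VRAM contents are restored on resume, and bumping the counter would make every context that snapshotted it treat its buffers as lost. A sketch of the consumer side, assuming a counter snapshot taken at context creation:

    bool vram_lost = ctx_snapshot != atomic_read(&adev->vram_lost_counter);

    if (vram_lost)
            revalidate_buffers();   /* hypothetical recovery step */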
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 670784a78512..217084d56ab8 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -206,13 +206,14 @@ static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
* Open up a stream for HW test
*/
static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+ struct amdgpu_bo *bo,
struct dma_fence **fence)
{
const unsigned ib_size_dw = 16;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
- uint64_t dummy;
+ uint64_t addr;
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -220,15 +221,15 @@ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
return r;
ib = &job->ibs[0];
- dummy = ib->gpu_addr + 1024;
+ addr = amdgpu_bo_gpu_offset(bo);
ib->length_dw = 0;
ib->ptr[ib->length_dw++] = 0x00000018;
ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
ib->ptr[ib->length_dw++] = handle;
ib->ptr[ib->length_dw++] = 0x00010000;
- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
- ib->ptr[ib->length_dw++] = dummy;
+ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+ ib->ptr[ib->length_dw++] = addr;
ib->ptr[ib->length_dw++] = 0x00000014;
ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
@@ -268,13 +269,14 @@ err:
*/
static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
uint32_t handle,
+ struct amdgpu_bo *bo,
struct dma_fence **fence)
{
const unsigned ib_size_dw = 16;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
- uint64_t dummy;
+ uint64_t addr;
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -282,15 +284,15 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
return r;
ib = &job->ibs[0];
- dummy = ib->gpu_addr + 1024;
+ addr = amdgpu_bo_gpu_offset(bo);
ib->length_dw = 0;
ib->ptr[ib->length_dw++] = 0x00000018;
ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
ib->ptr[ib->length_dw++] = handle;
ib->ptr[ib->length_dw++] = 0x00010000;
- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
- ib->ptr[ib->length_dw++] = dummy;
+ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+ ib->ptr[ib->length_dw++] = addr;
ib->ptr[ib->length_dw++] = 0x00000014;
ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
@@ -327,13 +329,20 @@ err:
static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct dma_fence *fence = NULL;
+ struct amdgpu_bo *bo = NULL;
long r;
- r = uvd_v6_0_enc_get_create_msg(ring, 1, NULL);
+ r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &bo, NULL, NULL);
+ if (r)
+ return r;
+
+ r = uvd_v6_0_enc_get_create_msg(ring, 1, bo, NULL);
if (r)
goto error;
- r = uvd_v6_0_enc_get_destroy_msg(ring, 1, &fence);
+ r = uvd_v6_0_enc_get_destroy_msg(ring, 1, bo, &fence);
if (r)
goto error;
@@ -345,6 +354,8 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
error:
dma_fence_put(fence);
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 01f658fa72c6..0995378d8263 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -214,13 +214,14 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
* Open up a stream for HW test
*/
static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+ struct amdgpu_bo *bo,
struct dma_fence **fence)
{
const unsigned ib_size_dw = 16;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
- uint64_t dummy;
+ uint64_t addr;
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -228,15 +229,15 @@ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
return r;
ib = &job->ibs[0];
- dummy = ib->gpu_addr + 1024;
+ addr = amdgpu_bo_gpu_offset(bo);
ib->length_dw = 0;
ib->ptr[ib->length_dw++] = 0x00000018;
ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
ib->ptr[ib->length_dw++] = handle;
ib->ptr[ib->length_dw++] = 0x00000000;
- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
- ib->ptr[ib->length_dw++] = dummy;
+ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+ ib->ptr[ib->length_dw++] = addr;
ib->ptr[ib->length_dw++] = 0x00000014;
ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
@@ -275,13 +276,14 @@ err:
* Close up a stream for HW test or if userspace failed to do so
*/
static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct dma_fence **fence)
+ struct amdgpu_bo *bo,
+ struct dma_fence **fence)
{
const unsigned ib_size_dw = 16;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
- uint64_t dummy;
+ uint64_t addr;
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -289,15 +291,15 @@ static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handl
return r;
ib = &job->ibs[0];
- dummy = ib->gpu_addr + 1024;
+ addr = amdgpu_bo_gpu_offset(bo);
ib->length_dw = 0;
ib->ptr[ib->length_dw++] = 0x00000018;
ib->ptr[ib->length_dw++] = 0x00000001;
ib->ptr[ib->length_dw++] = handle;
ib->ptr[ib->length_dw++] = 0x00000000;
- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
- ib->ptr[ib->length_dw++] = dummy;
+ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+ ib->ptr[ib->length_dw++] = addr;
ib->ptr[ib->length_dw++] = 0x00000014;
ib->ptr[ib->length_dw++] = 0x00000002;
@@ -334,13 +336,20 @@ err:
static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct dma_fence *fence = NULL;
+ struct amdgpu_bo *bo = NULL;
long r;
- r = uvd_v7_0_enc_get_create_msg(ring, 1, NULL);
+ r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &bo, NULL, NULL);
+ if (r)
+ return r;
+
+ r = uvd_v7_0_enc_get_create_msg(ring, 1, bo, NULL);
if (r)
goto error;
- r = uvd_v7_0_enc_get_destroy_msg(ring, 1, &fence);
+ r = uvd_v7_0_enc_get_destroy_msg(ring, 1, bo, &fence);
if (r)
goto error;
@@ -352,6 +361,8 @@ static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
error:
dma_fence_put(fence);
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
return r;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 8cab6da512a0..a52f0b13a2c8 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2385,8 +2385,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
- if (adev->asic_type == CHIP_RENOIR)
- dm->dc->debug.disable_stutter = true;
return 0;
fail:
@@ -6019,7 +6017,9 @@ static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
int i;
+#ifdef CONFIG_DEBUG_FS
enum amdgpu_dm_pipe_crc_source source;
+#endif
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/Makefile b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
index 985633c08a26..26c6d735cdc7 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
@@ -24,15 +24,20 @@
# It calculates Bandwidth and Watermarks values for HW programming
#
-ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
- cc_stack_align := -mpreferred-stack-boundary=4
-else ifneq ($(call cc-option, -mstack-alignment=16),)
- cc_stack_align := -mstack-alignment=16
-endif
+calcs_ccflags := -mhard-float -msse
-calcs_ccflags := -mhard-float -msse $(cc_stack_align)
+ifdef CONFIG_CC_IS_GCC
+ifeq ($(call cc-ifversion, -lt, 0701, y), y)
+IS_OLD_GCC = 1
+endif
+endif
-ifdef CONFIG_CC_IS_CLANG
+ifdef IS_OLD_GCC
+# Stack alignment mismatch, proceed with caution.
+# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
+# (8B stack alignment).
+calcs_ccflags += -mpreferred-stack-boundary=4
+else
calcs_ccflags += -msse2
endif
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 5d1adeda4d90..4b8819c27fcd 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -580,6 +580,10 @@ static bool construct(struct dc *dc,
#ifdef CONFIG_DRM_AMD_DC_DCN2_0
// Allocate memory for the vm_helper
dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
+ if (!dc->vm_helper) {
+ dm_error("%s: failed to create dc->vm_helper\n", __func__);
+ goto fail;
+ }
#endif
memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index ca20b150afcc..9c58670d5414 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -2767,6 +2767,15 @@ void core_link_enable_stream(
CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
COLOR_DEPTH_UNDEFINED);
+ /* This second call is needed to reconfigure the DIG
+ * as a workaround for the incorrect value being applied
+ * from transmitter control.
+ */
+ if (!dc_is_virtual_signal(pipe_ctx->stream->signal))
+ stream->link->link_enc->funcs->setup(
+ stream->link->link_enc,
+ pipe_ctx->stream->signal);
+
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
if (pipe_ctx->stream->timing.flags.DSC) {
if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index 505967b48e14..51991bf26a93 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -374,6 +374,7 @@ void dal_ddc_service_i2c_query_dp_dual_mode_adaptor(
enum display_dongle_type *dongle = &sink_cap->dongle_type;
uint8_t type2_dongle_buf[DP_ADAPTOR_TYPE2_SIZE];
bool is_type2_dongle = false;
+ int retry_count = 2;
struct dp_hdmi_dongle_signature_data *dongle_signature;
/* Assume we have no valid DP passive dongle connected */
@@ -386,13 +387,24 @@ void dal_ddc_service_i2c_query_dp_dual_mode_adaptor(
DP_HDMI_DONGLE_ADDRESS,
type2_dongle_buf,
sizeof(type2_dongle_buf))) {
- *dongle = DISPLAY_DONGLE_DP_DVI_DONGLE;
- sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_DVI_MAX_TMDS_CLK;
+ /* Passive HDMI dongles can sometimes fail here without retrying */
+ while (retry_count > 0) {
+ if (i2c_read(ddc,
+ DP_HDMI_DONGLE_ADDRESS,
+ type2_dongle_buf,
+ sizeof(type2_dongle_buf)))
+ break;
+ retry_count--;
+ }
+ if (retry_count == 0) {
+ *dongle = DISPLAY_DONGLE_DP_DVI_DONGLE;
+ sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_DVI_MAX_TMDS_CLK;
- CONN_DATA_DETECT(ddc->link, type2_dongle_buf, sizeof(type2_dongle_buf),
- "DP-DVI passive dongle %dMhz: ",
- DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000);
- return;
+ CONN_DATA_DETECT(ddc->link, type2_dongle_buf, sizeof(type2_dongle_buf),
+ "DP-DVI passive dongle %dMhz: ",
+ DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000);
+ return;
+ }
}
/* Check if Type 2 dongle.*/
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 8f70295179ff..f25ac17f47fa 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -404,6 +404,9 @@ bool resource_are_streams_timing_synchronizable(
if (stream1->view_format != stream2->view_format)
return false;
+ if (stream1->ignore_msa_timing_param || stream2->ignore_msa_timing_param)
+ return false;
+
return true;
}
static bool is_dp_and_hdmi_sharable(
@@ -1540,6 +1543,9 @@ bool dc_is_stream_unchanged(
if (!are_stream_backends_same(old_stream, stream))
return false;
+ if (old_stream->ignore_msa_timing_param != stream->ignore_msa_timing_param)
+ return false;
+
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index 1787b9bf800a..76d54885374a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -668,6 +668,7 @@ struct clock_source *dce100_clock_source_create(
return &clk_src->base;
}
+ kfree(clk_src);
BREAK_TO_DEBUGGER();
return NULL;
}
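Note: the same one-line leak fix repeats below for dce110, dce112, dce120, dce80, dcn10 and dcn20: each *_clock_source_create() allocated the object and, when the construct step failed, returned NULL without freeing it. The shape of the fix, with illustrative names:

    static struct clock_source *example_clock_source_create(struct dc_context *ctx)
    {
            struct example_clk_src *clk_src = kzalloc(sizeof(*clk_src), GFP_KERNEL);

            if (!clk_src)
                    return NULL;

            if (example_clk_src_construct(clk_src, ctx))    /* hypothetical */
                    return &clk_src->base;

            kfree(clk_src);         /* previously leaked on this path */
            BREAK_TO_DEBUGGER();
            return NULL;
    }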
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index 318e9c2e2ca8..89620adc81d8 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -714,6 +714,7 @@ struct clock_source *dce110_clock_source_create(
return &clk_src->base;
}
+ kfree(clk_src);
BREAK_TO_DEBUGGER();
return NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index 83e1878161c9..21a657e79306 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -687,6 +687,7 @@ struct clock_source *dce112_clock_source_create(
return &clk_src->base;
}
+ kfree(clk_src);
BREAK_TO_DEBUGGER();
return NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index 8b85e5274bba..7c52f7f9196c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -500,6 +500,7 @@ static struct clock_source *dce120_clock_source_create(
return &clk_src->base;
}
+ kfree(clk_src);
BREAK_TO_DEBUGGER();
return NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index 4625df9f9fd2..643ccb0ade00 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -701,6 +701,7 @@ struct clock_source *dce80_clock_source_create(
return &clk_src->base;
}
+ kfree(clk_src);
BREAK_TO_DEBUGGER();
return NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
index 01c7e30b9ce1..bbd6e01b3eca 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
@@ -393,6 +393,10 @@ bool cm_helper_translate_curve_to_hw_format(
rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
+ rgb_resulted[hw_points].red = rgb_resulted[hw_points - 1].red;
+ rgb_resulted[hw_points].green = rgb_resulted[hw_points - 1].green;
+ rgb_resulted[hw_points].blue = rgb_resulted[hw_points - 1].blue;
+
// All 3 color channels have same x
corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
dc_fixpt_from_int(region_start));
@@ -464,13 +468,6 @@ bool cm_helper_translate_curve_to_hw_format(
i = 1;
while (i != hw_points + 1) {
- if (dc_fixpt_lt(rgb_plus_1->red, rgb->red))
- rgb_plus_1->red = rgb->red;
- if (dc_fixpt_lt(rgb_plus_1->green, rgb->green))
- rgb_plus_1->green = rgb->green;
- if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue))
- rgb_plus_1->blue = rgb->blue;
-
rgb->delta_red = dc_fixpt_sub(rgb_plus_1->red, rgb->red);
rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green);
rgb->delta_blue = dc_fixpt_sub(rgb_plus_1->blue, rgb->blue);
@@ -562,6 +559,10 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
+ rgb_resulted[hw_points].red = rgb_resulted[hw_points - 1].red;
+ rgb_resulted[hw_points].green = rgb_resulted[hw_points - 1].green;
+ rgb_resulted[hw_points].blue = rgb_resulted[hw_points - 1].blue;
+
corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
dc_fixpt_from_int(region_start));
corner_points[0].green.x = corner_points[0].red.x;
@@ -624,13 +625,6 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
i = 1;
while (i != hw_points + 1) {
- if (dc_fixpt_lt(rgb_plus_1->red, rgb->red))
- rgb_plus_1->red = rgb->red;
- if (dc_fixpt_lt(rgb_plus_1->green, rgb->green))
- rgb_plus_1->green = rgb->green;
- if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue))
- rgb_plus_1->blue = rgb->blue;
-
rgb->delta_red = dc_fixpt_sub(rgb_plus_1->red, rgb->red);
rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green);
rgb->delta_blue = dc_fixpt_sub(rgb_plus_1->blue, rgb->blue);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 59305e411a66..1599bb971111 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -786,6 +786,7 @@ struct clock_source *dcn10_clock_source_create(
return &clk_src->base;
}
+ kfree(clk_src);
BREAK_TO_DEBUGGER();
return NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
index ddb8d5649e79..63f3bddba7da 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
@@ -10,15 +10,20 @@ ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
DCN20 += dcn20_dsc.o
endif
-ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
- cc_stack_align := -mpreferred-stack-boundary=4
-else ifneq ($(call cc-option, -mstack-alignment=16),)
- cc_stack_align := -mstack-alignment=16
-endif
+CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -msse
-CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -msse $(cc_stack_align)
+ifdef CONFIG_CC_IS_GCC
+ifeq ($(call cc-ifversion, -lt, 0701, y), y)
+IS_OLD_GCC = 1
+endif
+endif
-ifdef CONFIG_CC_IS_CLANG
+ifdef IS_OLD_GCC
+# Stack alignment mismatch, proceed with caution.
+# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
+# (8B stack alignment).
+CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o += -mpreferred-stack-boundary=4
+else
CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o += -msse2
endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index b4e3ce22ed52..dfb208285a9c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -814,7 +814,7 @@ static const struct resource_caps res_cap_nv14 = {
.num_audio = 6,
.num_stream_encoder = 5,
.num_pll = 5,
- .num_dwb = 0,
+ .num_dwb = 1,
.num_ddc = 5,
};
@@ -1077,6 +1077,7 @@ struct clock_source *dcn20_clock_source_create(
return &clk_src->base;
}
+ kfree(clk_src);
BREAK_TO_DEBUGGER();
return NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
index 8cd9de8b1a7a..ff50ae71fe27 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
@@ -3,7 +3,22 @@
DCN21 = dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o
-CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -msse -mpreferred-stack-boundary=4
+CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -msse
+
+ifdef CONFIG_CC_IS_GCC
+ifeq ($(call cc-ifversion, -lt, 0701, y), y)
+IS_OLD_GCC = 1
+endif
+endif
+
+ifdef IS_OLD_GCC
+# Stack alignment mismatch, proceed with caution.
+# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
+# (8B stack alignment).
+CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o += -mpreferred-stack-boundary=4
+else
+CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o += -msse2
+endif
AMD_DAL_DCN21 = $(addprefix $(AMDDALPATH)/dc/dcn21/,$(DCN21))
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index 5b2a65b42403..8df251626e22 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -24,15 +24,20 @@
# It provides the general basic services required by other DAL
# subcomponents.
-ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
- cc_stack_align := -mpreferred-stack-boundary=4
-else ifneq ($(call cc-option, -mstack-alignment=16),)
- cc_stack_align := -mstack-alignment=16
-endif
+dml_ccflags := -mhard-float -msse
-dml_ccflags := -mhard-float -msse $(cc_stack_align)
+ifdef CONFIG_CC_IS_GCC
+ifeq ($(call cc-ifversion, -lt, 0701, y), y)
+IS_OLD_GCC = 1
+endif
+endif
-ifdef CONFIG_CC_IS_CLANG
+ifdef IS_OLD_GCC
+# Stack alignment mismatch, proceed with caution.
+# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
+# (8B stack alignment).
+dml_ccflags += -mpreferred-stack-boundary=4
+else
dml_ccflags += -msse2
endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
index 649883777f62..6c6c486b774a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
@@ -2577,7 +2577,8 @@ static void dml20_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPer
mode_lib->vba.MinActiveDRAMClockChangeMargin
+ mode_lib->vba.DRAMClockChangeLatency;
- if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) {
+ if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 50) {
+ mode_lib->vba.DRAMClockChangeWatermark += 25;
mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
} else {
if (mode_lib->vba.SynchronizedVBlank || mode_lib->vba.NumberOfActivePlanes == 1) {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
index 456cd0e3289c..3b6ed60dcd35 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
@@ -39,9 +39,6 @@
* ways. Unless there is something clearly wrong with it the code should
* remain as-is as it provides us with a guarantee from HW that it is correct.
*/
-
-typedef unsigned int uint;
-
typedef struct {
double DPPCLK;
double DISPCLK;
@@ -4774,7 +4771,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.MaximumReadBandwidthWithoutPrefetch = 0.0;
mode_lib->vba.MaximumReadBandwidthWithPrefetch = 0.0;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- uint m;
+ unsigned int m;
locals->cursor_bw[k] = 0;
locals->cursor_bw_pre[k] = 0;
@@ -5285,7 +5282,7 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
double SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank;
double FullDETBufferingTimeYStutterCriticalPlane = 0;
double TimeToFinishSwathTransferStutterCriticalPlane = 0;
- uint k, j;
+ unsigned int k, j;
mode_lib->vba.TotalActiveDPP = 0;
mode_lib->vba.TotalDCCActiveDPP = 0;
@@ -5507,7 +5504,7 @@ static void CalculateDCFCLKDeepSleep(
double DPPCLK[],
double *DCFCLKDeepSleep)
{
- uint k;
+ unsigned int k;
double DisplayPipeLineDeliveryTimeLuma;
double DisplayPipeLineDeliveryTimeChroma;
//double DCFCLKDeepSleepPerPlane[DC__NUM_DPP__MAX];
@@ -5727,7 +5724,7 @@ static void CalculatePixelDeliveryTimes(
double DisplayPipeRequestDeliveryTimeChromaPrefetch[])
{
double req_per_swath_ub;
- uint k;
+ unsigned int k;
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (VRatio[k] <= 1) {
@@ -5869,7 +5866,7 @@ static void CalculateMetaAndPTETimes(
unsigned int dpte_groups_per_row_chroma_ub;
unsigned int num_group_per_lower_vm_stage;
unsigned int num_req_per_lower_vm_stage;
- uint k;
+ unsigned int k;
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (GPUVMEnable == true) {
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/Makefile b/drivers/gpu/drm/amd/display/dc/dsc/Makefile
index b456cd23c6fa..970737217e53 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dsc/Makefile
@@ -1,15 +1,20 @@
#
# Makefile for the 'dsc' sub-component of DAL.
-ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
- cc_stack_align := -mpreferred-stack-boundary=4
-else ifneq ($(call cc-option, -mstack-alignment=16),)
- cc_stack_align := -mstack-alignment=16
-endif
+dsc_ccflags := -mhard-float -msse
-dsc_ccflags := -mhard-float -msse $(cc_stack_align)
+ifdef CONFIG_CC_IS_GCC
+ifeq ($(call cc-ifversion, -lt, 0701, y), y)
+IS_OLD_GCC = 1
+endif
+endif
-ifdef CONFIG_CC_IS_CLANG
+ifdef IS_OLD_GCC
+# Stack alignment mismatch, proceed with caution.
+# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
+# (8B stack alignment).
+dsc_ccflags += -mpreferred-stack-boundary=4
+else
dsc_ccflags += -msse2
endif
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 33960fb38a5d..4acf139ea014 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -843,6 +843,8 @@ static int smu_sw_init(void *handle)
smu->smu_baco.state = SMU_BACO_STATE_EXIT;
smu->smu_baco.platform_support = false;
+ mutex_init(&smu->sensor_lock);
+
smu->watermarks_bitmap = 0;
smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
index f1f072012fac..d493a3f8c07a 100644
--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
@@ -1018,6 +1018,7 @@ static int arcturus_read_sensor(struct smu_context *smu,
if (!data || !size)
return -EINVAL;
+ mutex_lock(&smu->sensor_lock);
switch (sensor) {
case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
*(uint32_t *)data = pptable->FanMaximumRpm;
@@ -1044,6 +1045,7 @@ static int arcturus_read_sensor(struct smu_context *smu,
default:
ret = smu_smc_read_sensor(smu, sensor, data, size);
}
+ mutex_unlock(&smu->sensor_lock);
return ret;
}
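Note: smu_sw_init() above initializes the new sensor_lock, and each ASIC's read_sensor implementation (arcturus here, navi10 and vega20 below) brackets its switch with it, serializing concurrent hwmon or debugfs readers whose multi-step SMU transactions could otherwise interleave:

    mutex_lock(&smu->sensor_lock);
    switch (sensor) {
    /* ... per-sensor cases filling *data and *size ... */
    default:
            ret = smu_smc_read_sensor(smu, sensor, data, size);
    }
    mutex_unlock(&smu->sensor_lock);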
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index d08493b67b67..beacfffbdc3e 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -5098,9 +5098,7 @@ static void vega10_odn_update_soc_table(struct pp_hwmgr *hwmgr,
if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk;
- for (i = 0; i < podn_vdd_dep->count - 1; i++)
- od_vddc_lookup_table->entries[i].us_vdd = podn_vdd_dep->entries[i].vddc;
- if (od_vddc_lookup_table->entries[i].us_vdd < podn_vdd_dep->entries[i].vddc)
+ for (i = 0; i < podn_vdd_dep->count; i++)
od_vddc_lookup_table->entries[i].us_vdd = podn_vdd_dep->entries[i].vddc;
} else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index 6109815a0401..23171a4d9a31 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -344,6 +344,7 @@ struct smu_context
const struct smu_funcs *funcs;
const struct pptable_funcs *ppt_funcs;
struct mutex mutex;
+ struct mutex sensor_lock;
uint64_t pool_size;
struct smu_table_context smu_table;
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 12c0e469bf35..0b461404af6b 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -547,7 +547,7 @@ static int navi10_get_metrics_table(struct smu_context *smu,
struct smu_table_context *smu_table= &smu->smu_table;
int ret = 0;
- if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + HZ / 1000)) {
+ if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(100))) {
ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
(void *)smu_table->metrics_table, false);
if (ret) {
@@ -1386,6 +1386,7 @@ static int navi10_read_sensor(struct smu_context *smu,
if(!data || !size)
return -EINVAL;
+ mutex_lock(&smu->sensor_lock);
switch (sensor) {
case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
*(uint32_t *)data = pptable->FanMaximumRpm;
@@ -1409,6 +1410,7 @@ static int navi10_read_sensor(struct smu_context *smu,
default:
ret = smu_smc_read_sensor(smu, sensor, data, size);
}
+ mutex_unlock(&smu->sensor_lock);
return ret;
}
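Note: besides taking sensor_lock, navi10 fixes its metrics cache window. HZ / 1000 jiffies rounds to zero whenever HZ < 1000 (HZ=100 and HZ=250 are common configs), so the cached table expired immediately; msecs_to_jiffies() is HZ-independent, and the window is deliberately widened to 100 ms:

    /* Refresh the cached table at most every 100 ms, regardless of HZ. */
    if (!smu_table->metrics_time ||
        time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(100)))
            ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
                                   (void *)smu_table->metrics_table, false);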
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index dc754447f0dd..23c12018dbc1 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -655,7 +655,7 @@ static int polaris10_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
count = SMU_MAX_SMIO_LEVELS;
for (level = 0; level < count; level++) {
table->SmioTable2.Pattern[level].Voltage =
- PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE);
+ PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[level].value * VOLTAGE_SCALE);
/* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
table->SmioTable2.Pattern[level].Smio =
(uint8_t) level;
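Note: the polaris10 hunk (and the identical vegam one below) fixes a bound-as-index bug: entries[] holds count valid elements, indices 0..count-1, so entries[count] read one element past the table and programmed every SMIO level with the same out-of-bounds voltage. Reduced to its essentials:

    for (level = 0; level < count; level++)
            pattern[level].Voltage = entries[count].value;  /* wrong: past the end */

    for (level = 0; level < count; level++)
            pattern[level].Voltage = entries[level].value;  /* fixed */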
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
index 7c960b07746f..ae18fbcb26fb 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
@@ -456,7 +456,7 @@ static int vegam_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
count = SMU_MAX_SMIO_LEVELS;
for (level = 0; level < count; level++) {
table->SmioTable2.Pattern[level].Voltage = PP_HOST_TO_SMC_US(
- data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE);
+ data->mvdd_voltage_table.entries[level].value * VOLTAGE_SCALE);
/* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
table->SmioTable2.Pattern[level].Smio =
(uint8_t) level;
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index 64386ee3f878..bbd8ebd58434 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -3023,6 +3023,7 @@ static int vega20_read_sensor(struct smu_context *smu,
if(!data || !size)
return -EINVAL;
+ mutex_lock(&smu->sensor_lock);
switch (sensor) {
case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
*(uint32_t *)data = pptable->FanMaximumRpm;
@@ -3048,6 +3049,7 @@ static int vega20_read_sensor(struct smu_context *smu,
default:
ret = smu_smc_read_sensor(smu, sensor, data, size);
}
+ mutex_unlock(&smu->sensor_lock);
return ret;
}
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
index 8820ce15ce37..ae274902ff92 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
@@ -82,7 +82,8 @@ static void komeda_kms_commit_tail(struct drm_atomic_state *old_state)
drm_atomic_helper_commit_modeset_disables(dev, old_state);
- drm_atomic_helper_commit_planes(dev, old_state, 0);
+ drm_atomic_helper_commit_planes(dev, old_state,
+ DRM_PLANE_COMMIT_ACTIVE_ONLY);
drm_atomic_helper_commit_modeset_enables(dev, old_state);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
index ea26bc9c2d00..b848270e0a1f 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
@@ -564,8 +564,8 @@ komeda_splitter_validate(struct komeda_splitter *splitter,
}
if (!in_range(&splitter->vsize, dflow->in_h)) {
- DRM_DEBUG_ATOMIC("split in_in: %d exceed the acceptable range.\n",
- dflow->in_w);
+ DRM_DEBUG_ATOMIC("split in_h: %d exceeds the acceptable range.\n",
+ dflow->in_h);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
index 2851cac94d86..b72840c06ab7 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
@@ -43,9 +43,8 @@ komeda_wb_encoder_atomic_check(struct drm_encoder *encoder,
struct komeda_data_flow_cfg dflow;
int err;
- if (!writeback_job || !writeback_job->fb) {
+ if (!writeback_job)
return 0;
- }
if (!crtc_st->active) {
DRM_DEBUG_ATOMIC("Cannot write the composition result out on a inactive CRTC.\n");
@@ -166,8 +165,10 @@ static int komeda_wb_connector_add(struct komeda_kms_dev *kms,
&komeda_wb_encoder_helper_funcs,
formats, n_formats);
komeda_put_fourcc_list(formats);
- if (err)
+ if (err) {
+ kfree(kwb_conn);
return err;
+ }
drm_connector_helper_add(&wb_conn->base, &komeda_wb_conn_helper_funcs);
diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c
index 22c0847986df..875a3a9eabfa 100644
--- a/drivers/gpu/drm/arm/malidp_mw.c
+++ b/drivers/gpu/drm/arm/malidp_mw.c
@@ -131,7 +131,7 @@ malidp_mw_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_framebuffer *fb;
int i, n_planes;
- if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
+ if (!conn_state->writeback_job)
return 0;
fb = conn_state->writeback_job->fb;
@@ -248,7 +248,7 @@ void malidp_mw_atomic_commit(struct drm_device *drm,
mw_state = to_mw_state(conn_state);
- if (conn_state->writeback_job && conn_state->writeback_job->fb) {
+ if (conn_state->writeback_job) {
struct drm_framebuffer *fb = conn_state->writeback_job->fb;
DRM_DEV_DEBUG_DRIVER(drm->dev,
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index cebc8e620820..8a8d605021f0 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -728,6 +728,8 @@ static int tc_set_video_mode(struct tc_data *tc,
int lower_margin = mode->vsync_start - mode->vdisplay;
int vsync_len = mode->vsync_end - mode->vsync_start;
u32 dp0_syncval;
+ u32 bits_per_pixel = 24;
+ u32 in_bw, out_bw;
/*
* Recommended maximum number of symbols transferred in a transfer unit:
@@ -735,7 +737,10 @@ static int tc_set_video_mode(struct tc_data *tc,
* (output active video bandwidth in bytes))
* Must be less than tu_size.
*/
- max_tu_symbol = TU_SIZE_RECOMMENDED - 1;
+
+ in_bw = mode->clock * bits_per_pixel / 8;
+ out_bw = tc->link.base.num_lanes * tc->link.base.rate;
+ max_tu_symbol = DIV_ROUND_UP(in_bw * TU_SIZE_RECOMMENDED, out_bw);
dev_dbg(tc->dev, "set mode %dx%d\n",
mode->hdisplay, mode->vdisplay);
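
Rather than pinning max_tu_symbol at TU_SIZE_RECOMMENDED - 1, the new code sizes it from the ratio of input video bandwidth to output link bandwidth. A standalone sketch of the calculation with assumed example numbers (1080p60 at 24 bpp over two HBR lanes; none of these values come from the patch):

#include <stdio.h>

#define TU_SIZE_RECOMMENDED	63
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int clock_khz = 148500;	/* assumed pixel clock */
	unsigned int bits_per_pixel = 24;
	unsigned int num_lanes = 2;		/* assumed link config */
	unsigned int rate_khz = 270000;		/* HBR symbol rate per lane */

	unsigned int in_bw = clock_khz * bits_per_pixel / 8;
	unsigned int out_bw = num_lanes * rate_khz;
	unsigned int max_tu_symbol =
		DIV_ROUND_UP(in_bw * TU_SIZE_RECOMMENDED, out_bw);

	printf("in_bw=%u out_bw=%u max_tu_symbol=%u\n",
	       in_bw, out_bw, max_tu_symbol);
	return 0;
}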
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 419381abbdd1..14aeaf736321 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -430,10 +430,15 @@ static int drm_atomic_connector_check(struct drm_connector *connector,
return -EINVAL;
}
- if (writeback_job->out_fence && !writeback_job->fb) {
- DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] requesting out-fence without framebuffer\n",
- connector->base.id, connector->name);
- return -EINVAL;
+ if (!writeback_job->fb) {
+ if (writeback_job->out_fence) {
+ DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] requesting out-fence without framebuffer\n",
+ connector->base.id, connector->name);
+ return -EINVAL;
+ }
+
+ drm_writeback_cleanup_job(writeback_job);
+ state->writeback_job = NULL;
}
return 0;
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 82a4ceed3fcf..6b0177112e18 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -159,6 +159,9 @@ static const struct edid_quirk {
/* Medion MD 30217 PG */
{ "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
+ /* Lenovo G50 */
+ { "SDC", 18514, EDID_QUIRK_FORCE_6BPC },
+
/* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
{ "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
diff --git a/drivers/gpu/drm/drm_writeback.c b/drivers/gpu/drm/drm_writeback.c
index ff138b6ec48b..43d9e3bb3a94 100644
--- a/drivers/gpu/drm/drm_writeback.c
+++ b/drivers/gpu/drm/drm_writeback.c
@@ -324,6 +324,9 @@ void drm_writeback_cleanup_job(struct drm_writeback_job *job)
if (job->fb)
drm_framebuffer_put(job->fb);
+ if (job->out_fence)
+ dma_fence_put(job->out_fence);
+
kfree(job);
}
EXPORT_SYMBOL(drm_writeback_cleanup_job);
@@ -366,25 +369,29 @@ drm_writeback_signal_completion(struct drm_writeback_connector *wb_connector,
{
unsigned long flags;
struct drm_writeback_job *job;
+ struct dma_fence *out_fence;
spin_lock_irqsave(&wb_connector->job_lock, flags);
job = list_first_entry_or_null(&wb_connector->job_queue,
struct drm_writeback_job,
list_entry);
- if (job) {
+ if (job)
list_del(&job->list_entry);
- if (job->out_fence) {
- if (status)
- dma_fence_set_error(job->out_fence, status);
- dma_fence_signal(job->out_fence);
- dma_fence_put(job->out_fence);
- }
- }
+
spin_unlock_irqrestore(&wb_connector->job_lock, flags);
if (WARN_ON(!job))
return;
+ out_fence = job->out_fence;
+ if (out_fence) {
+ if (status)
+ dma_fence_set_error(out_fence, status);
+ dma_fence_signal(out_fence);
+ dma_fence_put(out_fence);
+ job->out_fence = NULL;
+ }
+
INIT_WORK(&job->cleanup_work, cleanup_work);
queue_work(system_long_wq, &job->cleanup_work);
}
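
The rework pops the job from the queue under the spinlock but signals and releases the fence only after dropping it, and clears job->out_fence so the queued cleanup cannot drop the same reference twice. A pthread-based sketch of the shape (hypothetical names; single-threaded code stands in for the IRQ path):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct fence { int error; int signaled; };
struct job { struct fence *out_fence; };

static pthread_mutex_t job_lock = PTHREAD_MUTEX_INITIALIZER;
static struct job *queue_head;

static void signal_completion(int status)
{
	struct job *job;
	struct fence *f;

	pthread_mutex_lock(&job_lock);
	job = queue_head;		/* list_first_entry_or_null() */
	if (job)
		queue_head = NULL;	/* list_del() */
	pthread_mutex_unlock(&job_lock);

	if (!job)
		return;

	f = job->out_fence;		/* fence work happens after unlock */
	if (f) {
		if (status)
			f->error = status;
		f->signaled = 1;
		free(f);		/* dma_fence_put() */
		job->out_fence = NULL;	/* cleanup won't put it again */
	}
	free(job);			/* stands in for queued cleanup_work */
}

int main(void)
{
	queue_head = calloc(1, sizeof(*queue_head));
	queue_head->out_fence = calloc(1, sizeof(struct fence));
	signal_completion(0);
	return 0;
}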
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
index 698db540972c..648cf0207309 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
@@ -180,6 +180,8 @@ void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
etnaviv_cmdbuf_get_va(&submit->cmdbuf,
&gpu->mmu_context->cmdbuf_mapping));
+ mutex_unlock(&gpu->mmu_context->lock);
+
/* Reserve space for the bomap */
if (n_bomap_pages) {
bomap_start = bomap = iter.data;
@@ -221,8 +223,6 @@ void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
obj->base.size);
}
- mutex_unlock(&gpu->mmu_context->lock);
-
etnaviv_core_dump_header(&iter, ETDUMP_BUF_END, iter.data);
dev_coredumpv(gpu->dev, iter.start, iter.data - iter.start, GFP_KERNEL);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
index 043111a1d60c..f8bf488e9d71 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
@@ -155,9 +155,11 @@ static void etnaviv_iommuv2_dump(struct etnaviv_iommu_context *context, void *bu
memcpy(buf, v2_context->mtlb_cpu, SZ_4K);
buf += SZ_4K;
- for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++, buf += SZ_4K)
- if (v2_context->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
+ for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
+ if (v2_context->mtlb_cpu[i] & MMUv2_PTE_PRESENT) {
memcpy(buf, v2_context->stlb_cpu[i], SZ_4K);
+ buf += SZ_4K;
+ }
}
static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu,
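
With the increment moved inside the loop body, the dump advances the output cursor only for secondary page tables that are actually present, so absent STLB slots no longer leave 4 KiB holes in the coredump. A compact sketch of the pattern (CHUNK stands in for SZ_4K):

#include <stdio.h>
#include <string.h>

#define NENTRIES	4
#define CHUNK		8	/* stands in for SZ_4K */

int main(void)
{
	char out[NENTRIES * CHUNK] = { 0 };
	const char *stlb[NENTRIES] = { "presentA", NULL, "presentB", NULL };
	char *buf = out;
	int i;

	for (i = 0; i < NENTRIES; i++)
		if (stlb[i]) {			/* MMUv2_PTE_PRESENT */
			memcpy(buf, stlb[i], CHUNK);
			buf += CHUNK;		/* advance only when copied */
		}

	printf("%zu bytes written\n", (size_t)(buf - out));
	return 0;
}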
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index 35ebae6a1be7..3607d348c298 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -328,12 +328,23 @@ etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
ret = etnaviv_cmdbuf_suballoc_map(suballoc, ctx, &ctx->cmdbuf_mapping,
global->memory_base);
- if (ret) {
- global->ops->free(ctx);
- return NULL;
+ if (ret)
+ goto out_free;
+
+ if (global->version == ETNAVIV_IOMMU_V1 &&
+ ctx->cmdbuf_mapping.iova > 0x80000000) {
+ dev_err(global->dev,
+ "command buffer outside valid memory window\n");
+ goto out_unmap;
}
return ctx;
+
+out_unmap:
+ etnaviv_cmdbuf_suballoc_unmap(ctx, &ctx->cmdbuf_mapping);
+out_free:
+ global->ops->free(ctx);
+ return NULL;
}
void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index efb39f350b19..3250c1b8dcca 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -1270,7 +1270,7 @@ static void sanitize_ddc_pin(struct drm_i915_private *dev_priv,
DRM_DEBUG_KMS("port %c trying to use the same DDC pin (0x%x) as port %c, "
"disabling port %c DVI/HDMI support\n",
port_name(port), info->alternate_ddc_pin,
- port_name(p), port_name(port));
+ port_name(p), port_name(p));
/*
* If we have multiple ports supposedly sharing the
@@ -1278,9 +1278,14 @@ static void sanitize_ddc_pin(struct drm_i915_private *dev_priv,
* port. Otherwise they share the same ddc pin and
* system couldn't communicate with them separately.
*
- * Give child device order the priority, first come first
- * served.
+ * Give inverse child device order the priority,
+ * last one wins. Yes, there are real machines
+ * (eg. Asrock B250M-HDV) where VBT has both
+ * port A and port E with the same AUX ch and
+ * we must pick port E :(
*/
+ info = &dev_priv->vbt.ddi_port_info[p];
+
info->supports_dvi = false;
info->supports_hdmi = false;
info->alternate_ddc_pin = 0;
@@ -1316,7 +1321,7 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
DRM_DEBUG_KMS("port %c trying to use the same AUX CH (0x%x) as port %c, "
"disabling port %c DP support\n",
port_name(port), info->alternate_aux_channel,
- port_name(p), port_name(port));
+ port_name(p), port_name(p));
/*
* If we have multiple ports supposedly sharing the
@@ -1324,9 +1329,14 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
* port. Otherwise they share the same aux channel
* and system couldn't communicate with them separately.
*
- * Give child device order the priority, first come first
- * served.
+ * Give inverse child device order the priority,
+ * last one wins. Yes, there are real machines
+ * (eg. Asrock B250M-HDV) where VBT has both
+ * port A and port E with the same AUX ch and
+ * we must pick port E :(
*/
+ info = &dev_priv->vbt.ddi_port_info[p];
+
info->supports_dp = false;
info->alternate_aux_channel = 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index b51d1ceb8739..dfff6f4357b8 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -3280,7 +3280,20 @@ static int skl_max_plane_width(const struct drm_framebuffer *fb,
switch (fb->modifier) {
case DRM_FORMAT_MOD_LINEAR:
case I915_FORMAT_MOD_X_TILED:
- return 4096;
+ /*
+ * Validated limit is 4k, but 5k should
+ * work apart from the following features:
+ * - Ytile (already limited to 4k)
+ * - FP16 (already limited to 4k)
+ * - render compression (already limited to 4k)
+ * - KVMR sprite and cursor (don't care)
+ * - horizontal panning (TODO verify this)
+ * - pipe and plane scaling (TODO verify this)
+ */
+ if (cpp == 8)
+ return 4096;
+ else
+ return 5120;
case I915_FORMAT_MOD_Y_TILED_CCS:
case I915_FORMAT_MOD_Yf_TILED_CCS:
/* FIXME AUX plane? */
@@ -7261,7 +7274,7 @@ retry:
pipe_config->fdi_lanes = lane;
intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
- link_bw, &pipe_config->fdi_m_n, false);
+ link_bw, &pipe_config->fdi_m_n, false, false);
ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
if (ret == -EDEADLK)
@@ -7508,11 +7521,15 @@ void
intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
int pixel_clock, int link_clock,
struct intel_link_m_n *m_n,
- bool constant_n)
+ bool constant_n, bool fec_enable)
{
- m_n->tu = 64;
+ u32 data_clock = bits_per_pixel * pixel_clock;
+
+ if (fec_enable)
+ data_clock = intel_dp_mode_to_fec_clock(data_clock);
- compute_m_n(bits_per_pixel * pixel_clock,
+ m_n->tu = 64;
+ compute_m_n(data_clock,
link_clock * nlanes * 8,
&m_n->gmch_m, &m_n->gmch_n,
constant_n);
@@ -9298,7 +9315,6 @@ static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
{
struct intel_encoder *encoder;
- bool pch_ssc_in_use = false;
bool has_fdi = false;
for_each_intel_encoder(&dev_priv->drm, encoder) {
@@ -9326,22 +9342,24 @@ static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
* clock hierarchy. That would also allow us to do
* clock bending finally.
*/
+ dev_priv->pch_ssc_use = 0;
+
if (spll_uses_pch_ssc(dev_priv)) {
DRM_DEBUG_KMS("SPLL using PCH SSC\n");
- pch_ssc_in_use = true;
+ dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
}
if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
DRM_DEBUG_KMS("WRPLL1 using PCH SSC\n");
- pch_ssc_in_use = true;
+ dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
}
if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
DRM_DEBUG_KMS("WRPLL2 using PCH SSC\n");
- pch_ssc_in_use = true;
+ dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
}
- if (pch_ssc_in_use)
+ if (dev_priv->pch_ssc_use)
return;
if (has_fdi) {
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index e57e6969051d..01fa87ad3270 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -414,7 +414,7 @@ enum phy_fia {
void intel_link_compute_m_n(u16 bpp, int nlanes,
int pixel_clock, int link_clock,
struct intel_link_m_n *m_n,
- bool constant_n);
+ bool constant_n, bool fec_enable);
bool is_ccs_modifier(u64 modifier);
void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv);
u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 921ad0a2f7ba..57e9f0ba331b 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -78,8 +78,8 @@
#define DP_DSC_MAX_ENC_THROUGHPUT_0 340000
#define DP_DSC_MAX_ENC_THROUGHPUT_1 400000
-/* DP DSC FEC Overhead factor = (100 - 2.4)/100 */
-#define DP_DSC_FEC_OVERHEAD_FACTOR 976
+/* DP DSC FEC Overhead factor = 1/(0.972261) */
+#define DP_DSC_FEC_OVERHEAD_FACTOR 972261
/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK 0
@@ -494,6 +494,97 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
return 0;
}
+u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
+{
+ return div_u64(mul_u32_u32(mode_clock, 1000000U),
+ DP_DSC_FEC_OVERHEAD_FACTOR);
+}
+
+static u16 intel_dp_dsc_get_output_bpp(u32 link_clock, u32 lane_count,
+ u32 mode_clock, u32 mode_hdisplay)
+{
+ u32 bits_per_pixel, max_bpp_small_joiner_ram;
+ int i;
+
+ /*
+ * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
+ * (LinkSymbolClock)* 8 * (TimeSlotsPerMTP)
+ * for SST -> TimeSlotsPerMTP is 1,
+ * for MST -> TimeSlotsPerMTP has to be calculated
+ */
+ bits_per_pixel = (link_clock * lane_count * 8) /
+ intel_dp_mode_to_fec_clock(mode_clock);
+ DRM_DEBUG_KMS("Max link bpp: %u\n", bits_per_pixel);
+
+ /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
+ max_bpp_small_joiner_ram = DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER / mode_hdisplay;
+ DRM_DEBUG_KMS("Max small joiner bpp: %u\n", max_bpp_small_joiner_ram);
+
+ /*
+ * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
+ * check, output bpp from small joiner RAM check)
+ */
+ bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);
+
+ /* Error out if the max bpp is less than smallest allowed valid bpp */
+ if (bits_per_pixel < valid_dsc_bpp[0]) {
+ DRM_DEBUG_KMS("Unsupported BPP %u, min %u\n",
+ bits_per_pixel, valid_dsc_bpp[0]);
+ return 0;
+ }
+
+ /* Find the nearest match in the array of known BPPs from VESA */
+ for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
+ if (bits_per_pixel < valid_dsc_bpp[i + 1])
+ break;
+ }
+ bits_per_pixel = valid_dsc_bpp[i];
+
+ /*
+ * Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
+ * fractional part is 0
+ */
+ return bits_per_pixel << 4;
+}
+
+static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
+ int mode_clock, int mode_hdisplay)
+{
+ u8 min_slice_count, i;
+ int max_slice_width;
+
+ if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
+ min_slice_count = DIV_ROUND_UP(mode_clock,
+ DP_DSC_MAX_ENC_THROUGHPUT_0);
+ else
+ min_slice_count = DIV_ROUND_UP(mode_clock,
+ DP_DSC_MAX_ENC_THROUGHPUT_1);
+
+ max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
+ if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
+ DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n",
+ max_slice_width);
+ return 0;
+ }
+ /* Also take into account max slice width */
+ min_slice_count = min_t(u8, min_slice_count,
+ DIV_ROUND_UP(mode_hdisplay,
+ max_slice_width));
+
+ /* Find the closest match to the valid slice count values */
+ for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
+ if (valid_dsc_slicecount[i] >
+ drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
+ false))
+ break;
+ if (min_slice_count <= valid_dsc_slicecount[i])
+ return valid_dsc_slicecount[i];
+ }
+
+ DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count);
+ return 0;
+}
+
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
@@ -2226,7 +2317,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
adjusted_mode->crtc_clock,
pipe_config->port_clock,
&pipe_config->dp_m_n,
- constant_n);
+ constant_n, pipe_config->fec_enable);
if (intel_connector->panel.downclock_mode != NULL &&
dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
@@ -2236,7 +2327,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
intel_connector->panel.downclock_mode->clock,
pipe_config->port_clock,
&pipe_config->dp_m2_n2,
- constant_n);
+ constant_n, pipe_config->fec_enable);
}
if (!HAS_DDI(dev_priv))
@@ -4323,91 +4414,6 @@ intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
DP_DPRX_ESI_LEN;
}
-u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count,
- int mode_clock, int mode_hdisplay)
-{
- u16 bits_per_pixel, max_bpp_small_joiner_ram;
- int i;
-
- /*
- * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
- * (LinkSymbolClock)* 8 * ((100-FECOverhead)/100)*(TimeSlotsPerMTP)
- * FECOverhead = 2.4%, for SST -> TimeSlotsPerMTP is 1,
- * for MST -> TimeSlotsPerMTP has to be calculated
- */
- bits_per_pixel = (link_clock * lane_count * 8 *
- DP_DSC_FEC_OVERHEAD_FACTOR) /
- mode_clock;
-
- /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
- max_bpp_small_joiner_ram = DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER /
- mode_hdisplay;
-
- /*
- * Greatest allowed DSC BPP = MIN (output BPP from avaialble Link BW
- * check, output bpp from small joiner RAM check)
- */
- bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);
-
- /* Error out if the max bpp is less than smallest allowed valid bpp */
- if (bits_per_pixel < valid_dsc_bpp[0]) {
- DRM_DEBUG_KMS("Unsupported BPP %d\n", bits_per_pixel);
- return 0;
- }
-
- /* Find the nearest match in the array of known BPPs from VESA */
- for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
- if (bits_per_pixel < valid_dsc_bpp[i + 1])
- break;
- }
- bits_per_pixel = valid_dsc_bpp[i];
-
- /*
- * Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
- * fractional part is 0
- */
- return bits_per_pixel << 4;
-}
-
-u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
- int mode_clock,
- int mode_hdisplay)
-{
- u8 min_slice_count, i;
- int max_slice_width;
-
- if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
- min_slice_count = DIV_ROUND_UP(mode_clock,
- DP_DSC_MAX_ENC_THROUGHPUT_0);
- else
- min_slice_count = DIV_ROUND_UP(mode_clock,
- DP_DSC_MAX_ENC_THROUGHPUT_1);
-
- max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
- if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
- DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n",
- max_slice_width);
- return 0;
- }
- /* Also take into account max slice width */
- min_slice_count = min_t(u8, min_slice_count,
- DIV_ROUND_UP(mode_hdisplay,
- max_slice_width));
-
- /* Find the closest match to the valid slice count values */
- for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
- if (valid_dsc_slicecount[i] >
- drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
- false))
- break;
- if (min_slice_count <= valid_dsc_slicecount[i])
- return valid_dsc_slicecount[i];
- }
-
- DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count);
- return 0;
-}
-
static void
intel_pixel_encoding_setup_vsc(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
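
The overhead factor is now expressed as the exact divisor 1/0.972261 scaled by 10^6 and applied to the mode clock with a widening multiply, so the intermediate product cannot overflow 32 bits. A standalone sketch of the helper (the 4k60-class clock is an assumed example):

#include <stdint.h>
#include <stdio.h>

#define DP_DSC_FEC_OVERHEAD_FACTOR	972261ULL

static uint32_t mode_to_fec_clock(uint32_t mode_clock)
{
	/* mul_u32_u32() + div_u64() in the kernel; plain 64-bit math here */
	return (uint32_t)((uint64_t)mode_clock * 1000000ULL /
			  DP_DSC_FEC_OVERHEAD_FACTOR);
}

int main(void)
{
	uint32_t mode_clock = 533250;	/* kHz, assumed example */

	printf("%u kHz -> %u kHz effective with FEC\n",
	       mode_clock, mode_to_fec_clock(mode_clock));
	return 0;
}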
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index 657bbb1f5ed0..00981fb9414b 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -102,10 +102,6 @@ bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp);
bool
intel_dp_get_link_status(struct intel_dp *intel_dp, u8 *link_status);
-u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count,
- int mode_clock, int mode_hdisplay);
-u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, int mode_clock,
- int mode_hdisplay);
bool intel_dp_read_dpcd(struct intel_dp *intel_dp);
bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp);
@@ -118,4 +114,6 @@ static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
return ~((1 << lane_count) - 1) & 0xf;
}
+u32 intel_dp_mode_to_fec_clock(u32 mode_clock);
+
#endif /* __INTEL_DP_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 6df240a01b8c..600873c796d0 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -81,7 +81,7 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
adjusted_mode->crtc_clock,
crtc_state->port_clock,
&crtc_state->dp_m_n,
- constant_n);
+ constant_n, crtc_state->fec_enable);
crtc_state->dp_m_n.tu = slots;
return 0;
@@ -615,7 +615,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
intel_encoder->type = INTEL_OUTPUT_DP_MST;
intel_encoder->power_domain = intel_dig_port->base.power_domain;
intel_encoder->port = intel_dig_port->base.port;
- intel_encoder->crtc_mask = BIT(pipe);
+ intel_encoder->crtc_mask = 0x7;
intel_encoder->cloneable = 0;
intel_encoder->compute_config = intel_dp_mst_compute_config;
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index b8148f838354..d5a298c3c83b 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -525,16 +525,31 @@ static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
val = I915_READ(WRPLL_CTL(id));
I915_WRITE(WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
POSTING_READ(WRPLL_CTL(id));
+
+ /*
+ * Try to set up the PCH reference clock once all DPLLs
+ * that depend on it have been shut down.
+ */
+ if (dev_priv->pch_ssc_use & BIT(id))
+ intel_init_pch_refclk(dev_priv);
}
static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
+ enum intel_dpll_id id = pll->info->id;
u32 val;
val = I915_READ(SPLL_CTL);
I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
POSTING_READ(SPLL_CTL);
+
+ /*
+ * Try to set up the PCH reference clock once all DPLLs
+ * that depend on it have been shut down.
+ */
+ if (dev_priv->pch_ssc_use & BIT(id))
+ intel_init_pch_refclk(dev_priv);
}
static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
index e7588799fce5..104cf6d42333 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
@@ -147,11 +147,11 @@ enum intel_dpll_id {
*/
DPLL_ID_ICL_MGPLL4 = 6,
/**
- * @DPLL_ID_TGL_TCPLL5: TGL TC PLL port 5 (TC5)
+ * @DPLL_ID_TGL_MGPLL5: TGL TC PLL port 5 (TC5)
*/
DPLL_ID_TGL_MGPLL5 = 7,
/**
- * @DPLL_ID_TGL_TCPLL6: TGL TC PLL port 6 (TC6)
+ * @DPLL_ID_TGL_MGPLL6: TGL TC PLL port 6 (TC6)
*/
DPLL_ID_TGL_MGPLL6 = 8,
};
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index dea63be1964f..cae25e493128 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -1528,6 +1528,7 @@ g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state,
int src_x, src_w, src_h, crtc_w, crtc_h;
const struct drm_display_mode *adjusted_mode =
&crtc_state->base.adjusted_mode;
+ unsigned int stride = plane_state->color_plane[0].stride;
unsigned int cpp = fb->format->cpp[0];
unsigned int width_bytes;
int min_width, min_height;
@@ -1569,9 +1570,9 @@ g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state,
return -EINVAL;
}
- if (width_bytes > 4096 || fb->pitches[0] > 4096) {
+ if (stride > 4096) {
DRM_DEBUG_KMS("Stride (%u) exceeds hardware max with scaling (%u)\n",
- fb->pitches[0], 4096);
+ stride, 4096);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 261c9bd83f51..05289edbafe3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -245,11 +245,9 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
wakeref = intel_runtime_pm_get(rpm);
- srcu = intel_gt_reset_trylock(ggtt->vm.gt);
- if (srcu < 0) {
- ret = srcu;
+ ret = intel_gt_reset_trylock(ggtt->vm.gt, &srcu);
+ if (ret)
goto err_rpm;
- }
ret = i915_mutex_lock_interruptible(dev);
if (ret)
@@ -318,7 +316,11 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
intel_wakeref_auto(&i915->ggtt.userfault_wakeref,
msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
- i915_vma_set_ggtt_write(vma);
+ if (write) {
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+ i915_vma_set_ggtt_write(vma);
+ obj->mm.dirty = true;
+ }
err_fence:
i915_vma_unpin_fence(vma);
@@ -362,6 +364,7 @@ err:
return VM_FAULT_OOM;
case -ENOSPC:
case -EFAULT:
+ case -ENODEV: /* bad object, how did you get here! */
return VM_FAULT_SIGBUS;
default:
WARN_ONCE(ret, "unhandled error in %s: %i\n", __func__, ret);
@@ -473,10 +476,16 @@ i915_gem_mmap_gtt(struct drm_file *file,
if (!obj)
return -ENOENT;
+ if (i915_gem_object_never_bind_ggtt(obj)) {
+ ret = -ENODEV;
+ goto out;
+ }
+
ret = create_mmap_offset(obj);
if (ret == 0)
*offset = drm_vma_node_offset_addr(&obj->base.vma_node);
+out:
i915_gem_object_put(obj);
return ret;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 5efb9936e05b..ddf3605bea8e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -153,6 +153,12 @@ i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
}
static inline bool
+i915_gem_object_never_bind_ggtt(const struct drm_i915_gem_object *obj)
+{
+ return obj->ops->flags & I915_GEM_OBJECT_NO_GGTT;
+}
+
+static inline bool
i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
{
return obj->ops->flags & I915_GEM_OBJECT_ASYNC_CANCEL;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index ede0eb4218a8..646859fea224 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -32,7 +32,8 @@ struct drm_i915_gem_object_ops {
#define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0)
#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(1)
#define I915_GEM_OBJECT_IS_PROXY BIT(2)
-#define I915_GEM_OBJECT_ASYNC_CANCEL BIT(3)
+#define I915_GEM_OBJECT_NO_GGTT BIT(3)
+#define I915_GEM_OBJECT_ASYNC_CANCEL BIT(4)
/* Interface between the GEM object and its backing storage.
* get_pages() is called once prior to the use of the associated set
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index 92e53c25424c..ad2a63dbcac2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -241,9 +241,6 @@ void i915_gem_resume(struct drm_i915_private *i915)
mutex_lock(&i915->drm.struct_mutex);
intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
- i915_gem_restore_gtt_mappings(i915);
- i915_gem_restore_fences(i915);
-
if (i915_gem_init_hw(i915))
goto err_wedged;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 11b231c187c5..6b3b50f0f6d9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -702,6 +702,7 @@ i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
I915_GEM_OBJECT_IS_SHRINKABLE |
+ I915_GEM_OBJECT_NO_GGTT |
I915_GEM_OBJECT_ASYNC_CANCEL,
.get_pages = i915_gem_userptr_get_pages,
.put_pages = i915_gem_userptr_put_pages,
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index d3c6993f4f46..22aab8593abf 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -136,6 +136,20 @@ execlists_active(const struct intel_engine_execlists *execlists)
return READ_ONCE(*execlists->active);
}
+static inline void
+execlists_active_lock_bh(struct intel_engine_execlists *execlists)
+{
+ local_bh_disable(); /* prevent local softirq and lock recursion */
+ tasklet_lock(&execlists->tasklet);
+}
+
+static inline void
+execlists_active_unlock_bh(struct intel_engine_execlists *execlists)
+{
+ tasklet_unlock(&execlists->tasklet);
+ local_bh_enable(); /* restore softirq, and kick ksoftirqd! */
+}
+
struct i915_request *
execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 82630db0394b..4ce8626b140e 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -1197,9 +1197,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
struct drm_printer *m)
{
struct drm_i915_private *dev_priv = engine->i915;
- const struct intel_engine_execlists * const execlists =
- &engine->execlists;
- unsigned long flags;
+ struct intel_engine_execlists * const execlists = &engine->execlists;
u64 addr;
if (engine->id == RENDER_CLASS && IS_GEN_RANGE(dev_priv, 4, 7))
@@ -1281,7 +1279,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
idx, hws[idx * 2], hws[idx * 2 + 1]);
}
- spin_lock_irqsave(&engine->active.lock, flags);
+ execlists_active_lock_bh(execlists);
for (port = execlists->active; (rq = *port); port++) {
char hdr[80];
int len;
@@ -1309,7 +1307,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
hwsp_seqno(rq));
print_request(m, rq, hdr);
}
- spin_unlock_irqrestore(&engine->active.lock, flags);
+ execlists_active_unlock_bh(execlists);
} else if (INTEL_GEN(dev_priv) > 6) {
drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
ENGINE_READ(engine, RING_PP_DIR_BASE));
@@ -1440,8 +1438,8 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
if (!intel_engine_supports_stats(engine))
return -ENODEV;
- spin_lock_irqsave(&engine->active.lock, flags);
- write_seqlock(&engine->stats.lock);
+ execlists_active_lock_bh(execlists);
+ write_seqlock_irqsave(&engine->stats.lock, flags);
if (unlikely(engine->stats.enabled == ~0)) {
err = -EBUSY;
@@ -1469,8 +1467,8 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
}
unlock:
- write_sequnlock(&engine->stats.lock);
- spin_unlock_irqrestore(&engine->active.lock, flags);
+ write_sequnlock_irqrestore(&engine->stats.lock, flags);
+ execlists_active_unlock_bh(execlists);
return err;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index d42584439f51..06a506c29463 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -234,6 +234,13 @@ static void execlists_init_reg_state(u32 *reg_state,
struct intel_engine_cs *engine,
struct intel_ring *ring);
+static void mark_eio(struct i915_request *rq)
+{
+ if (!i915_request_signaled(rq))
+ dma_fence_set_error(&rq->fence, -EIO);
+ i915_request_mark_complete(rq);
+}
+
static inline u32 intel_hws_preempt_address(struct intel_engine_cs *engine)
{
return (i915_ggtt_offset(engine->status_page.vma) +
@@ -631,7 +638,6 @@ execlists_schedule_out(struct i915_request *rq)
struct intel_engine_cs *cur, *old;
trace_i915_request_out(rq);
- GEM_BUG_ON(intel_context_inflight(ce) != rq->engine);
old = READ_ONCE(ce->inflight);
do
@@ -797,6 +803,17 @@ static bool can_merge_rq(const struct i915_request *prev,
GEM_BUG_ON(prev == next);
GEM_BUG_ON(!assert_priority_queue(prev, next));
+ /*
+ * We do not submit known completed requests. Therefore if the next
+ * request is already completed, we can pretend to merge it in
+ * with the previous context (and we will skip updating the ELSP
+ * and tracking). Thus hopefully keeping the ELSP full with active
+ * contexts, despite the best efforts of preempt-to-busy to confuse
+ * us.
+ */
+ if (i915_request_completed(next))
+ return true;
+
if (!can_merge_ctx(prev->hw_context, next->hw_context))
return false;
@@ -893,7 +910,7 @@ static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
static struct i915_request *
last_active(const struct intel_engine_execlists *execlists)
{
- struct i915_request * const *last = execlists->active;
+ struct i915_request * const *last = READ_ONCE(execlists->active);
while (*last && i915_request_completed(*last))
last++;
@@ -1172,21 +1189,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
continue;
}
- if (i915_request_completed(rq)) {
- ve->request = NULL;
- ve->base.execlists.queue_priority_hint = INT_MIN;
- rb_erase_cached(rb, &execlists->virtual);
- RB_CLEAR_NODE(rb);
-
- rq->engine = engine;
- __i915_request_submit(rq);
-
- spin_unlock(&ve->base.active.lock);
-
- rb = rb_first_cached(&execlists->virtual);
- continue;
- }
-
if (last && !can_merge_rq(last, rq)) {
spin_unlock(&ve->base.active.lock);
return; /* leave this for another */
@@ -1237,11 +1239,24 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
GEM_BUG_ON(ve->siblings[0] != engine);
}
- __i915_request_submit(rq);
- if (!i915_request_completed(rq)) {
+ if (__i915_request_submit(rq)) {
submit = true;
last = rq;
}
+ i915_request_put(rq);
+
+ /*
+ * Hmm, we have a bunch of virtual engine requests,
+ * but the first one was already completed (thanks
+ * preempt-to-busy!). Keep looking at the veng queue
+ * until we have no more relevant requests (i.e.
+ * the normal submit queue has higher priority).
+ */
+ if (!submit) {
+ spin_unlock(&ve->base.active.lock);
+ rb = rb_first_cached(&execlists->virtual);
+ continue;
+ }
}
spin_unlock(&ve->base.active.lock);
@@ -1254,8 +1269,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
int i;
priolist_for_each_request_consume(rq, rn, p, i) {
- if (i915_request_completed(rq))
- goto skip;
+ bool merge = true;
/*
* Can we combine this request with the current port?
@@ -1296,14 +1310,23 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
ctx_single_port_submission(rq->hw_context))
goto done;
- *port = execlists_schedule_in(last, port - execlists->pending);
- port++;
+ merge = false;
}
- last = rq;
- submit = true;
-skip:
- __i915_request_submit(rq);
+ if (__i915_request_submit(rq)) {
+ if (!merge) {
+ *port = execlists_schedule_in(last, port - execlists->pending);
+ port++;
+ last = NULL;
+ }
+
+ GEM_BUG_ON(last &&
+ !can_merge_ctx(last->hw_context,
+ rq->hw_context));
+
+ submit = true;
+ last = rq;
+ }
}
rb_erase_cached(&p->node, &execlists->queue);
@@ -1593,8 +1616,11 @@ static void process_csb(struct intel_engine_cs *engine)
static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
{
lockdep_assert_held(&engine->active.lock);
- if (!engine->execlists.pending[0])
+ if (!engine->execlists.pending[0]) {
+ rcu_read_lock(); /* protect peeking at execlists->active */
execlists_dequeue(engine);
+ rcu_read_unlock();
+ }
}
/*
@@ -2399,10 +2425,14 @@ static void reset_csb_pointers(struct intel_engine_cs *engine)
static struct i915_request *active_request(struct i915_request *rq)
{
- const struct list_head * const list = &rq->timeline->requests;
const struct intel_context * const ce = rq->hw_context;
struct i915_request *active = NULL;
+ struct list_head *list;
+ if (!i915_request_is_active(rq)) /* unwound, but incomplete! */
+ return rq;
+
+ list = &rq->timeline->requests;
list_for_each_entry_from_reverse(rq, list, link) {
if (i915_request_completed(rq))
break;
@@ -2552,12 +2582,8 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
__execlists_reset(engine, true);
/* Mark all executing requests as skipped. */
- list_for_each_entry(rq, &engine->active.requests, sched.link) {
- if (!i915_request_signaled(rq))
- dma_fence_set_error(&rq->fence, -EIO);
-
- i915_request_mark_complete(rq);
- }
+ list_for_each_entry(rq, &engine->active.requests, sched.link)
+ mark_eio(rq);
/* Flush the queued requests to the timeline list (for retiring). */
while ((rb = rb_first_cached(&execlists->queue))) {
@@ -2565,10 +2591,8 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
int i;
priolist_for_each_request_consume(rq, rn, p, i) {
- list_del_init(&rq->sched.link);
+ mark_eio(rq);
__i915_request_submit(rq);
- dma_fence_set_error(&rq->fence, -EIO);
- i915_request_mark_complete(rq);
}
rb_erase_cached(&p->node, &execlists->queue);
@@ -2584,13 +2608,15 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
RB_CLEAR_NODE(rb);
spin_lock(&ve->base.active.lock);
- if (ve->request) {
- ve->request->engine = engine;
- __i915_request_submit(ve->request);
- dma_fence_set_error(&ve->request->fence, -EIO);
- i915_request_mark_complete(ve->request);
+ rq = fetch_and_zero(&ve->request);
+ if (rq) {
+ mark_eio(rq);
+
+ rq->engine = engine;
+ __i915_request_submit(rq);
+ i915_request_put(rq);
+
ve->base.execlists.queue_priority_hint = INT_MIN;
- ve->request = NULL;
}
spin_unlock(&ve->base.active.lock);
}
@@ -3594,6 +3620,8 @@ submit_engine:
static void virtual_submit_request(struct i915_request *rq)
{
struct virtual_engine *ve = to_virtual_engine(rq->engine);
+ struct i915_request *old;
+ unsigned long flags;
GEM_TRACE("%s: rq=%llx:%lld\n",
ve->base.name,
@@ -3602,15 +3630,31 @@ static void virtual_submit_request(struct i915_request *rq)
GEM_BUG_ON(ve->base.submit_request != virtual_submit_request);
- GEM_BUG_ON(ve->request);
- GEM_BUG_ON(!list_empty(virtual_queue(ve)));
+ spin_lock_irqsave(&ve->base.active.lock, flags);
+
+ old = ve->request;
+ if (old) { /* background completion event from preempt-to-busy */
+ GEM_BUG_ON(!i915_request_completed(old));
+ __i915_request_submit(old);
+ i915_request_put(old);
+ }
+
+ if (i915_request_completed(rq)) {
+ __i915_request_submit(rq);
+
+ ve->base.execlists.queue_priority_hint = INT_MIN;
+ ve->request = NULL;
+ } else {
+ ve->base.execlists.queue_priority_hint = rq_prio(rq);
+ ve->request = i915_request_get(rq);
- ve->base.execlists.queue_priority_hint = rq_prio(rq);
- WRITE_ONCE(ve->request, rq);
+ GEM_BUG_ON(!list_empty(virtual_queue(ve)));
+ list_move_tail(&rq->sched.link, virtual_queue(ve));
- list_move_tail(&rq->sched.link, virtual_queue(ve));
+ tasklet_schedule(&ve->base.execlists.tasklet);
+ }
- tasklet_schedule(&ve->base.execlists.tasklet);
+ spin_unlock_irqrestore(&ve->base.active.lock, flags);
}
static struct ve_bond *
@@ -3631,18 +3675,22 @@ static void
virtual_bond_execute(struct i915_request *rq, struct dma_fence *signal)
{
struct virtual_engine *ve = to_virtual_engine(rq->engine);
+ intel_engine_mask_t allowed, exec;
struct ve_bond *bond;
+ allowed = ~to_request(signal)->engine->mask;
+
bond = virtual_find_bond(ve, to_request(signal)->engine);
- if (bond) {
- intel_engine_mask_t old, new, cmp;
+ if (bond)
+ allowed &= bond->sibling_mask;
- cmp = READ_ONCE(rq->execution_mask);
- do {
- old = cmp;
- new = cmp & bond->sibling_mask;
- } while ((cmp = cmpxchg(&rq->execution_mask, old, new)) != old);
- }
+ /* Restrict the bonded request to run on only the available engines */
+ exec = READ_ONCE(rq->execution_mask);
+ while (!try_cmpxchg(&rq->execution_mask, &exec, exec & allowed))
+ ;
+
+ /* Prevent the master from being re-run on the bonded engines */
+ to_request(signal)->execution_mask &= ~allowed;
}
struct intel_context *
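
virtual_bond_execute() now folds the bond's sibling mask and the signaler's own engine into one allowed mask, applies it with a try_cmpxchg() loop, and strips the bonded engines from the master so the pair cannot land on the same engine. A C11-atomics sketch of the mask update loop:

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
	_Atomic unsigned int execution_mask = 0x0f; /* four sibling engines */
	unsigned int allowed = 0x06;	/* engines the bond still permits */
	unsigned int exec = atomic_load(&execution_mask);

	/* on failure, exec is reloaded with the current value; just retry */
	while (!atomic_compare_exchange_weak(&execution_mask, &exec,
					     exec & allowed))
		;

	printf("execution_mask now 0x%02x\n", atomic_load(&execution_mask));
	return 0;
}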
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index b9d84d52e986..8cea42379dd7 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -42,11 +42,10 @@ static void engine_skip_context(struct i915_request *rq)
struct intel_engine_cs *engine = rq->engine;
struct i915_gem_context *hung_ctx = rq->gem_context;
- lockdep_assert_held(&engine->active.lock);
-
if (!i915_request_is_active(rq))
return;
+ lockdep_assert_held(&engine->active.lock);
list_for_each_entry_continue(rq, &engine->active.requests, sched.link)
if (rq->gem_context == hung_ctx)
i915_request_skip(rq, -EIO);
@@ -123,7 +122,6 @@ void __i915_request_reset(struct i915_request *rq, bool guilty)
rq->fence.seqno,
yesno(guilty));
- lockdep_assert_held(&rq->engine->active.lock);
GEM_BUG_ON(i915_request_completed(rq));
if (guilty) {
@@ -1214,10 +1212,8 @@ out:
intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
}
-int intel_gt_reset_trylock(struct intel_gt *gt)
+int intel_gt_reset_trylock(struct intel_gt *gt, int *srcu)
{
- int srcu;
-
might_lock(&gt->reset.backoff_srcu);
might_sleep();
@@ -1232,10 +1228,10 @@ int intel_gt_reset_trylock(struct intel_gt *gt)
rcu_read_lock();
}
- srcu = srcu_read_lock(&gt->reset.backoff_srcu);
+ *srcu = srcu_read_lock(&gt->reset.backoff_srcu);
rcu_read_unlock();
- return srcu;
+ return 0;
}
void intel_gt_reset_unlock(struct intel_gt *gt, int tag)
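
The trylock now returns a plain 0/-errno status and hands the SRCU read-side cookie back through an out-parameter, separating success or failure from the cookie value itself. A trivial sketch of the API shape (names hypothetical):

#include <stdio.h>

static int reset_trylock(int *cookie)
{
	*cookie = 0;	/* srcu_read_lock() may legitimately return 0 */
	return 0;	/* 0 on success, negative errno on failure */
}

int main(void)
{
	int srcu, ret;

	ret = reset_trylock(&srcu);
	if (ret)
		return 1;
	printf("locked, cookie=%d\n", srcu);
	return 0;
}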
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.h b/drivers/gpu/drm/i915/gt/intel_reset.h
index 37a987b17108..52c00199e069 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.h
+++ b/drivers/gpu/drm/i915/gt/intel_reset.h
@@ -38,7 +38,7 @@ int intel_engine_reset(struct intel_engine_cs *engine,
void __i915_request_reset(struct i915_request *rq, bool guilty);
-int __must_check intel_gt_reset_trylock(struct intel_gt *gt);
+int __must_check intel_gt_reset_trylock(struct intel_gt *gt, int *srcu);
void intel_gt_reset_unlock(struct intel_gt *gt, int tag);
void intel_gt_set_wedged(struct intel_gt *gt);
diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
index 601c16239fdf..bacaa7bb8c9a 100644
--- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
@@ -1573,7 +1573,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
struct intel_engine_cs *engine = rq->engine;
enum intel_engine_id id;
const int num_engines =
- IS_HSW_GT1(i915) ? RUNTIME_INFO(i915)->num_engines - 1 : 0;
+ IS_HASWELL(i915) ? RUNTIME_INFO(i915)->num_engines - 1 : 0;
bool force_restore = false;
int len;
u32 *cs;
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 45481eb1fa3c..5f6ec2fd29a0 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -1063,6 +1063,9 @@ static void gen9_whitelist_build(struct i915_wa_list *w)
/* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
whitelist_reg(w, GEN8_HDC_CHICKEN1);
+
+ /* WaSendPushConstantsFromMMIO:skl,bxt */
+ whitelist_reg(w, COMMON_SLICE_CHICKEN2);
}
static void skl_whitelist_build(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 020696726f9e..bb6f86c7067a 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1924,6 +1924,11 @@ static int i915_drm_resume(struct drm_device *dev)
if (ret)
DRM_ERROR("failed to re-enable GGTT\n");
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ i915_gem_restore_gtt_mappings(dev_priv);
+ i915_gem_restore_fences(dev_priv);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
intel_csr_ucode_resume(dev_priv);
i915_restore_state(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 772154e4073e..953e1d12c23c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1723,6 +1723,8 @@ struct drm_i915_private {
struct work_struct idle_work;
} gem;
+ u8 pch_ssc_use;
+
/* For i945gm vblank irq vs. C3 workaround */
struct {
struct work_struct work;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 95e7c52cf8ed..d0f94f239919 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -969,6 +969,9 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
lockdep_assert_held(&obj->base.dev->struct_mutex);
+ if (i915_gem_object_never_bind_ggtt(obj))
+ return ERR_PTR(-ENODEV);
+
if (flags & PIN_MAPPABLE &&
(!view || view->type == I915_GGTT_VIEW_NORMAL)) {
/* If the required space is larger than the available
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index 167a7b56ed5b..6795f1daa3d5 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -77,6 +77,12 @@ struct drm_i915_private;
#define I915_GEM_IDLE_TIMEOUT (HZ / 5)
+static inline void tasklet_lock(struct tasklet_struct *t)
+{
+ while (!tasklet_trylock(t))
+ cpu_relax();
+}
+
static inline void __tasklet_disable_sync_once(struct tasklet_struct *t)
{
if (!atomic_fetch_inc(&t->count))
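
tasklet_lock() here is simply tasklet_trylock() spun until it succeeds, with cpu_relax() easing the busy-wait. A user-space analogue using a C11 atomic flag:

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag lock = ATOMIC_FLAG_INIT;

static int my_trylock(void)
{
	return !atomic_flag_test_and_set_explicit(&lock, memory_order_acquire);
}

static void my_lock(void)
{
	while (!my_trylock())
		;	/* the kernel helper calls cpu_relax() here */
}

static void my_unlock(void)
{
	atomic_flag_clear_explicit(&lock, memory_order_release);
}

int main(void)
{
	my_lock();
	puts("critical section");
	my_unlock();
	return 0;
}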
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index a53777dd371c..1c5506822dc7 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -194,6 +194,27 @@ static void free_capture_list(struct i915_request *request)
}
}
+static void remove_from_engine(struct i915_request *rq)
+{
+ struct intel_engine_cs *engine, *locked;
+
+ /*
+ * Virtual engines complicate acquiring the engine timeline lock,
+ * as their rq->engine pointer is not stable until under that
+ * engine lock. The simple ploy we use is to take the lock then
+ * check that the rq still belongs to the newly locked engine.
+ */
+ locked = READ_ONCE(rq->engine);
+ spin_lock(&locked->active.lock);
+ while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
+ spin_unlock(&locked->active.lock);
+ spin_lock(&engine->active.lock);
+ locked = engine;
+ }
+ list_del(&rq->sched.link);
+ spin_unlock(&locked->active.lock);
+}
+
static bool i915_request_retire(struct i915_request *rq)
{
struct i915_active_request *active, *next;
@@ -259,9 +280,7 @@ static bool i915_request_retire(struct i915_request *rq)
* request that we have removed from the HW and put back on a run
* queue.
*/
- spin_lock(&rq->engine->active.lock);
- list_del(&rq->sched.link);
- spin_unlock(&rq->engine->active.lock);
+ remove_from_engine(rq);
spin_lock(&rq->lock);
i915_request_mark_complete(rq);
@@ -358,9 +377,10 @@ __i915_request_await_execution(struct i915_request *rq,
return 0;
}
-void __i915_request_submit(struct i915_request *request)
+bool __i915_request_submit(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
+ bool result = false;
GEM_TRACE("%s fence %llx:%lld, current %d\n",
engine->name,
@@ -370,6 +390,25 @@ void __i915_request_submit(struct i915_request *request)
GEM_BUG_ON(!irqs_disabled());
lockdep_assert_held(&engine->active.lock);
+ /*
+ * With the advent of preempt-to-busy, we frequently encounter
+ * requests that we have unsubmitted from HW, but left running
+ * until the next ack and so have completed in the meantime. On
+ * resubmission of that completed request, we can skip
+ * updating the payload, and execlists can even skip submitting
+ * the request.
+ *
+ * We must remove the request from the caller's priority queue,
+ * and the caller must only call us when the request is in their
+ * priority queue, under the active.lock. This ensures that the
+ * request has *not* yet been retired and we can safely move
+ * the request into the engine->active.list where it will be
+ * dropped upon retiring. (Otherwise if we resubmit a *retired*
+ * request, this would be a horrible use-after-free.)
+ */
+ if (i915_request_completed(request))
+ goto xfer;
+
if (i915_gem_context_is_banned(request->gem_context))
i915_request_skip(request, -EIO);
@@ -393,13 +432,18 @@ void __i915_request_submit(struct i915_request *request)
i915_sw_fence_signaled(&request->semaphore))
engine->saturated |= request->sched.semaphores;
- /* We may be recursing from the signal callback of another i915 fence */
- spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
+ engine->emit_fini_breadcrumb(request,
+ request->ring->vaddr + request->postfix);
- list_move_tail(&request->sched.link, &engine->active.requests);
+ trace_i915_request_execute(request);
+ engine->serial++;
+ result = true;
+
+xfer: /* We may be recursing from the signal callback of another i915 fence */
+ spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
- GEM_BUG_ON(test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
- set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
+ if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags))
+ list_move_tail(&request->sched.link, &engine->active.requests);
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags) &&
@@ -410,12 +454,7 @@ void __i915_request_submit(struct i915_request *request)
spin_unlock(&request->lock);
- engine->emit_fini_breadcrumb(request,
- request->ring->vaddr + request->postfix);
-
- engine->serial++;
-
- trace_i915_request_execute(request);
+ return result;
}
void i915_request_submit(struct i915_request *request)
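
remove_from_engine() copes with the fact that a virtual request's rq->engine can change until the engine lock is held: lock the engine last observed, then re-read and re-lock until the pointer is stable. A pthread sketch of that lock-then-recheck loop (READ_ONCE and the actual list removal are elided):

#include <pthread.h>
#include <stdio.h>

struct engine { pthread_mutex_t lock; };
struct request { struct engine *engine; };

static void remove_from_engine(struct request *rq)
{
	struct engine *locked, *cur;

	locked = rq->engine;
	pthread_mutex_lock(&locked->lock);
	while ((cur = rq->engine) != locked) {
		pthread_mutex_unlock(&locked->lock);
		pthread_mutex_lock(&cur->lock);
		locked = cur;	/* rq->engine is stable only under its lock */
	}
	/* list_del(&rq->sched.link) would go here */
	pthread_mutex_unlock(&locked->lock);
}

int main(void)
{
	struct engine e = { PTHREAD_MUTEX_INITIALIZER };
	struct request rq = { &e };

	remove_from_engine(&rq);
	puts("removed");
	return 0;
}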
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 8ac6e1226a56..e4dd013761e8 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -292,7 +292,7 @@ int i915_request_await_execution(struct i915_request *rq,
void i915_request_add(struct i915_request *rq);
-void __i915_request_submit(struct i915_request *request);
+bool __i915_request_submit(struct i915_request *request);
void i915_request_submit(struct i915_request *request);
void i915_request_skip(struct i915_request *request, int error);
diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c
index fa864d8f2b73..15f8bff141f9 100644
--- a/drivers/gpu/drm/i915/intel_pch.c
+++ b/drivers/gpu/drm/i915/intel_pch.c
@@ -69,6 +69,7 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
return PCH_CNP;
case INTEL_PCH_CMP_DEVICE_ID_TYPE:
+ case INTEL_PCH_CMP2_DEVICE_ID_TYPE:
DRM_DEBUG_KMS("Found Comet Lake PCH (CMP)\n");
WARN_ON(!IS_COFFEELAKE(dev_priv));
/* CometPoint is CNP Compatible */
diff --git a/drivers/gpu/drm/i915/intel_pch.h b/drivers/gpu/drm/i915/intel_pch.h
index e6a2d65f19c6..c29c81ec7971 100644
--- a/drivers/gpu/drm/i915/intel_pch.h
+++ b/drivers/gpu/drm/i915/intel_pch.h
@@ -41,6 +41,7 @@ enum intel_pch {
#define INTEL_PCH_CNP_DEVICE_ID_TYPE 0xA300
#define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE 0x9D80
#define INTEL_PCH_CMP_DEVICE_ID_TYPE 0x0280
+#define INTEL_PCH_CMP2_DEVICE_ID_TYPE 0x0680
#define INTEL_PCH_ICP_DEVICE_ID_TYPE 0x3480
#define INTEL_PCH_MCC_DEVICE_ID_TYPE 0x4B00
#define INTEL_PCH_MCC2_DEVICE_ID_TYPE 0x3880
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index bb6dd54a6ff3..37593831b539 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -118,6 +118,12 @@ static void pm_resume(struct drm_i915_private *i915)
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
intel_gt_sanitize(&i915->gt, false);
i915_gem_sanitize(i915);
+
+ mutex_lock(&i915->drm.struct_mutex);
+ i915_gem_restore_gtt_mappings(i915);
+ i915_gem_restore_fences(i915);
+ mutex_unlock(&i915->drm.struct_mutex);
+
i915_gem_resume(i915);
}
}
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 663ff9f4fac9..1e7b1be25bb0 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -26,6 +26,8 @@
#include "dsi_cfg.h"
#include "msm_kms.h"
+#define DSI_RESET_TOGGLE_DELAY_MS 20
+
static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
{
u32 ver;
@@ -986,7 +988,7 @@ static void dsi_sw_reset(struct msm_dsi_host *msm_host)
wmb(); /* clocks need to be enabled before reset */
dsi_write(msm_host, REG_DSI_RESET, 1);
- wmb(); /* make sure reset happen */
+ msleep(DSI_RESET_TOGGLE_DELAY_MS); /* make sure reset happens */
dsi_write(msm_host, REG_DSI_RESET, 0);
}
@@ -1396,7 +1398,7 @@ static void dsi_sw_reset_restore(struct msm_dsi_host *msm_host)
/* dsi controller can only be reset while clocks are running */
dsi_write(msm_host, REG_DSI_RESET, 1);
- wmb(); /* make sure reset happen */
+ msleep(DSI_RESET_TOGGLE_DELAY_MS); /* make sure reset happens */
dsi_write(msm_host, REG_DSI_RESET, 0);
wmb(); /* controller out of reset */
dsi_write(msm_host, REG_DSI_CTRL, data0);
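
A write barrier only orders the two register writes; it does not guarantee the controller observed the assertion before it is cleared, so the reset is now held for a named 20 ms. A user-space sketch of the toggle (the register writer is a stand-in):

#include <stdio.h>
#include <unistd.h>

#define RESET_TOGGLE_DELAY_MS	20

static void write_reset(int val)
{
	printf("REG_DSI_RESET <- %d\n", val);	/* stand-in for dsi_write() */
}

int main(void)
{
	write_reset(1);				/* assert reset */
	usleep(RESET_TOGGLE_DELAY_MS * 1000);	/* let the block latch it */
	write_reset(0);				/* deassert */
	return 0;
}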
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index e226324adb69..4bdd63b57100 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -1083,7 +1083,7 @@ static const struct dss_features omap34xx_dss_feats = {
static const struct dss_features omap3630_dss_feats = {
.model = DSS_MODEL_OMAP3,
- .fck_div_max = 32,
+ .fck_div_max = 31,
.fck_freq_max = 173000000,
.dss_fck_multiplier = 1,
.parent_clk_name = "dpll4_ck",
diff --git a/drivers/gpu/drm/panel/panel-lg-lb035q02.c b/drivers/gpu/drm/panel/panel-lg-lb035q02.c
index fc82a525b071..ee4379729a5b 100644
--- a/drivers/gpu/drm/panel/panel-lg-lb035q02.c
+++ b/drivers/gpu/drm/panel/panel-lg-lb035q02.c
@@ -220,9 +220,17 @@ static const struct of_device_id lb035q02_of_match[] = {
MODULE_DEVICE_TABLE(of, lb035q02_of_match);
+static const struct spi_device_id lb035q02_ids[] = {
+ { "lb035q02", 0 },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(spi, lb035q02_ids);
+
static struct spi_driver lb035q02_driver = {
.probe = lb035q02_probe,
.remove = lb035q02_remove,
+ .id_table = lb035q02_ids,
.driver = {
.name = "panel-lg-lb035q02",
.of_match_table = lb035q02_of_match,
@@ -231,7 +239,6 @@ static struct spi_driver lb035q02_driver = {
module_spi_driver(lb035q02_driver);
-MODULE_ALIAS("spi:lgphilips,lb035q02");
MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
MODULE_DESCRIPTION("LG.Philips LB035Q02 LCD Panel driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
index 299b217c83e1..20f17e46e65d 100644
--- a/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
+++ b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
@@ -230,9 +230,17 @@ static const struct of_device_id nl8048_of_match[] = {
MODULE_DEVICE_TABLE(of, nl8048_of_match);
+static const struct spi_device_id nl8048_ids[] = {
+ { "nl8048hl11", 0 },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(spi, nl8048_ids);
+
static struct spi_driver nl8048_driver = {
.probe = nl8048_probe,
.remove = nl8048_remove,
+ .id_table = nl8048_ids,
.driver = {
.name = "panel-nec-nl8048hl11",
.pm = &nl8048_pm_ops,
@@ -242,7 +250,6 @@ static struct spi_driver nl8048_driver = {
module_spi_driver(nl8048_driver);
-MODULE_ALIAS("spi:nec,nl8048hl11");
MODULE_AUTHOR("Erik Gilling <konkers@android.com>");
MODULE_DESCRIPTION("NEC-NL8048HL11 Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-sony-acx565akm.c b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
index 305259b58767..3d5b9c4f68d9 100644
--- a/drivers/gpu/drm/panel/panel-sony-acx565akm.c
+++ b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
@@ -684,9 +684,17 @@ static const struct of_device_id acx565akm_of_match[] = {
MODULE_DEVICE_TABLE(of, acx565akm_of_match);
+static const struct spi_device_id acx565akm_ids[] = {
+ { "acx565akm", 0 },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(spi, acx565akm_ids);
+
static struct spi_driver acx565akm_driver = {
.probe = acx565akm_probe,
.remove = acx565akm_remove,
+ .id_table = acx565akm_ids,
.driver = {
.name = "panel-sony-acx565akm",
.of_match_table = acx565akm_of_match,
@@ -695,7 +703,6 @@ static struct spi_driver acx565akm_driver = {
module_spi_driver(acx565akm_driver);
-MODULE_ALIAS("spi:sony,acx565akm");
MODULE_AUTHOR("Nokia Corporation");
MODULE_DESCRIPTION("Sony ACX565AKM LCD Panel Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
index d7b2e34626ef..f2baff827f50 100644
--- a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
@@ -375,8 +375,7 @@ static const struct of_device_id td028ttec1_of_match[] = {
MODULE_DEVICE_TABLE(of, td028ttec1_of_match);
static const struct spi_device_id td028ttec1_ids[] = {
- { "tpo,td028ttec1", 0},
- { "toppoly,td028ttec1", 0 },
+ { "td028ttec1", 0 },
{ /* sentinel */ }
};
diff --git a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
index 84370562910f..ba163c779084 100644
--- a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
+++ b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
@@ -491,9 +491,17 @@ static const struct of_device_id td043mtea1_of_match[] = {
MODULE_DEVICE_TABLE(of, td043mtea1_of_match);
+static const struct spi_device_id td043mtea1_ids[] = {
+ { "td043mtea1", 0 },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(spi, td043mtea1_ids);
+
static struct spi_driver td043mtea1_driver = {
.probe = td043mtea1_probe,
.remove = td043mtea1_remove,
+ .id_table = td043mtea1_ids,
.driver = {
.name = "panel-tpo-td043mtea1",
.pm = &td043mtea1_pm_ops,
@@ -503,7 +511,6 @@ static struct spi_driver td043mtea1_driver = {
module_spi_driver(td043mtea1_driver);
-MODULE_ALIAS("spi:tpo,td043mtea1");
MODULE_AUTHOR("Gražvydas Ignotas <notasas@gmail.com>");
MODULE_DESCRIPTION("TPO TD043MTEA1 Panel Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index bc2ddeb55f5d..f21bc8a7ee3a 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -556,11 +556,11 @@ static int panfrost_probe(struct platform_device *pdev)
return 0;
err_out2:
+ pm_runtime_disable(pfdev->dev);
panfrost_devfreq_fini(pfdev);
err_out1:
panfrost_device_fini(pfdev);
err_out0:
- pm_runtime_disable(pfdev->dev);
drm_dev_put(ddev);
return err;
}
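The panfrost_probe hunk rearranges the error path so teardown runs in the reverse order of setup (runtime PM, enabled late in probe, is now disabled before devfreq is torn down). A generic userspace sketch of that unwinding discipline, with made-up step names:

    #include <stdio.h>

    static int step_a(void)  { puts("a up");   return 0; }
    static int step_b(void)  { puts("b up");   return 0; }
    static int step_c(void)  { puts("c fail"); return -1; }
    static void undo_a(void) { puts("a down"); }
    static void undo_b(void) { puts("b down"); }

    int main(void)
    {
        int err;

        if ((err = step_a()))
            goto err_none;
        if ((err = step_b()))
            goto err_a;
        if ((err = step_c()))
            goto err_b;
        return 0;

    err_b:      /* unwind strictly in reverse order of setup */
        undo_b();
    err_a:
        undo_a();
    err_none:
        return err;
    }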
diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c
index f67ed925c0ef..8822ec13a0d6 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gpu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c
@@ -208,6 +208,9 @@ static void panfrost_gpu_init_features(struct panfrost_device *pfdev)
pfdev->features.mem_features = gpu_read(pfdev, GPU_MEM_FEATURES);
pfdev->features.mmu_features = gpu_read(pfdev, GPU_MMU_FEATURES);
pfdev->features.thread_features = gpu_read(pfdev, GPU_THREAD_FEATURES);
+ pfdev->features.max_threads = gpu_read(pfdev, GPU_THREAD_MAX_THREADS);
+ pfdev->features.thread_max_workgroup_sz = gpu_read(pfdev, GPU_THREAD_MAX_WORKGROUP_SIZE);
+ pfdev->features.thread_max_barrier_sz = gpu_read(pfdev, GPU_THREAD_MAX_BARRIER_SIZE);
pfdev->features.coherency_features = gpu_read(pfdev, GPU_COHERENCY_FEATURES);
for (i = 0; i < 4; i++)
pfdev->features.texture_features[i] = gpu_read(pfdev, GPU_TEXTURE_FEATURES(i));
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index a58551668d9a..21f34d44aac2 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -381,13 +381,19 @@ static void panfrost_job_timedout(struct drm_sched_job *sched_job)
job_read(pfdev, JS_TAIL_LO(js)),
sched_job);
- mutex_lock(&pfdev->reset_lock);
+ if (!mutex_trylock(&pfdev->reset_lock))
+ return;
- for (i = 0; i < NUM_JOB_SLOTS; i++)
- drm_sched_stop(&pfdev->js->queue[i].sched, sched_job);
+ for (i = 0; i < NUM_JOB_SLOTS; i++) {
+ struct drm_gpu_scheduler *sched = &pfdev->js->queue[i].sched;
+
+ drm_sched_stop(sched, sched_job);
+ if (js != i)
+ /* Ensure any timeouts on other slots have finished */
+ cancel_delayed_work_sync(&sched->work_tdr);
+ }
- if (sched_job)
- drm_sched_increase_karma(sched_job);
+ drm_sched_increase_karma(sched_job);
spin_lock_irqsave(&pfdev->js->job_lock, flags);
for (i = 0; i < NUM_JOB_SLOTS; i++) {
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index bdd990568476..a3ed64a1f15e 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -224,9 +224,9 @@ static size_t get_pgsize(u64 addr, size_t size)
return SZ_2M;
}
-void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
- struct panfrost_mmu *mmu,
- u64 iova, size_t size)
+static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
+ struct panfrost_mmu *mmu,
+ u64 iova, size_t size)
{
if (mmu->as < 0)
return;
@@ -406,11 +406,11 @@ addr_to_drm_mm_node(struct panfrost_device *pfdev, int as, u64 addr)
spin_lock(&pfdev->as_lock);
list_for_each_entry(mmu, &pfdev->as_lru_list, list) {
if (as == mmu->as)
- break;
+ goto found_mmu;
}
- if (as != mmu->as)
- goto out;
+ goto out;
+found_mmu:
priv = container_of(mmu, struct panfrost_file_priv, mmu);
spin_lock(&priv->mm_lock);
@@ -432,7 +432,8 @@ out:
#define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE)
-int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, u64 addr)
+static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
+ u64 addr)
{
int ret, i;
struct panfrost_gem_object *bo;
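The found_mmu hunk fixes a classic list_for_each_entry() pitfall: when the loop runs to completion without a match, the cursor is left pointing at container_of() of the list head, not at a real element, so re-testing `as != mmu->as` after the loop reads memory that was never an mmu at all. Jumping to a label from inside the loop is the unambiguous "found" signal. A compilable userspace demonstration (GCC extensions, as in the kernel's own list.h):

    #include <stdio.h>
    #include <stddef.h>

    struct list_head { struct list_head *next, *prev; };
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
    #define list_entry(ptr, type, member) container_of(ptr, type, member)
    #define list_for_each_entry(pos, head, member)                     \
        for (pos = list_entry((head)->next, typeof(*pos), member);     \
             &pos->member != (head);                                   \
             pos = list_entry(pos->member.next, typeof(*pos), member))

    struct mmu { int as; struct list_head list; };

    int main(void)
    {
        struct list_head lru = { &lru, &lru };  /* empty list */
        struct mmu *pos;

        list_for_each_entry(pos, &lru, list) {
            if (pos->as == 7)
                goto found;
        }
        puts("not found");  /* pos is NOT a valid entry here */
        return 1;
    found:
        printf("found as=%d\n", pos->as);
        return 0;
    }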
diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
index 83c57d325ca8..2dba192bf198 100644
--- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
+++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
@@ -16,6 +16,7 @@
#include "panfrost_issues.h"
#include "panfrost_job.h"
#include "panfrost_mmu.h"
+#include "panfrost_perfcnt.h"
#include "panfrost_regs.h"
#define COUNTERS_PER_BLOCK 64
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index d0bc91ed7c90..4528f4dc0b2d 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -379,7 +379,9 @@ radeon_pci_remove(struct pci_dev *pdev)
static void
radeon_pci_shutdown(struct pci_dev *pdev)
{
+#ifdef CONFIG_PPC64
struct drm_device *ddev = pci_get_drvdata(pdev);
+#endif
/* if we are running in a VM, make sure the device
* is torn down properly on reboot/shutdown
@@ -387,11 +389,15 @@ radeon_pci_shutdown(struct pci_dev *pdev)
if (radeon_device_is_virtual())
radeon_pci_remove(pdev);
+#ifdef CONFIG_PPC64
/* Some adapters need to be suspended before a
- * shutdown occurs in order to prevent an error
- * during kexec.
- */
+ * shutdown occurs in order to prevent an error
+ * during kexec.
+ * Make this power specific because it breaks
+ * some non-power boards.
+ */
radeon_suspend_kms(ddev, true, true, false);
+#endif
}
static int radeon_pmops_suspend(struct device *dev)
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_writeback.c b/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
index ae07290bba6a..04efa78d70b6 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
@@ -147,7 +147,7 @@ static int rcar_du_wb_enc_atomic_check(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct drm_framebuffer *fb;
- if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
+ if (!conn_state->writeback_job)
return 0;
fb = conn_state->writeback_job->fb;
@@ -221,7 +221,7 @@ void rcar_du_writeback_setup(struct rcar_du_crtc *rcrtc,
unsigned int i;
state = rcrtc->writeback.base.state;
- if (!state || !state->writeback_job || !state->writeback_job->fb)
+ if (!state || !state->writeback_job)
return;
fb = state->writeback_job->fb;
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 9a0ee74d82dc..f39b97ed4ade 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -479,6 +479,7 @@ void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
struct drm_sched_job *s_job, *tmp;
uint64_t guilty_context;
bool found_guilty = false;
+ struct dma_fence *fence;
list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
struct drm_sched_fence *s_fence = s_job->s_fence;
@@ -492,7 +493,16 @@ void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
dma_fence_set_error(&s_fence->finished, -ECANCELED);
dma_fence_put(s_job->s_fence->parent);
- s_job->s_fence->parent = sched->ops->run_job(s_job);
+ fence = sched->ops->run_job(s_job);
+
+ if (IS_ERR_OR_NULL(fence)) {
+ s_job->s_fence->parent = NULL;
+ dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
+ } else {
+ s_job->s_fence->parent = fence;
+ }
+
}
}
EXPORT_SYMBOL(drm_sched_resubmit_jobs);
@@ -720,7 +730,7 @@ static int drm_sched_main(void *param)
fence = sched->ops->run_job(sched_job);
drm_sched_fence_scheduled(s_fence);
- if (fence) {
+ if (!IS_ERR_OR_NULL(fence)) {
s_fence->parent = dma_fence_get(fence);
r = dma_fence_add_callback(fence, &sched_job->cb,
drm_sched_process_job);
@@ -730,8 +740,11 @@ static int drm_sched_main(void *param)
DRM_ERROR("fence add callback failed (%d)\n",
r);
dma_fence_put(fence);
- } else
+ } else {
+ dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
drm_sched_process_job(NULL, &sched_job->cb);
+ }
wake_up(&sched->job_scheduled);
}
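Both scheduler hunks deal with drivers whose run_job() can now return an ERR_PTR-encoded errno in addition to a fence or NULL; checking only for NULL would let an error cookie be dereferenced later. A userspace re-derivation of the ERR_PTR machinery (the kernel versions live in include/linux/err.h) showing how IS_ERR_OR_NULL() catches both cases:

    #include <stdio.h>
    #include <errno.h>

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error)      { return (void *)error; }
    static inline long PTR_ERR(const void *ptr)  { return (long)ptr; }
    static inline int IS_ERR_OR_NULL(const void *ptr)
    {
        return !ptr || (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static void *run_job(int fail)
    {
        return fail ? ERR_PTR(-ENOMEM) : (void *)0x1000; /* fake fence */
    }

    int main(void)
    {
        void *fence = run_job(1);

        if (IS_ERR_OR_NULL(fence))
            printf("no fence, error %ld\n", fence ? PTR_ERR(fence) : 0);
        return 0;
    }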
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
index 525dc1c0f1c1..530edb3b51cc 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
@@ -7,6 +7,7 @@
#include <linux/gpio.h>
#include <linux/mod_devicetable.h>
#include <linux/of_gpio.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <drm/drm_atomic_helper.h>
diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig
index 504763423d46..a46ac284dd5e 100644
--- a/drivers/gpu/drm/tiny/Kconfig
+++ b/drivers/gpu/drm/tiny/Kconfig
@@ -63,7 +63,6 @@ config TINYDRM_REPAPER
depends on DRM && SPI
select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
- depends on THERMAL || !THERMAL
help
DRM driver for the following Pervasive Displays panels:
1.44" TFT EPD Panel (E1144CS021)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 20ff56f27aa4..98819462f025 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -185,8 +185,9 @@ static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
list_add_tail(&bo->lru, &man->lru[bo->priority]);
kref_get(&bo->list_kref);
- if (bo->ttm && !(bo->ttm->page_flags &
- (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) {
+ if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm &&
+ !(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG |
+ TTM_PAGE_FLAG_SWAPPED))) {
list_add_tail(&bo->swap, &bdev->glob->swap_lru[bo->priority]);
kref_get(&bo->list_kref);
}
@@ -878,11 +879,11 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
if (!bo) {
if (busy_bo)
- ttm_bo_get(busy_bo);
+ kref_get(&busy_bo->list_kref);
spin_unlock(&glob->lru_lock);
ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
if (busy_bo)
- ttm_bo_put(busy_bo);
+ kref_put(&busy_bo->list_kref, ttm_bo_release_list);
return ret;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 76eedb963693..46dc3de7e81b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -278,15 +278,13 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
else
ret = vmf_insert_pfn(&cvma, address, pfn);
- /*
- * Somebody beat us to this PTE or prefaulting to
- * an already populated PTE, or prefaulting error.
- */
-
- if (unlikely((ret == VM_FAULT_NOPAGE && i > 0)))
- break;
- else if (unlikely(ret & VM_FAULT_ERROR))
- goto out_io_unlock;
+ /* Never error on prefaulted PTEs */
+ if (unlikely((ret & VM_FAULT_ERROR))) {
+ if (i == 0)
+ goto out_io_unlock;
+ else
+ break;
+ }
address += PAGE_SIZE;
if (unlikely(++page_offset >= page_last))
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 5d80507b539b..19c092d75266 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -557,13 +557,16 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
if (args->bcl_start != args->bcl_end) {
bin = kcalloc(1, sizeof(*bin), GFP_KERNEL);
- if (!bin)
+ if (!bin) {
+ v3d_job_put(&render->base);
return -ENOMEM;
+ }
ret = v3d_job_init(v3d, file_priv, &bin->base,
v3d_job_free, args->in_sync_bcl);
if (ret) {
v3d_job_put(&render->base);
+ kfree(bin);
return ret;
}
diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c
index 1ce4d7142b6e..bf720206727f 100644
--- a/drivers/gpu/drm/vc4/vc4_txp.c
+++ b/drivers/gpu/drm/vc4/vc4_txp.c
@@ -231,7 +231,7 @@ static int vc4_txp_connector_atomic_check(struct drm_connector *conn,
int i;
conn_state = drm_atomic_get_new_connector_state(state, conn);
- if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
+ if (!conn_state->writeback_job)
return 0;
crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
@@ -271,8 +271,7 @@ static void vc4_txp_connector_atomic_commit(struct drm_connector *conn,
u32 ctrl;
int i;
- if (WARN_ON(!conn_state->writeback_job ||
- !conn_state->writeback_job->fb))
+ if (WARN_ON(!conn_state->writeback_job))
return;
mode = &conn_state->crtc->state->adjusted_mode;
diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
index ba1828acd8c9..4be49c1aef51 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.c
+++ b/drivers/gpu/drm/xen/xen_drm_front.c
@@ -718,17 +718,9 @@ static int xen_drv_probe(struct xenbus_device *xb_dev,
struct device *dev = &xb_dev->dev;
int ret;
- /*
- * The device is not spawn from a device tree, so arch_setup_dma_ops
- * is not called, thus leaving the device with dummy DMA ops.
- * This makes the device return error on PRIME buffer import, which
- * is not correct: to fix this call of_dma_configure() with a NULL
- * node to set default DMA ops.
- */
- dev->coherent_dma_mask = DMA_BIT_MASK(32);
- ret = of_dma_configure(dev, NULL, true);
+ ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (ret < 0) {
- DRM_ERROR("Cannot setup DMA ops, ret %d", ret);
+ DRM_ERROR("Cannot setup DMA mask, ret %d", ret);
return ret;
}
diff --git a/drivers/hid/hid-axff.c b/drivers/hid/hid-axff.c
index 6654c1550e2e..fbe4e16ab029 100644
--- a/drivers/hid/hid-axff.c
+++ b/drivers/hid/hid-axff.c
@@ -63,13 +63,20 @@ static int axff_init(struct hid_device *hid)
{
struct axff_device *axff;
struct hid_report *report;
- struct hid_input *hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
+ struct hid_input *hidinput;
struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
- struct input_dev *dev = hidinput->input;
+ struct input_dev *dev;
int field_count = 0;
int i, j;
int error;
+ if (list_empty(&hid->inputs)) {
+ hid_err(hid, "no inputs found\n");
+ return -ENODEV;
+ }
+ hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
+ dev = hidinput->input;
+
if (list_empty(report_list)) {
hid_err(hid, "no output reports found\n");
return -ENODEV;
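This hid-axff hunk is the first of a dozen identical fixes in this series (hid-dr, hid-emsff, hid-gaff, hid-holtekff, the hid-lg*ff family, hid-microsoft, hid-sony, hid-tmff, hid-zpff): a malformed or malicious device can reach the *ff_init() path with an empty hid->inputs list, and list_first_entry() on an empty list does not return NULL; it returns the list head itself reinterpreted as an entry. A self-contained userspace illustration of why the guard is required:

    #include <stdio.h>
    #include <stddef.h>

    struct list_head { struct list_head *next, *prev; };

    #define LIST_HEAD_INIT(name) { &(name), &(name) }
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
    #define list_first_entry(head, type, member) \
        container_of((head)->next, type, member)

    static int list_empty(const struct list_head *head)
    {
        return head->next == head;
    }

    struct hid_input { int id; struct list_head list; };

    int main(void)
    {
        struct list_head inputs = LIST_HEAD_INIT(inputs);

        if (list_empty(&inputs)) {
            puts("no inputs found");    /* the added guard */
            return 1;
        }
        /* Without the guard this would "find" a bogus entry built
         * from the list head itself: */
        printf("%d\n", list_first_entry(&inputs, struct hid_input, list)->id);
        return 0;
    }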
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 3eaee2c37931..63fdbf09b044 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1139,6 +1139,7 @@ int hid_open_report(struct hid_device *device)
__u8 *start;
__u8 *buf;
__u8 *end;
+ __u8 *next;
int ret;
static int (*dispatch_type[])(struct hid_parser *parser,
struct hid_item *item) = {
@@ -1192,7 +1193,8 @@ int hid_open_report(struct hid_device *device)
device->collection_size = HID_DEFAULT_NUM_COLLECTIONS;
ret = -EINVAL;
- while ((start = fetch_item(start, end, &item)) != NULL) {
+ while ((next = fetch_item(start, end, &item)) != NULL) {
+ start = next;
if (item.format != HID_ITEM_FORMAT_SHORT) {
hid_err(device, "unexpected long global item\n");
@@ -1230,7 +1232,8 @@ int hid_open_report(struct hid_device *device)
}
}
- hid_err(device, "item fetching failed at offset %d\n", (int)(end - start));
+ hid_err(device, "item fetching failed at offset %u/%u\n",
+ size - (unsigned int)(end - start), size);
err:
kfree(parser->collection_stack);
alloc_err:
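The hid_open_report() hunk has two parts: `start` is now advanced only after a successful fetch, and the error message reports the bytes actually consumed. Before the change, `start` was overwritten with fetch_item()'s return value inside the loop condition, so on failure it was NULL and `end - start` was meaningless. A toy version of the corrected loop:

    #include <stdio.h>

    /* Toy fetch_item(): consume one byte unless it is 0xff (a "bad item") */
    static const unsigned char *fetch_item(const unsigned char *start,
                                           const unsigned char *end)
    {
        if (start == end || *start == 0xff)
            return NULL;
        return start + 1;
    }

    int main(void)
    {
        unsigned char buf[] = { 1, 2, 0xff, 4 };
        unsigned int size = sizeof(buf);
        const unsigned char *start = buf, *end = buf + size, *next;

        while ((next = fetch_item(start, end)) != NULL)
            start = next;   /* only advance on success */

        /* start still points at the failing item */
        printf("item fetching failed at offset %u/%u\n",
               size - (unsigned int)(end - start), size);
        return 0;
    }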
diff --git a/drivers/hid/hid-dr.c b/drivers/hid/hid-dr.c
index 17e17f9a597b..947f19f8685f 100644
--- a/drivers/hid/hid-dr.c
+++ b/drivers/hid/hid-dr.c
@@ -75,13 +75,19 @@ static int drff_init(struct hid_device *hid)
{
struct drff_device *drff;
struct hid_report *report;
- struct hid_input *hidinput = list_first_entry(&hid->inputs,
- struct hid_input, list);
+ struct hid_input *hidinput;
struct list_head *report_list =
&hid->report_enum[HID_OUTPUT_REPORT].report_list;
- struct input_dev *dev = hidinput->input;
+ struct input_dev *dev;
int error;
+ if (list_empty(&hid->inputs)) {
+ hid_err(hid, "no inputs found\n");
+ return -ENODEV;
+ }
+ hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
+ dev = hidinput->input;
+
if (list_empty(report_list)) {
hid_err(hid, "no output reports found\n");
return -ENODEV;
diff --git a/drivers/hid/hid-emsff.c b/drivers/hid/hid-emsff.c
index 7cd5651872d3..c34f2e5a049f 100644
--- a/drivers/hid/hid-emsff.c
+++ b/drivers/hid/hid-emsff.c
@@ -47,13 +47,19 @@ static int emsff_init(struct hid_device *hid)
{
struct emsff_device *emsff;
struct hid_report *report;
- struct hid_input *hidinput = list_first_entry(&hid->inputs,
- struct hid_input, list);
+ struct hid_input *hidinput;
struct list_head *report_list =
&hid->report_enum[HID_OUTPUT_REPORT].report_list;
- struct input_dev *dev = hidinput->input;
+ struct input_dev *dev;
int error;
+ if (list_empty(&hid->inputs)) {
+ hid_err(hid, "no inputs found\n");
+ return -ENODEV;
+ }
+ hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
+ dev = hidinput->input;
+
if (list_empty(report_list)) {
hid_err(hid, "no output reports found\n");
return -ENODEV;
diff --git a/drivers/hid/hid-gaff.c b/drivers/hid/hid-gaff.c
index 0f95c96b70f8..ecbd3995a4eb 100644
--- a/drivers/hid/hid-gaff.c
+++ b/drivers/hid/hid-gaff.c
@@ -64,14 +64,20 @@ static int gaff_init(struct hid_device *hid)
{
struct gaff_device *gaff;
struct hid_report *report;
- struct hid_input *hidinput = list_entry(hid->inputs.next,
- struct hid_input, list);
+ struct hid_input *hidinput;
struct list_head *report_list =
&hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct list_head *report_ptr = report_list;
- struct input_dev *dev = hidinput->input;
+ struct input_dev *dev;
int error;
+ if (list_empty(&hid->inputs)) {
+ hid_err(hid, "no inputs found\n");
+ return -ENODEV;
+ }
+ hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+ dev = hidinput->input;
+
if (list_empty(report_list)) {
hid_err(hid, "no output reports found\n");
return -ENODEV;
diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c
index 84f8c127ebdc..d86a9189e88f 100644
--- a/drivers/hid/hid-google-hammer.c
+++ b/drivers/hid/hid-google-hammer.c
@@ -470,6 +470,10 @@ static const struct hid_device_id hammer_devices[] = {
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_HAMMER) },
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MAGNEMITE) },
+ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MASTERBALL) },
+ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_STAFF) },
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_WAND) },
diff --git a/drivers/hid/hid-holtekff.c b/drivers/hid/hid-holtekff.c
index 10a720558830..8619b80c834c 100644
--- a/drivers/hid/hid-holtekff.c
+++ b/drivers/hid/hid-holtekff.c
@@ -124,13 +124,19 @@ static int holtekff_init(struct hid_device *hid)
{
struct holtekff_device *holtekff;
struct hid_report *report;
- struct hid_input *hidinput = list_entry(hid->inputs.next,
- struct hid_input, list);
+ struct hid_input *hidinput;
struct list_head *report_list =
&hid->report_enum[HID_OUTPUT_REPORT].report_list;
- struct input_dev *dev = hidinput->input;
+ struct input_dev *dev;
int error;
+ if (list_empty(&hid->inputs)) {
+ hid_err(hid, "no inputs found\n");
+ return -ENODEV;
+ }
+ hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+ dev = hidinput->input;
+
if (list_empty(report_list)) {
hid_err(hid, "no output report found\n");
return -ENODEV;
diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
index cc5b09b87ab0..79a28fc91521 100644
--- a/drivers/hid/hid-hyperv.c
+++ b/drivers/hid/hid-hyperv.c
@@ -314,60 +314,24 @@ static void mousevsc_on_receive(struct hv_device *device,
static void mousevsc_on_channel_callback(void *context)
{
- const int packet_size = 0x100;
- int ret;
struct hv_device *device = context;
- u32 bytes_recvd;
- u64 req_id;
struct vmpacket_descriptor *desc;
- unsigned char *buffer;
- int bufferlen = packet_size;
-
- buffer = kmalloc(bufferlen, GFP_ATOMIC);
- if (!buffer)
- return;
-
- do {
- ret = vmbus_recvpacket_raw(device->channel, buffer,
- bufferlen, &bytes_recvd, &req_id);
-
- switch (ret) {
- case 0:
- if (bytes_recvd <= 0) {
- kfree(buffer);
- return;
- }
- desc = (struct vmpacket_descriptor *)buffer;
-
- switch (desc->type) {
- case VM_PKT_COMP:
- break;
-
- case VM_PKT_DATA_INBAND:
- mousevsc_on_receive(device, desc);
- break;
-
- default:
- pr_err("unhandled packet type %d, tid %llx len %d\n",
- desc->type, req_id, bytes_recvd);
- break;
- }
+ foreach_vmbus_pkt(desc, device->channel) {
+ switch (desc->type) {
+ case VM_PKT_COMP:
break;
- case -ENOBUFS:
- kfree(buffer);
- /* Handle large packet */
- bufferlen = bytes_recvd;
- buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
-
- if (!buffer)
- return;
+ case VM_PKT_DATA_INBAND:
+ mousevsc_on_receive(device, desc);
+ break;
+ default:
+ pr_err("Unhandled packet type %d, tid %llx len %d\n",
+ desc->type, desc->trans_id, desc->len8 * 8);
break;
}
- } while (1);
-
+ }
}
static int mousevsc_connect_to_vsp(struct hv_device *device)
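The mousevsc rework drops the manual vmbus_recvpacket_raw() loop, with its GFP_ATOMIC buffer and -ENOBUFS resizing dance, in favor of the in-place foreach_vmbus_pkt() iterator over the ring buffer. A hedged userspace sketch of the iterator-macro idea over length-prefixed packets (pkt_desc and foreach_pkt are simplified stand-ins, not the vmbus API):

    #include <stdio.h>
    #include <stdint.h>

    struct pkt_desc { uint8_t type; uint8_t len8; /* length in 8-byte units */ };

    #define foreach_pkt(desc, buf, buflen)                                  \
        for ((desc) = (struct pkt_desc *)(buf);                             \
             (char *)(desc) < (char *)(buf) + (buflen);                     \
             (desc) = (struct pkt_desc *)((char *)(desc) + (desc)->len8 * 8))

    int main(void)
    {
        /* two fake packets, 8 bytes each */
        uint8_t buf[16] = { [0] = 1, [1] = 1, [8] = 2, [9] = 1 };
        struct pkt_desc *desc;

        foreach_pkt(desc, buf, sizeof(buf))
            printf("packet type %u, len %u\n", desc->type, desc->len8 * 8);
        return 0;
    }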
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 76969a22b0f2..447e8db21174 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -476,6 +476,8 @@
#define USB_DEVICE_ID_GOOGLE_STAFF 0x502b
#define USB_DEVICE_ID_GOOGLE_WAND 0x502d
#define USB_DEVICE_ID_GOOGLE_WHISKERS 0x5030
+#define USB_DEVICE_ID_GOOGLE_MASTERBALL 0x503c
+#define USB_DEVICE_ID_GOOGLE_MAGNEMITE 0x503d
#define USB_VENDOR_ID_GOTOP 0x08f2
#define USB_DEVICE_ID_SUPER_Q2 0x007f
diff --git a/drivers/hid/hid-lg2ff.c b/drivers/hid/hid-lg2ff.c
index dd1a6c3a7de6..73d07e35f12a 100644
--- a/drivers/hid/hid-lg2ff.c
+++ b/drivers/hid/hid-lg2ff.c
@@ -50,11 +50,17 @@ int lg2ff_init(struct hid_device *hid)
{
struct lg2ff_device *lg2ff;
struct hid_report *report;
- struct hid_input *hidinput = list_entry(hid->inputs.next,
- struct hid_input, list);
- struct input_dev *dev = hidinput->input;
+ struct hid_input *hidinput;
+ struct input_dev *dev;
int error;
+ if (list_empty(&hid->inputs)) {
+ hid_err(hid, "no inputs found\n");
+ return -ENODEV;
+ }
+ hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+ dev = hidinput->input;
+
/* Check that the report looks ok */
report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7);
if (!report)
diff --git a/drivers/hid/hid-lg3ff.c b/drivers/hid/hid-lg3ff.c
index 9ecb6fd06203..b7e1949f3cf7 100644
--- a/drivers/hid/hid-lg3ff.c
+++ b/drivers/hid/hid-lg3ff.c
@@ -117,12 +117,19 @@ static const signed short ff3_joystick_ac[] = {
int lg3ff_init(struct hid_device *hid)
{
- struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
- struct input_dev *dev = hidinput->input;
+ struct hid_input *hidinput;
+ struct input_dev *dev;
const signed short *ff_bits = ff3_joystick_ac;
int error;
int i;
+ if (list_empty(&hid->inputs)) {
+ hid_err(hid, "no inputs found\n");
+ return -ENODEV;
+ }
+ hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+ dev = hidinput->input;
+
/* Check that the report looks ok */
if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 35))
return -ENODEV;
diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
index 03f0220062ca..5e6a0cef2a06 100644
--- a/drivers/hid/hid-lg4ff.c
+++ b/drivers/hid/hid-lg4ff.c
@@ -1253,8 +1253,8 @@ static int lg4ff_handle_multimode_wheel(struct hid_device *hid, u16 *real_produc
int lg4ff_init(struct hid_device *hid)
{
- struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
- struct input_dev *dev = hidinput->input;
+ struct hid_input *hidinput;
+ struct input_dev *dev;
struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
const struct usb_device_descriptor *udesc = &(hid_to_usb_dev(hid)->descriptor);
@@ -1266,6 +1266,13 @@ int lg4ff_init(struct hid_device *hid)
int mmode_ret, mmode_idx = -1;
u16 real_product_id;
+ if (list_empty(&hid->inputs)) {
+ hid_err(hid, "no inputs found\n");
+ return -ENODEV;
+ }
+ hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+ dev = hidinput->input;
+
/* Check that the report looks ok */
if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7))
return -1;
diff --git a/drivers/hid/hid-lgff.c b/drivers/hid/hid-lgff.c
index c79a6ec43745..aed4ddc397a9 100644
--- a/drivers/hid/hid-lgff.c
+++ b/drivers/hid/hid-lgff.c
@@ -115,12 +115,19 @@ static void hid_lgff_set_autocenter(struct input_dev *dev, u16 magnitude)
int lgff_init(struct hid_device* hid)
{
- struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
- struct input_dev *dev = hidinput->input;
+ struct hid_input *hidinput;
+ struct input_dev *dev;
const signed short *ff_bits = ff_joystick;
int error;
int i;
+ if (list_empty(&hid->inputs)) {
+ hid_err(hid, "no inputs found\n");
+ return -ENODEV;
+ }
+ hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+ dev = hidinput->input;
+
/* Check that the report looks ok */
if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7))
return -ENODEV;
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 0179f7ed77e5..8e91e2f06cb4 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -1669,6 +1669,7 @@ static void hidpp_touchpad_raw_xy_event(struct hidpp_device *hidpp_dev,
#define HIDPP_FF_EFFECTID_NONE -1
#define HIDPP_FF_EFFECTID_AUTOCENTER -2
+#define HIDPP_AUTOCENTER_PARAMS_LENGTH 18
#define HIDPP_FF_MAX_PARAMS 20
#define HIDPP_FF_RESERVED_SLOTS 1
@@ -2009,7 +2010,7 @@ static int hidpp_ff_erase_effect(struct input_dev *dev, int effect_id)
static void hidpp_ff_set_autocenter(struct input_dev *dev, u16 magnitude)
{
struct hidpp_ff_private_data *data = dev->ff->private;
- u8 params[18];
+ u8 params[HIDPP_AUTOCENTER_PARAMS_LENGTH];
dbg_hid("Setting autocenter to %d.\n", magnitude);
@@ -2077,23 +2078,34 @@ static DEVICE_ATTR(range, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH, hidpp
static void hidpp_ff_destroy(struct ff_device *ff)
{
struct hidpp_ff_private_data *data = ff->private;
+ struct hid_device *hid = data->hidpp->hid_dev;
+ hid_info(hid, "Unloading HID++ force feedback.\n");
+
+ device_remove_file(&hid->dev, &dev_attr_range);
+ destroy_workqueue(data->wq);
kfree(data->effect_ids);
}
-static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
+static int hidpp_ff_init(struct hidpp_device *hidpp,
+ struct hidpp_ff_private_data *data)
{
struct hid_device *hid = hidpp->hid_dev;
- struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
- struct input_dev *dev = hidinput->input;
+ struct hid_input *hidinput;
+ struct input_dev *dev;
const struct usb_device_descriptor *udesc = &(hid_to_usb_dev(hid)->descriptor);
const u16 bcdDevice = le16_to_cpu(udesc->bcdDevice);
struct ff_device *ff;
- struct hidpp_report response;
- struct hidpp_ff_private_data *data;
- int error, j, num_slots;
+ int error, j, num_slots = data->num_effects;
u8 version;
+ if (list_empty(&hid->inputs)) {
+ hid_err(hid, "no inputs found\n");
+ return -ENODEV;
+ }
+ hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+ dev = hidinput->input;
+
if (!dev) {
hid_err(hid, "Struct input_dev not set!\n");
return -EINVAL;
@@ -2109,27 +2121,17 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
for (j = 0; hidpp_ff_effects_v2[j] >= 0; j++)
set_bit(hidpp_ff_effects_v2[j], dev->ffbit);
- /* Read number of slots available in device */
- error = hidpp_send_fap_command_sync(hidpp, feature_index,
- HIDPP_FF_GET_INFO, NULL, 0, &response);
- if (error) {
- if (error < 0)
- return error;
- hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n",
- __func__, error);
- return -EPROTO;
- }
-
- num_slots = response.fap.params[0] - HIDPP_FF_RESERVED_SLOTS;
-
error = input_ff_create(dev, num_slots);
if (error) {
hid_err(dev, "Failed to create FF device!\n");
return error;
}
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
+ /*
+ * Create a copy of the passed data, so we can transfer memory
+ * ownership to the FF core
+ */
+ data = kmemdup(data, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->effect_ids = kcalloc(num_slots, sizeof(int), GFP_KERNEL);
@@ -2145,10 +2147,7 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
}
data->hidpp = hidpp;
- data->feature_index = feature_index;
data->version = version;
- data->slot_autocenter = 0;
- data->num_effects = num_slots;
for (j = 0; j < num_slots; j++)
data->effect_ids[j] = -1;
@@ -2162,68 +2161,20 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
ff->set_autocenter = hidpp_ff_set_autocenter;
ff->destroy = hidpp_ff_destroy;
-
- /* reset all forces */
- error = hidpp_send_fap_command_sync(hidpp, feature_index,
- HIDPP_FF_RESET_ALL, NULL, 0, &response);
-
- /* Read current Range */
- error = hidpp_send_fap_command_sync(hidpp, feature_index,
- HIDPP_FF_GET_APERTURE, NULL, 0, &response);
- if (error)
- hid_warn(hidpp->hid_dev, "Failed to read range from device!\n");
- data->range = error ? 900 : get_unaligned_be16(&response.fap.params[0]);
-
/* Create sysfs interface */
error = device_create_file(&(hidpp->hid_dev->dev), &dev_attr_range);
if (error)
hid_warn(hidpp->hid_dev, "Unable to create sysfs interface for \"range\", errno %d!\n", error);
- /* Read the current gain values */
- error = hidpp_send_fap_command_sync(hidpp, feature_index,
- HIDPP_FF_GET_GLOBAL_GAINS, NULL, 0, &response);
- if (error)
- hid_warn(hidpp->hid_dev, "Failed to read gain values from device!\n");
- data->gain = error ? 0xffff : get_unaligned_be16(&response.fap.params[0]);
- /* ignore boost value at response.fap.params[2] */
-
/* init the hardware command queue */
atomic_set(&data->workqueue_size, 0);
- /* initialize with zero autocenter to get wheel in usable state */
- hidpp_ff_set_autocenter(dev, 0);
-
hid_info(hid, "Force feedback support loaded (firmware release %d).\n",
version);
return 0;
}
-static int hidpp_ff_deinit(struct hid_device *hid)
-{
- struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
- struct input_dev *dev = hidinput->input;
- struct hidpp_ff_private_data *data;
-
- if (!dev) {
- hid_err(hid, "Struct input_dev not found!\n");
- return -EINVAL;
- }
-
- hid_info(hid, "Unloading HID++ force feedback.\n");
- data = dev->ff->private;
- if (!data) {
- hid_err(hid, "Private data not found!\n");
- return -EINVAL;
- }
-
- destroy_workqueue(data->wq);
- device_remove_file(&hid->dev, &dev_attr_range);
-
- return 0;
-}
-
-
/* ************************************************************************** */
/* */
/* Device Support */
@@ -2725,24 +2676,93 @@ static int k400_connect(struct hid_device *hdev, bool connected)
#define HIDPP_PAGE_G920_FORCE_FEEDBACK 0x8123
-static int g920_get_config(struct hidpp_device *hidpp)
+static int g920_ff_set_autocenter(struct hidpp_device *hidpp,
+ struct hidpp_ff_private_data *data)
{
+ struct hidpp_report response;
+ u8 params[HIDPP_AUTOCENTER_PARAMS_LENGTH] = {
+ [1] = HIDPP_FF_EFFECT_SPRING | HIDPP_FF_EFFECT_AUTOSTART,
+ };
+ int ret;
+
+ /* initialize with zero autocenter to get wheel in usable state */
+ dbg_hid("Setting autocenter to 0.\n");
+ ret = hidpp_send_fap_command_sync(hidpp, data->feature_index,
+ HIDPP_FF_DOWNLOAD_EFFECT,
+ params, ARRAY_SIZE(params),
+ &response);
+ if (ret)
+ hid_warn(hidpp->hid_dev, "Failed to autocenter device!\n");
+ else
+ data->slot_autocenter = response.fap.params[0];
+
+ return ret;
+}
+
+static int g920_get_config(struct hidpp_device *hidpp,
+ struct hidpp_ff_private_data *data)
+{
+ struct hidpp_report response;
u8 feature_type;
- u8 feature_index;
int ret;
+ memset(data, 0, sizeof(*data));
+
/* Find feature and store for later use */
ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_G920_FORCE_FEEDBACK,
- &feature_index, &feature_type);
+ &data->feature_index, &feature_type);
if (ret)
return ret;
- ret = hidpp_ff_init(hidpp, feature_index);
+ /* Read number of slots available in device */
+ ret = hidpp_send_fap_command_sync(hidpp, data->feature_index,
+ HIDPP_FF_GET_INFO,
+ NULL, 0,
+ &response);
+ if (ret) {
+ if (ret < 0)
+ return ret;
+ hid_err(hidpp->hid_dev,
+ "%s: received protocol error 0x%02x\n", __func__, ret);
+ return -EPROTO;
+ }
+
+ data->num_effects = response.fap.params[0] - HIDPP_FF_RESERVED_SLOTS;
+
+ /* reset all forces */
+ ret = hidpp_send_fap_command_sync(hidpp, data->feature_index,
+ HIDPP_FF_RESET_ALL,
+ NULL, 0,
+ &response);
if (ret)
- hid_warn(hidpp->hid_dev, "Unable to initialize force feedback support, errno %d\n",
- ret);
+ hid_warn(hidpp->hid_dev, "Failed to reset all forces!\n");
- return 0;
+ ret = hidpp_send_fap_command_sync(hidpp, data->feature_index,
+ HIDPP_FF_GET_APERTURE,
+ NULL, 0,
+ &response);
+ if (ret) {
+ hid_warn(hidpp->hid_dev,
+ "Failed to read range from device!\n");
+ }
+ data->range = ret ?
+ 900 : get_unaligned_be16(&response.fap.params[0]);
+
+ /* Read the current gain values */
+ ret = hidpp_send_fap_command_sync(hidpp, data->feature_index,
+ HIDPP_FF_GET_GLOBAL_GAINS,
+ NULL, 0,
+ &response);
+ if (ret)
+ hid_warn(hidpp->hid_dev,
+ "Failed to read gain values from device!\n");
+ data->gain = ret ?
+ 0xffff : get_unaligned_be16(&response.fap.params[0]);
+
+ /* ignore boost value at response.fap.params[2] */
+
+ return g920_ff_set_autocenter(hidpp, data);
}
/* -------------------------------------------------------------------------- */
@@ -3458,34 +3478,45 @@ static int hidpp_get_report_length(struct hid_device *hdev, int id)
return report->field[0]->report_count + 1;
}
-static bool hidpp_validate_report(struct hid_device *hdev, int id,
- int expected_length, bool optional)
+static bool hidpp_validate_device(struct hid_device *hdev)
{
- int report_length;
+ struct hidpp_device *hidpp = hid_get_drvdata(hdev);
+ int id, report_length, supported_reports = 0;
- if (id >= HID_MAX_IDS || id < 0) {
- hid_err(hdev, "invalid HID report id %u\n", id);
- return false;
+ id = REPORT_ID_HIDPP_SHORT;
+ report_length = hidpp_get_report_length(hdev, id);
+ if (report_length) {
+ if (report_length < HIDPP_REPORT_SHORT_LENGTH)
+ goto bad_device;
+
+ supported_reports++;
}
+ id = REPORT_ID_HIDPP_LONG;
report_length = hidpp_get_report_length(hdev, id);
- if (!report_length)
- return optional;
+ if (report_length) {
+ if (report_length < HIDPP_REPORT_LONG_LENGTH)
+ goto bad_device;
- if (report_length < expected_length) {
- hid_warn(hdev, "not enough values in hidpp report %d\n", id);
- return false;
+ supported_reports++;
}
- return true;
-}
+ id = REPORT_ID_HIDPP_VERY_LONG;
+ report_length = hidpp_get_report_length(hdev, id);
+ if (report_length) {
+ if (report_length < HIDPP_REPORT_LONG_LENGTH ||
+ report_length > HIDPP_REPORT_VERY_LONG_MAX_LENGTH)
+ goto bad_device;
-static bool hidpp_validate_device(struct hid_device *hdev)
-{
- return hidpp_validate_report(hdev, REPORT_ID_HIDPP_SHORT,
- HIDPP_REPORT_SHORT_LENGTH, false) &&
- hidpp_validate_report(hdev, REPORT_ID_HIDPP_LONG,
- HIDPP_REPORT_LONG_LENGTH, true);
+ supported_reports++;
+ hidpp->very_long_report_length = report_length;
+ }
+
+ return supported_reports;
+
+bad_device:
+ hid_warn(hdev, "not enough values in hidpp report %d\n", id);
+ return false;
}
static bool hidpp_application_equals(struct hid_device *hdev,
@@ -3505,6 +3536,7 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
int ret;
bool connected;
unsigned int connect_mask = HID_CONNECT_DEFAULT;
+ struct hidpp_ff_private_data data;
/* report_fixup needs drvdata to be set before we call hid_parse */
hidpp = devm_kzalloc(&hdev->dev, sizeof(*hidpp), GFP_KERNEL);
@@ -3531,11 +3563,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
}
- hidpp->very_long_report_length =
- hidpp_get_report_length(hdev, REPORT_ID_HIDPP_VERY_LONG);
- if (hidpp->very_long_report_length > HIDPP_REPORT_VERY_LONG_MAX_LENGTH)
- hidpp->very_long_report_length = HIDPP_REPORT_VERY_LONG_MAX_LENGTH;
-
if (id->group == HID_GROUP_LOGITECH_DJ_DEVICE)
hidpp->quirks |= HIDPP_QUIRK_UNIFYING;
@@ -3614,7 +3641,7 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (ret)
goto hid_hw_init_fail;
} else if (connected && (hidpp->quirks & HIDPP_QUIRK_CLASS_G920)) {
- ret = g920_get_config(hidpp);
+ ret = g920_get_config(hidpp, &data);
if (ret)
goto hid_hw_init_fail;
}
@@ -3636,6 +3663,14 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
goto hid_hw_start_fail;
}
+ if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) {
+ ret = hidpp_ff_init(hidpp, &data);
+ if (ret)
+ hid_warn(hidpp->hid_dev,
+ "Unable to initialize force feedback support, errno %d\n",
+ ret);
+ }
+
return ret;
hid_hw_init_fail:
@@ -3658,9 +3693,6 @@ static void hidpp_remove(struct hid_device *hdev)
sysfs_remove_group(&hdev->dev.kobj, &ps_attribute_group);
- if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920)
- hidpp_ff_deinit(hdev);
-
hid_hw_stop(hdev);
cancel_work_sync(&hidpp->work);
mutex_destroy(&hidpp->send_mutex);
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index 2cf83856f2e4..2d8b589201a4 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -328,11 +328,17 @@ static int ms_play_effect(struct input_dev *dev, void *data,
static int ms_init_ff(struct hid_device *hdev)
{
- struct hid_input *hidinput = list_entry(hdev->inputs.next,
- struct hid_input, list);
- struct input_dev *input_dev = hidinput->input;
+ struct hid_input *hidinput;
+ struct input_dev *input_dev;
struct ms_data *ms = hid_get_drvdata(hdev);
+ if (list_empty(&hdev->inputs)) {
+ hid_err(hdev, "no inputs found\n");
+ return -ENODEV;
+ }
+ hidinput = list_entry(hdev->inputs.next, struct hid_input, list);
+ input_dev = hidinput->input;
+
if (!(ms->quirks & MS_QUIRK_FF))
return 0;
diff --git a/drivers/hid/hid-prodikeys.c b/drivers/hid/hid-prodikeys.c
index 5a3b3d974d84..2666af02d5c1 100644
--- a/drivers/hid/hid-prodikeys.c
+++ b/drivers/hid/hid-prodikeys.c
@@ -516,7 +516,7 @@ static void pcmidi_setup_extra_keys(
MY PICTURES => KEY_WORDPROCESSOR
MY MUSIC=> KEY_SPREADSHEET
*/
- unsigned int keys[] = {
+ static const unsigned int keys[] = {
KEY_FN,
KEY_MESSENGER, KEY_CALENDAR,
KEY_ADDRESSBOOK, KEY_DOCUMENTS,
@@ -532,7 +532,7 @@ static void pcmidi_setup_extra_keys(
0
};
- unsigned int *pkeys = &keys[0];
+ const unsigned int *pkeys = &keys[0];
unsigned short i;
if (pm->ifnum != 1) /* only set up ONCE for interface 1 */
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 73c0f7a95e2d..4c6ed6ef31f1 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -2254,9 +2254,15 @@ static int sony_play_effect(struct input_dev *dev, void *data,
static int sony_init_ff(struct sony_sc *sc)
{
- struct hid_input *hidinput = list_entry(sc->hdev->inputs.next,
- struct hid_input, list);
- struct input_dev *input_dev = hidinput->input;
+ struct hid_input *hidinput;
+ struct input_dev *input_dev;
+
+ if (list_empty(&sc->hdev->inputs)) {
+ hid_err(sc->hdev, "no inputs found\n");
+ return -ENODEV;
+ }
+ hidinput = list_entry(sc->hdev->inputs.next, struct hid_input, list);
+ input_dev = hidinput->input;
input_set_capability(input_dev, EV_FF, FF_RUMBLE);
return input_ff_create_memless(input_dev, NULL, sony_play_effect);
diff --git a/drivers/hid/hid-tmff.c b/drivers/hid/hid-tmff.c
index bdfc5ff3b2c5..90acef304536 100644
--- a/drivers/hid/hid-tmff.c
+++ b/drivers/hid/hid-tmff.c
@@ -124,12 +124,18 @@ static int tmff_init(struct hid_device *hid, const signed short *ff_bits)
struct tmff_device *tmff;
struct hid_report *report;
struct list_head *report_list;
- struct hid_input *hidinput = list_entry(hid->inputs.next,
- struct hid_input, list);
- struct input_dev *input_dev = hidinput->input;
+ struct hid_input *hidinput;
+ struct input_dev *input_dev;
int error;
int i;
+ if (list_empty(&hid->inputs)) {
+ hid_err(hid, "no inputs found\n");
+ return -ENODEV;
+ }
+ hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+ input_dev = hidinput->input;
+
tmff = kzalloc(sizeof(struct tmff_device), GFP_KERNEL);
if (!tmff)
return -ENOMEM;
diff --git a/drivers/hid/hid-zpff.c b/drivers/hid/hid-zpff.c
index f90959e94028..3abaca045869 100644
--- a/drivers/hid/hid-zpff.c
+++ b/drivers/hid/hid-zpff.c
@@ -54,11 +54,17 @@ static int zpff_init(struct hid_device *hid)
{
struct zpff_device *zpff;
struct hid_report *report;
- struct hid_input *hidinput = list_entry(hid->inputs.next,
- struct hid_input, list);
- struct input_dev *dev = hidinput->input;
+ struct hid_input *hidinput;
+ struct input_dev *dev;
int i, error;
+ if (list_empty(&hid->inputs)) {
+ hid_err(hid, "no inputs found\n");
+ return -ENODEV;
+ }
+ hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+ dev = hidinput->input;
+
for (i = 0; i < 4; i++) {
report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, i, 1);
if (!report)
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index 2a7c6e33bb1c..d9c55e30f986 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -26,7 +26,6 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pm.h>
-#include <linux/pm_runtime.h>
#include <linux/device.h>
#include <linux/wait.h>
#include <linux/err.h>
@@ -48,8 +47,6 @@
/* quirks to control the device */
#define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0)
#define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1)
-#define I2C_HID_QUIRK_NO_RUNTIME_PM BIT(2)
-#define I2C_HID_QUIRK_DELAY_AFTER_SLEEP BIT(3)
#define I2C_HID_QUIRK_BOGUS_IRQ BIT(4)
/* flags */
@@ -172,14 +169,7 @@ static const struct i2c_hid_quirks {
{ USB_VENDOR_ID_WEIDA, HID_ANY_ID,
I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
{ I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
- I2C_HID_QUIRK_NO_IRQ_AFTER_RESET |
- I2C_HID_QUIRK_NO_RUNTIME_PM },
- { I2C_VENDOR_ID_RAYDIUM, I2C_PRODUCT_ID_RAYDIUM_4B33,
- I2C_HID_QUIRK_DELAY_AFTER_SLEEP },
- { USB_VENDOR_ID_LG, I2C_DEVICE_ID_LG_8001,
- I2C_HID_QUIRK_NO_RUNTIME_PM },
- { I2C_VENDOR_ID_GOODIX, I2C_DEVICE_ID_GOODIX_01F0,
- I2C_HID_QUIRK_NO_RUNTIME_PM },
+ I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
{ USB_VENDOR_ID_ELAN, HID_ANY_ID,
I2C_HID_QUIRK_BOGUS_IRQ },
{ 0, 0 }
@@ -397,7 +387,6 @@ static int i2c_hid_set_power(struct i2c_client *client, int power_state)
{
struct i2c_hid *ihid = i2c_get_clientdata(client);
int ret;
- unsigned long now, delay;
i2c_hid_dbg(ihid, "%s\n", __func__);
@@ -415,22 +404,9 @@ static int i2c_hid_set_power(struct i2c_client *client, int power_state)
goto set_pwr_exit;
}
- if (ihid->quirks & I2C_HID_QUIRK_DELAY_AFTER_SLEEP &&
- power_state == I2C_HID_PWR_ON) {
- now = jiffies;
- if (time_after(ihid->sleep_delay, now)) {
- delay = jiffies_to_usecs(ihid->sleep_delay - now);
- usleep_range(delay, delay + 1);
- }
- }
-
ret = __i2c_hid_command(client, &hid_set_power_cmd, power_state,
0, NULL, 0, NULL, 0);
- if (ihid->quirks & I2C_HID_QUIRK_DELAY_AFTER_SLEEP &&
- power_state == I2C_HID_PWR_SLEEP)
- ihid->sleep_delay = jiffies + msecs_to_jiffies(20);
-
if (ret)
dev_err(&client->dev, "failed to change power setting.\n");
@@ -791,11 +767,6 @@ static int i2c_hid_open(struct hid_device *hid)
{
struct i2c_client *client = hid->driver_data;
struct i2c_hid *ihid = i2c_get_clientdata(client);
- int ret = 0;
-
- ret = pm_runtime_get_sync(&client->dev);
- if (ret < 0)
- return ret;
set_bit(I2C_HID_STARTED, &ihid->flags);
return 0;
@@ -807,27 +778,6 @@ static void i2c_hid_close(struct hid_device *hid)
struct i2c_hid *ihid = i2c_get_clientdata(client);
clear_bit(I2C_HID_STARTED, &ihid->flags);
-
- /* Save some power */
- pm_runtime_put(&client->dev);
-}
-
-static int i2c_hid_power(struct hid_device *hid, int lvl)
-{
- struct i2c_client *client = hid->driver_data;
- struct i2c_hid *ihid = i2c_get_clientdata(client);
-
- i2c_hid_dbg(ihid, "%s lvl:%d\n", __func__, lvl);
-
- switch (lvl) {
- case PM_HINT_FULLON:
- pm_runtime_get_sync(&client->dev);
- break;
- case PM_HINT_NORMAL:
- pm_runtime_put(&client->dev);
- break;
- }
- return 0;
}
struct hid_ll_driver i2c_hid_ll_driver = {
@@ -836,7 +786,6 @@ struct hid_ll_driver i2c_hid_ll_driver = {
.stop = i2c_hid_stop,
.open = i2c_hid_open,
.close = i2c_hid_close,
- .power = i2c_hid_power,
.output_report = i2c_hid_output_report,
.raw_request = i2c_hid_raw_request,
};
@@ -1104,9 +1053,6 @@ static int i2c_hid_probe(struct i2c_client *client,
i2c_hid_acpi_fix_up_power(&client->dev);
- pm_runtime_get_noresume(&client->dev);
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
device_enable_async_suspend(&client->dev);
/* Make sure there is something at this address */
@@ -1114,16 +1060,16 @@ static int i2c_hid_probe(struct i2c_client *client,
if (ret < 0) {
dev_dbg(&client->dev, "nothing at this address: %d\n", ret);
ret = -ENXIO;
- goto err_pm;
+ goto err_regulator;
}
ret = i2c_hid_fetch_hid_descriptor(ihid);
if (ret < 0)
- goto err_pm;
+ goto err_regulator;
ret = i2c_hid_init_irq(client);
if (ret < 0)
- goto err_pm;
+ goto err_regulator;
hid = hid_allocate_device();
if (IS_ERR(hid)) {
@@ -1154,9 +1100,6 @@ static int i2c_hid_probe(struct i2c_client *client,
goto err_mem_free;
}
- if (!(ihid->quirks & I2C_HID_QUIRK_NO_RUNTIME_PM))
- pm_runtime_put(&client->dev);
-
return 0;
err_mem_free:
@@ -1165,10 +1108,6 @@ err_mem_free:
err_irq:
free_irq(client->irq, ihid);
-err_pm:
- pm_runtime_put_noidle(&client->dev);
- pm_runtime_disable(&client->dev);
-
err_regulator:
regulator_bulk_disable(ARRAY_SIZE(ihid->pdata.supplies),
ihid->pdata.supplies);
@@ -1181,12 +1120,6 @@ static int i2c_hid_remove(struct i2c_client *client)
struct i2c_hid *ihid = i2c_get_clientdata(client);
struct hid_device *hid;
- if (!(ihid->quirks & I2C_HID_QUIRK_NO_RUNTIME_PM))
- pm_runtime_get_sync(&client->dev);
- pm_runtime_disable(&client->dev);
- pm_runtime_set_suspended(&client->dev);
- pm_runtime_put_noidle(&client->dev);
-
hid = ihid->hid;
hid_destroy_device(hid);
@@ -1219,25 +1152,15 @@ static int i2c_hid_suspend(struct device *dev)
int wake_status;
if (hid->driver && hid->driver->suspend) {
- /*
- * Wake up the device so that IO issues in
- * HID driver's suspend code can succeed.
- */
- ret = pm_runtime_resume(dev);
- if (ret < 0)
- return ret;
-
ret = hid->driver->suspend(hid, PMSG_SUSPEND);
if (ret < 0)
return ret;
}
- if (!pm_runtime_suspended(dev)) {
- /* Save some power */
- i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
+ /* Save some power */
+ i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
- disable_irq(client->irq);
- }
+ disable_irq(client->irq);
if (device_may_wakeup(&client->dev)) {
wake_status = enable_irq_wake(client->irq);
@@ -1279,11 +1202,6 @@ static int i2c_hid_resume(struct device *dev)
wake_status);
}
- /* We'll resume to full power */
- pm_runtime_disable(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
-
enable_irq(client->irq);
/* Instead of resetting the device, simply power the device on. This
@@ -1304,30 +1222,8 @@ static int i2c_hid_resume(struct device *dev)
}
#endif
-#ifdef CONFIG_PM
-static int i2c_hid_runtime_suspend(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
-
- i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
- disable_irq(client->irq);
- return 0;
-}
-
-static int i2c_hid_runtime_resume(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
-
- enable_irq(client->irq);
- i2c_hid_set_power(client, I2C_HID_PWR_ON);
- return 0;
-}
-#endif
-
static const struct dev_pm_ops i2c_hid_pm = {
SET_SYSTEM_SLEEP_PM_OPS(i2c_hid_suspend, i2c_hid_resume)
- SET_RUNTIME_PM_OPS(i2c_hid_runtime_suspend, i2c_hid_runtime_resume,
- NULL)
};
static const struct i2c_device_id i2c_hid_id_table[] = {
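The net effect of the i2c-hid hunks above: with runtime PM removed, the device stays powered for as long as the driver is bound, and power is only toggled across system sleep, which also retires the NO_RUNTIME_PM and DELAY_AFTER_SLEEP quirks. The resulting pm_ops shape, sketched as a hypothetical driver (SET_SYSTEM_SLEEP_PM_OPS fills in the .suspend/.resume slots only under CONFIG_PM_SLEEP):

    #include <linux/device.h>
    #include <linux/pm.h>

    static int example_suspend(struct device *dev) { return 0; }
    static int example_resume(struct device *dev)  { return 0; }

    /* system-sleep callbacks only; no SET_RUNTIME_PM_OPS() entry */
    static const struct dev_pm_ops example_pm = {
        SET_SYSTEM_SLEEP_PM_OPS(example_suspend, example_resume)
    };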
diff --git a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
index 75078c83be1a..d31ea82b84c1 100644
--- a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
+++ b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
@@ -323,6 +323,25 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
.driver_data = (void *)&sipodev_desc
},
{
+ /*
+ * There are at least 2 Primebook C11B versions, the older
+ * version has a product-name of "Primebook C11B", and a
+ * bios version / release / firmware revision of:
+ * V2.1.2 / 05/03/2018 / 18.2
+ * The new version has "PRIMEBOOK C11B" as product-name and a
+ * bios version / release / firmware revision of:
+ * CFALKSW05_BIOS_V1.1.2 / 11/19/2018 / 19.2
+ * Only the older version needs this quirk; note that the newer
+ * version will not match, as it has a different product-name.
+ */
+ .ident = "Trekstor Primebook C11B",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TREKSTOR"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Primebook C11B"),
+ },
+ .driver_data = (void *)&sipodev_desc
+ },
+ {
.ident = "Direkt-Tek DTLAPY116-2",
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Direkt-Tek"),
diff --git a/drivers/hid/intel-ish-hid/ishtp/client-buffers.c b/drivers/hid/intel-ish-hid/ishtp/client-buffers.c
index 1b0a0cc605e7..513d7a4a1b8a 100644
--- a/drivers/hid/intel-ish-hid/ishtp/client-buffers.c
+++ b/drivers/hid/intel-ish-hid/ishtp/client-buffers.c
@@ -84,7 +84,7 @@ int ishtp_cl_alloc_tx_ring(struct ishtp_cl *cl)
return 0;
out:
dev_err(&cl->device->dev, "error in allocating Tx pool\n");
- ishtp_cl_free_rx_ring(cl);
+ ishtp_cl_free_tx_ring(cl);
return -ENOMEM;
}
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 391f0b225c9a..53a60c81e220 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -912,6 +912,7 @@ static void vmbus_shutdown(struct device *child_device)
drv->shutdown(dev);
}
+#ifdef CONFIG_PM_SLEEP
/*
* vmbus_suspend - Suspend a vmbus device
*/
@@ -949,6 +950,7 @@ static int vmbus_resume(struct device *child_device)
return drv->resume(dev);
}
+#endif /* CONFIG_PM_SLEEP */
/*
* vmbus_device_release - Final callback release of the vmbus child device
@@ -1070,6 +1072,7 @@ msg_handled:
vmbus_signal_eom(msg, message_type);
}
+#ifdef CONFIG_PM_SLEEP
/*
* Fake RESCIND_CHANNEL messages to clean up hv_sock channels by force for
* hibernation, because hv_sock connections can not persist across hibernation.
@@ -1105,6 +1108,7 @@ static void vmbus_force_channel_rescinded(struct vmbus_channel *channel)
vmbus_connection.work_queue,
&ctx->work);
}
+#endif /* CONFIG_PM_SLEEP */
/*
* Direct callback for channels using other deferred processing
@@ -2125,6 +2129,7 @@ acpi_walk_err:
return ret_val;
}
+#ifdef CONFIG_PM_SLEEP
static int vmbus_bus_suspend(struct device *dev)
{
struct vmbus_channel *channel, *sc;
@@ -2247,6 +2252,7 @@ static int vmbus_bus_resume(struct device *dev)
return 0;
}
+#endif /* CONFIG_PM_SLEEP */
static const struct acpi_device_id vmbus_acpi_device_ids[] = {
{"VMBUS", 0},
diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c
index 95b447cfa24c..b26419dbe840 100644
--- a/drivers/hwmon/nct7904.c
+++ b/drivers/hwmon/nct7904.c
@@ -99,6 +99,8 @@ struct nct7904_data {
u8 enable_dts;
u8 has_dts;
u8 temp_mode; /* 0: TR mode, 1: TD mode */
+ u8 fan_alarm[2];
+ u8 vsen_alarm[3];
};
/* Access functions */
@@ -214,7 +216,15 @@ static int nct7904_read_fan(struct device *dev, u32 attr, int channel,
SMI_STS5_REG + (channel >> 3));
if (ret < 0)
return ret;
- *val = (ret >> (channel & 0x07)) & 1;
+ if (!data->fan_alarm[channel >> 3])
+ data->fan_alarm[channel >> 3] = ret & 0xff;
+ else
+ /* OR in any newly raised alarm bits */
+ data->fan_alarm[channel >> 3] |= (ret & 0xff);
+ *val = (data->fan_alarm[channel >> 3] >> (channel & 0x07)) & 1;
+ /* Clear the latched alarm once it has been reported */
+ if (*val)
+ data->fan_alarm[channel >> 3] ^= 1 << (channel & 0x07);
return 0;
default:
return -EOPNOTSUPP;
@@ -298,7 +308,15 @@ static int nct7904_read_in(struct device *dev, u32 attr, int channel,
SMI_STS1_REG + (index >> 3));
if (ret < 0)
return ret;
- *val = (ret >> (index & 0x07)) & 1;
+ if (!data->vsen_alarm[index >> 3])
+ data->vsen_alarm[index >> 3] = ret & 0xff;
+ else
+ /* OR in any newly raised alarm bits */
+ data->vsen_alarm[index >> 3] |= (ret & 0xff);
+ *val = (data->vsen_alarm[index >> 3] >> (index & 0x07)) & 1;
+ /* Clear the latched alarm once it has been reported */
+ if (*val)
+ data->vsen_alarm[index >> 3] ^= 1 << (index & 0x07);
return 0;
default:
return -EOPNOTSUPP;
@@ -915,12 +933,15 @@ static int nct7904_probe(struct i2c_client *client,
data->temp_mode = 0;
for (i = 0; i < 4; i++) {
- val = (ret & (0x03 << i)) >> (i * 2);
+ val = (ret >> (i * 2)) & 0x03;
bit = (1 << i);
- if (val == 0)
+ if (val == 0) {
data->tcpu_mask &= ~bit;
- else if (val == 0x1 || val == 0x2)
- data->temp_mode |= bit;
+ } else {
+ if (val == 0x1 || val == 0x2)
+ data->temp_mode |= bit;
+ data->vsen_mask &= ~(0x06 << (i * 2));
+ }
}
/* PECI */
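The nct7904 SMI status registers are clear-on-read: polling one fan's alarm also wipes, say, a pending alarm in the same byte, so alarms could be lost before userspace ever saw them. The hunks above latch every bit read into fan_alarm[]/vsen_alarm[] and clear a latched bit only once it has been reported. The core logic, reduced to a runnable userspace sketch:

    #include <stdio.h>

    static unsigned char latched;   /* driver-side copy of the alarm bits */

    static int read_alarm(unsigned char hw_status, int channel)
    {
        int val;

        latched |= hw_status;          /* accumulate clear-on-read bits */
        val = (latched >> channel) & 1;
        if (val)
            latched ^= 1 << channel;   /* consume the reported alarm */
        return val;
    }

    int main(void)
    {
        /* bit 3 fires once in hardware ... */
        printf("%d\n", read_alarm(0x08, 3));    /* 1: reported and consumed */
        printf("%d\n", read_alarm(0x00, 3));    /* 0: not re-reported */
        /* ... and an unread bit survives until someone asks for it */
        read_alarm(0x02, 3);
        printf("%d\n", read_alarm(0x00, 1));    /* 1: latched earlier */
        return 0;
    }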
diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
index fa66951b05d0..7b098ff5f5dd 100644
--- a/drivers/i2c/busses/i2c-aspeed.c
+++ b/drivers/i2c/busses/i2c-aspeed.c
@@ -108,6 +108,12 @@
#define ASPEED_I2CD_S_TX_CMD BIT(2)
#define ASPEED_I2CD_M_TX_CMD BIT(1)
#define ASPEED_I2CD_M_START_CMD BIT(0)
+#define ASPEED_I2CD_MASTER_CMDS_MASK \
+ (ASPEED_I2CD_M_STOP_CMD | \
+ ASPEED_I2CD_M_S_RX_CMD_LAST | \
+ ASPEED_I2CD_M_RX_CMD | \
+ ASPEED_I2CD_M_TX_CMD | \
+ ASPEED_I2CD_M_START_CMD)
/* 0x18 : I2CD Slave Device Address Register */
#define ASPEED_I2CD_DEV_ADDR_MASK GENMASK(6, 0)
@@ -336,18 +342,19 @@ static void aspeed_i2c_do_start(struct aspeed_i2c_bus *bus)
struct i2c_msg *msg = &bus->msgs[bus->msgs_index];
u8 slave_addr = i2c_8bit_addr_from_msg(msg);
- bus->master_state = ASPEED_I2C_MASTER_START;
-
#if IS_ENABLED(CONFIG_I2C_SLAVE)
/*
* If it's requested in the middle of a slave session, set the master
* state to 'pending' then H/W will continue handling this master
* command when the bus comes back to the idle state.
*/
- if (bus->slave_state != ASPEED_I2C_SLAVE_INACTIVE)
+ if (bus->slave_state != ASPEED_I2C_SLAVE_INACTIVE) {
bus->master_state = ASPEED_I2C_MASTER_PENDING;
+ return;
+ }
#endif /* CONFIG_I2C_SLAVE */
+ bus->master_state = ASPEED_I2C_MASTER_START;
bus->buf_index = 0;
if (msg->flags & I2C_M_RD) {
@@ -422,20 +429,6 @@ static u32 aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
}
}
-#if IS_ENABLED(CONFIG_I2C_SLAVE)
- /*
- * A pending master command will be started by H/W when the bus comes
- * back to idle state after completing a slave operation so change the
- * master state from 'pending' to 'start' at here if slave is inactive.
- */
- if (bus->master_state == ASPEED_I2C_MASTER_PENDING) {
- if (bus->slave_state != ASPEED_I2C_SLAVE_INACTIVE)
- goto out_no_complete;
-
- bus->master_state = ASPEED_I2C_MASTER_START;
- }
-#endif /* CONFIG_I2C_SLAVE */
-
/* Master is not currently active, irq was for someone else. */
if (bus->master_state == ASPEED_I2C_MASTER_INACTIVE ||
bus->master_state == ASPEED_I2C_MASTER_PENDING)
@@ -462,11 +455,15 @@ static u32 aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
#if IS_ENABLED(CONFIG_I2C_SLAVE)
/*
* If a peer master starts a xfer immediately after it queues a
- * master command, change its state to 'pending' then H/W will
- * continue the queued master xfer just after completing the
- * slave mode session.
+ * master command, clear the queued master command and change
+ * its state to 'pending'. To simplify the handling of pending
+ * cases, a S/W solution is used instead of the H/W command
+ * queue handling.
*/
if (unlikely(irq_status & ASPEED_I2CD_INTR_SLAVE_MATCH)) {
+ writel(readl(bus->base + ASPEED_I2C_CMD_REG) &
+ ~ASPEED_I2CD_MASTER_CMDS_MASK,
+ bus->base + ASPEED_I2C_CMD_REG);
bus->master_state = ASPEED_I2C_MASTER_PENDING;
dev_dbg(bus->dev,
"master goes pending due to a slave start\n");
@@ -629,6 +626,14 @@ static irqreturn_t aspeed_i2c_bus_irq(int irq, void *dev_id)
irq_handled |= aspeed_i2c_master_irq(bus,
irq_remaining);
}
+
+ /*
+ * Start a pending master command here if a slave operation has
+ * completed.
+ */
+ if (bus->master_state == ASPEED_I2C_MASTER_PENDING &&
+ bus->slave_state == ASPEED_I2C_SLAVE_INACTIVE)
+ aspeed_i2c_do_start(bus);
#else
irq_handled = aspeed_i2c_master_irq(bus, irq_remaining);
#endif /* CONFIG_I2C_SLAVE */
@@ -691,6 +696,15 @@ static int aspeed_i2c_master_xfer(struct i2c_adapter *adap,
ASPEED_I2CD_BUS_BUSY_STS))
aspeed_i2c_recover_bus(bus);
+ /*
+ * If timed out and the state is still pending, drop the pending
+ * master command.
+ */
+ spin_lock_irqsave(&bus->lock, flags);
+ if (bus->master_state == ASPEED_I2C_MASTER_PENDING)
+ bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
+ spin_unlock_irqrestore(&bus->lock, flags);
+
return -ETIMEDOUT;
}
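
Taken together, the i2c-aspeed hunks move the pending-master bookkeeping fully into software: a start requested while a slave session is active only records 'pending' (and any queued H/W master command bits are cleared), and the IRQ path issues the deferred start once the slave goes inactive. An illustrative sketch of that state machine, with names simplified and no claim to match the driver's internals:

/* Illustrative state machine, not the driver's actual code. */
enum master_state { MASTER_INACTIVE, MASTER_PENDING, MASTER_START };

static enum master_state master = MASTER_INACTIVE;
static int slave_busy;			/* mirrors "slave_state != INACTIVE" */

static void do_start(void)
{
	if (slave_busy) {		/* defer: leave the H/W untouched */
		master = MASTER_PENDING;
		return;
	}
	master = MASTER_START;
	/* ...program the address/command registers here... */
}

static void on_slave_done(void)		/* called from the IRQ path */
{
	slave_busy = 0;
	if (master == MASTER_PENDING)	/* kick the deferred transfer */
		do_start();
}
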
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index 29eae1bf4f86..2152ec5f535c 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -875,7 +875,7 @@ static irqreturn_t mtk_i2c_irq(int irqno, void *dev_id)
static u32 mtk_i2c_functionality(struct i2c_adapter *adap)
{
- if (adap->quirks->flags & I2C_AQ_NO_ZERO_LEN)
+ if (i2c_check_quirks(adap, I2C_AQ_NO_ZERO_LEN))
return I2C_FUNC_I2C |
(I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
else
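
The point of the one-line i2c-mt65xx change is NULL safety: adap->quirks may be absent, and dereferencing it unconditionally can oops, while i2c_check_quirks() checks the pointer first. A hedged sketch of what such a helper does (quirk_set is a made-up name; the real helper lives in <linux/i2c.h>):

#include <linux/i2c.h>

/* A NULL-safe quirk test, approximately what i2c_check_quirks() does. */
static bool quirk_set(struct i2c_adapter *adap, u64 quirks)
{
	if (!adap->quirks)		/* adapter declares no quirks at all */
		return false;
	return (adap->quirks->flags & quirks) == quirks;
}
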
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index d36cf08461f7..b24e7b937f21 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -305,7 +305,7 @@ struct stm32f7_i2c_dev {
struct regmap *regmap;
};
-/**
+/*
* All these values are coming from I2C Specification, Version 6.0, 4th of
* April 2014.
*
@@ -1192,6 +1192,8 @@ static void stm32f7_i2c_slave_start(struct stm32f7_i2c_dev *i2c_dev)
STM32F7_I2C_CR1_TXIE;
stm32f7_i2c_set_bits(base + STM32F7_I2C_CR1, mask);
+ /* Write 1st data byte */
+ writel_relaxed(value, base + STM32F7_I2C_TXDR);
} else {
/* Notify i2c slave that new write transfer is starting */
i2c_slave_event(slave, I2C_SLAVE_WRITE_REQUESTED, &value);
@@ -1501,7 +1503,7 @@ static irqreturn_t stm32f7_i2c_isr_error(int irq, void *data)
void __iomem *base = i2c_dev->base;
struct device *dev = i2c_dev->dev;
struct stm32_i2c_dma *dma = i2c_dev->dma;
- u32 mask, status;
+ u32 status;
status = readl_relaxed(i2c_dev->base + STM32F7_I2C_ISR);
@@ -1526,12 +1528,15 @@ static irqreturn_t stm32f7_i2c_isr_error(int irq, void *data)
f7_msg->result = -EINVAL;
}
- /* Disable interrupts */
- if (stm32f7_i2c_is_slave_registered(i2c_dev))
- mask = STM32F7_I2C_XFER_IRQ_MASK;
- else
- mask = STM32F7_I2C_ALL_IRQ_MASK;
- stm32f7_i2c_disable_irq(i2c_dev, mask);
+ if (!i2c_dev->slave_running) {
+ u32 mask;
+ /* Disable interrupts */
+ if (stm32f7_i2c_is_slave_registered(i2c_dev))
+ mask = STM32F7_I2C_XFER_IRQ_MASK;
+ else
+ mask = STM32F7_I2C_ALL_IRQ_MASK;
+ stm32f7_i2c_disable_irq(i2c_dev, mask);
+ }
/* Disable dma */
if (i2c_dev->use_dma) {
diff --git a/drivers/iio/accel/adxl372.c b/drivers/iio/accel/adxl372.c
index 055227cb3d43..67b8817995c0 100644
--- a/drivers/iio/accel/adxl372.c
+++ b/drivers/iio/accel/adxl372.c
@@ -474,12 +474,17 @@ static int adxl372_configure_fifo(struct adxl372_state *st)
if (ret < 0)
return ret;
- fifo_samples = st->watermark & 0xFF;
+ /*
+ * watermark stores the number of sets; we need to write the FIFO
+ * registers with the number of samples
+ */
+ fifo_samples = (st->watermark * st->fifo_set_size);
fifo_ctl = ADXL372_FIFO_CTL_FORMAT_MODE(st->fifo_format) |
ADXL372_FIFO_CTL_MODE_MODE(st->fifo_mode) |
- ADXL372_FIFO_CTL_SAMPLES_MODE(st->watermark);
+ ADXL372_FIFO_CTL_SAMPLES_MODE(fifo_samples);
- ret = regmap_write(st->regmap, ADXL372_FIFO_SAMPLES, fifo_samples);
+ ret = regmap_write(st->regmap,
+ ADXL372_FIFO_SAMPLES, fifo_samples & 0xFF);
if (ret < 0)
return ret;
@@ -548,8 +553,7 @@ static irqreturn_t adxl372_trigger_handler(int irq, void *p)
goto err;
/* Each sample is 2 bytes */
- for (i = 0; i < fifo_entries * sizeof(u16);
- i += st->fifo_set_size * sizeof(u16))
+ for (i = 0; i < fifo_entries; i += st->fifo_set_size)
iio_push_to_buffers(indio_dev, &st->fifo_buf[i]);
}
err:
@@ -571,6 +575,14 @@ static int adxl372_setup(struct adxl372_state *st)
return -ENODEV;
}
+ /*
+ * Perform a software reset to make sure the device is in a consistent
+ * state after start up.
+ */
+ ret = regmap_write(st->regmap, ADXL372_RESET, ADXL372_RESET_CODE);
+ if (ret < 0)
+ return ret;
+
ret = adxl372_set_op_mode(st, ADXL372_STANDBY);
if (ret < 0)
return ret;
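
The adxl372 watermark fix hinges on a unit mismatch: userspace configures the watermark in sample *sets*, while the FIFO registers count individual samples, and FIFO_SAMPLES takes only the low byte. A standalone sketch of the conversion, with made-up values:

/* Sketch with made-up numbers; only the arithmetic is the point. */
#include <stdio.h>

int main(void)
{
	unsigned int watermark = 32;	/* sets requested by userspace */
	unsigned int set_size = 3;	/* e.g. X, Y and Z axes enabled */
	unsigned int fifo_samples = watermark * set_size;

	/* only the low byte goes into the FIFO_SAMPLES register */
	printf("FIFO_SAMPLES <- 0x%02x (%u samples, %u sets)\n",
	       fifo_samples & 0xFF, fifo_samples, watermark);
	return 0;
}
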
diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
index cf6c0e3a83d3..121b4e89f038 100644
--- a/drivers/iio/accel/bmc150-accel-core.c
+++ b/drivers/iio/accel/bmc150-accel-core.c
@@ -117,7 +117,7 @@
#define BMC150_ACCEL_SLEEP_1_SEC 0x0F
#define BMC150_ACCEL_REG_TEMP 0x08
-#define BMC150_ACCEL_TEMP_CENTER_VAL 24
+#define BMC150_ACCEL_TEMP_CENTER_VAL 23
#define BMC150_ACCEL_AXIS_TO_REG(axis) (BMC150_ACCEL_REG_XOUT_L + (axis * 2))
#define BMC150_AUTO_SUSPEND_DELAY_MS 2000
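
The bmc150 change corrects the temperature center point from 24 to 23 degC. Assuming the datasheet's 0.5 degC/LSB step (an assumption stated here, not taken from the diff), a raw reading converts to millidegrees as in this sketch:

/* Conversion sketch; the 0.5 degC/LSB step is a datasheet assumption. */
#include <stdio.h>

int main(void)
{
	signed char raw = 4;			/* example register value */
	int temp_mdegc = 23 * 1000 + raw * 500;	/* center now 23 degC */

	printf("temperature: %d mdegC\n", temp_mdegc);
	return 0;
}
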
diff --git a/drivers/iio/adc/ad799x.c b/drivers/iio/adc/ad799x.c
index 5a3ca5904ded..f658012baad8 100644
--- a/drivers/iio/adc/ad799x.c
+++ b/drivers/iio/adc/ad799x.c
@@ -810,10 +810,10 @@ static int ad799x_probe(struct i2c_client *client,
ret = ad799x_write_config(st, st->chip_config->default_config);
if (ret < 0)
- goto error_disable_reg;
+ goto error_disable_vref;
ret = ad799x_read_config(st);
if (ret < 0)
- goto error_disable_reg;
+ goto error_disable_vref;
st->config = ret;
ret = iio_triggered_buffer_setup(indio_dev, NULL,
diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c
index adc9cf7a075d..8ea2aed6d6f5 100644
--- a/drivers/iio/adc/axp288_adc.c
+++ b/drivers/iio/adc/axp288_adc.c
@@ -7,6 +7,7 @@
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
+#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
@@ -25,6 +26,11 @@
#define AXP288_ADC_EN_MASK 0xF0
#define AXP288_ADC_TS_ENABLE 0x01
+#define AXP288_ADC_TS_BIAS_MASK GENMASK(5, 4)
+#define AXP288_ADC_TS_BIAS_20UA (0 << 4)
+#define AXP288_ADC_TS_BIAS_40UA (1 << 4)
+#define AXP288_ADC_TS_BIAS_60UA (2 << 4)
+#define AXP288_ADC_TS_BIAS_80UA (3 << 4)
#define AXP288_ADC_TS_CURRENT_ON_OFF_MASK GENMASK(1, 0)
#define AXP288_ADC_TS_CURRENT_OFF (0 << 0)
#define AXP288_ADC_TS_CURRENT_ON_WHEN_CHARGING (1 << 0)
@@ -177,10 +183,36 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
return ret;
}
+/*
+ * We rely on the machine's firmware to correctly set up the TS pin bias
+ * current at boot. This lists systems with broken firmware where we need
+ * to set it ourselves.
+ */
+static const struct dmi_system_id axp288_adc_ts_bias_override[] = {
+ {
+ /* Lenovo Ideapad 100S (11 inch) */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 100S-11IBY"),
+ },
+ .driver_data = (void *)(uintptr_t)AXP288_ADC_TS_BIAS_80UA,
+ },
+ {}
+};
+
static int axp288_adc_initialize(struct axp288_adc_info *info)
{
+ const struct dmi_system_id *bias_override;
int ret, adc_enable_val;
+ bias_override = dmi_first_match(axp288_adc_ts_bias_override);
+ if (bias_override) {
+ ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL,
+ AXP288_ADC_TS_BIAS_MASK,
+ (uintptr_t)bias_override->driver_data);
+ if (ret)
+ return ret;
+ }
+
/*
* Determine if the TS pin is enabled and set the TS current-source
* accordingly.
diff --git a/drivers/iio/adc/hx711.c b/drivers/iio/adc/hx711.c
index 88c7fe15003b..62e6c8badd22 100644
--- a/drivers/iio/adc/hx711.c
+++ b/drivers/iio/adc/hx711.c
@@ -100,14 +100,14 @@ struct hx711_data {
static int hx711_cycle(struct hx711_data *hx711_data)
{
- int val;
+ unsigned long flags;
/*
* if preempted for more than 60 us while PD_SCK is high:
* the hx711 goes into reset
* ==> the measurement is invalid
*/
- preempt_disable();
+ local_irq_save(flags);
gpiod_set_value(hx711_data->gpiod_pd_sck, 1);
/*
@@ -117,7 +117,6 @@ static int hx711_cycle(struct hx711_data *hx711_data)
*/
ndelay(hx711_data->data_ready_delay_ns);
- val = gpiod_get_value(hx711_data->gpiod_dout);
/*
* here we are not waiting for 0.2 us as suggested by the datasheet,
* because the oscilloscope showed in a test scenario
@@ -125,7 +124,7 @@ static int hx711_cycle(struct hx711_data *hx711_data)
* and 0.56 us for PD_SCK low on TI Sitara with 800 MHz
*/
gpiod_set_value(hx711_data->gpiod_pd_sck, 0);
- preempt_enable();
+ local_irq_restore(flags);
/*
* make it a square wave for addressing cases with capacitance on
@@ -133,7 +132,8 @@ static int hx711_cycle(struct hx711_data *hx711_data)
*/
ndelay(hx711_data->data_ready_delay_ns);
- return val;
+ /* sample as late as possible */
+ return gpiod_get_value(hx711_data->gpiod_dout);
}
static int hx711_read(struct hx711_data *hx711_data)
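
The hx711 change swaps preempt_disable() for local_irq_save(): disabling preemption does not stop interrupts, so an IRQ could still hold PD_SCK high past the ~60 us reset threshold, and moving the DOUT read after the full clock cycle samples the line as late as possible. A kernel-style sketch of the protected pulse (pulse_sck and its arguments are illustrative):

#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/irqflags.h>
#include <linux/types.h>

/* Illustrative helper: one clock pulse that nothing may stretch. */
static void pulse_sck(struct gpio_desc *sck, u32 delay_ns)
{
	unsigned long flags;

	local_irq_save(flags);	/* an IRQ here could hold SCK high >60 us */
	gpiod_set_value(sck, 1);
	ndelay(delay_ns);
	gpiod_set_value(sck, 0);
	local_irq_restore(flags);
}
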
diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c
index 7b28d045d271..7b27306330a3 100644
--- a/drivers/iio/adc/meson_saradc.c
+++ b/drivers/iio/adc/meson_saradc.c
@@ -1219,6 +1219,11 @@ static int meson_sar_adc_probe(struct platform_device *pdev)
if (IS_ERR(base))
return PTR_ERR(base);
+ priv->regmap = devm_regmap_init_mmio(&pdev->dev, base,
+ priv->param->regmap_config);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
+
irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
if (!irq)
return -EINVAL;
@@ -1228,11 +1233,6 @@ static int meson_sar_adc_probe(struct platform_device *pdev)
if (ret)
return ret;
- priv->regmap = devm_regmap_init_mmio(&pdev->dev, base,
- priv->param->regmap_config);
- if (IS_ERR(priv->regmap))
- return PTR_ERR(priv->regmap);
-
priv->clkin = devm_clk_get(&pdev->dev, "clkin");
if (IS_ERR(priv->clkin)) {
dev_err(&pdev->dev, "failed to get clkin\n");
diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
index 9b85fefc0a96..93a096a91f8c 100644
--- a/drivers/iio/adc/stm32-adc-core.c
+++ b/drivers/iio/adc/stm32-adc-core.c
@@ -24,33 +24,6 @@
#include "stm32-adc-core.h"
-/* STM32F4 - common registers for all ADC instances: 1, 2 & 3 */
-#define STM32F4_ADC_CSR (STM32_ADCX_COMN_OFFSET + 0x00)
-#define STM32F4_ADC_CCR (STM32_ADCX_COMN_OFFSET + 0x04)
-
-/* STM32F4_ADC_CSR - bit fields */
-#define STM32F4_EOC3 BIT(17)
-#define STM32F4_EOC2 BIT(9)
-#define STM32F4_EOC1 BIT(1)
-
-/* STM32F4_ADC_CCR - bit fields */
-#define STM32F4_ADC_ADCPRE_SHIFT 16
-#define STM32F4_ADC_ADCPRE_MASK GENMASK(17, 16)
-
-/* STM32H7 - common registers for all ADC instances */
-#define STM32H7_ADC_CSR (STM32_ADCX_COMN_OFFSET + 0x00)
-#define STM32H7_ADC_CCR (STM32_ADCX_COMN_OFFSET + 0x08)
-
-/* STM32H7_ADC_CSR - bit fields */
-#define STM32H7_EOC_SLV BIT(18)
-#define STM32H7_EOC_MST BIT(2)
-
-/* STM32H7_ADC_CCR - bit fields */
-#define STM32H7_PRESC_SHIFT 18
-#define STM32H7_PRESC_MASK GENMASK(21, 18)
-#define STM32H7_CKMODE_SHIFT 16
-#define STM32H7_CKMODE_MASK GENMASK(17, 16)
-
#define STM32_ADC_CORE_SLEEP_DELAY_MS 2000
/* SYSCFG registers */
@@ -71,6 +44,8 @@
* @eoc1: adc1 end of conversion flag in @csr
* @eoc2: adc2 end of conversion flag in @csr
* @eoc3: adc3 end of conversion flag in @csr
+ * @ier: interrupt enable register offset for each adc
+ * @eocie_msk: end of conversion interrupt enable mask in @ier
*/
struct stm32_adc_common_regs {
u32 csr;
@@ -78,6 +53,8 @@ struct stm32_adc_common_regs {
u32 eoc1_msk;
u32 eoc2_msk;
u32 eoc3_msk;
+ u32 ier;
+ u32 eocie_msk;
};
struct stm32_adc_priv;
@@ -303,6 +280,8 @@ static const struct stm32_adc_common_regs stm32f4_adc_common_regs = {
.eoc1_msk = STM32F4_EOC1,
.eoc2_msk = STM32F4_EOC2,
.eoc3_msk = STM32F4_EOC3,
+ .ier = STM32F4_ADC_CR1,
+ .eocie_msk = STM32F4_EOCIE,
};
/* STM32H7 common registers definitions */
@@ -311,8 +290,24 @@ static const struct stm32_adc_common_regs stm32h7_adc_common_regs = {
.ccr = STM32H7_ADC_CCR,
.eoc1_msk = STM32H7_EOC_MST,
.eoc2_msk = STM32H7_EOC_SLV,
+ .ier = STM32H7_ADC_IER,
+ .eocie_msk = STM32H7_EOCIE,
+};
+
+static const unsigned int stm32_adc_offset[STM32_ADC_MAX_ADCS] = {
+ 0, STM32_ADC_OFFSET, STM32_ADC_OFFSET * 2,
};
+static unsigned int stm32_adc_eoc_enabled(struct stm32_adc_priv *priv,
+ unsigned int adc)
+{
+ u32 ier, offset = stm32_adc_offset[adc];
+
+ ier = readl_relaxed(priv->common.base + offset + priv->cfg->regs->ier);
+
+ return ier & priv->cfg->regs->eocie_msk;
+}
+
/* ADC common interrupt for all instances */
static void stm32_adc_irq_handler(struct irq_desc *desc)
{
@@ -323,13 +318,28 @@ static void stm32_adc_irq_handler(struct irq_desc *desc)
chained_irq_enter(chip, desc);
status = readl_relaxed(priv->common.base + priv->cfg->regs->csr);
- if (status & priv->cfg->regs->eoc1_msk)
+ /*
+ * End of conversion may be handled by using IRQ or DMA. There may be a
+ * race here when two conversions complete at the same time on several
+ * ADCs. EOC may be read 'set' for several ADCs, with:
+ * - an ADC configured to use DMA (EOC triggers the DMA request, and
+ * is then automatically cleared by DR read in hardware)
+ * - an ADC configured to use IRQs (EOCIE bit is set. The handler must
+ * be called in this case)
+ * So both EOC status bit in CSR and EOCIE control bit must be checked
+ * before invoking the interrupt handler (e.g. call ISR only for
+ * IRQ-enabled ADCs).
+ */
+ if (status & priv->cfg->regs->eoc1_msk &&
+ stm32_adc_eoc_enabled(priv, 0))
generic_handle_irq(irq_find_mapping(priv->domain, 0));
- if (status & priv->cfg->regs->eoc2_msk)
+ if (status & priv->cfg->regs->eoc2_msk &&
+ stm32_adc_eoc_enabled(priv, 1))
generic_handle_irq(irq_find_mapping(priv->domain, 1));
- if (status & priv->cfg->regs->eoc3_msk)
+ if (status & priv->cfg->regs->eoc3_msk &&
+ stm32_adc_eoc_enabled(priv, 2))
generic_handle_irq(irq_find_mapping(priv->domain, 2));
chained_irq_exit(chip, desc);
diff --git a/drivers/iio/adc/stm32-adc-core.h b/drivers/iio/adc/stm32-adc-core.h
index 8af507b3f32d..2579d514c2a3 100644
--- a/drivers/iio/adc/stm32-adc-core.h
+++ b/drivers/iio/adc/stm32-adc-core.h
@@ -25,8 +25,145 @@
* --------------------------------------------------------
*/
#define STM32_ADC_MAX_ADCS 3
+#define STM32_ADC_OFFSET 0x100
#define STM32_ADCX_COMN_OFFSET 0x300
+/* STM32F4 - Registers for each ADC instance */
+#define STM32F4_ADC_SR 0x00
+#define STM32F4_ADC_CR1 0x04
+#define STM32F4_ADC_CR2 0x08
+#define STM32F4_ADC_SMPR1 0x0C
+#define STM32F4_ADC_SMPR2 0x10
+#define STM32F4_ADC_HTR 0x24
+#define STM32F4_ADC_LTR 0x28
+#define STM32F4_ADC_SQR1 0x2C
+#define STM32F4_ADC_SQR2 0x30
+#define STM32F4_ADC_SQR3 0x34
+#define STM32F4_ADC_JSQR 0x38
+#define STM32F4_ADC_JDR1 0x3C
+#define STM32F4_ADC_JDR2 0x40
+#define STM32F4_ADC_JDR3 0x44
+#define STM32F4_ADC_JDR4 0x48
+#define STM32F4_ADC_DR 0x4C
+
+/* STM32F4 - common registers for all ADC instances: 1, 2 & 3 */
+#define STM32F4_ADC_CSR (STM32_ADCX_COMN_OFFSET + 0x00)
+#define STM32F4_ADC_CCR (STM32_ADCX_COMN_OFFSET + 0x04)
+
+/* STM32F4_ADC_SR - bit fields */
+#define STM32F4_STRT BIT(4)
+#define STM32F4_EOC BIT(1)
+
+/* STM32F4_ADC_CR1 - bit fields */
+#define STM32F4_RES_SHIFT 24
+#define STM32F4_RES_MASK GENMASK(25, 24)
+#define STM32F4_SCAN BIT(8)
+#define STM32F4_EOCIE BIT(5)
+
+/* STM32F4_ADC_CR2 - bit fields */
+#define STM32F4_SWSTART BIT(30)
+#define STM32F4_EXTEN_SHIFT 28
+#define STM32F4_EXTEN_MASK GENMASK(29, 28)
+#define STM32F4_EXTSEL_SHIFT 24
+#define STM32F4_EXTSEL_MASK GENMASK(27, 24)
+#define STM32F4_EOCS BIT(10)
+#define STM32F4_DDS BIT(9)
+#define STM32F4_DMA BIT(8)
+#define STM32F4_ADON BIT(0)
+
+/* STM32F4_ADC_CSR - bit fields */
+#define STM32F4_EOC3 BIT(17)
+#define STM32F4_EOC2 BIT(9)
+#define STM32F4_EOC1 BIT(1)
+
+/* STM32F4_ADC_CCR - bit fields */
+#define STM32F4_ADC_ADCPRE_SHIFT 16
+#define STM32F4_ADC_ADCPRE_MASK GENMASK(17, 16)
+
+/* STM32H7 - Registers for each ADC instance */
+#define STM32H7_ADC_ISR 0x00
+#define STM32H7_ADC_IER 0x04
+#define STM32H7_ADC_CR 0x08
+#define STM32H7_ADC_CFGR 0x0C
+#define STM32H7_ADC_SMPR1 0x14
+#define STM32H7_ADC_SMPR2 0x18
+#define STM32H7_ADC_PCSEL 0x1C
+#define STM32H7_ADC_SQR1 0x30
+#define STM32H7_ADC_SQR2 0x34
+#define STM32H7_ADC_SQR3 0x38
+#define STM32H7_ADC_SQR4 0x3C
+#define STM32H7_ADC_DR 0x40
+#define STM32H7_ADC_DIFSEL 0xC0
+#define STM32H7_ADC_CALFACT 0xC4
+#define STM32H7_ADC_CALFACT2 0xC8
+
+/* STM32H7 - common registers for all ADC instances */
+#define STM32H7_ADC_CSR (STM32_ADCX_COMN_OFFSET + 0x00)
+#define STM32H7_ADC_CCR (STM32_ADCX_COMN_OFFSET + 0x08)
+
+/* STM32H7_ADC_ISR - bit fields */
+#define STM32MP1_VREGREADY BIT(12)
+#define STM32H7_EOC BIT(2)
+#define STM32H7_ADRDY BIT(0)
+
+/* STM32H7_ADC_IER - bit fields */
+#define STM32H7_EOCIE STM32H7_EOC
+
+/* STM32H7_ADC_CR - bit fields */
+#define STM32H7_ADCAL BIT(31)
+#define STM32H7_ADCALDIF BIT(30)
+#define STM32H7_DEEPPWD BIT(29)
+#define STM32H7_ADVREGEN BIT(28)
+#define STM32H7_LINCALRDYW6 BIT(27)
+#define STM32H7_LINCALRDYW5 BIT(26)
+#define STM32H7_LINCALRDYW4 BIT(25)
+#define STM32H7_LINCALRDYW3 BIT(24)
+#define STM32H7_LINCALRDYW2 BIT(23)
+#define STM32H7_LINCALRDYW1 BIT(22)
+#define STM32H7_ADCALLIN BIT(16)
+#define STM32H7_BOOST BIT(8)
+#define STM32H7_ADSTP BIT(4)
+#define STM32H7_ADSTART BIT(2)
+#define STM32H7_ADDIS BIT(1)
+#define STM32H7_ADEN BIT(0)
+
+/* STM32H7_ADC_CFGR bit fields */
+#define STM32H7_EXTEN_SHIFT 10
+#define STM32H7_EXTEN_MASK GENMASK(11, 10)
+#define STM32H7_EXTSEL_SHIFT 5
+#define STM32H7_EXTSEL_MASK GENMASK(9, 5)
+#define STM32H7_RES_SHIFT 2
+#define STM32H7_RES_MASK GENMASK(4, 2)
+#define STM32H7_DMNGT_SHIFT 0
+#define STM32H7_DMNGT_MASK GENMASK(1, 0)
+
+enum stm32h7_adc_dmngt {
+ STM32H7_DMNGT_DR_ONLY, /* Regular data in DR only */
+ STM32H7_DMNGT_DMA_ONESHOT, /* DMA one shot mode */
+ STM32H7_DMNGT_DFSDM, /* DFSDM mode */
+ STM32H7_DMNGT_DMA_CIRC, /* DMA circular mode */
+};
+
+/* STM32H7_ADC_CALFACT - bit fields */
+#define STM32H7_CALFACT_D_SHIFT 16
+#define STM32H7_CALFACT_D_MASK GENMASK(26, 16)
+#define STM32H7_CALFACT_S_SHIFT 0
+#define STM32H7_CALFACT_S_MASK GENMASK(10, 0)
+
+/* STM32H7_ADC_CALFACT2 - bit fields */
+#define STM32H7_LINCALFACT_SHIFT 0
+#define STM32H7_LINCALFACT_MASK GENMASK(29, 0)
+
+/* STM32H7_ADC_CSR - bit fields */
+#define STM32H7_EOC_SLV BIT(18)
+#define STM32H7_EOC_MST BIT(2)
+
+/* STM32H7_ADC_CCR - bit fields */
+#define STM32H7_PRESC_SHIFT 18
+#define STM32H7_PRESC_MASK GENMASK(21, 18)
+#define STM32H7_CKMODE_SHIFT 16
+#define STM32H7_CKMODE_MASK GENMASK(17, 16)
+
/**
* struct stm32_adc_common - stm32 ADC driver common data (for all instances)
* @base: control registers base cpu addr
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index 6a7dd08b1e0b..663f8a5012d6 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -28,115 +28,6 @@
#include "stm32-adc-core.h"
-/* STM32F4 - Registers for each ADC instance */
-#define STM32F4_ADC_SR 0x00
-#define STM32F4_ADC_CR1 0x04
-#define STM32F4_ADC_CR2 0x08
-#define STM32F4_ADC_SMPR1 0x0C
-#define STM32F4_ADC_SMPR2 0x10
-#define STM32F4_ADC_HTR 0x24
-#define STM32F4_ADC_LTR 0x28
-#define STM32F4_ADC_SQR1 0x2C
-#define STM32F4_ADC_SQR2 0x30
-#define STM32F4_ADC_SQR3 0x34
-#define STM32F4_ADC_JSQR 0x38
-#define STM32F4_ADC_JDR1 0x3C
-#define STM32F4_ADC_JDR2 0x40
-#define STM32F4_ADC_JDR3 0x44
-#define STM32F4_ADC_JDR4 0x48
-#define STM32F4_ADC_DR 0x4C
-
-/* STM32F4_ADC_SR - bit fields */
-#define STM32F4_STRT BIT(4)
-#define STM32F4_EOC BIT(1)
-
-/* STM32F4_ADC_CR1 - bit fields */
-#define STM32F4_RES_SHIFT 24
-#define STM32F4_RES_MASK GENMASK(25, 24)
-#define STM32F4_SCAN BIT(8)
-#define STM32F4_EOCIE BIT(5)
-
-/* STM32F4_ADC_CR2 - bit fields */
-#define STM32F4_SWSTART BIT(30)
-#define STM32F4_EXTEN_SHIFT 28
-#define STM32F4_EXTEN_MASK GENMASK(29, 28)
-#define STM32F4_EXTSEL_SHIFT 24
-#define STM32F4_EXTSEL_MASK GENMASK(27, 24)
-#define STM32F4_EOCS BIT(10)
-#define STM32F4_DDS BIT(9)
-#define STM32F4_DMA BIT(8)
-#define STM32F4_ADON BIT(0)
-
-/* STM32H7 - Registers for each ADC instance */
-#define STM32H7_ADC_ISR 0x00
-#define STM32H7_ADC_IER 0x04
-#define STM32H7_ADC_CR 0x08
-#define STM32H7_ADC_CFGR 0x0C
-#define STM32H7_ADC_SMPR1 0x14
-#define STM32H7_ADC_SMPR2 0x18
-#define STM32H7_ADC_PCSEL 0x1C
-#define STM32H7_ADC_SQR1 0x30
-#define STM32H7_ADC_SQR2 0x34
-#define STM32H7_ADC_SQR3 0x38
-#define STM32H7_ADC_SQR4 0x3C
-#define STM32H7_ADC_DR 0x40
-#define STM32H7_ADC_DIFSEL 0xC0
-#define STM32H7_ADC_CALFACT 0xC4
-#define STM32H7_ADC_CALFACT2 0xC8
-
-/* STM32H7_ADC_ISR - bit fields */
-#define STM32MP1_VREGREADY BIT(12)
-#define STM32H7_EOC BIT(2)
-#define STM32H7_ADRDY BIT(0)
-
-/* STM32H7_ADC_IER - bit fields */
-#define STM32H7_EOCIE STM32H7_EOC
-
-/* STM32H7_ADC_CR - bit fields */
-#define STM32H7_ADCAL BIT(31)
-#define STM32H7_ADCALDIF BIT(30)
-#define STM32H7_DEEPPWD BIT(29)
-#define STM32H7_ADVREGEN BIT(28)
-#define STM32H7_LINCALRDYW6 BIT(27)
-#define STM32H7_LINCALRDYW5 BIT(26)
-#define STM32H7_LINCALRDYW4 BIT(25)
-#define STM32H7_LINCALRDYW3 BIT(24)
-#define STM32H7_LINCALRDYW2 BIT(23)
-#define STM32H7_LINCALRDYW1 BIT(22)
-#define STM32H7_ADCALLIN BIT(16)
-#define STM32H7_BOOST BIT(8)
-#define STM32H7_ADSTP BIT(4)
-#define STM32H7_ADSTART BIT(2)
-#define STM32H7_ADDIS BIT(1)
-#define STM32H7_ADEN BIT(0)
-
-/* STM32H7_ADC_CFGR bit fields */
-#define STM32H7_EXTEN_SHIFT 10
-#define STM32H7_EXTEN_MASK GENMASK(11, 10)
-#define STM32H7_EXTSEL_SHIFT 5
-#define STM32H7_EXTSEL_MASK GENMASK(9, 5)
-#define STM32H7_RES_SHIFT 2
-#define STM32H7_RES_MASK GENMASK(4, 2)
-#define STM32H7_DMNGT_SHIFT 0
-#define STM32H7_DMNGT_MASK GENMASK(1, 0)
-
-enum stm32h7_adc_dmngt {
- STM32H7_DMNGT_DR_ONLY, /* Regular data in DR only */
- STM32H7_DMNGT_DMA_ONESHOT, /* DMA one shot mode */
- STM32H7_DMNGT_DFSDM, /* DFSDM mode */
- STM32H7_DMNGT_DMA_CIRC, /* DMA circular mode */
-};
-
-/* STM32H7_ADC_CALFACT - bit fields */
-#define STM32H7_CALFACT_D_SHIFT 16
-#define STM32H7_CALFACT_D_MASK GENMASK(26, 16)
-#define STM32H7_CALFACT_S_SHIFT 0
-#define STM32H7_CALFACT_S_MASK GENMASK(10, 0)
-
-/* STM32H7_ADC_CALFACT2 - bit fields */
-#define STM32H7_LINCALFACT_SHIFT 0
-#define STM32H7_LINCALFACT_MASK GENMASK(29, 0)
-
/* Number of linear calibration shadow registers / LINCALRDYW control bits */
#define STM32H7_LINCALFACT_NUM 6
diff --git a/drivers/iio/imu/adis_buffer.c b/drivers/iio/imu/adis_buffer.c
index 9ac8356d9a95..4998a89d083d 100644
--- a/drivers/iio/imu/adis_buffer.c
+++ b/drivers/iio/imu/adis_buffer.c
@@ -35,8 +35,11 @@ static int adis_update_scan_mode_burst(struct iio_dev *indio_dev,
return -ENOMEM;
adis->buffer = kzalloc(burst_length + sizeof(u16), GFP_KERNEL);
- if (!adis->buffer)
+ if (!adis->buffer) {
+ kfree(adis->xfer);
+ adis->xfer = NULL;
return -ENOMEM;
+ }
tx = adis->buffer + burst_length;
tx[0] = ADIS_READ_REG(adis->burst->reg_cmd);
@@ -78,8 +81,11 @@ int adis_update_scan_mode(struct iio_dev *indio_dev,
return -ENOMEM;
adis->buffer = kcalloc(indio_dev->scan_bytes, 2, GFP_KERNEL);
- if (!adis->buffer)
+ if (!adis->buffer) {
+ kfree(adis->xfer);
+ adis->xfer = NULL;
return -ENOMEM;
+ }
rx = adis->buffer;
tx = rx + scan_count;
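
Both adis hunks apply the same cleanup rule: when the second of two paired allocations fails, the first must be freed and its pointer cleared so later teardown cannot double-free or touch a stale buffer. A plain-C sketch of the pattern (adis_like is a stand-in type):

#include <stdlib.h>

struct adis_like {		/* stand-in for the real adis struct */
	void *xfer;
	void *buffer;
};

static int alloc_pair(struct adis_like *a, size_t xfer_sz, size_t buf_sz)
{
	a->xfer = calloc(1, xfer_sz);
	if (!a->xfer)
		return -1;

	a->buffer = calloc(1, buf_sz);
	if (!a->buffer) {
		free(a->xfer);	/* undo the first allocation */
		a->xfer = NULL;	/* and leave no dangling pointer */
		return -1;
	}
	return 0;
}
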
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
index 80e42c7dbcbe..0fe6999b8257 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
@@ -99,7 +99,9 @@ struct st_lsm6dsx_fs {
#define ST_LSM6DSX_FS_LIST_SIZE 4
struct st_lsm6dsx_fs_table_entry {
struct st_lsm6dsx_reg reg;
+
struct st_lsm6dsx_fs fs_avl[ST_LSM6DSX_FS_LIST_SIZE];
+ int fs_len;
};
/**
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
index 2d3495560136..fd5ebe1e1594 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
@@ -145,6 +145,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 },
.fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 },
.fs_avl[3] = { IIO_G_TO_M_S_2(732), 0x1 },
+ .fs_len = 4,
},
[ST_LSM6DSX_ID_GYRO] = {
.reg = {
@@ -154,6 +155,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.fs_avl[0] = { IIO_DEGREE_TO_RAD(245), 0x0 },
.fs_avl[1] = { IIO_DEGREE_TO_RAD(500), 0x1 },
.fs_avl[2] = { IIO_DEGREE_TO_RAD(2000), 0x3 },
+ .fs_len = 3,
},
},
},
@@ -215,6 +217,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 },
.fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 },
.fs_avl[3] = { IIO_G_TO_M_S_2(488), 0x1 },
+ .fs_len = 4,
},
[ST_LSM6DSX_ID_GYRO] = {
.reg = {
@@ -225,6 +228,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 },
.fs_avl[2] = { IIO_DEGREE_TO_RAD(35000), 0x2 },
.fs_avl[3] = { IIO_DEGREE_TO_RAD(70000), 0x3 },
+ .fs_len = 4,
},
},
.decimator = {
@@ -327,6 +331,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 },
.fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 },
.fs_avl[3] = { IIO_G_TO_M_S_2(488), 0x1 },
+ .fs_len = 4,
},
[ST_LSM6DSX_ID_GYRO] = {
.reg = {
@@ -337,6 +342,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 },
.fs_avl[2] = { IIO_DEGREE_TO_RAD(35000), 0x2 },
.fs_avl[3] = { IIO_DEGREE_TO_RAD(70000), 0x3 },
+ .fs_len = 4,
},
},
.decimator = {
@@ -448,6 +454,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 },
.fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 },
.fs_avl[3] = { IIO_G_TO_M_S_2(488), 0x1 },
+ .fs_len = 4,
},
[ST_LSM6DSX_ID_GYRO] = {
.reg = {
@@ -458,6 +465,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 },
.fs_avl[2] = { IIO_DEGREE_TO_RAD(35000), 0x2 },
.fs_avl[3] = { IIO_DEGREE_TO_RAD(70000), 0x3 },
+ .fs_len = 4,
},
},
.decimator = {
@@ -563,6 +571,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 },
.fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 },
.fs_avl[3] = { IIO_G_TO_M_S_2(488), 0x1 },
+ .fs_len = 4,
},
[ST_LSM6DSX_ID_GYRO] = {
.reg = {
@@ -573,6 +582,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 },
.fs_avl[2] = { IIO_DEGREE_TO_RAD(35000), 0x2 },
.fs_avl[3] = { IIO_DEGREE_TO_RAD(70000), 0x3 },
+ .fs_len = 4,
},
},
.batch = {
@@ -693,6 +703,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 },
.fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 },
.fs_avl[3] = { IIO_G_TO_M_S_2(488), 0x1 },
+ .fs_len = 4,
},
[ST_LSM6DSX_ID_GYRO] = {
.reg = {
@@ -703,6 +714,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 },
.fs_avl[2] = { IIO_DEGREE_TO_RAD(35000), 0x2 },
.fs_avl[3] = { IIO_DEGREE_TO_RAD(70000), 0x3 },
+ .fs_len = 4,
},
},
.batch = {
@@ -800,6 +812,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 },
.fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 },
.fs_avl[3] = { IIO_G_TO_M_S_2(488), 0x1 },
+ .fs_len = 4,
},
[ST_LSM6DSX_ID_GYRO] = {
.reg = {
@@ -810,6 +823,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 },
.fs_avl[2] = { IIO_DEGREE_TO_RAD(35000), 0x2 },
.fs_avl[3] = { IIO_DEGREE_TO_RAD(70000), 0x3 },
+ .fs_len = 4,
},
},
.batch = {
@@ -933,11 +947,12 @@ static int st_lsm6dsx_set_full_scale(struct st_lsm6dsx_sensor *sensor,
int i, err;
fs_table = &sensor->hw->settings->fs_table[sensor->id];
- for (i = 0; i < ST_LSM6DSX_FS_LIST_SIZE; i++)
+ for (i = 0; i < fs_table->fs_len; i++) {
if (fs_table->fs_avl[i].gain == gain)
break;
+ }
- if (i == ST_LSM6DSX_FS_LIST_SIZE)
+ if (i == fs_table->fs_len)
return -EINVAL;
data = ST_LSM6DSX_SHIFT_VAL(fs_table->fs_avl[i].val,
@@ -1196,18 +1211,13 @@ static ssize_t st_lsm6dsx_sysfs_scale_avail(struct device *dev,
{
struct st_lsm6dsx_sensor *sensor = iio_priv(dev_get_drvdata(dev));
const struct st_lsm6dsx_fs_table_entry *fs_table;
- enum st_lsm6dsx_sensor_id id = sensor->id;
struct st_lsm6dsx_hw *hw = sensor->hw;
int i, len = 0;
- fs_table = &hw->settings->fs_table[id];
- for (i = 0; i < ST_LSM6DSX_FS_LIST_SIZE; i++) {
- if (!fs_table->fs_avl[i].gain)
- break;
-
+ fs_table = &hw->settings->fs_table[sensor->id];
+ for (i = 0; i < fs_table->fs_len; i++)
len += scnprintf(buf + len, PAGE_SIZE - len, "0.%06u ",
fs_table->fs_avl[i].gain);
- }
buf[len - 1] = '\n';
return len;
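
The recurring .fs_len additions give each fixed-capacity full-scale table an explicit entry count, so lookups and sysfs listings iterate exactly the valid entries instead of scanning all ST_LSM6DSX_FS_LIST_SIZE slots or relying on zeroed sentinels. A compilable sketch with simplified types and example gains:

#include <stdio.h>

#define FS_LIST_SIZE 4		/* fixed capacity, like the driver's */

struct fs_entry { unsigned int gain; unsigned int val; };

struct fs_table {
	struct fs_entry fs_avl[FS_LIST_SIZE];
	int fs_len;		/* valid entries; may be < capacity */
};

static int find_gain(const struct fs_table *t, unsigned int gain)
{
	int i;

	for (i = 0; i < t->fs_len; i++)
		if (t->fs_avl[i].gain == gain)
			return i;
	return -1;		/* the driver returns -EINVAL here */
}

int main(void)
{
	struct fs_table t = { { { 245, 0 }, { 500, 1 }, { 2000, 3 } }, 3 };

	printf("index of gain 500: %d\n", find_gain(&t, 500));
	return 0;
}
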
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
index 66fbcd94642d..ea472cf6db7b 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
@@ -61,6 +61,7 @@ static const struct st_lsm6dsx_ext_dev_settings st_lsm6dsx_ext_dev_table[] = {
.gain = 1500,
.val = 0x0,
}, /* 1500 uG/LSB */
+ .fs_len = 1,
},
.temp_comp = {
.addr = 0x60,
@@ -92,9 +93,11 @@ static const struct st_lsm6dsx_ext_dev_settings st_lsm6dsx_ext_dev_table[] = {
static void st_lsm6dsx_shub_wait_complete(struct st_lsm6dsx_hw *hw)
{
struct st_lsm6dsx_sensor *sensor;
+ u16 odr;
sensor = iio_priv(hw->iio_devs[ST_LSM6DSX_ID_ACC]);
- msleep((2000U / sensor->odr) + 1);
+ odr = (hw->enable_mask & BIT(ST_LSM6DSX_ID_ACC)) ? sensor->odr : 13;
+ msleep((2000U / odr) + 1);
}
/**
@@ -555,13 +558,9 @@ static ssize_t st_lsm6dsx_shub_scale_avail(struct device *dev,
int i, len = 0;
settings = sensor->ext_info.settings;
- for (i = 0; i < ST_LSM6DSX_FS_LIST_SIZE; i++) {
- u16 val = settings->fs_table.fs_avl[i].gain;
-
- if (val > 0)
- len += scnprintf(buf + len, PAGE_SIZE - len, "0.%06u ",
- val);
- }
+ for (i = 0; i < settings->fs_table.fs_len; i++)
+ len += scnprintf(buf + len, PAGE_SIZE - len, "0.%06u ",
+ settings->fs_table.fs_avl[i].gain);
buf[len - 1] = '\n';
return len;
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index 08d7e1ef2186..4a1a883dc061 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -314,6 +314,7 @@ config MAX44009
config NOA1305
tristate "ON Semiconductor NOA1305 ambient light sensor"
depends on I2C
+ select REGMAP_I2C
help
Say Y here if you want to build support for the ON Semiconductor
NOA1305 ambient light sensor.
diff --git a/drivers/iio/light/opt3001.c b/drivers/iio/light/opt3001.c
index e666879007d2..92004a2563ea 100644
--- a/drivers/iio/light/opt3001.c
+++ b/drivers/iio/light/opt3001.c
@@ -686,6 +686,7 @@ static irqreturn_t opt3001_irq(int irq, void *_iio)
struct iio_dev *iio = _iio;
struct opt3001 *opt = iio_priv(iio);
int ret;
+ bool wake_result_ready_queue = false;
if (!opt->ok_to_ignore_lock)
mutex_lock(&opt->lock);
@@ -720,13 +721,16 @@ static irqreturn_t opt3001_irq(int irq, void *_iio)
}
opt->result = ret;
opt->result_ready = true;
- wake_up(&opt->result_ready_queue);
+ wake_result_ready_queue = true;
}
out:
if (!opt->ok_to_ignore_lock)
mutex_unlock(&opt->lock);
+ if (wake_result_ready_queue)
+ wake_up(&opt->result_ready_queue);
+
return IRQ_HANDLED;
}
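
The opt3001 fix defers wake_up() until after the mutex is released, so the woken waiter does not immediately block on the still-held lock. A kernel-style sketch of the deferred-wakeup shape, with hypothetical names:

#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/wait.h>

struct opt_like {		/* hypothetical stand-in */
	struct mutex lock;
	wait_queue_head_t wq;
	bool result_ready;
};

static void complete_result(struct opt_like *st)
{
	bool wake_pending = false;

	mutex_lock(&st->lock);
	st->result_ready = true;
	wake_pending = true;	/* note the wakeup; don't issue it yet */
	mutex_unlock(&st->lock);

	if (wake_pending)	/* waiter can now take the lock at once */
		wake_up(&st->wq);
}
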
diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c
index 51421ac32517..16dacea9eadf 100644
--- a/drivers/iio/light/vcnl4000.c
+++ b/drivers/iio/light/vcnl4000.c
@@ -398,19 +398,23 @@ static int vcnl4000_probe(struct i2c_client *client,
static const struct of_device_id vcnl_4000_of_match[] = {
{
.compatible = "vishay,vcnl4000",
- .data = "VCNL4000",
+ .data = (void *)VCNL4000,
},
{
.compatible = "vishay,vcnl4010",
- .data = "VCNL4010",
+ .data = (void *)VCNL4010,
},
{
- .compatible = "vishay,vcnl4010",
- .data = "VCNL4020",
+ .compatible = "vishay,vcnl4020",
+ .data = (void *)VCNL4010,
+ },
+ {
+ .compatible = "vishay,vcnl4040",
+ .data = (void *)VCNL4040,
},
{
.compatible = "vishay,vcnl4200",
- .data = "VCNL4200",
+ .data = (void *)VCNL4200,
},
{},
};
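
Besides fixing the duplicated vcnl4010 compatible, the table now stashes an enum value in .data via the usual (void *)(uintptr_t) cast rather than a string. A sketch of how a probe path would read it back (the enum and function names here are illustrative, not the driver's):

#include <linux/of_device.h>
#include <linux/types.h>

/* Illustrative ids; the driver defines its own enum for these. */
enum vcnl_id { ID_VCNL4000, ID_VCNL4010, ID_VCNL4040, ID_VCNL4200 };

static enum vcnl_id get_chip_id(struct device *dev)
{
	/* round-trips the integer stashed in .data above */
	return (enum vcnl_id)(uintptr_t)of_device_get_match_data(dev);
}
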
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index da10e6ccb43c..5920c0085d35 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -4399,6 +4399,7 @@ error2:
error1:
port_modify.set_port_cap_mask = 0;
port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
+ kfree(port);
while (--i) {
if (!rdma_cap_ib_cm(ib_device, i))
continue;
@@ -4407,6 +4408,7 @@ error1:
ib_modify_port(ib_device, port->port_num, 0, &port_modify);
ib_unregister_mad_agent(port->mad_agent);
cm_remove_port_fs(port);
+ kfree(port);
}
free:
kfree(cm_dev);
@@ -4460,6 +4462,7 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
spin_unlock_irq(&cm.state_lock);
ib_unregister_mad_agent(cur_mad_agent);
cm_remove_port_fs(port);
+ kfree(port);
}
kfree(cm_dev);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 0e3cf3461999..d78f67623f24 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -2396,9 +2396,10 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
conn_id->cm_id.iw = NULL;
cma_exch(conn_id, RDMA_CM_DESTROYING);
mutex_unlock(&conn_id->handler_mutex);
+ mutex_unlock(&listen_id->handler_mutex);
cma_deref_id(conn_id);
rdma_destroy_id(&conn_id->id);
- goto out;
+ return ret;
}
mutex_unlock(&conn_id->handler_mutex);
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index 3a8b0911c3bc..9d07378b5b42 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -199,6 +199,7 @@ void ib_mad_cleanup(void);
int ib_sa_init(void);
void ib_sa_cleanup(void);
+void rdma_nl_init(void);
void rdma_nl_exit(void);
int ib_nl_handle_resolve_resp(struct sk_buff *skb,
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 99c4a55545cf..50a92442c4f7 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -1987,8 +1987,6 @@ static int iw_query_port(struct ib_device *device,
if (!netdev)
return -ENODEV;
- dev_put(netdev);
-
port_attr->max_mtu = IB_MTU_4096;
port_attr->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
@@ -1996,19 +1994,22 @@ static int iw_query_port(struct ib_device *device,
port_attr->state = IB_PORT_DOWN;
port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
} else {
- inetdev = in_dev_get(netdev);
+ rcu_read_lock();
+ inetdev = __in_dev_get_rcu(netdev);
if (inetdev && inetdev->ifa_list) {
port_attr->state = IB_PORT_ACTIVE;
port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
- in_dev_put(inetdev);
} else {
port_attr->state = IB_PORT_INIT;
port_attr->phys_state =
IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING;
}
+
+ rcu_read_unlock();
}
+ dev_put(netdev);
err = device->ops.query_port(device, port_num, port_attr);
if (err)
return err;
@@ -2715,6 +2716,8 @@ static int __init ib_core_init(void)
goto err_comp_unbound;
}
+ rdma_nl_init();
+
ret = addr_init();
if (ret) {
pr_warn("Could't init IB address resolution\n");
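
The iw_query_port() rework above keeps the netdev reference until all uses are done and inspects the in_device under RCU instead of taking and dropping an extra refcount. The lookup pattern, sketched on its own:

#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>

/* Sketch: check for an IPv4 address without extra refcounting. */
static bool netdev_has_inet_addr(struct net_device *ndev)
{
	struct in_device *in_dev;
	bool has_addr;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(ndev);	/* valid only inside RCU */
	has_addr = in_dev && in_dev->ifa_list;
	rcu_read_unlock();

	return has_addr;
}
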
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index 72141c5b7c95..ade71823370f 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -372,6 +372,7 @@ EXPORT_SYMBOL(iw_cm_disconnect);
static void destroy_cm_id(struct iw_cm_id *cm_id)
{
struct iwcm_id_private *cm_id_priv;
+ struct ib_qp *qp;
unsigned long flags;
cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
@@ -389,6 +390,9 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
set_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags);
spin_lock_irqsave(&cm_id_priv->lock, flags);
+ qp = cm_id_priv->qp;
+ cm_id_priv->qp = NULL;
+
switch (cm_id_priv->state) {
case IW_CM_STATE_LISTEN:
cm_id_priv->state = IW_CM_STATE_DESTROYING;
@@ -401,7 +405,7 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
cm_id_priv->state = IW_CM_STATE_DESTROYING;
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
/* Abrupt close of the connection */
- (void)iwcm_modify_qp_err(cm_id_priv->qp);
+ (void)iwcm_modify_qp_err(qp);
spin_lock_irqsave(&cm_id_priv->lock, flags);
break;
case IW_CM_STATE_IDLE:
@@ -426,11 +430,9 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
BUG();
break;
}
- if (cm_id_priv->qp) {
- cm_id_priv->id.device->ops.iw_rem_ref(cm_id_priv->qp);
- cm_id_priv->qp = NULL;
- }
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ if (qp)
+ cm_id_priv->id.device->ops.iw_rem_ref(qp);
if (cm_id->mapped) {
iwpm_remove_mapinfo(&cm_id->local_addr, &cm_id->m_local_addr);
@@ -671,11 +673,11 @@ int iw_cm_accept(struct iw_cm_id *cm_id,
BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
cm_id_priv->state = IW_CM_STATE_IDLE;
spin_lock_irqsave(&cm_id_priv->lock, flags);
- if (cm_id_priv->qp) {
- cm_id->device->ops.iw_rem_ref(qp);
- cm_id_priv->qp = NULL;
- }
+ qp = cm_id_priv->qp;
+ cm_id_priv->qp = NULL;
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ if (qp)
+ cm_id->device->ops.iw_rem_ref(qp);
clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
wake_up_all(&cm_id_priv->connect_wait);
}
@@ -696,7 +698,7 @@ int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
struct iwcm_id_private *cm_id_priv;
int ret;
unsigned long flags;
- struct ib_qp *qp;
+ struct ib_qp *qp = NULL;
cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
@@ -730,13 +732,13 @@ int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
return 0; /* success */
spin_lock_irqsave(&cm_id_priv->lock, flags);
- if (cm_id_priv->qp) {
- cm_id->device->ops.iw_rem_ref(qp);
- cm_id_priv->qp = NULL;
- }
+ qp = cm_id_priv->qp;
+ cm_id_priv->qp = NULL;
cm_id_priv->state = IW_CM_STATE_IDLE;
err:
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ if (qp)
+ cm_id->device->ops.iw_rem_ref(qp);
clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
wake_up_all(&cm_id_priv->connect_wait);
return ret;
@@ -878,6 +880,7 @@ static int cm_conn_est_handler(struct iwcm_id_private *cm_id_priv,
static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
struct iw_cm_event *iw_event)
{
+ struct ib_qp *qp = NULL;
unsigned long flags;
int ret;
@@ -896,11 +899,13 @@ static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
} else {
/* REJECTED or RESET */
- cm_id_priv->id.device->ops.iw_rem_ref(cm_id_priv->qp);
+ qp = cm_id_priv->qp;
cm_id_priv->qp = NULL;
cm_id_priv->state = IW_CM_STATE_IDLE;
}
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ if (qp)
+ cm_id_priv->id.device->ops.iw_rem_ref(qp);
ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
if (iw_event->private_data_len)
@@ -942,21 +947,18 @@ static void cm_disconnect_handler(struct iwcm_id_private *cm_id_priv,
static int cm_close_handler(struct iwcm_id_private *cm_id_priv,
struct iw_cm_event *iw_event)
{
+ struct ib_qp *qp;
unsigned long flags;
- int ret = 0;
+ int ret = 0, notify_event = 0;
spin_lock_irqsave(&cm_id_priv->lock, flags);
+ qp = cm_id_priv->qp;
+ cm_id_priv->qp = NULL;
- if (cm_id_priv->qp) {
- cm_id_priv->id.device->ops.iw_rem_ref(cm_id_priv->qp);
- cm_id_priv->qp = NULL;
- }
switch (cm_id_priv->state) {
case IW_CM_STATE_ESTABLISHED:
case IW_CM_STATE_CLOSING:
cm_id_priv->state = IW_CM_STATE_IDLE;
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
- spin_lock_irqsave(&cm_id_priv->lock, flags);
+ notify_event = 1;
break;
case IW_CM_STATE_DESTROYING:
break;
@@ -965,6 +967,10 @@ static int cm_close_handler(struct iwcm_id_private *cm_id_priv,
}
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ if (qp)
+ cm_id_priv->id.device->ops.iw_rem_ref(qp);
+ if (notify_event)
+ ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
return ret;
}
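
Every iwcm hunk follows one shape: snatch the qp pointer and NULL the shared field while holding the spinlock, then release the reference only after unlocking, so iw_rem_ref() never runs under the lock and no two paths can release the same qp. A simplified sketch:

#include <linux/spinlock.h>

struct conn {			/* simplified iwcm_id_private stand-in */
	spinlock_t lock;
	void *qp;
	void (*rem_ref)(void *qp);
};

static void drop_qp(struct conn *c)
{
	unsigned long flags;
	void *qp;

	spin_lock_irqsave(&c->lock, flags);
	qp = c->qp;		/* snatch it under the lock... */
	c->qp = NULL;		/* ...so only one path can release it */
	spin_unlock_irqrestore(&c->lock, flags);

	if (qp)
		c->rem_ref(qp);	/* release outside the spinlock */
}
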
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index 81dbd5f41bed..8cd31ef25eff 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -42,9 +42,12 @@
#include <linux/module.h>
#include "core_priv.h"
-static DEFINE_MUTEX(rdma_nl_mutex);
static struct {
- const struct rdma_nl_cbs *cb_table;
+ const struct rdma_nl_cbs *cb_table;
+ /* Synchronizes ongoing netlink commands with netlink client
+ * unregistration.
+ */
+ struct rw_semaphore sem;
} rdma_nl_types[RDMA_NL_NUM_CLIENTS];
bool rdma_nl_chk_listeners(unsigned int group)
@@ -75,70 +78,53 @@ static bool is_nl_msg_valid(unsigned int type, unsigned int op)
return (op < max_num_ops[type]) ? true : false;
}
-static bool
-is_nl_valid(const struct sk_buff *skb, unsigned int type, unsigned int op)
+static const struct rdma_nl_cbs *
+get_cb_table(const struct sk_buff *skb, unsigned int type, unsigned int op)
{
const struct rdma_nl_cbs *cb_table;
- if (!is_nl_msg_valid(type, op))
- return false;
-
/*
* Currently only the NLDEV client supports netlink commands in a
* non-init_net net namespace.
*/
if (sock_net(skb->sk) != &init_net && type != RDMA_NL_NLDEV)
- return false;
+ return NULL;
- if (!rdma_nl_types[type].cb_table) {
- mutex_unlock(&rdma_nl_mutex);
- request_module("rdma-netlink-subsys-%d", type);
- mutex_lock(&rdma_nl_mutex);
- }
+ cb_table = READ_ONCE(rdma_nl_types[type].cb_table);
+ if (!cb_table) {
+ /*
+ * Didn't get a valid reference to the table; attempt a module
+ * load once.
+ */
+ up_read(&rdma_nl_types[type].sem);
- cb_table = rdma_nl_types[type].cb_table;
+ request_module("rdma-netlink-subsys-%d", type);
+ down_read(&rdma_nl_types[type].sem);
+ cb_table = READ_ONCE(rdma_nl_types[type].cb_table);
+ }
if (!cb_table || (!cb_table[op].dump && !cb_table[op].doit))
- return false;
- return true;
+ return NULL;
+ return cb_table;
}
void rdma_nl_register(unsigned int index,
const struct rdma_nl_cbs cb_table[])
{
- mutex_lock(&rdma_nl_mutex);
- if (!is_nl_msg_valid(index, 0)) {
- /*
- * All clients are not interesting in success/failure of
- * this call. They want to see the print to error log and
- * continue their initialization. Print warning for them,
- * because it is programmer's error to be here.
- */
- mutex_unlock(&rdma_nl_mutex);
- WARN(true,
- "The not-valid %u index was supplied to RDMA netlink\n",
- index);
+ if (WARN_ON(!is_nl_msg_valid(index, 0)) ||
+ WARN_ON(READ_ONCE(rdma_nl_types[index].cb_table)))
return;
- }
-
- if (rdma_nl_types[index].cb_table) {
- mutex_unlock(&rdma_nl_mutex);
- WARN(true,
- "The %u index is already registered in RDMA netlink\n",
- index);
- return;
- }
- rdma_nl_types[index].cb_table = cb_table;
- mutex_unlock(&rdma_nl_mutex);
+ /* Pairs with the READ_ONCE() in get_cb_table() */
+ smp_store_release(&rdma_nl_types[index].cb_table, cb_table);
}
EXPORT_SYMBOL(rdma_nl_register);
void rdma_nl_unregister(unsigned int index)
{
- mutex_lock(&rdma_nl_mutex);
+ down_write(&rdma_nl_types[index].sem);
rdma_nl_types[index].cb_table = NULL;
- mutex_unlock(&rdma_nl_mutex);
+ up_write(&rdma_nl_types[index].sem);
}
EXPORT_SYMBOL(rdma_nl_unregister);
@@ -170,15 +156,21 @@ static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
unsigned int index = RDMA_NL_GET_CLIENT(type);
unsigned int op = RDMA_NL_GET_OP(type);
const struct rdma_nl_cbs *cb_table;
+ int err = -EINVAL;
- if (!is_nl_valid(skb, index, op))
+ if (!is_nl_msg_valid(index, op))
return -EINVAL;
- cb_table = rdma_nl_types[index].cb_table;
+ down_read(&rdma_nl_types[index].sem);
+ cb_table = get_cb_table(skb, index, op);
+ if (!cb_table)
+ goto done;
if ((cb_table[op].flags & RDMA_NL_ADMIN_PERM) &&
- !netlink_capable(skb, CAP_NET_ADMIN))
- return -EPERM;
+ !netlink_capable(skb, CAP_NET_ADMIN)) {
+ err = -EPERM;
+ goto done;
+ }
/*
* LS responses overload the 0x100 (NLM_F_ROOT) flag. Don't
@@ -186,8 +178,8 @@ static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
*/
if (index == RDMA_NL_LS) {
if (cb_table[op].doit)
- return cb_table[op].doit(skb, nlh, extack);
- return -EINVAL;
+ err = cb_table[op].doit(skb, nlh, extack);
+ goto done;
}
/* FIXME: Convert IWCM to properly handle doit callbacks */
if ((nlh->nlmsg_flags & NLM_F_DUMP) || index == RDMA_NL_IWCM) {
@@ -195,14 +187,15 @@ static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
.dump = cb_table[op].dump,
};
if (c.dump)
- return netlink_dump_start(skb->sk, skb, nlh, &c);
- return -EINVAL;
+ err = netlink_dump_start(skb->sk, skb, nlh, &c);
+ goto done;
}
if (cb_table[op].doit)
- return cb_table[op].doit(skb, nlh, extack);
-
- return 0;
+ err = cb_table[op].doit(skb, nlh, extack);
+done:
+ up_read(&rdma_nl_types[index].sem);
+ return err;
}
/*
@@ -263,9 +256,7 @@ skip:
static void rdma_nl_rcv(struct sk_buff *skb)
{
- mutex_lock(&rdma_nl_mutex);
rdma_nl_rcv_skb(skb, &rdma_nl_rcv_msg);
- mutex_unlock(&rdma_nl_mutex);
}
int rdma_nl_unicast(struct net *net, struct sk_buff *skb, u32 pid)
@@ -297,6 +288,14 @@ int rdma_nl_multicast(struct net *net, struct sk_buff *skb,
}
EXPORT_SYMBOL(rdma_nl_multicast);
+void rdma_nl_init(void)
+{
+ int idx;
+
+ for (idx = 0; idx < RDMA_NL_NUM_CLIENTS; idx++)
+ init_rwsem(&rdma_nl_types[idx].sem);
+}
+
void rdma_nl_exit(void)
{
int idx;
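
The netlink rework replaces the single global mutex with a per-client rwsem: command handlers hold the read side, unregistration takes the write side, and the one-shot module autoload must drop and retake the read lock around the blocking request_module() call, re-reading the table afterwards. A sketch of that lookup, with a simplified client struct:

#include <linux/compiler.h>
#include <linux/kmod.h>
#include <linux/rwsem.h>

struct nl_client {		/* simplified per-index state */
	const void *cb_table;
	struct rw_semaphore sem; /* readers: handlers; writer: unregister */
};

/* Called with cli->sem held for reading; returns with it still held. */
static const void *lookup_table(struct nl_client *cli, int idx)
{
	const void *tbl = READ_ONCE(cli->cb_table);

	if (!tbl) {
		up_read(&cli->sem); /* don't hold it across a blocking load */
		request_module("rdma-netlink-subsys-%d", idx);
		down_read(&cli->sem);
		tbl = READ_ONCE(cli->cb_table); /* re-read after re-lock */
	}
	return tbl;
}
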
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 7a7474000100..c03af08b80e7 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -778,7 +778,7 @@ static int fill_res_counter_entry(struct sk_buff *msg, bool has_cap_net_admin,
container_of(res, struct rdma_counter, res);
if (port && port != counter->port)
- return 0;
+ return -EAGAIN;
/* Dump it even if the query failed */
rdma_counter_query_stats(counter);
@@ -1230,7 +1230,7 @@ static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg) {
ret = -ENOMEM;
- goto err;
+ goto err_get;
}
nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
@@ -1787,10 +1787,6 @@ static int nldev_stat_del_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]);
qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]);
- ret = rdma_counter_unbind_qpn(device, port, qpn, cntn);
- if (ret)
- goto err_unbind;
-
if (fill_nldev_handle(msg, device) ||
nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) ||
nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) ||
@@ -1799,13 +1795,15 @@ static int nldev_stat_del_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
goto err_fill;
}
+ ret = rdma_counter_unbind_qpn(device, port, qpn, cntn);
+ if (ret)
+ goto err_fill;
+
nlmsg_end(msg, nlh);
ib_device_put(device);
return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
err_fill:
- rdma_counter_bind_qpn(device, port, qpn, cntn);
-err_unbind:
nlmsg_free(msg);
err:
ib_device_put(device);
diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
index 1ab423b19f77..6eb6d2717ca5 100644
--- a/drivers/infiniband/core/security.c
+++ b/drivers/infiniband/core/security.c
@@ -426,7 +426,7 @@ int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev)
int ret;
rdma_for_each_port (dev, i) {
- is_ib = rdma_protocol_ib(dev, i++);
+ is_ib = rdma_protocol_ib(dev, i);
if (is_ib)
break;
}
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index f67a30fda1ed..163ff7ba92b7 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -451,8 +451,10 @@ void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
* that the hardware will not attempt to access the MR any more.
*/
if (!umem_odp->is_implicit_odp) {
+ mutex_lock(&umem_odp->umem_mutex);
ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
ib_umem_end(umem_odp));
+ mutex_unlock(&umem_odp->umem_mutex);
kvfree(umem_odp->dma_list);
kvfree(umem_odp->page_list);
}
@@ -719,6 +721,8 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
u64 addr;
struct ib_device *dev = umem_odp->umem.ibdev;
+ lockdep_assert_held(&umem_odp->umem_mutex);
+
virt = max_t(u64, virt, ib_umem_start(umem_odp));
bound = min_t(u64, bound, ib_umem_end(umem_odp));
/* Note that during the run of this function, the
* notifiers_count of the MR is > 0, preventing any racing
* faults from completion. We might be racing with other
* invalidations, so we must make sure we free each page only
* once. */
* once. */
- mutex_lock(&umem_odp->umem_mutex);
for (addr = virt; addr < bound; addr += BIT(umem_odp->page_shift)) {
idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
if (umem_odp->page_list[idx]) {
@@ -757,7 +760,6 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
umem_odp->npages--;
}
}
- mutex_unlock(&umem_odp->umem_mutex);
}
EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);
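
The umem_odp change inverts the locking contract: ib_umem_odp_unmap_dma_pages() no longer takes umem_mutex itself but asserts that its caller holds it, letting the release path serialize the unmap in one critical section. The contract, sketched with a stand-in type:

#include <linux/lockdep.h>
#include <linux/mutex.h>

struct odp_like { struct mutex umem_mutex; };	/* stand-in type */

static void unmap_range(struct odp_like *odp)
{
	lockdep_assert_held(&odp->umem_mutex);	/* caller must lock */
	/* ...walk and free pages under the caller's lock... */
}

static void release_path(struct odp_like *odp)
{
	mutex_lock(&odp->umem_mutex);	/* caller side of the contract */
	unmap_range(odp);
	mutex_unlock(&odp->umem_mutex);
	/* ...now safe to free the page bookkeeping arrays... */
}
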
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index 1e5aeb39f774..63f7f7db5902 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -98,7 +98,7 @@ ib_uverbs_init_udata_buf_or_null(struct ib_udata *udata,
struct ib_uverbs_device {
atomic_t refcount;
- int num_comp_vectors;
+ u32 num_comp_vectors;
struct completion comp;
struct device dev;
/* First group for device attributes, NULL terminated array */
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index f974b6854224..35c2841a569e 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -662,16 +662,17 @@ static bool find_gid_index(const union ib_gid *gid,
void *context)
{
struct find_gid_index_context *ctx = context;
+ u16 vlan_id = 0xffff;
+ int ret;
if (ctx->gid_type != gid_attr->gid_type)
return false;
- if ((!!(ctx->vlan_id != 0xffff) == !is_vlan_dev(gid_attr->ndev)) ||
- (is_vlan_dev(gid_attr->ndev) &&
- vlan_dev_vlan_id(gid_attr->ndev) != ctx->vlan_id))
+ ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL);
+ if (ret)
return false;
- return true;
+ return ctx->vlan_id == vlan_id;
}
static const struct ib_gid_attr *
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index e87fc0408470..347dc242fb88 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -495,7 +495,6 @@ static int _put_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
release_ep_resources(ep);
- kfree_skb(skb);
return 0;
}
@@ -506,7 +505,6 @@ static int _put_pass_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
c4iw_put_ep(&ep->parent_ep->com);
release_ep_resources(ep);
- kfree_skb(skb);
return 0;
}
@@ -2424,20 +2422,6 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
pr_debug("ep %p tid %u\n", ep, ep->hwtid);
-
- skb_get(skb);
- rpl = cplhdr(skb);
- if (!is_t4(adapter_type)) {
- skb_trim(skb, roundup(sizeof(*rpl5), 16));
- rpl5 = (void *)rpl;
- INIT_TP_WR(rpl5, ep->hwtid);
- } else {
- skb_trim(skb, sizeof(*rpl));
- INIT_TP_WR(rpl, ep->hwtid);
- }
- OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
- ep->hwtid));
-
cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
enable_tcp_timestamps && req->tcpopt.tstamp,
(ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
@@ -2483,6 +2467,20 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
if (tcph->ece && tcph->cwr)
opt2 |= CCTRL_ECN_V(1);
}
+
+ skb_get(skb);
+ rpl = cplhdr(skb);
+ if (!is_t4(adapter_type)) {
+ skb_trim(skb, roundup(sizeof(*rpl5), 16));
+ rpl5 = (void *)rpl;
+ INIT_TP_WR(rpl5, ep->hwtid);
+ } else {
+ skb_trim(skb, sizeof(*rpl));
+ INIT_TP_WR(rpl, ep->hwtid);
+ }
+ OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
+ ep->hwtid));
+
if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) {
u32 isn = (prandom_u32() & ~7UL) - 1;
opt2 |= T5_OPT_2_VALID_F;
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index a8b9548bd1a2..599340c1f0b8 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -242,10 +242,13 @@ static void set_ep_sin6_addrs(struct c4iw_ep *ep,
}
}
-static int dump_qp(struct c4iw_qp *qp, struct c4iw_debugfs_data *qpd)
+static int dump_qp(unsigned long id, struct c4iw_qp *qp,
+ struct c4iw_debugfs_data *qpd)
{
int space;
int cc;
+ if (id != qp->wq.sq.qid)
+ return 0;
space = qpd->bufsize - qpd->pos - 1;
if (space == 0)
@@ -350,7 +353,7 @@ static int qp_open(struct inode *inode, struct file *file)
xa_lock_irq(&qpd->devp->qps);
xa_for_each(&qpd->devp->qps, index, qp)
- dump_qp(qp, qpd);
+ dump_qp(index, qp, qpd);
xa_unlock_irq(&qpd->devp->qps);
qpd->buf[qpd->pos++] = 0;
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index aa772ee0706f..35c284af574d 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -275,13 +275,17 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
struct sk_buff *skb, struct c4iw_wr_wait *wr_waitp)
{
int err;
- struct fw_ri_tpte tpt;
+ struct fw_ri_tpte *tpt;
u32 stag_idx;
static atomic_t key;
if (c4iw_fatal_error(rdev))
return -EIO;
+ tpt = kmalloc(sizeof(*tpt), GFP_KERNEL);
+ if (!tpt)
+ return -ENOMEM;
+
stag_state = stag_state > 0;
stag_idx = (*stag) >> 8;
@@ -291,6 +295,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
mutex_lock(&rdev->stats.lock);
rdev->stats.stag.fail++;
mutex_unlock(&rdev->stats.lock);
+ kfree(tpt);
return -ENOMEM;
}
mutex_lock(&rdev->stats.lock);
@@ -305,28 +310,28 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
/* write TPT entry */
if (reset_tpt_entry)
- memset(&tpt, 0, sizeof(tpt));
+ memset(tpt, 0, sizeof(*tpt));
else {
- tpt.valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
+ tpt->valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
FW_RI_TPTE_STAGKEY_V((*stag & FW_RI_TPTE_STAGKEY_M)) |
FW_RI_TPTE_STAGSTATE_V(stag_state) |
FW_RI_TPTE_STAGTYPE_V(type) | FW_RI_TPTE_PDID_V(pdid));
- tpt.locread_to_qpid = cpu_to_be32(FW_RI_TPTE_PERM_V(perm) |
+ tpt->locread_to_qpid = cpu_to_be32(FW_RI_TPTE_PERM_V(perm) |
(bind_enabled ? FW_RI_TPTE_MWBINDEN_F : 0) |
FW_RI_TPTE_ADDRTYPE_V((zbva ? FW_RI_ZERO_BASED_TO :
FW_RI_VA_BASED_TO))|
FW_RI_TPTE_PS_V(page_size));
- tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
+ tpt->nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
FW_RI_TPTE_PBLADDR_V(PBL_OFF(rdev, pbl_addr)>>3));
- tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
- tpt.va_hi = cpu_to_be32((u32)(to >> 32));
- tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
- tpt.dca_mwbcnt_pstag = cpu_to_be32(0);
- tpt.len_hi = cpu_to_be32((u32)(len >> 32));
+ tpt->len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
+ tpt->va_hi = cpu_to_be32((u32)(to >> 32));
+ tpt->va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
+ tpt->dca_mwbcnt_pstag = cpu_to_be32(0);
+ tpt->len_hi = cpu_to_be32((u32)(len >> 32));
}
err = write_adapter_mem(rdev, stag_idx +
(rdev->lldi.vr->stag.start >> 5),
- sizeof(tpt), &tpt, skb, wr_waitp);
+ sizeof(*tpt), tpt, skb, wr_waitp);
if (reset_tpt_entry) {
c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
@@ -334,6 +339,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
rdev->stats.stag.cur -= 32;
mutex_unlock(&rdev->stats.lock);
}
+ kfree(tpt);
return err;
}
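
Moving the TPT entry from the stack to kmalloc() matters because the buffer is handed to a lower layer that may DMA-map it; with vmap'd stacks (CONFIG_VMAP_STACK) stack addresses are generally unsafe for DMA, while heap memory is fine. The shape of the fix, sketched with a stand-in struct:

#include <linux/slab.h>
#include <linux/types.h>

struct tpte { u32 words[8]; };	/* stand-in for fw_ri_tpte */

static int write_entry_sketch(void)
{
	struct tpte *tpt = kmalloc(sizeof(*tpt), GFP_KERNEL);

	if (!tpt)
		return -ENOMEM;
	/* ...fill *tpt and pass it to the adapter-write helper... */
	kfree(tpt);
	return 0;
}
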
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index eb9368be28c1..bbcac539777a 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -2737,15 +2737,11 @@ int c4iw_create_srq(struct ib_srq *ib_srq, struct ib_srq_init_attr *attrs,
if (CHELSIO_CHIP_VERSION(rhp->rdev.lldi.adapter_type) > CHELSIO_T6)
srq->flags = T4_SRQ_LIMIT_SUPPORT;
- ret = xa_insert_irq(&rhp->qps, srq->wq.qid, srq, GFP_KERNEL);
- if (ret)
- goto err_free_queue;
-
if (udata) {
srq_key_mm = kmalloc(sizeof(*srq_key_mm), GFP_KERNEL);
if (!srq_key_mm) {
ret = -ENOMEM;
- goto err_remove_handle;
+ goto err_free_queue;
}
srq_db_key_mm = kmalloc(sizeof(*srq_db_key_mm), GFP_KERNEL);
if (!srq_db_key_mm) {
@@ -2789,8 +2785,6 @@ err_free_srq_db_key_mm:
kfree(srq_db_key_mm);
err_free_srq_key_mm:
kfree(srq_key_mm);
-err_remove_handle:
- xa_erase_irq(&rhp->qps, srq->wq.qid);
err_free_queue:
free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
srq->wr_waitp);
@@ -2813,8 +2807,6 @@ void c4iw_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
rhp = srq->rhp;
pr_debug("%s id %d\n", __func__, srq->wq.qid);
-
- xa_erase_irq(&rhp->qps, srq->wq.qid);
ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
ibucontext);
free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 2395fd4233a7..c61b6022575e 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -65,6 +65,7 @@
#define SDMA_DESCQ_CNT 2048
#define SDMA_DESC_INTR 64
#define INVALID_TAIL 0xffff
+#define SDMA_PAD max_t(size_t, MAX_16B_PADDING, sizeof(u32))
static uint sdma_descq_cnt = SDMA_DESCQ_CNT;
module_param(sdma_descq_cnt, uint, S_IRUGO);
@@ -1296,7 +1297,7 @@ void sdma_clean(struct hfi1_devdata *dd, size_t num_engines)
struct sdma_engine *sde;
if (dd->sdma_pad_dma) {
- dma_free_coherent(&dd->pcidev->dev, 4,
+ dma_free_coherent(&dd->pcidev->dev, SDMA_PAD,
(void *)dd->sdma_pad_dma,
dd->sdma_pad_phys);
dd->sdma_pad_dma = NULL;
@@ -1491,7 +1492,7 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
}
/* Allocate memory for pad */
- dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, sizeof(u32),
+ dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, SDMA_PAD,
&dd->sdma_pad_phys, GFP_KERNEL);
if (!dd->sdma_pad_dma) {
dd_dev_err(dd, "failed to allocate SendDMA pad memory\n");
@@ -1526,8 +1527,11 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
}
ret = rhashtable_init(tmp_sdma_rht, &sdma_rht_params);
- if (ret < 0)
+ if (ret < 0) {
+ kfree(tmp_sdma_rht);
goto bail;
+ }
+
dd->sdma_rht = tmp_sdma_rht;
dd_dev_info(dd, "SDMA num_sdma: %u\n", dd->num_sdma);
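The hunks above fix a size mismatch: the pad buffer was allocated with sizeof(u32) (4 bytes) but is now used as the source of up to MAX_16B_PADDING trailing bytes, so both the allocation in sdma_init() and the free in sdma_clean() are sized by a single SDMA_PAD macro covering the larger of the two uses. A minimal sketch of the pairing, condensed from the hunks above and the verbs.c hunk further below:

    #define SDMA_PAD max_t(size_t, MAX_16B_PADDING, sizeof(u32))

    /* producer: one coherent buffer, sized for the largest consumer */
    dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, SDMA_PAD,
                                          &dd->sdma_pad_phys, GFP_KERNEL);

    /* consumer (verbs.c): trailing ICRC/LT/pad bytes come from it */
    if (extra_bytes)
        ret = sdma_txadd_daddr(sde->dd, &tx->txreq,
                               sde->dd->sdma_pad_phys, extra_bytes);

    /* teardown: same macro, so alloc and free can never disagree */
    dma_free_coherent(&dd->pcidev->dev, SDMA_PAD,
                      (void *)dd->sdma_pad_dma, dd->sdma_pad_phys);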
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
index b4dcc4d29f84..f21fca3617d5 100644
--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
@@ -2736,11 +2736,6 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
diff = cmp_psn(psn,
flow->flow_state.r_next_psn);
if (diff > 0) {
- if (!(qp->r_flags & RVT_R_RDMAR_SEQ))
- restart_tid_rdma_read_req(rcd,
- qp,
- wqe);
-
/* Drop the packet. */
goto s_unlock;
} else if (diff < 0) {
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 7bff0a1e713d..089e201d7550 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -147,9 +147,6 @@ static int pio_wait(struct rvt_qp *qp,
/* Length of buffer to create verbs txreq cache name */
#define TXREQ_NAME_LEN 24
-/* 16B trailing buffer */
-static const u8 trail_buf[MAX_16B_PADDING];
-
static uint wss_threshold = 80;
module_param(wss_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(wss_threshold, "Percentage (1-100) of LLC to use as a threshold for a cacheless copy");
@@ -820,8 +817,8 @@ static int build_verbs_tx_desc(
/* add icrc, lt byte, and padding to flit */
if (extra_bytes)
- ret = sdma_txadd_kvaddr(sde->dd, &tx->txreq,
- (void *)trail_buf, extra_bytes);
+ ret = sdma_txadd_daddr(sde->dd, &tx->txreq,
+ sde->dd->sdma_pad_phys, extra_bytes);
bail_txadd:
return ret;
@@ -1089,7 +1086,8 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
}
/* add icrc, lt byte, and padding to flit */
if (extra_bytes)
- seg_pio_copy_mid(pbuf, trail_buf, extra_bytes);
+ seg_pio_copy_mid(pbuf, ppd->dd->sdma_pad_dma,
+ extra_bytes);
seg_pio_copy_end(pbuf);
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 7a89d669f8bf..e82567fcdeb7 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -5389,9 +5389,9 @@ static void hns_roce_v2_free_eq(struct hns_roce_dev *hr_dev,
return;
}
- if (eq->buf_list)
- dma_free_coherent(hr_dev->dev, buf_chk_sz,
- eq->buf_list->buf, eq->buf_list->map);
+ dma_free_coherent(hr_dev->dev, buf_chk_sz, eq->buf_list->buf,
+ eq->buf_list->map);
+ kfree(eq->buf_list);
}
static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 8056930bbe2c..cd9ee1664a69 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -2773,6 +2773,10 @@ int i40iw_register_rdma_device(struct i40iw_device *iwdev)
return -ENOMEM;
iwibdev = iwdev->iwibdev;
rdma_set_device_sysfs_group(&iwibdev->ibdev, &i40iw_attr_group);
+ ret = ib_device_set_netdev(&iwibdev->ibdev, iwdev->netdev, 1);
+ if (ret)
+ goto error;
+
ret = ib_register_device(&iwibdev->ibdev, "i40iw%d");
if (ret)
goto error;
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 59022b744144..d609f4659afb 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -1298,29 +1298,6 @@ static int devx_handle_mkey_create(struct mlx5_ib_dev *dev,
return 0;
}
-static void devx_free_indirect_mkey(struct rcu_head *rcu)
-{
- kfree(container_of(rcu, struct devx_obj, devx_mr.rcu));
-}
-
-/* This function to delete from the radix tree needs to be called before
- * destroying the underlying mkey. Otherwise a race might occur in case that
- * other thread will get the same mkey before this one will be deleted,
- * in that case it will fail via inserting to the tree its own data.
- *
- * Note:
- * An error in the destroy is not expected unless there is some other indirect
- * mkey which points to this one. In a kernel cleanup flow it will be just
- * destroyed in the iterative destruction call. In a user flow, in case
- * the application didn't close in the expected order it's its own problem,
- * the mkey won't be part of the tree, in both cases the kernel is safe.
- */
-static void devx_cleanup_mkey(struct devx_obj *obj)
-{
- xa_erase(&obj->ib_dev->mdev->priv.mkey_table,
- mlx5_base_mkey(obj->devx_mr.mmkey.key));
-}
-
static void devx_cleanup_subscription(struct mlx5_ib_dev *dev,
struct devx_event_subscription *sub)
{
@@ -1362,8 +1339,16 @@ static int devx_obj_cleanup(struct ib_uobject *uobject,
int ret;
dev = mlx5_udata_to_mdev(&attrs->driver_udata);
- if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY)
- devx_cleanup_mkey(obj);
+ if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
+ /*
+ * The pagefault_single_data_segment() path issues commands
+ * against the mmkey, so we must wait for it to stop before
+ * freeing the mkey, as another allocation could get the same
+ * mkey number.
+ */
+ xa_erase(&obj->ib_dev->mdev->priv.mkey_table,
+ mlx5_base_mkey(obj->devx_mr.mmkey.key));
+ synchronize_srcu(&dev->mr_srcu);
+ }
if (obj->flags & DEVX_OBJ_FLAGS_DCT)
ret = mlx5_core_destroy_dct(obj->ib_dev->mdev, &obj->core_dct);
@@ -1382,12 +1367,6 @@ static int devx_obj_cleanup(struct ib_uobject *uobject,
devx_cleanup_subscription(dev, sub_entry);
mutex_unlock(&devx_event_table->event_xa_lock);
- if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
- call_srcu(&dev->mr_srcu, &obj->devx_mr.rcu,
- devx_free_indirect_mkey);
- return ret;
- }
-
kfree(obj);
return ret;
}
@@ -1491,26 +1470,21 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
&obj_id);
WARN_ON(obj->dinlen > MLX5_MAX_DESTROY_INBOX_SIZE_DW * sizeof(u32));
- if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
- err = devx_handle_mkey_indirect(obj, dev, cmd_in, cmd_out);
- if (err)
- goto obj_destroy;
- }
-
err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len);
if (err)
- goto err_copy;
+ goto obj_destroy;
if (opcode == MLX5_CMD_OP_CREATE_GENERAL_OBJECT)
obj_type = MLX5_GET(general_obj_in_cmd_hdr, cmd_in, obj_type);
-
obj->obj_id = get_enc_obj_id(opcode | obj_type << 16, obj_id);
+ if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
+ err = devx_handle_mkey_indirect(obj, dev, cmd_in, cmd_out);
+ if (err)
+ goto obj_destroy;
+ }
return 0;
-err_copy:
- if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY)
- devx_cleanup_mkey(obj);
obj_destroy:
if (obj->flags & DEVX_OBJ_FLAGS_DCT)
mlx5_core_destroy_dct(obj->ib_dev->mdev, &obj->core_dct);
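The cleanup above replaces the deferred call_srcu() free with a synchronous barrier: the mkey is first erased from the xarray so no new pagefault lookup can find it, synchronize_srcu() then drains any handler still inside its SRCU read-side critical section, and only afterwards is the object destroyed and freed. A minimal sketch of the pattern, with hypothetical obj/table/use() names:

    /* writer: unpublish, wait out readers, then tear down */
    xa_erase(&table, key);           /* no new lookups can find obj */
    synchronize_srcu(&srcu);         /* in-flight readers drain */
    kfree(obj);                      /* safe: key may now be reused */

    /* reader (page-fault path) */
    idx = srcu_read_lock(&srcu);
    obj = xa_load(&table, key);
    if (obj)
        use(obj);                    /* obj stays live until unlock */
    srcu_read_unlock(&srcu, idx);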
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 2ceaef3ea3fb..1a98ee2e01c4 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -606,7 +606,7 @@ struct mlx5_ib_mr {
struct mlx5_ib_dev *dev;
u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
struct mlx5_core_sig_ctx *sig;
- int live;
+ unsigned int live;
void *descs_alloc;
int access_flags; /* Needed for rereg MR */
@@ -639,7 +639,6 @@ struct mlx5_ib_mw {
struct mlx5_ib_devx_mr {
struct mlx5_core_mkey mmkey;
int ndescs;
- struct rcu_head rcu;
};
struct mlx5_ib_umr_context {
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 1eff031ef048..7019c12005f4 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -84,32 +84,6 @@ static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length)
length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1));
}
-static void update_odp_mr(struct mlx5_ib_mr *mr)
-{
- if (is_odp_mr(mr)) {
- /*
- * This barrier prevents the compiler from moving the
- * setting of umem->odp_data->private to point to our
- * MR, before reg_umr finished, to ensure that the MR
- * initialization have finished before starting to
- * handle invalidations.
- */
- smp_wmb();
- to_ib_umem_odp(mr->umem)->private = mr;
- /*
- * Make sure we will see the new
- * umem->odp_data->private value in the invalidation
- * routines, before we can get page faults on the
- * MR. Page faults can happen once we put the MR in
- * the tree, below this line. Without the barrier,
- * there can be a fault handling and an invalidation
- * before umem->odp_data->private == mr is visible to
- * the invalidation handler.
- */
- smp_wmb();
- }
-}
-
static void reg_mr_callback(int status, struct mlx5_async_work *context)
{
struct mlx5_ib_mr *mr =
@@ -1346,8 +1320,6 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mr->umem = umem;
set_mr_fields(dev, mr, npages, length, access_flags);
- update_odp_mr(mr);
-
if (use_umr) {
int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;
@@ -1363,10 +1335,12 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
}
}
- if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
- mr->live = 1;
+ if (is_odp_mr(mr)) {
+ to_ib_umem_odp(mr->umem)->private = mr;
atomic_set(&mr->num_pending_prefetch, 0);
}
+ if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
+ smp_store_release(&mr->live, 1);
return &mr->ibmr;
error:
@@ -1441,6 +1415,9 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
if (!mr->umem)
return -EINVAL;
+ if (is_odp_mr(mr))
+ return -EOPNOTSUPP;
+
if (flags & IB_MR_REREG_TRANS) {
addr = virt_addr;
len = length;
@@ -1486,8 +1463,6 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
}
mr->allocated_from_cache = 0;
- if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
- mr->live = 1;
} else {
/*
* Send a UMR WQE
@@ -1516,7 +1491,6 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
set_mr_fields(dev, mr, npages, len, access_flags);
- update_odp_mr(mr);
return 0;
err:
@@ -1607,15 +1581,16 @@ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
/* Prevent new page faults and
* prefetch requests from succeeding
*/
- mr->live = 0;
+ WRITE_ONCE(mr->live, 0);
+
+ /* Wait for all running page-fault handlers to finish. */
+ synchronize_srcu(&dev->mr_srcu);
/* dequeue pending prefetch requests for the mr */
if (atomic_read(&mr->num_pending_prefetch))
flush_workqueue(system_unbound_wq);
WARN_ON(atomic_read(&mr->num_pending_prefetch));
- /* Wait for all running page-fault handlers to finish. */
- synchronize_srcu(&dev->mr_srcu);
/* Destroy all page mappings */
if (!umem_odp->is_implicit_odp)
mlx5_ib_invalidate_range(umem_odp,
@@ -1987,14 +1962,25 @@ free:
int mlx5_ib_dealloc_mw(struct ib_mw *mw)
{
+ struct mlx5_ib_dev *dev = to_mdev(mw->device);
struct mlx5_ib_mw *mmw = to_mmw(mw);
int err;
- err = mlx5_core_destroy_mkey((to_mdev(mw->device))->mdev,
- &mmw->mmkey);
- if (!err)
- kfree(mmw);
- return err;
+ if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
+ xa_erase_irq(&dev->mdev->priv.mkey_table,
+ mlx5_base_mkey(mmw->mmkey.key));
+ /*
+ * pagefault_single_data_segment() may be accessing mmw under
+ * SRCU if the user bound an ODP MR to this MW.
+ */
+ synchronize_srcu(&dev->mr_srcu);
+ }
+
+ err = mlx5_core_destroy_mkey(dev->mdev, &mmw->mmkey);
+ if (err)
+ return err;
+ kfree(mmw);
+ return 0;
}
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
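Throughout mr.c the live field becomes an ordered publication flag rather than a plain int: every store that publishes an initialized MR uses smp_store_release(), and the fault path in odp.c pairs it with smp_load_acquire(), so a reader that sees live == 1 is guaranteed to also see the setup done before the store. The shape of the pairing, reduced to its essentials:

    /* publisher: finish all setup, then release-store the flag */
    to_ib_umem_odp(mr->umem)->private = mr;
    atomic_set(&mr->num_pending_prefetch, 0);
    smp_store_release(&mr->live, 1);

    /* consumer: acquire-load before trusting any MR field */
    if (!smp_load_acquire(&mr->live))
        return -EFAULT;       /* dead, or not yet fully published */

Teardown runs the sequence in reverse: WRITE_ONCE(mr->live, 0) followed by synchronize_srcu() before any state is destroyed.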
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 2e9b43061797..3f9478d19376 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -178,6 +178,29 @@ void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
return;
}
+ /*
+ * The locking here is pretty subtle. Ideally the implicit children
+ * list would be protected by the umem_mutex, however that is not
+ * possible. Instead this uses a weaker update-then-lock pattern:
+ *
+ * srcu_read_lock()
+ * <change children list>
+ * mutex_lock(umem_mutex)
+ * mlx5_ib_update_xlt()
+ * mutex_unlock(umem_mutex)
+ * destroy lkey
+ *
+ * i.e. any change to the children list must be followed by the locked
+ * update_xlt before destroying.
+ *
+ * The umem_mutex provides the acquire/release semantic needed to make
+ * the children list visible to a racing thread. While SRCU is not
+ * technically required, using it gives consistent use of the SRCU
+ * locking around the children list.
+ */
+ lockdep_assert_held(&to_ib_umem_odp(mr->umem)->umem_mutex);
+ lockdep_assert_held(&mr->dev->mr_srcu);
+
odp = odp_lookup(offset * MLX5_IMR_MTT_SIZE,
nentries * MLX5_IMR_MTT_SIZE, mr);
@@ -202,15 +225,22 @@ static void mr_leaf_free_action(struct work_struct *work)
struct ib_umem_odp *odp = container_of(work, struct ib_umem_odp, work);
int idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT;
struct mlx5_ib_mr *mr = odp->private, *imr = mr->parent;
+ struct ib_umem_odp *odp_imr = to_ib_umem_odp(imr->umem);
+ int srcu_key;
mr->parent = NULL;
synchronize_srcu(&mr->dev->mr_srcu);
- ib_umem_odp_release(odp);
- if (imr->live)
+ if (smp_load_acquire(&imr->live)) {
+ srcu_key = srcu_read_lock(&mr->dev->mr_srcu);
+ mutex_lock(&odp_imr->umem_mutex);
mlx5_ib_update_xlt(imr, idx, 1, 0,
MLX5_IB_UPD_XLT_INDIRECT |
MLX5_IB_UPD_XLT_ATOMIC);
+ mutex_unlock(&odp_imr->umem_mutex);
+ srcu_read_unlock(&mr->dev->mr_srcu, srcu_key);
+ }
+ ib_umem_odp_release(odp);
mlx5_mr_cache_free(mr->dev, mr);
if (atomic_dec_and_test(&imr->num_leaf_free))
@@ -278,7 +308,6 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
idx - blk_start_idx + 1, 0,
MLX5_IB_UPD_XLT_ZAP |
MLX5_IB_UPD_XLT_ATOMIC);
- mutex_unlock(&umem_odp->umem_mutex);
/*
* We are now sure that the device will not access the
* memory. We can safely unmap it, and mark it as dirty if
@@ -289,10 +318,12 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
if (unlikely(!umem_odp->npages && mr->parent &&
!umem_odp->dying)) {
- WRITE_ONCE(umem_odp->dying, 1);
+ WRITE_ONCE(mr->live, 0);
+ umem_odp->dying = 1;
atomic_inc(&mr->parent->num_leaf_free);
schedule_work(&umem_odp->work);
}
+ mutex_unlock(&umem_odp->umem_mutex);
}
void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
@@ -429,8 +460,6 @@ static struct mlx5_ib_mr *implicit_mr_alloc(struct ib_pd *pd,
mr->ibmr.lkey = mr->mmkey.key;
mr->ibmr.rkey = mr->mmkey.key;
- mr->live = 1;
-
mlx5_ib_dbg(dev, "key %x dev %p mr %p\n",
mr->mmkey.key, dev->mdev, mr);
@@ -484,6 +513,8 @@ next_mr:
mtt->parent = mr;
INIT_WORK(&odp->work, mr_leaf_free_action);
+ smp_store_release(&mtt->live, 1);
+
if (!nentries)
start_idx = addr >> MLX5_IMR_MTT_SHIFT;
nentries++;
@@ -536,6 +567,7 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
init_waitqueue_head(&imr->q_leaf_free);
atomic_set(&imr->num_leaf_free, 0);
atomic_set(&imr->num_pending_prefetch, 0);
+ smp_store_release(&imr->live, 1);
return imr;
}
@@ -555,15 +587,19 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
if (mr->parent != imr)
continue;
+ mutex_lock(&umem_odp->umem_mutex);
ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
ib_umem_end(umem_odp));
- if (umem_odp->dying)
+ if (umem_odp->dying) {
+ mutex_unlock(&umem_odp->umem_mutex);
continue;
+ }
- WRITE_ONCE(umem_odp->dying, 1);
+ umem_odp->dying = 1;
atomic_inc(&imr->num_leaf_free);
schedule_work(&umem_odp->work);
+ mutex_unlock(&umem_odp->umem_mutex);
}
up_read(&per_mm->umem_rwsem);
@@ -773,7 +809,7 @@ next_mr:
switch (mmkey->type) {
case MLX5_MKEY_MR:
mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
- if (!mr->live || !mr->ibmr.pd) {
+ if (!smp_load_acquire(&mr->live) || !mr->ibmr.pd) {
mlx5_ib_dbg(dev, "got dead MR\n");
ret = -EFAULT;
goto srcu_unlock;
@@ -1641,12 +1677,12 @@ static bool num_pending_prefetch_inc(struct ib_pd *pd,
mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
- if (mr->ibmr.pd != pd) {
+ if (!smp_load_acquire(&mr->live)) {
ret = false;
break;
}
- if (!mr->live) {
+ if (mr->ibmr.pd != pd) {
ret = false;
break;
}
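The locking comment near the top of this file's diff compresses to a simple rule: a change to the implicit-MR children list made under srcu_read_lock() must be followed by a umem_mutex-protected mlx5_ib_update_xlt() before the child's lkey may be destroyed, exactly as mr_leaf_free_action() now does:

    srcu_key = srcu_read_lock(&mr->dev->mr_srcu);
    /* ... child removed from the children list ... */
    mutex_lock(&odp_imr->umem_mutex);
    mlx5_ib_update_xlt(imr, idx, 1, 0,
                       MLX5_IB_UPD_XLT_INDIRECT |
                       MLX5_IB_UPD_XLT_ATOMIC);
    mutex_unlock(&odp_imr->umem_mutex);
    srcu_read_unlock(&mr->dev->mr_srcu, srcu_key);
    /* only now may the child's lkey be destroyed */

Per the comment, it is the umem_mutex acquire/release that actually publishes the list change; SRCU is kept for consistency with the other children-list users.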
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 8937d72ddcf6..5fd071c05944 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -3249,10 +3249,12 @@ static int modify_raw_packet_qp_sq(
}
/* Only remove the old rate after new rate was set */
- if ((old_rl.rate &&
- !mlx5_rl_are_equal(&old_rl, &new_rl)) ||
- (new_state != MLX5_SQC_STATE_RDY))
+ if ((old_rl.rate && !mlx5_rl_are_equal(&old_rl, &new_rl)) ||
+ (new_state != MLX5_SQC_STATE_RDY)) {
mlx5_rl_remove_rate(dev, &old_rl);
+ if (new_state != MLX5_SQC_STATE_RDY)
+ memset(&new_rl, 0, sizeof(new_rl));
+ }
ibqp->rl = new_rl;
sq->state = new_state;
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index 5136b835e1ba..dc71b6e16a07 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -76,7 +76,7 @@ static void qedr_get_dev_fw_str(struct ib_device *ibdev, char *str)
struct qedr_dev *qedr = get_qedr_dev(ibdev);
u32 fw_ver = (u32)qedr->attr.fw_ver;
- snprintf(str, IB_FW_VERSION_NAME_MAX, "%d. %d. %d. %d",
+ snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
(fw_ver >> 24) & 0xFF, (fw_ver >> 16) & 0xFF,
(fw_ver >> 8) & 0xFF, fw_ver & 0xFF);
}
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
index 6cac0c88cf39..36cdfbdbd325 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
@@ -230,8 +230,6 @@ static void pvrdma_free_srq(struct pvrdma_dev *dev, struct pvrdma_srq *srq)
pvrdma_page_dir_cleanup(dev, &srq->pdir);
- kfree(srq);
-
atomic_dec(&dev->num_srqs);
}
diff --git a/drivers/infiniband/sw/siw/siw_qp.c b/drivers/infiniband/sw/siw/siw_qp.c
index 430314c8abd9..b4317480cee7 100644
--- a/drivers/infiniband/sw/siw/siw_qp.c
+++ b/drivers/infiniband/sw/siw/siw_qp.c
@@ -182,12 +182,19 @@ void siw_qp_llp_close(struct siw_qp *qp)
*/
void siw_qp_llp_write_space(struct sock *sk)
{
- struct siw_cep *cep = sk_to_cep(sk);
+ struct siw_cep *cep;
- cep->sk_write_space(sk);
+ read_lock(&sk->sk_callback_lock);
+
+ cep = sk_to_cep(sk);
+ if (cep) {
+ cep->sk_write_space(sk);
- if (!test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
- (void)siw_sq_start(cep->qp);
+ if (!test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
+ (void)siw_sq_start(cep->qp);
+ }
+
+ read_unlock(&sk->sk_callback_lock);
}
static int siw_qp_readq_init(struct siw_qp *qp, int irq_size, int orq_size)
@@ -1305,6 +1312,7 @@ int siw_qp_add(struct siw_device *sdev, struct siw_qp *qp)
void siw_free_qp(struct kref *ref)
{
struct siw_qp *found, *qp = container_of(ref, struct siw_qp, ref);
+ struct siw_base_qp *siw_base_qp = to_siw_base_qp(qp->ib_qp);
struct siw_device *sdev = qp->sdev;
unsigned long flags;
@@ -1327,4 +1335,5 @@ void siw_free_qp(struct kref *ref)
atomic_dec(&sdev->num_qp);
siw_dbg_qp(qp, "free QP\n");
kfree_rcu(qp, rcu);
+ kfree(siw_base_qp);
}
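Moving kfree(siw_base_qp) from siw_destroy_qp() into siw_free_qp() ties the containing allocation's lifetime to the QP's kref: destroy merely drops a reference, and the memory is released only when the last siw_qp_put() fires the release callback. The canonical kref shape, sketched with a hypothetical obj type:

    struct obj {
        struct kref ref;
        /* payload */
    };

    static void obj_release(struct kref *ref)
    {
        struct obj *o = container_of(ref, struct obj, ref);

        kfree(o);                        /* freed here and nowhere else */
    }

    static void obj_put(struct obj *o)
    {
        kref_put(&o->ref, obj_release);  /* last put triggers release */
    }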
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index 869e02b69a01..b18a677832e1 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -604,7 +604,6 @@ out:
int siw_destroy_qp(struct ib_qp *base_qp, struct ib_udata *udata)
{
struct siw_qp *qp = to_siw_qp(base_qp);
- struct siw_base_qp *siw_base_qp = to_siw_base_qp(base_qp);
struct siw_ucontext *uctx =
rdma_udata_to_drv_context(udata, struct siw_ucontext,
base_ucontext);
@@ -641,7 +640,6 @@ int siw_destroy_qp(struct ib_qp *base_qp, struct ib_udata *udata)
qp->scq = qp->rcq = NULL;
siw_qp_put(qp);
- kfree(siw_base_qp);
return 0;
}
diff --git a/drivers/input/misc/da9063_onkey.c b/drivers/input/misc/da9063_onkey.c
index dace8577fa43..79851923ee57 100644
--- a/drivers/input/misc/da9063_onkey.c
+++ b/drivers/input/misc/da9063_onkey.c
@@ -232,10 +232,7 @@ static int da9063_onkey_probe(struct platform_device *pdev)
onkey->input->phys = onkey->phys;
onkey->input->dev.parent = &pdev->dev;
- if (onkey->key_power)
- input_set_capability(onkey->input, EV_KEY, KEY_POWER);
-
- input_set_capability(onkey->input, EV_KEY, KEY_SLEEP);
+ input_set_capability(onkey->input, EV_KEY, KEY_POWER);
INIT_DELAYED_WORK(&onkey->work, da9063_poll_on);
diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c
index 97e3639e99d0..08520b3a18b8 100644
--- a/drivers/input/misc/soc_button_array.c
+++ b/drivers/input/misc/soc_button_array.c
@@ -92,11 +92,18 @@ soc_button_device_create(struct platform_device *pdev,
continue;
gpio = soc_button_lookup_gpio(&pdev->dev, info->acpi_index);
- if (gpio < 0 && gpio != -ENOENT) {
- error = gpio;
- goto err_free_mem;
- } else if (!gpio_is_valid(gpio)) {
- /* Skip GPIO if not present */
+ if (!gpio_is_valid(gpio)) {
+ /*
+ * Skip the GPIO if it is not present. Note we deliberately
+ * ignore -EPROBE_DEFER errors here. On some devices Intel
+ * uses so-called virtual GPIOs, which are not GPIOs at all
+ * but a way for AML code to check some random status bits
+ * without needing a custom OpRegion. In some cases the
+ * resource table we parse points to such a virtual GPIO;
+ * since these are not real GPIOs we have no driver for them,
+ * so they will never show up, and we therefore ignore
+ * -EPROBE_DEFER for them.
+ */
continue;
}
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 04fe43440a3c..2d8434b7b623 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1827,31 +1827,6 @@ static int elantech_create_smbus(struct psmouse *psmouse,
leave_breadcrumbs);
}
-static bool elantech_use_host_notify(struct psmouse *psmouse,
- struct elantech_device_info *info)
-{
- if (ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version))
- return true;
-
- switch (info->bus) {
- case ETP_BUS_PS2_ONLY:
- /* expected case */
- break;
- case ETP_BUS_SMB_HST_NTFY_ONLY:
- case ETP_BUS_PS2_SMB_HST_NTFY:
- /* SMbus implementation is stable since 2018 */
- if (dmi_get_bios_year() >= 2018)
- return true;
- /* fall through */
- default:
- psmouse_dbg(psmouse,
- "Ignoring SMBus bus provider %d\n", info->bus);
- break;
- }
-
- return false;
-}
-
/**
* elantech_setup_smbus - called once the PS/2 devices are enumerated
* and decides to instantiate a SMBus InterTouch device.
@@ -1871,7 +1846,7 @@ static int elantech_setup_smbus(struct psmouse *psmouse,
* i2c_blacklist_pnp_ids.
* Old ICs are up to the user to decide.
*/
- if (!elantech_use_host_notify(psmouse, info) ||
+ if (!ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version) ||
psmouse_matches_pnp_id(psmouse, i2c_blacklist_pnp_ids))
return -ENXIO;
}
@@ -1891,6 +1866,34 @@ static int elantech_setup_smbus(struct psmouse *psmouse,
return 0;
}
+static bool elantech_use_host_notify(struct psmouse *psmouse,
+ struct elantech_device_info *info)
+{
+ if (ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version))
+ return true;
+
+ switch (info->bus) {
+ case ETP_BUS_PS2_ONLY:
+ /* expected case */
+ break;
+ case ETP_BUS_SMB_ALERT_ONLY:
+ /* fall-through */
+ case ETP_BUS_PS2_SMB_ALERT:
+ psmouse_dbg(psmouse, "Ignoring SMBus provider through alert protocol.\n");
+ break;
+ case ETP_BUS_SMB_HST_NTFY_ONLY:
+ /* fall-through */
+ case ETP_BUS_PS2_SMB_HST_NTFY:
+ return true;
+ default:
+ psmouse_dbg(psmouse,
+ "Ignoring SMBus bus provider %d.\n",
+ info->bus);
+ }
+
+ return false;
+}
+
int elantech_init_smbus(struct psmouse *psmouse)
{
struct elantech_device_info info;
diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
index 772493b1f665..190b9974526b 100644
--- a/drivers/input/rmi4/rmi_driver.c
+++ b/drivers/input/rmi4/rmi_driver.c
@@ -146,7 +146,7 @@ static int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
}
mutex_lock(&data->irq_mutex);
- bitmap_and(data->irq_status, data->irq_status, data->current_irq_mask,
+ bitmap_and(data->irq_status, data->irq_status, data->fn_irq_bits,
data->irq_count);
/*
* At this point, irq_status has all bits that are set in the
@@ -385,6 +385,8 @@ static int rmi_driver_set_irq_bits(struct rmi_device *rmi_dev,
bitmap_copy(data->current_irq_mask, data->new_irq_mask,
data->num_of_irq_regs);
+ bitmap_or(data->fn_irq_bits, data->fn_irq_bits, mask, data->irq_count);
+
error_unlock:
mutex_unlock(&data->irq_mutex);
return error;
@@ -398,6 +400,8 @@ static int rmi_driver_clear_irq_bits(struct rmi_device *rmi_dev,
struct device *dev = &rmi_dev->dev;
mutex_lock(&data->irq_mutex);
+ bitmap_andnot(data->fn_irq_bits,
+ data->fn_irq_bits, mask, data->irq_count);
bitmap_andnot(data->new_irq_mask,
data->current_irq_mask, mask, data->irq_count);
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index 5178ea8b5f30..fb43aa708660 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -53,6 +53,7 @@ struct goodix_ts_data {
const char *cfg_name;
struct completion firmware_loading_complete;
unsigned long irq_flags;
+ unsigned int contact_size;
};
#define GOODIX_GPIO_INT_NAME "irq"
@@ -62,6 +63,7 @@ struct goodix_ts_data {
#define GOODIX_MAX_WIDTH 4096
#define GOODIX_INT_TRIGGER 1
#define GOODIX_CONTACT_SIZE 8
+#define GOODIX_MAX_CONTACT_SIZE 9
#define GOODIX_MAX_CONTACTS 10
#define GOODIX_CONFIG_MAX_LENGTH 240
@@ -144,6 +146,19 @@ static const struct dmi_system_id rotated_screen[] = {
{}
};
+static const struct dmi_system_id nine_bytes_report[] = {
+#if defined(CONFIG_DMI) && defined(CONFIG_X86)
+ {
+ .ident = "Lenovo YogaBook",
+ /* YB1-X91L/F and YB1-X90L/F */
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X9")
+ }
+ },
+#endif
+ {}
+};
+
/**
* goodix_i2c_read - read data from a register of the i2c slave device.
*
@@ -249,7 +264,7 @@ static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data)
max_timeout = jiffies + msecs_to_jiffies(GOODIX_BUFFER_STATUS_TIMEOUT);
do {
error = goodix_i2c_read(ts->client, GOODIX_READ_COOR_ADDR,
- data, GOODIX_CONTACT_SIZE + 1);
+ data, ts->contact_size + 1);
if (error) {
dev_err(&ts->client->dev, "I2C transfer error: %d\n",
error);
@@ -262,12 +277,12 @@ static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data)
return -EPROTO;
if (touch_num > 1) {
- data += 1 + GOODIX_CONTACT_SIZE;
+ data += 1 + ts->contact_size;
error = goodix_i2c_read(ts->client,
GOODIX_READ_COOR_ADDR +
- 1 + GOODIX_CONTACT_SIZE,
+ 1 + ts->contact_size,
data,
- GOODIX_CONTACT_SIZE *
+ ts->contact_size *
(touch_num - 1));
if (error)
return error;
@@ -286,7 +301,7 @@ static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data)
return 0;
}
-static void goodix_ts_report_touch(struct goodix_ts_data *ts, u8 *coor_data)
+static void goodix_ts_report_touch_8b(struct goodix_ts_data *ts, u8 *coor_data)
{
int id = coor_data[0] & 0x0F;
int input_x = get_unaligned_le16(&coor_data[1]);
@@ -301,6 +316,21 @@ static void goodix_ts_report_touch(struct goodix_ts_data *ts, u8 *coor_data)
input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, input_w);
}
+static void goodix_ts_report_touch_9b(struct goodix_ts_data *ts, u8 *coor_data)
+{
+ int id = coor_data[1] & 0x0F;
+ int input_x = get_unaligned_le16(&coor_data[3]);
+ int input_y = get_unaligned_le16(&coor_data[5]);
+ int input_w = get_unaligned_le16(&coor_data[7]);
+
+ input_mt_slot(ts->input_dev, id);
+ input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER, true);
+ touchscreen_report_pos(ts->input_dev, &ts->prop,
+ input_x, input_y, true);
+ input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, input_w);
+ input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, input_w);
+}
+
/**
* goodix_process_events - Process incoming events
*
@@ -311,7 +341,7 @@ static void goodix_ts_report_touch(struct goodix_ts_data *ts, u8 *coor_data)
*/
static void goodix_process_events(struct goodix_ts_data *ts)
{
- u8 point_data[1 + GOODIX_CONTACT_SIZE * GOODIX_MAX_CONTACTS];
+ u8 point_data[1 + GOODIX_MAX_CONTACT_SIZE * GOODIX_MAX_CONTACTS];
int touch_num;
int i;
@@ -326,8 +356,12 @@ static void goodix_process_events(struct goodix_ts_data *ts)
input_report_key(ts->input_dev, KEY_LEFTMETA, point_data[0] & BIT(4));
for (i = 0; i < touch_num; i++)
- goodix_ts_report_touch(ts,
- &point_data[1 + GOODIX_CONTACT_SIZE * i]);
+ if (ts->contact_size == 9)
+ goodix_ts_report_touch_9b(ts,
+ &point_data[1 + ts->contact_size * i]);
+ else
+ goodix_ts_report_touch_8b(ts,
+ &point_data[1 + ts->contact_size * i]);
input_mt_sync_frame(ts->input_dev);
input_sync(ts->input_dev);
@@ -730,6 +764,13 @@ static int goodix_configure_dev(struct goodix_ts_data *ts)
"Applying '180 degrees rotated screen' quirk\n");
}
+ if (dmi_check_system(nine_bytes_report)) {
+ ts->contact_size = 9;
+
+ dev_dbg(&ts->client->dev,
+ "Non-standard 9-bytes report format quirk\n");
+ }
+
error = input_mt_init_slots(ts->input_dev, ts->max_touch_num,
INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
if (error) {
@@ -810,6 +851,7 @@ static int goodix_ts_probe(struct i2c_client *client,
ts->client = client;
i2c_set_clientdata(client, ts);
init_completion(&ts->firmware_loading_complete);
+ ts->contact_size = GOODIX_CONTACT_SIZE;
error = goodix_get_gpio_config(ts);
if (error)
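The two report parsers above differ only in where the contact fields sit. Laid out side by side (offsets inferred from the accessors; bytes the parser does not read are marked '-'):

    /* 8-byte contact: [0]=flags/id [1..2]=x [3..4]=y [5..6]=w [7]=-       */
    /* 9-byte contact: [0]=- [1]=flags/id [2]=- [3..4]=x [5..6]=y [7..8]=w */

All multi-byte fields are little-endian, read with get_unaligned_le16().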
diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c
index 34923399ece4..1139714e72e2 100644
--- a/drivers/input/touchscreen/st1232.c
+++ b/drivers/input/touchscreen/st1232.c
@@ -81,8 +81,10 @@ static int st1232_ts_read_data(struct st1232_ts_data *ts)
for (i = 0, y = 0; i < ts->chip_info->max_fingers; i++, y += 3) {
finger[i].is_valid = buf[i + y] >> 7;
if (finger[i].is_valid) {
- finger[i].x = ((buf[i + y] & 0x0070) << 4) | buf[i + 1];
- finger[i].y = ((buf[i + y] & 0x0007) << 8) | buf[i + 2];
+ finger[i].x = ((buf[i + y] & 0x0070) << 4) |
+ buf[i + y + 1];
+ finger[i].y = ((buf[i + y] & 0x0007) << 8) |
+ buf[i + y + 2];
/* st1232 includes a z-axis / touch strength */
if (ts->chip_info->have_z)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 2369b8af81f3..dd555078258c 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -583,7 +583,8 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
retry:
type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
- pasid = PPR_PASID(*(u64 *)&event[0]);
+ pasid = (event[0] & EVENT_DOMID_MASK_HI) |
+ (event[1] & EVENT_DOMID_MASK_LO);
flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
address = (u64)(((u64)event[3]) << 32) | event[2];
@@ -616,7 +617,7 @@ retry:
address, flags);
break;
case EVENT_TYPE_PAGE_TAB_ERR:
- dev_err(dev, "Event logged [PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x domain=0x%04x address=0x%llx flags=0x%04x]\n",
+ dev_err(dev, "Event logged [PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x pasid=0x%04x address=0x%llx flags=0x%04x]\n",
PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
pasid, address, flags);
break;
@@ -1463,6 +1464,7 @@ static void free_pagetable(struct protection_domain *domain)
* to 64 bits.
*/
static bool increase_address_space(struct protection_domain *domain,
+ unsigned long address,
gfp_t gfp)
{
unsigned long flags;
@@ -1471,8 +1473,8 @@ static bool increase_address_space(struct protection_domain *domain,
spin_lock_irqsave(&domain->lock, flags);
- if (WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL))
- /* address space already 64 bit large */
+ if (address <= PM_LEVEL_SIZE(domain->mode) ||
+ WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL))
goto out;
pte = (void *)get_zeroed_page(gfp);
@@ -1505,7 +1507,7 @@ static u64 *alloc_pte(struct protection_domain *domain,
BUG_ON(!is_power_of_2(page_size));
while (address > PM_LEVEL_SIZE(domain->mode))
- *updated = increase_address_space(domain, gfp) || *updated;
+ *updated = increase_address_space(domain, address, gfp) || *updated;
level = domain->mode - 1;
pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
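increase_address_space() now receives the faulting address and re-tests the growth condition after taking domain->lock, closing a race where several callers pass the unlocked check in alloc_pte() simultaneously and each grow the page-table mode by one level. The pattern, reduced to a sketch (grow_one_level() is a hypothetical stand-in for the real body):

    /* unlocked fast-path check, possibly stale */
    while (address > PM_LEVEL_SIZE(domain->mode))
        increase_address_space(domain, address, gfp);

    static bool increase_address_space(struct protection_domain *domain,
                                       unsigned long address, gfp_t gfp)
    {
        unsigned long flags;
        bool ret = false;

        spin_lock_irqsave(&domain->lock, flags);
        /* re-check under the lock: a racing caller may have grown it */
        if (address <= PM_LEVEL_SIZE(domain->mode) ||
            WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL))
            goto out;
        ret = grow_one_level(domain, gfp);
    out:
        spin_unlock_irqrestore(&domain->lock, flags);
        return ret;
    }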
diff --git a/drivers/iommu/amd_iommu_quirks.c b/drivers/iommu/amd_iommu_quirks.c
index c235f79b7a20..5120ce4fdce3 100644
--- a/drivers/iommu/amd_iommu_quirks.c
+++ b/drivers/iommu/amd_iommu_quirks.c
@@ -74,6 +74,19 @@ static const struct dmi_system_id ivrs_quirks[] __initconst = {
.driver_data = (void *)&ivrs_ioapic_quirks[DELL_LATITUDE_5495],
},
{
+ /*
+ * Acer Aspire A315-41 requires the very same workaround as
+ * Dell Latitude 5495
+ */
+ .callback = ivrs_ioapic_quirk_cb,
+ .ident = "Acer Aspire A315-41",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A315-41"),
+ },
+ .driver_data = (void *)&ivrs_ioapic_quirks[DELL_LATITUDE_5495],
+ },
+ {
.callback = ivrs_ioapic_quirk_cb,
.ident = "Lenovo ideapad 330S-15ARR",
.matches = {
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index c9c1612d52e0..17bd5a349119 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -130,8 +130,8 @@
#define EVENT_TYPE_INV_PPR_REQ 0x9
#define EVENT_DEVID_MASK 0xffff
#define EVENT_DEVID_SHIFT 0
-#define EVENT_DOMID_MASK 0xffff
-#define EVENT_DOMID_SHIFT 0
+#define EVENT_DOMID_MASK_LO 0xffff
+#define EVENT_DOMID_MASK_HI 0xf0000
#define EVENT_FLAGS_MASK 0xfff
#define EVENT_FLAGS_SHIFT 0x10
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index b18aac4c105e..7c503a6bc585 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -812,6 +812,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
return 0;
out_clear_smmu:
+ __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
smmu_domain->smmu = NULL;
out_unlock:
mutex_unlock(&smmu_domain->init_mutex);
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 3f974919d3bd..6db6d969e31c 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2794,7 +2794,7 @@ static int identity_mapping(struct device *dev)
struct device_domain_info *info;
info = dev->archdata.iommu;
- if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
+ if (info && info != DUMMY_DEVICE_DOMAIN_INFO && info != DEFER_DEVICE_DOMAIN_INFO)
return (info->domain == si_domain);
return 0;
@@ -3471,7 +3471,7 @@ static bool iommu_need_mapping(struct device *dev)
if (dev->coherent_dma_mask && dev->coherent_dma_mask < dma_mask)
dma_mask = dev->coherent_dma_mask;
- if (dma_mask >= dma_get_required_mask(dev))
+ if (dma_mask >= dma_direct_get_required_mask(dev))
return false;
/*
@@ -3775,6 +3775,13 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
return nelems;
}
+static u64 intel_get_required_mask(struct device *dev)
+{
+ if (!iommu_need_mapping(dev))
+ return dma_direct_get_required_mask(dev);
+ return DMA_BIT_MASK(32);
+}
+
static const struct dma_map_ops intel_dma_ops = {
.alloc = intel_alloc_coherent,
.free = intel_free_coherent,
@@ -3787,6 +3794,7 @@ static const struct dma_map_ops intel_dma_ops = {
.dma_supported = dma_direct_supported,
.mmap = dma_common_mmap,
.get_sgtable = dma_common_get_sgtable,
+ .get_required_mask = intel_get_required_mask,
};
static void
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 4c91359057c5..ca51036aa53c 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -166,6 +166,9 @@
#define ARM_MALI_LPAE_TTBR_READ_INNER BIT(2)
#define ARM_MALI_LPAE_TTBR_SHARE_OUTER BIT(4)
+#define ARM_MALI_LPAE_MEMATTR_IMP_DEF 0x88ULL
+#define ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC 0x8DULL
+
/* IOPTE accessors */
#define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))
@@ -1015,27 +1018,56 @@ arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
static struct io_pgtable *
arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
- struct io_pgtable *iop;
+ struct arm_lpae_io_pgtable *data;
- if (cfg->ias != 48 || cfg->oas > 40)
+ /* No quirks for Mali (hopefully) */
+ if (cfg->quirks)
+ return NULL;
+
+ if (cfg->ias > 48 || cfg->oas > 40)
return NULL;
cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
- iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
- if (iop) {
- u64 mair, ttbr;
- /* Copy values as union fields overlap */
- mair = cfg->arm_lpae_s1_cfg.mair[0];
- ttbr = cfg->arm_lpae_s1_cfg.ttbr[0];
+ data = arm_lpae_alloc_pgtable(cfg);
+ if (!data)
+ return NULL;
- cfg->arm_mali_lpae_cfg.memattr = mair;
- cfg->arm_mali_lpae_cfg.transtab = ttbr |
- ARM_MALI_LPAE_TTBR_READ_INNER |
- ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
+ /* Mali seems to need a full 4-level table regardless of IAS */
+ if (data->levels < ARM_LPAE_MAX_LEVELS) {
+ data->levels = ARM_LPAE_MAX_LEVELS;
+ data->pgd_size = sizeof(arm_lpae_iopte);
}
+ /*
+ * MEMATTR: Mali has no actual notion of a non-cacheable type, so the
+ * best we can do is mimic the out-of-tree driver and hope that the
+ * "implementation-defined caching policy" is good enough. Similarly,
+ * we'll use it for the sake of a valid attribute for our 'device'
+ * index, although callers should never request that in practice.
+ */
+ cfg->arm_mali_lpae_cfg.memattr =
+ (ARM_MALI_LPAE_MEMATTR_IMP_DEF
+ << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
+ (ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC
+ << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
+ (ARM_MALI_LPAE_MEMATTR_IMP_DEF
+ << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));
- return iop;
+ data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
+ if (!data->pgd)
+ goto out_free_data;
+
+ /* Ensure the empty pgd is visible before TRANSTAB can be written */
+ wmb();
+
+ cfg->arm_mali_lpae_cfg.transtab = virt_to_phys(data->pgd) |
+ ARM_MALI_LPAE_TTBR_READ_INNER |
+ ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
+ return &data->iop;
+
+out_free_data:
+ kfree(data);
+ return NULL;
}
struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 9da8309f7170..2639fc718117 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -1086,8 +1086,6 @@ static int ipmmu_probe(struct platform_device *pdev)
mmu->num_ctx = min(IPMMU_CTX_MAX, mmu->features->number_of_contexts);
- irq = platform_get_irq(pdev, 0);
-
/*
* Determine if this IPMMU instance is a root device by checking for
* the lack of has_cache_leaf_nodes flag or renesas,ipmmu-main property.
@@ -1106,10 +1104,9 @@ static int ipmmu_probe(struct platform_device *pdev)
/* Root devices have mandatory IRQs */
if (ipmmu_is_root(mmu)) {
- if (irq < 0) {
- dev_err(&pdev->dev, "no IRQ found\n");
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
return irq;
- }
ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0,
dev_name(&pdev->dev), mmu);
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 26290f310f90..4dcbf68dfda4 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -100,6 +100,7 @@ struct rk_iommu {
struct device *dev;
void __iomem **bases;
int num_mmu;
+ int num_irq;
struct clk_bulk_data *clocks;
int num_clocks;
bool reset_disabled;
@@ -1136,7 +1137,7 @@ static int rk_iommu_probe(struct platform_device *pdev)
struct rk_iommu *iommu;
struct resource *res;
int num_res = pdev->num_resources;
- int err, i, irq;
+ int err, i;
iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
if (!iommu)
@@ -1163,6 +1164,10 @@ static int rk_iommu_probe(struct platform_device *pdev)
if (iommu->num_mmu == 0)
return PTR_ERR(iommu->bases[0]);
+ iommu->num_irq = platform_irq_count(pdev);
+ if (iommu->num_irq < 0)
+ return iommu->num_irq;
+
iommu->reset_disabled = device_property_read_bool(dev,
"rockchip,disable-mmu-reset");
@@ -1219,8 +1224,9 @@ static int rk_iommu_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
- i = 0;
- while ((irq = platform_get_irq(pdev, i++)) != -ENXIO) {
+ for (i = 0; i < iommu->num_irq; i++) {
+ int irq = platform_get_irq(pdev, i);
+
if (irq < 0)
return irq;
@@ -1245,10 +1251,13 @@ err_unprepare_clocks:
static void rk_iommu_shutdown(struct platform_device *pdev)
{
struct rk_iommu *iommu = platform_get_drvdata(pdev);
- int i = 0, irq;
+ int i;
+
+ for (i = 0; i < iommu->num_irq; i++) {
+ int irq = platform_get_irq(pdev, i);
- while ((irq = platform_get_irq(pdev, i++)) != -ENXIO)
devm_free_irq(iommu->dev, irq, iommu);
+ }
pm_runtime_force_suspend(&pdev->dev);
}
diff --git a/drivers/irqchip/irq-al-fic.c b/drivers/irqchip/irq-al-fic.c
index 1a57cee3efab..0b0a73739756 100644
--- a/drivers/irqchip/irq-al-fic.c
+++ b/drivers/irqchip/irq-al-fic.c
@@ -15,6 +15,7 @@
/* FIC Registers */
#define AL_FIC_CAUSE 0x00
+#define AL_FIC_SET_CAUSE 0x08
#define AL_FIC_MASK 0x10
#define AL_FIC_CONTROL 0x28
@@ -126,6 +127,16 @@ static void al_fic_irq_handler(struct irq_desc *desc)
chained_irq_exit(irqchip, desc);
}
+static int al_fic_irq_retrigger(struct irq_data *data)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
+ struct al_fic *fic = gc->private;
+
+ writel_relaxed(BIT(data->hwirq), fic->base + AL_FIC_SET_CAUSE);
+
+ return 1;
+}
+
static int al_fic_register(struct device_node *node,
struct al_fic *fic)
{
@@ -159,6 +170,7 @@ static int al_fic_register(struct device_node *node,
gc->chip_types->chip.irq_unmask = irq_gc_mask_clr_bit;
gc->chip_types->chip.irq_ack = irq_gc_ack_clr_bit;
gc->chip_types->chip.irq_set_type = al_fic_irq_set_type;
+ gc->chip_types->chip.irq_retrigger = al_fic_irq_retrigger;
gc->chip_types->chip.flags = IRQCHIP_SKIP_SET_WAKE;
gc->private = fic;
diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c
index 6acad2ea0fb3..29333497ba10 100644
--- a/drivers/irqchip/irq-atmel-aic5.c
+++ b/drivers/irqchip/irq-atmel-aic5.c
@@ -313,6 +313,7 @@ static void __init sama5d3_aic_irq_fixup(void)
static const struct of_device_id aic5_irq_fixups[] __initconst = {
{ .compatible = "atmel,sama5d3", .data = sama5d3_aic_irq_fixup },
{ .compatible = "atmel,sama5d4", .data = sama5d3_aic_irq_fixup },
+ { .compatible = "microchip,sam9x60", .data = sama5d3_aic_irq_fixup },
{ /* sentinel */ },
};
@@ -390,3 +391,12 @@ static int __init sama5d4_aic5_of_init(struct device_node *node,
return aic5_of_init(node, parent, NR_SAMA5D4_IRQS);
}
IRQCHIP_DECLARE(sama5d4_aic5, "atmel,sama5d4-aic", sama5d4_aic5_of_init);
+
+#define NR_SAM9X60_IRQS 50
+
+static int __init sam9x60_aic5_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ return aic5_of_init(node, parent, NR_SAM9X60_IRQS);
+}
+IRQCHIP_DECLARE(sam9x60_aic5, "microchip,sam9x60-aic", sam9x60_aic5_of_init);
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 62e54f1a248b..787e8eec9a7f 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -175,6 +175,22 @@ static DEFINE_IDA(its_vpeid_ida);
#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
#define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K)
+static u16 get_its_list(struct its_vm *vm)
+{
+ struct its_node *its;
+ unsigned long its_list = 0;
+
+ list_for_each_entry(its, &its_nodes, entry) {
+ if (!its->is_v4)
+ continue;
+
+ if (vm->vlpi_count[its->list_nr])
+ __set_bit(its->list_nr, &its_list);
+ }
+
+ return (u16)its_list;
+}
+
static struct its_collection *dev_event_to_col(struct its_device *its_dev,
u32 event)
{
@@ -976,17 +992,15 @@ static void its_send_vmapp(struct its_node *its,
static void its_send_vmovp(struct its_vpe *vpe)
{
- struct its_cmd_desc desc;
+ struct its_cmd_desc desc = {};
struct its_node *its;
unsigned long flags;
int col_id = vpe->col_idx;
desc.its_vmovp_cmd.vpe = vpe;
- desc.its_vmovp_cmd.its_list = (u16)its_list_map;
if (!its_list_map) {
its = list_first_entry(&its_nodes, struct its_node, entry);
- desc.its_vmovp_cmd.seq_num = 0;
desc.its_vmovp_cmd.col = &its->collections[col_id];
its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
return;
@@ -1003,6 +1017,7 @@ static void its_send_vmovp(struct its_vpe *vpe)
raw_spin_lock_irqsave(&vmovp_lock, flags);
desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;
+ desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm);
/* Emit VMOVPs */
list_for_each_entry(its, &its_nodes, entry) {
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 422664ac5f53..1edc99335a94 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -59,7 +59,7 @@ static struct gic_chip_data gic_data __read_mostly;
static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
#define GIC_ID_NR (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer))
-#define GIC_LINE_NR max(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
+#define GIC_LINE_NR min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
#define GIC_ESPI_NR GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer)
/*
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index c72c036aea76..7d0a12fe2714 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -97,7 +97,7 @@ static inline void plic_irq_toggle(const struct cpumask *mask,
}
}
-static void plic_irq_enable(struct irq_data *d)
+static void plic_irq_unmask(struct irq_data *d)
{
unsigned int cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
cpu_online_mask);
@@ -106,7 +106,7 @@ static void plic_irq_enable(struct irq_data *d)
plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
}
-static void plic_irq_disable(struct irq_data *d)
+static void plic_irq_mask(struct irq_data *d)
{
plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
}
@@ -125,10 +125,8 @@ static int plic_set_affinity(struct irq_data *d,
if (cpu >= nr_cpu_ids)
return -EINVAL;
- if (!irqd_irq_disabled(d)) {
- plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
- plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
- }
+ plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
+ plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
irq_data_update_effective_affinity(d, cpumask_of(cpu));
@@ -136,14 +134,18 @@ static int plic_set_affinity(struct irq_data *d,
}
#endif
+static void plic_irq_eoi(struct irq_data *d)
+{
+ struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
+
+ writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
+}
+
static struct irq_chip plic_chip = {
.name = "SiFive PLIC",
- /*
- * There is no need to mask/unmask PLIC interrupts. They are "masked"
- * by reading claim and "unmasked" when writing it back.
- */
- .irq_enable = plic_irq_enable,
- .irq_disable = plic_irq_disable,
+ .irq_mask = plic_irq_mask,
+ .irq_unmask = plic_irq_unmask,
+ .irq_eoi = plic_irq_eoi,
#ifdef CONFIG_SMP
.irq_set_affinity = plic_set_affinity,
#endif
@@ -152,7 +154,7 @@ static struct irq_chip plic_chip = {
static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hwirq)
{
- irq_set_chip_and_handler(irq, &plic_chip, handle_simple_irq);
+ irq_set_chip_and_handler(irq, &plic_chip, handle_fasteoi_irq);
irq_set_chip_data(irq, NULL);
irq_set_noprobe(irq);
return 0;
@@ -188,7 +190,6 @@ static void plic_handle_irq(struct pt_regs *regs)
hwirq);
else
generic_handle_irq(irq);
- writel(hwirq, claim);
}
csr_set(sie, SIE_SEIE);
}
@@ -251,8 +252,8 @@ static int __init plic_init(struct device_node *node,
continue;
}
- /* skip context holes */
- if (parent.args[0] == -1)
+ /* skip contexts other than supervisor external interrupt */
+ if (parent.args[0] != IRQ_S_EXT)
continue;
hartid = plic_find_hart_id(parent.np);
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index c92b405b7646..ba8619524231 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -744,7 +744,7 @@ capi_poll(struct file *file, poll_table *wait)
poll_wait(file, &(cdev->recvwait), wait);
mask = EPOLLOUT | EPOLLWRNORM;
- if (!skb_queue_empty(&cdev->recvqueue))
+ if (!skb_queue_empty_lockless(&cdev->recvqueue))
mask |= EPOLLIN | EPOLLRDNORM;
return mask;
}
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.h b/drivers/isdn/hardware/mISDN/hfcsusb.h
index e4fa2a2824af..7e2bc5068019 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.h
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.h
@@ -173,8 +173,8 @@ symbolic(struct hfcusb_symbolic_list list[], const int num)
/*
- * List of all supported enpoints configiration sets, used to find the
- * best matching endpoint configuration within a devices' USB descriptor.
+ * List of all supported endpoint configuration sets, used to find the
+ * best matching endpoint configuration within a device's USB descriptor.
* We need at least 3 RX endpoints and 3 TX endpoints (either
* INT-in and ISO-out, or ISO-in and ISO-out);
* with 4 RX endpoints even E-Channel logging is possible.
diff --git a/drivers/isdn/hardware/mISDN/mISDNisar.c b/drivers/isdn/hardware/mISDN/mISDNisar.c
index 4a3e748a1c26..0303b23c2b02 100644
--- a/drivers/isdn/hardware/mISDN/mISDNisar.c
+++ b/drivers/isdn/hardware/mISDN/mISDNisar.c
@@ -27,7 +27,6 @@ MODULE_VERSION(ISAR_REV);
#define DEBUG_HW_FIRMWARE_FIFO 0x10000
-static const u8 faxmodulation_s[] = "3,24,48,72,73,74,96,97,98,121,122,145,146";
static const u8 faxmodulation[] = {3, 24, 48, 72, 73, 74, 96, 97, 98, 121,
122, 145, 146};
#define FAXMODCNT 13
diff --git a/drivers/macintosh/windfarm_cpufreq_clamp.c b/drivers/macintosh/windfarm_cpufreq_clamp.c
index 705c6200814b..7b726f00f183 100644
--- a/drivers/macintosh/windfarm_cpufreq_clamp.c
+++ b/drivers/macintosh/windfarm_cpufreq_clamp.c
@@ -18,7 +18,7 @@
static int clamped;
static struct wf_control *clamp_control;
-static struct dev_pm_qos_request qos_req;
+static struct freq_qos_request qos_req;
static unsigned int min_freq, max_freq;
static int clamp_set(struct wf_control *ct, s32 value)
@@ -35,7 +35,7 @@ static int clamp_set(struct wf_control *ct, s32 value)
}
clamped = value;
- return dev_pm_qos_update_request(&qos_req, freq);
+ return freq_qos_update_request(&qos_req, freq);
}
static int clamp_get(struct wf_control *ct, s32 *value)
@@ -77,38 +77,44 @@ static int __init wf_cpufreq_clamp_init(void)
min_freq = policy->cpuinfo.min_freq;
max_freq = policy->cpuinfo.max_freq;
+
+ ret = freq_qos_add_request(&policy->constraints, &qos_req, FREQ_QOS_MAX,
+ max_freq);
+
cpufreq_cpu_put(policy);
+ if (ret < 0) {
+ pr_err("%s: Failed to add freq constraint (%d)\n", __func__,
+ ret);
+ return ret;
+ }
+
dev = get_cpu_device(0);
if (unlikely(!dev)) {
pr_warn("%s: No cpu device for cpu0\n", __func__);
- return -ENODEV;
+ ret = -ENODEV;
+ goto fail;
}
clamp = kmalloc(sizeof(struct wf_control), GFP_KERNEL);
- if (clamp == NULL)
- return -ENOMEM;
-
- ret = dev_pm_qos_add_request(dev, &qos_req, DEV_PM_QOS_MAX_FREQUENCY,
- max_freq);
- if (ret < 0) {
- pr_err("%s: Failed to add freq constraint (%d)\n", __func__,
- ret);
- goto free;
+ if (clamp == NULL) {
+ ret = -ENOMEM;
+ goto fail;
}
clamp->ops = &clamp_ops;
clamp->name = "cpufreq-clamp";
ret = wf_register_control(clamp);
if (ret)
- goto fail;
+ goto free;
+
clamp_control = clamp;
return 0;
- fail:
- dev_pm_qos_remove_request(&qos_req);
free:
kfree(clamp);
+ fail:
+ freq_qos_remove_request(&qos_req);
return ret;
}
@@ -116,7 +122,7 @@ static void __exit wf_cpufreq_clamp_exit(void)
{
if (clamp_control) {
wf_unregister_control(clamp_control);
- dev_pm_qos_remove_request(&qos_req);
+ freq_qos_remove_request(&qos_req);
}
}
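The windfarm conversion above is a representative dev_pm_qos to freq_qos migration: the request now attaches to a cpufreq policy's constraints instead of a device, and once added it must be removed on every failure path. A minimal lifecycle sketch using the same API calls as the diff (clamped_max_khz is a placeholder value):

    struct cpufreq_policy *policy = cpufreq_cpu_get(0);
    if (!policy)
        return -ENODEV;

    ret = freq_qos_add_request(&policy->constraints, &qos_req,
                               FREQ_QOS_MAX, policy->cpuinfo.max_freq);
    cpufreq_cpu_put(policy);
    if (ret < 0)
        return ret;            /* nothing to remove yet */

    freq_qos_update_request(&qos_req, clamped_max_khz);   /* clamp */
    freq_qos_remove_request(&qos_req);                    /* teardown */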
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index d249cf8ac277..8346e6d1816c 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -542,7 +542,7 @@ static void wake_migration_worker(struct cache *cache)
static struct dm_bio_prison_cell_v2 *alloc_prison_cell(struct cache *cache)
{
- return dm_bio_prison_alloc_cell_v2(cache->prison, GFP_NOWAIT);
+ return dm_bio_prison_alloc_cell_v2(cache->prison, GFP_NOIO);
}
static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell_v2 *cell)
@@ -554,9 +554,7 @@ static struct dm_cache_migration *alloc_migration(struct cache *cache)
{
struct dm_cache_migration *mg;
- mg = mempool_alloc(&cache->migration_pool, GFP_NOWAIT);
- if (!mg)
- return NULL;
+ mg = mempool_alloc(&cache->migration_pool, GFP_NOIO);
memset(mg, 0, sizeof(*mg));
@@ -664,10 +662,6 @@ static bool bio_detain_shared(struct cache *cache, dm_oblock_t oblock, struct bi
struct dm_bio_prison_cell_v2 *cell_prealloc, *cell;
cell_prealloc = alloc_prison_cell(cache); /* FIXME: allow wait if calling from worker */
- if (!cell_prealloc) {
- defer_bio(cache, bio);
- return false;
- }
build_key(oblock, end, &key);
r = dm_cell_get_v2(cache->prison, &key, lock_level(bio), bio, cell_prealloc, &cell);
@@ -1493,11 +1487,6 @@ static int mg_lock_writes(struct dm_cache_migration *mg)
struct dm_bio_prison_cell_v2 *prealloc;
prealloc = alloc_prison_cell(cache);
- if (!prealloc) {
- DMERR_LIMIT("%s: alloc_prison_cell failed", cache_device_name(cache));
- mg_complete(mg, false);
- return -ENOMEM;
- }
/*
* Prevent writes to the block, but allow reads to continue.
@@ -1535,11 +1524,6 @@ static int mg_start(struct cache *cache, struct policy_work *op, struct bio *bio
}
mg = alloc_migration(cache);
- if (!mg) {
- policy_complete_background_work(cache->policy, op, false);
- background_work_end(cache);
- return -ENOMEM;
- }
mg->op = op;
mg->overwrite_bio = bio;
@@ -1628,10 +1612,6 @@ static int invalidate_lock(struct dm_cache_migration *mg)
struct dm_bio_prison_cell_v2 *prealloc;
prealloc = alloc_prison_cell(cache);
- if (!prealloc) {
- invalidate_complete(mg, false);
- return -ENOMEM;
- }
build_key(mg->invalidate_oblock, oblock_succ(mg->invalidate_oblock), &key);
r = dm_cell_lock_v2(cache->prison, &key,
@@ -1669,10 +1649,6 @@ static int invalidate_start(struct cache *cache, dm_cblock_t cblock,
return -EPERM;
mg = alloc_migration(cache);
- if (!mg) {
- background_work_end(cache);
- return -ENOMEM;
- }
mg->overwrite_bio = bio;
mg->invalidate_cblock = cblock;
diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c
index cd6f9e9fc98e..4ca8f1977222 100644
--- a/drivers/md/dm-clone-target.c
+++ b/drivers/md/dm-clone-target.c
@@ -591,8 +591,8 @@ static struct hash_table_bucket *get_hash_table_bucket(struct clone *clone,
*
* NOTE: Must be called with the bucket lock held
*/
-struct dm_clone_region_hydration *__hash_find(struct hash_table_bucket *bucket,
- unsigned long region_nr)
+static struct dm_clone_region_hydration *__hash_find(struct hash_table_bucket *bucket,
+ unsigned long region_nr)
{
struct dm_clone_region_hydration *hd;
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index f150f5c5492b..4fb1a40e68a0 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -18,7 +18,6 @@
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>
-#include <linux/semaphore.h>
#include "dm.h"
@@ -107,8 +106,8 @@ struct dm_snapshot {
/* The on disk metadata handler */
struct dm_exception_store *store;
- /* Maximum number of in-flight COW jobs. */
- struct semaphore cow_count;
+ unsigned in_progress;
+ struct wait_queue_head in_progress_wait;
struct dm_kcopyd_client *kcopyd_client;
@@ -162,8 +161,8 @@ struct dm_snapshot {
*/
#define DEFAULT_COW_THRESHOLD 2048
-static int cow_threshold = DEFAULT_COW_THRESHOLD;
-module_param_named(snapshot_cow_threshold, cow_threshold, int, 0644);
+static unsigned cow_threshold = DEFAULT_COW_THRESHOLD;
+module_param_named(snapshot_cow_threshold, cow_threshold, uint, 0644);
MODULE_PARM_DESC(snapshot_cow_threshold, "Maximum number of chunks being copied on write");
DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
@@ -1327,7 +1326,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad_hash_tables;
}
- sema_init(&s->cow_count, (cow_threshold > 0) ? cow_threshold : INT_MAX);
+ init_waitqueue_head(&s->in_progress_wait);
s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
if (IS_ERR(s->kcopyd_client)) {
@@ -1509,9 +1508,56 @@ static void snapshot_dtr(struct dm_target *ti)
dm_put_device(ti, s->origin);
+ WARN_ON(s->in_progress);
+
kfree(s);
}
+static void account_start_copy(struct dm_snapshot *s)
+{
+ spin_lock(&s->in_progress_wait.lock);
+ s->in_progress++;
+ spin_unlock(&s->in_progress_wait.lock);
+}
+
+static void account_end_copy(struct dm_snapshot *s)
+{
+ spin_lock(&s->in_progress_wait.lock);
+ BUG_ON(!s->in_progress);
+ s->in_progress--;
+ if (likely(s->in_progress <= cow_threshold) &&
+ unlikely(waitqueue_active(&s->in_progress_wait)))
+ wake_up_locked(&s->in_progress_wait);
+ spin_unlock(&s->in_progress_wait.lock);
+}
+
+static bool wait_for_in_progress(struct dm_snapshot *s, bool unlock_origins)
+{
+ if (unlikely(s->in_progress > cow_threshold)) {
+ spin_lock(&s->in_progress_wait.lock);
+ if (likely(s->in_progress > cow_threshold)) {
+ /*
+ * NOTE: this throttle doesn't account for whether
+ * the caller is servicing an IO that will trigger a COW
+ * so excess throttling may result for chunks not required
+ * to be COW'd. But if cow_threshold was reached, extra
+ * throttling is unlikely to negatively impact performance.
+ */
+ DECLARE_WAITQUEUE(wait, current);
+ __add_wait_queue(&s->in_progress_wait, &wait);
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+ spin_unlock(&s->in_progress_wait.lock);
+ if (unlock_origins)
+ up_read(&_origins_lock);
+ io_schedule();
+ remove_wait_queue(&s->in_progress_wait, &wait);
+ return false;
+ }
+ spin_unlock(&s->in_progress_wait.lock);
+ }
+ return true;
+}
+
/*
* Flush a list of buffers.
*/
@@ -1527,7 +1573,7 @@ static void flush_bios(struct bio *bio)
}
}
-static int do_origin(struct dm_dev *origin, struct bio *bio);
+static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit);
/*
* Flush a list of buffers.
@@ -1540,7 +1586,7 @@ static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
while (bio) {
n = bio->bi_next;
bio->bi_next = NULL;
- r = do_origin(s->origin, bio);
+ r = do_origin(s->origin, bio, false);
if (r == DM_MAPIO_REMAPPED)
generic_make_request(bio);
bio = n;
@@ -1732,7 +1778,7 @@ static void copy_callback(int read_err, unsigned long write_err, void *context)
rb_link_node(&pe->out_of_order_node, parent, p);
rb_insert_color(&pe->out_of_order_node, &s->out_of_order_tree);
}
- up(&s->cow_count);
+ account_end_copy(s);
}
/*
@@ -1756,7 +1802,7 @@ static void start_copy(struct dm_snap_pending_exception *pe)
dest.count = src.count;
/* Hand over to kcopyd */
- down(&s->cow_count);
+ account_start_copy(s);
dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
}
@@ -1776,7 +1822,7 @@ static void start_full_bio(struct dm_snap_pending_exception *pe,
pe->full_bio = bio;
pe->full_bio_end_io = bio->bi_end_io;
- down(&s->cow_count);
+ account_start_copy(s);
callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
copy_callback, pe);
@@ -1866,7 +1912,7 @@ static void zero_callback(int read_err, unsigned long write_err, void *context)
struct bio *bio = context;
struct dm_snapshot *s = bio->bi_private;
- up(&s->cow_count);
+ account_end_copy(s);
bio->bi_status = write_err ? BLK_STS_IOERR : 0;
bio_endio(bio);
}
@@ -1880,7 +1926,7 @@ static void zero_exception(struct dm_snapshot *s, struct dm_exception *e,
dest.sector = bio->bi_iter.bi_sector;
dest.count = s->store->chunk_size;
- down(&s->cow_count);
+ account_start_copy(s);
WARN_ON_ONCE(bio->bi_private);
bio->bi_private = s;
dm_kcopyd_zero(s->kcopyd_client, 1, &dest, 0, zero_callback, bio);
@@ -1916,6 +1962,11 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
if (!s->valid)
return DM_MAPIO_KILL;
+ if (bio_data_dir(bio) == WRITE) {
+ while (unlikely(!wait_for_in_progress(s, false)))
+ ; /* wait_for_in_progress() has slept */
+ }
+
down_read(&s->lock);
dm_exception_table_lock(&lock);
@@ -2112,7 +2163,7 @@ redirect_to_origin:
if (bio_data_dir(bio) == WRITE) {
up_write(&s->lock);
- return do_origin(s->origin, bio);
+ return do_origin(s->origin, bio, false);
}
out_unlock:
@@ -2487,15 +2538,24 @@ next_snapshot:
/*
* Called on a write from the origin driver.
*/
-static int do_origin(struct dm_dev *origin, struct bio *bio)
+static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit)
{
struct origin *o;
int r = DM_MAPIO_REMAPPED;
+again:
down_read(&_origins_lock);
o = __lookup_origin(origin->bdev);
- if (o)
+ if (o) {
+ if (limit) {
+ struct dm_snapshot *s;
+ list_for_each_entry(s, &o->snapshots, list)
+ if (unlikely(!wait_for_in_progress(s, true)))
+ goto again;
+ }
+
r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
+ }
up_read(&_origins_lock);
return r;
@@ -2608,7 +2668,7 @@ static int origin_map(struct dm_target *ti, struct bio *bio)
dm_accept_partial_bio(bio, available_sectors);
/* Only tell snapshots if this is a write */
- return do_origin(o->dev, bio);
+ return do_origin(o->dev, bio, true);
}
/*
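
The dm-snap conversion above replaces a semaphore with an explicit in-flight counter guarded by the waitqueue's own spinlock: submitters sleep while the count exceeds cow_threshold, completions decrement and wake. The kernel version re-checks the counter under the lock and, on the origin path, drops _origins_lock before sleeping so it is not held across io_schedule(). A minimal userspace sketch of the same throttle, with a pthread mutex and condition variable standing in for the waitqueue (names are illustrative, not the kernel's):

#include <pthread.h>

static unsigned int in_progress;
static const unsigned int threshold = 2048;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t below_threshold = PTHREAD_COND_INITIALIZER;

static void account_start_copy(void)
{
	pthread_mutex_lock(&lock);
	in_progress++;
	pthread_mutex_unlock(&lock);
}

static void account_end_copy(void)
{
	pthread_mutex_lock(&lock);
	in_progress--;			/* caller guarantees it was nonzero */
	if (in_progress <= threshold)
		pthread_cond_broadcast(&below_threshold);
	pthread_mutex_unlock(&lock);
}

static void wait_for_in_progress(void)
{
	pthread_mutex_lock(&lock);
	while (in_progress > threshold)
		pthread_cond_wait(&below_threshold, &lock); /* sleeps, drops lock */
	pthread_mutex_unlock(&lock);
}
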
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index f61693e59684..1e772287b1c8 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -154,7 +154,7 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
} else {
pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n",
mdname(mddev));
- pr_err("md/raid0: please set raid.default_layout to 1 or 2\n");
+ pr_err("md/raid0: please set raid0.default_layout to 1 or 2\n");
err = -ENOTSUPP;
goto abort;
}
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c
index cfca3c70599b..21f90a887485 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.c
+++ b/drivers/media/usb/stkwebcam/stk-webcam.c
@@ -643,8 +643,7 @@ static int v4l_stk_release(struct file *fp)
dev->owner = NULL;
}
- if (is_present(dev))
- usb_autopm_put_interface(dev->interface);
+ usb_autopm_put_interface(dev->interface);
mutex_unlock(&dev->lock);
return v4l2_fh_release(fp);
}
diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c
index 32747425297d..64fff6abe60e 100644
--- a/drivers/memstick/host/jmb38x_ms.c
+++ b/drivers/memstick/host/jmb38x_ms.c
@@ -941,7 +941,7 @@ static int jmb38x_ms_probe(struct pci_dev *pdev,
if (!cnt) {
rc = -ENODEV;
pci_dev_busy = 1;
- goto err_out;
+ goto err_out_int;
}
jm = kzalloc(sizeof(struct jmb38x_ms)
diff --git a/drivers/mfd/mt6397-core.c b/drivers/mfd/mt6397-core.c
index 310dae26ddff..b2c325ead1c8 100644
--- a/drivers/mfd/mt6397-core.c
+++ b/drivers/mfd/mt6397-core.c
@@ -129,11 +129,27 @@ static int mt6397_irq_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(mt6397_pm_ops, mt6397_irq_suspend,
mt6397_irq_resume);
+struct chip_data {
+ u32 cid_addr;
+ u32 cid_shift;
+};
+
+static const struct chip_data mt6323_core = {
+ .cid_addr = MT6323_CID,
+ .cid_shift = 0,
+};
+
+static const struct chip_data mt6397_core = {
+ .cid_addr = MT6397_CID,
+ .cid_shift = 0,
+};
+
static int mt6397_probe(struct platform_device *pdev)
{
int ret;
unsigned int id;
struct mt6397_chip *pmic;
+ const struct chip_data *pmic_core;
pmic = devm_kzalloc(&pdev->dev, sizeof(*pmic), GFP_KERNEL);
if (!pmic)
@@ -149,28 +165,30 @@ static int mt6397_probe(struct platform_device *pdev)
if (!pmic->regmap)
return -ENODEV;
- platform_set_drvdata(pdev, pmic);
+ pmic_core = of_device_get_match_data(&pdev->dev);
+ if (!pmic_core)
+ return -ENODEV;
- ret = regmap_read(pmic->regmap, MT6397_CID, &id);
+ ret = regmap_read(pmic->regmap, pmic_core->cid_addr, &id);
if (ret) {
- dev_err(pmic->dev, "Failed to read chip id: %d\n", ret);
+ dev_err(&pdev->dev, "Failed to read chip id: %d\n", ret);
return ret;
}
+ pmic->chip_id = (id >> pmic_core->cid_shift) & 0xff;
+
+ platform_set_drvdata(pdev, pmic);
+
pmic->irq = platform_get_irq(pdev, 0);
if (pmic->irq <= 0)
return pmic->irq;
- switch (id & 0xff) {
- case MT6323_CHIP_ID:
- pmic->int_con[0] = MT6323_INT_CON0;
- pmic->int_con[1] = MT6323_INT_CON1;
- pmic->int_status[0] = MT6323_INT_STATUS0;
- pmic->int_status[1] = MT6323_INT_STATUS1;
- ret = mt6397_irq_init(pmic);
- if (ret)
- return ret;
+ ret = mt6397_irq_init(pmic);
+ if (ret)
+ return ret;
+ switch (pmic->chip_id) {
+ case MT6323_CHIP_ID:
ret = devm_mfd_add_devices(&pdev->dev, -1, mt6323_devs,
ARRAY_SIZE(mt6323_devs), NULL,
0, pmic->irq_domain);
@@ -178,21 +196,13 @@ static int mt6397_probe(struct platform_device *pdev)
case MT6391_CHIP_ID:
case MT6397_CHIP_ID:
- pmic->int_con[0] = MT6397_INT_CON0;
- pmic->int_con[1] = MT6397_INT_CON1;
- pmic->int_status[0] = MT6397_INT_STATUS0;
- pmic->int_status[1] = MT6397_INT_STATUS1;
- ret = mt6397_irq_init(pmic);
- if (ret)
- return ret;
-
ret = devm_mfd_add_devices(&pdev->dev, -1, mt6397_devs,
ARRAY_SIZE(mt6397_devs), NULL,
0, pmic->irq_domain);
break;
default:
- dev_err(&pdev->dev, "unsupported chip: %d\n", id);
+ dev_err(&pdev->dev, "unsupported chip: %d\n", pmic->chip_id);
return -ENODEV;
}
@@ -205,9 +215,15 @@ static int mt6397_probe(struct platform_device *pdev)
}
static const struct of_device_id mt6397_of_match[] = {
- { .compatible = "mediatek,mt6397" },
- { .compatible = "mediatek,mt6323" },
- { }
+ {
+ .compatible = "mediatek,mt6323",
+ .data = &mt6323_core,
+ }, {
+ .compatible = "mediatek,mt6397",
+ .data = &mt6397_core,
+ }, {
+ /* sentinel */
+ }
};
MODULE_DEVICE_TABLE(of, mt6397_of_match);
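
The mt6397 rework above is the standard OF match-data pattern: per-chip constants live behind of_device_id.data and probe() retrieves them with of_device_get_match_data(), failing with -ENODEV when no entry matches. A reduced standalone sketch of the lookup side (the register values here are made up; only the shape matches the driver):

#include <stdio.h>
#include <string.h>

struct chip_data {
	unsigned int cid_addr;
	unsigned int cid_shift;
};

struct of_match {
	const char *compatible;
	const struct chip_data *data;
};

static const struct chip_data mt6323 = { .cid_addr = 0x200, .cid_shift = 0 };
static const struct chip_data mt6397 = { .cid_addr = 0x100, .cid_shift = 0 };

static const struct of_match match_table[] = {
	{ "mediatek,mt6323", &mt6323 },
	{ "mediatek,mt6397", &mt6397 },
	{ }	/* sentinel */
};

static const struct chip_data *get_match_data(const char *compat)
{
	const struct of_match *m;

	for (m = match_table; m->compatible; m++)
		if (!strcmp(m->compatible, compat))
			return m->data;
	return NULL;	/* probe() treats this as -ENODEV */
}

int main(void)
{
	const struct chip_data *d = get_match_data("mediatek,mt6323");

	printf("cid_addr=%#x\n", d ? d->cid_addr : 0);
	return 0;
}
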
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 47ae84afac2e..1b1a794d639d 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -527,6 +527,7 @@ static int fastrpc_dma_buf_attach(struct dma_buf *dmabuf,
FASTRPC_PHYS(buffer->phys), buffer->size);
if (ret < 0) {
dev_err(buffer->dev, "failed to get scatterlist from DMA API\n");
+ kfree(a);
return -EINVAL;
}
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index 32e9b1aed2ca..0a2b99e1af45 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -218,13 +218,21 @@ static void mei_mkhi_fix(struct mei_cl_device *cldev)
{
int ret;
+ /* No need to enable the client if nothing is needed from it */
+ if (!cldev->bus->fw_f_fw_ver_supported &&
+ !cldev->bus->hbm_f_os_supported)
+ return;
+
ret = mei_cldev_enable(cldev);
if (ret)
return;
- ret = mei_fwver(cldev);
- if (ret < 0)
- dev_err(&cldev->dev, "FW version command failed %d\n", ret);
+ if (cldev->bus->fw_f_fw_ver_supported) {
+ ret = mei_fwver(cldev);
+ if (ret < 0)
+ dev_err(&cldev->dev, "FW version command failed %d\n",
+ ret);
+ }
if (cldev->bus->hbm_f_os_supported) {
ret = mei_osver(cldev);
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index 77f7dff7098d..c09f8bb49495 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -79,6 +79,9 @@
#define MEI_DEV_ID_CNP_H 0xA360 /* Cannon Point H */
#define MEI_DEV_ID_CNP_H_4 0xA364 /* Cannon Point H 4 (iTouch) */
+#define MEI_DEV_ID_CMP_LP 0x02e0 /* Comet Point LP */
+#define MEI_DEV_ID_CMP_LP_3 0x02e4 /* Comet Point LP 3 (iTouch) */
+
#define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */
#define MEI_DEV_ID_TGP_LP 0xA0E0 /* Tiger Lake Point LP */
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index abe1b1f4362f..c4f6991d3028 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -1355,6 +1355,8 @@ static bool mei_me_fw_type_sps(struct pci_dev *pdev)
#define MEI_CFG_FW_SPS \
.quirk_probe = mei_me_fw_type_sps
+#define MEI_CFG_FW_VER_SUPP \
+ .fw_ver_supported = 1
#define MEI_CFG_ICH_HFS \
.fw_status.count = 0
@@ -1392,31 +1394,41 @@ static const struct mei_cfg mei_me_ich10_cfg = {
MEI_CFG_ICH10_HFS,
};
-/* PCH devices */
-static const struct mei_cfg mei_me_pch_cfg = {
+/* PCH6 devices */
+static const struct mei_cfg mei_me_pch6_cfg = {
MEI_CFG_PCH_HFS,
};
+/* PCH7 devices */
+static const struct mei_cfg mei_me_pch7_cfg = {
+ MEI_CFG_PCH_HFS,
+ MEI_CFG_FW_VER_SUPP,
+};
+
/* PCH Cougar Point and Patsburg with quirk for Node Manager exclusion */
static const struct mei_cfg mei_me_pch_cpt_pbg_cfg = {
MEI_CFG_PCH_HFS,
+ MEI_CFG_FW_VER_SUPP,
MEI_CFG_FW_NM,
};
/* PCH8 Lynx Point and newer devices */
static const struct mei_cfg mei_me_pch8_cfg = {
MEI_CFG_PCH8_HFS,
+ MEI_CFG_FW_VER_SUPP,
};
/* PCH8 Lynx Point with quirk for SPS Firmware exclusion */
static const struct mei_cfg mei_me_pch8_sps_cfg = {
MEI_CFG_PCH8_HFS,
+ MEI_CFG_FW_VER_SUPP,
MEI_CFG_FW_SPS,
};
/* Cannon Lake and newer devices */
static const struct mei_cfg mei_me_pch12_cfg = {
MEI_CFG_PCH8_HFS,
+ MEI_CFG_FW_VER_SUPP,
MEI_CFG_DMA_128,
};
@@ -1428,7 +1440,8 @@ static const struct mei_cfg *const mei_cfg_list[] = {
[MEI_ME_UNDEF_CFG] = NULL,
[MEI_ME_ICH_CFG] = &mei_me_ich_cfg,
[MEI_ME_ICH10_CFG] = &mei_me_ich10_cfg,
- [MEI_ME_PCH_CFG] = &mei_me_pch_cfg,
+ [MEI_ME_PCH6_CFG] = &mei_me_pch6_cfg,
+ [MEI_ME_PCH7_CFG] = &mei_me_pch7_cfg,
[MEI_ME_PCH_CPT_PBG_CFG] = &mei_me_pch_cpt_pbg_cfg,
[MEI_ME_PCH8_CFG] = &mei_me_pch8_cfg,
[MEI_ME_PCH8_SPS_CFG] = &mei_me_pch8_sps_cfg,
@@ -1473,6 +1486,8 @@ struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
mei_device_init(dev, &pdev->dev, &mei_me_hw_ops);
hw->cfg = cfg;
+ dev->fw_f_fw_ver_supported = cfg->fw_ver_supported;
+
return dev;
}
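
The MEI refactor splits the old PCH config so a one-bit capability, fw_ver_supported, can be declared per platform generation; mei_me_dev_init() then copies it into the device so the bus-fixup code above can skip the FW version command on hardware that cannot answer it. The config tables compose feature fragments from initializer macros; a compact sketch of that idiom with hypothetical reduced types:

struct cfg {
	unsigned int fw_ver_supported:1;
	unsigned int quirk_nm:1;
};

#define CFG_FW_VER_SUPP	.fw_ver_supported = 1
#define CFG_FW_NM	.quirk_nm = 1

static const struct cfg pch6_cfg = { };			/* no FW version */
static const struct cfg pch7_cfg = { CFG_FW_VER_SUPP };
static const struct cfg cpt_pbg_cfg = { CFG_FW_VER_SUPP, CFG_FW_NM };
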
diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h
index 08c84a0de4a8..1d8794828cbc 100644
--- a/drivers/misc/mei/hw-me.h
+++ b/drivers/misc/mei/hw-me.h
@@ -20,11 +20,13 @@
* @fw_status: FW status
* @quirk_probe: device exclusion quirk
* @dma_size: device DMA buffers size
+ * @fw_ver_supported: whether the FW version is retrievable from the FW
*/
struct mei_cfg {
const struct mei_fw_status fw_status;
bool (*quirk_probe)(struct pci_dev *pdev);
size_t dma_size[DMA_DSCR_NUM];
+ u32 fw_ver_supported:1;
};
@@ -62,7 +64,8 @@ struct mei_me_hw {
* @MEI_ME_UNDEF_CFG: Lower sentinel.
* @MEI_ME_ICH_CFG: I/O Controller Hub legacy devices.
* @MEI_ME_ICH10_CFG: I/O Controller Hub platforms Gen10
- * @MEI_ME_PCH_CFG: Platform Controller Hub platforms (Up to Gen8).
+ * @MEI_ME_PCH6_CFG: Platform Controller Hub platforms (Gen6).
+ * @MEI_ME_PCH7_CFG: Platform Controller Hub platforms (Gen7).
* @MEI_ME_PCH_CPT_PBG_CFG:Platform Controller Hub workstations
* with quirk for Node Manager exclusion.
* @MEI_ME_PCH8_CFG: Platform Controller Hub Gen8 and newer
@@ -77,7 +80,8 @@ enum mei_cfg_idx {
MEI_ME_UNDEF_CFG,
MEI_ME_ICH_CFG,
MEI_ME_ICH10_CFG,
- MEI_ME_PCH_CFG,
+ MEI_ME_PCH6_CFG,
+ MEI_ME_PCH7_CFG,
MEI_ME_PCH_CPT_PBG_CFG,
MEI_ME_PCH8_CFG,
MEI_ME_PCH8_SPS_CFG,
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index f71a023aed3c..0f2141178299 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -426,6 +426,8 @@ struct mei_fw_version {
*
* @fw_ver : FW versions
*
+ * @fw_f_fw_ver_supported: fw feature: fw version supported
+ *
* @me_clients_rwsem: rw lock over me_clients list
* @me_clients : list of FW clients
* @me_clients_map : FW clients bit map
@@ -506,6 +508,8 @@ struct mei_device {
struct mei_fw_version fw_ver[MEI_MAX_FW_VER_BLOCKS];
+ unsigned int fw_f_fw_ver_supported:1;
+
struct rw_semaphore me_clients_rwsem;
struct list_head me_clients;
DECLARE_BITMAP(me_clients_map, MEI_CLIENTS_MAX);
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index d5a92c6eadb3..3dca63eddaa0 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -61,13 +61,13 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_3, MEI_ME_ICH10_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_4, MEI_ME_ICH10_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_1, MEI_ME_PCH_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_2, MEI_ME_PCH_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_1, MEI_ME_PCH6_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_2, MEI_ME_PCH6_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CPT_1, MEI_ME_PCH_CPT_PBG_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_PBG_1, MEI_ME_PCH_CPT_PBG_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, MEI_ME_PCH_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, MEI_ME_PCH_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, MEI_ME_PCH_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, MEI_ME_PCH7_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, MEI_ME_PCH7_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, MEI_ME_PCH7_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, MEI_ME_PCH8_SPS_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, MEI_ME_PCH8_SPS_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_LP, MEI_ME_PCH8_CFG)},
@@ -96,6 +96,9 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_4, MEI_ME_PCH8_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP, MEI_ME_PCH12_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP_3, MEI_ME_PCH8_CFG)},
+
{MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH12_CFG)},
diff --git a/drivers/mmc/host/cqhci.c b/drivers/mmc/host/cqhci.c
index f7bdae5354c3..5047f7343ffc 100644
--- a/drivers/mmc/host/cqhci.c
+++ b/drivers/mmc/host/cqhci.c
@@ -611,7 +611,8 @@ static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
cq_host->slot[tag].flags = 0;
cq_host->qcnt += 1;
-
+ /* Make sure descriptors are ready before ringing the doorbell */
+ wmb();
cqhci_writel(cq_host, 1 << tag, CQHCI_TDBR);
if (!(cqhci_readl(cq_host, CQHCI_TDBR) & (1 << tag)))
pr_debug("%s: cqhci: doorbell not set for tag %d\n",
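
The cqhci fix inserts a write barrier so the task-descriptor stores in normal memory are visible to the controller before the MMIO doorbell write that tells it to fetch them; without the wmb() the device can race the descriptor update. A userspace analogue of the ordering, with a C11 release fence standing in for wmb() (the real code orders a plain store against an MMIO register write):

#include <stdatomic.h>

struct slot { unsigned long desc; };

static struct slot ring[32];
static _Atomic unsigned int doorbell;

static void submit(unsigned int tag, unsigned long desc)
{
	ring[tag].desc = desc;			/* prepare the descriptor */
	atomic_thread_fence(memory_order_release); /* ~wmb(): descriptor first */
	atomic_fetch_or_explicit(&doorbell, 1u << tag,
				 memory_order_relaxed); /* then ring the bell */
}
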
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index 78e7e350655c..4031217d21c3 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -17,6 +17,7 @@
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
+#include <linux/dma/mxs-dma.h>
#include <linux/highmem.h>
#include <linux/clk.h>
#include <linux/err.h>
@@ -266,7 +267,7 @@ static void mxs_mmc_bc(struct mxs_mmc_host *host)
ssp->ssp_pio_words[2] = cmd1;
ssp->dma_dir = DMA_NONE;
ssp->slave_dirn = DMA_TRANS_NONE;
- desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK);
+ desc = mxs_mmc_prep_dma(host, MXS_DMA_CTRL_WAIT4END);
if (!desc)
goto out;
@@ -311,7 +312,7 @@ static void mxs_mmc_ac(struct mxs_mmc_host *host)
ssp->ssp_pio_words[2] = cmd1;
ssp->dma_dir = DMA_NONE;
ssp->slave_dirn = DMA_TRANS_NONE;
- desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK);
+ desc = mxs_mmc_prep_dma(host, MXS_DMA_CTRL_WAIT4END);
if (!desc)
goto out;
@@ -441,7 +442,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
host->data = data;
ssp->dma_dir = dma_data_dir;
ssp->slave_dirn = slave_dirn;
- desc = mxs_mmc_prep_dma(host, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ desc = mxs_mmc_prep_dma(host, DMA_PREP_INTERRUPT | MXS_DMA_CTRL_WAIT4END);
if (!desc)
goto out;
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index d4ada5cca2d1..234551a68739 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -646,8 +646,8 @@ int renesas_sdhi_probe(struct platform_device *pdev,
struct tmio_mmc_dma *dma_priv;
struct tmio_mmc_host *host;
struct renesas_sdhi *priv;
+ int num_irqs, irq, ret, i;
struct resource *res;
- int irq, ret, i;
u16 ver;
of_data = of_device_get_match_data(&pdev->dev);
@@ -825,24 +825,31 @@ int renesas_sdhi_probe(struct platform_device *pdev,
host->hs400_complete = renesas_sdhi_hs400_complete;
}
- i = 0;
- while (1) {
+ num_irqs = platform_irq_count(pdev);
+ if (num_irqs < 0) {
+ ret = num_irqs;
+ goto eirq;
+ }
+
+ /* There must be at least one IRQ source */
+ if (!num_irqs) {
+ ret = -ENXIO;
+ goto eirq;
+ }
+
+ for (i = 0; i < num_irqs; i++) {
irq = platform_get_irq(pdev, i);
- if (irq < 0)
- break;
- i++;
+ if (irq < 0) {
+ ret = irq;
+ goto eirq;
+ }
+
ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_irq, 0,
dev_name(&pdev->dev), host);
if (ret)
goto eirq;
}
- /* There must be at least one IRQ source */
- if (!i) {
- ret = irq;
- goto eirq;
- }
-
dev_info(&pdev->dev, "%s base at 0x%08lx max clock rate %u MHz\n",
mmc_hostname(host->mmc), (unsigned long)
(platform_get_resource(pdev, IORESOURCE_MEM, 0)->start),
diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
index 2b9cdcd1dd9d..f4f5f0a70cda 100644
--- a/drivers/mmc/host/sdhci-iproc.c
+++ b/drivers/mmc/host/sdhci-iproc.c
@@ -262,6 +262,7 @@ static const struct sdhci_iproc_data bcm2835_data = {
};
static const struct sdhci_pltfm_data sdhci_bcm2711_pltfm_data = {
+ .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
.ops = &sdhci_iproc_32only_ops,
};
diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
index 41c2677c587f..083e7e053c95 100644
--- a/drivers/mmc/host/sdhci-omap.c
+++ b/drivers/mmc/host/sdhci-omap.c
@@ -372,7 +372,7 @@ static int sdhci_omap_execute_tuning(struct mmc_host *mmc, u32 opcode)
* on temperature
*/
if (temperature < -20000)
- phase_delay = min(max_window + 4 * max_len - 24,
+ phase_delay = min(max_window + 4 * (max_len - 1) - 24,
max_window +
DIV_ROUND_UP(13 * max_len, 16) * 4);
else if (temperature < 20000)
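
The sdhci-omap change reads as an off-by-one correction: if max_len phase codes pass, the window's last code is max_window + max_len - 1, so the bound derived from the window end must scale max_len - 1 by the 4 delay units per code. A quick numeric check with made-up inputs (max_window = 8, max_len = 10):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define MIN(a, b)		((a) < (b) ? (a) : (b))

int main(void)
{
	int max_window = 8, max_len = 10;
	int phase_delay = MIN(max_window + 4 * (max_len - 1) - 24,
			      max_window + DIV_ROUND_UP(13 * max_len, 16) * 4);

	printf("phase_delay=%d\n", phase_delay);	/* 20, not the old 24 */
	return 0;
}
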
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 81bd9afb0980..98c575de43c7 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -1393,11 +1393,9 @@ static int sh_mmcif_probe(struct platform_device *pdev)
const char *name;
irq[0] = platform_get_irq(pdev, 0);
- irq[1] = platform_get_irq(pdev, 1);
- if (irq[0] < 0) {
- dev_err(dev, "Get irq error\n");
+ irq[1] = platform_get_irq_optional(pdev, 1);
+ if (irq[0] < 0)
return -ENXIO;
- }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
reg = devm_ioremap_resource(dev, res);
diff --git a/drivers/mtd/nand/raw/au1550nd.c b/drivers/mtd/nand/raw/au1550nd.c
index 97a97a9ccc36..e10b76089048 100644
--- a/drivers/mtd/nand/raw/au1550nd.c
+++ b/drivers/mtd/nand/raw/au1550nd.c
@@ -134,16 +134,15 @@ static void au_write_buf16(struct nand_chip *this, const u_char *buf, int len)
/**
* au_read_buf16 - read chip data into buffer
- * @mtd: MTD device structure
+ * @this: NAND chip object
 * @buf: buffer to store data
* @len: number of bytes to read
*
 * read function for 16-bit bus width
*/
-static void au_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
+static void au_read_buf16(struct nand_chip *this, u_char *buf, int len)
{
int i;
- struct nand_chip *this = mtd_to_nand(mtd);
u16 *p = (u16 *) buf;
len >>= 1;
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index 1d8621d43160..7acf4a93b592 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -487,7 +487,7 @@ static int write_sr(struct spi_nor *nor, u8 val)
SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_IN(1, nor->bouncebuf, 1));
+ SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));
return spi_mem_exec_op(nor->spimem, &op);
}
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 8c79bad2a9a5..4f2e6910c623 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -952,7 +952,7 @@ static int alb_upper_dev_walk(struct net_device *upper, void *_data)
struct bond_vlan_tag *tags;
if (is_vlan_dev(upper) &&
- bond->nest_level == vlan_get_encap_level(upper) - 1) {
+ bond->dev->lower_level == upper->lower_level - 1) {
if (upper->addr_assign_type == NET_ADDR_STOLEN) {
alb_send_lp_vid(slave, mac_addr,
vlan_dev_vlan_proto(upper),
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 931d9d935686..0059e6ba6a83 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -200,6 +200,51 @@ atomic_t netpoll_block_tx = ATOMIC_INIT(0);
unsigned int bond_net_id __read_mostly;
+static const struct flow_dissector_key flow_keys_bonding_keys[] = {
+ {
+ .key_id = FLOW_DISSECTOR_KEY_CONTROL,
+ .offset = offsetof(struct flow_keys, control),
+ },
+ {
+ .key_id = FLOW_DISSECTOR_KEY_BASIC,
+ .offset = offsetof(struct flow_keys, basic),
+ },
+ {
+ .key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+ .offset = offsetof(struct flow_keys, addrs.v4addrs),
+ },
+ {
+ .key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+ .offset = offsetof(struct flow_keys, addrs.v6addrs),
+ },
+ {
+ .key_id = FLOW_DISSECTOR_KEY_TIPC,
+ .offset = offsetof(struct flow_keys, addrs.tipckey),
+ },
+ {
+ .key_id = FLOW_DISSECTOR_KEY_PORTS,
+ .offset = offsetof(struct flow_keys, ports),
+ },
+ {
+ .key_id = FLOW_DISSECTOR_KEY_ICMP,
+ .offset = offsetof(struct flow_keys, icmp),
+ },
+ {
+ .key_id = FLOW_DISSECTOR_KEY_VLAN,
+ .offset = offsetof(struct flow_keys, vlan),
+ },
+ {
+ .key_id = FLOW_DISSECTOR_KEY_FLOW_LABEL,
+ .offset = offsetof(struct flow_keys, tags),
+ },
+ {
+ .key_id = FLOW_DISSECTOR_KEY_GRE_KEYID,
+ .offset = offsetof(struct flow_keys, keyid),
+ },
+};
+
+static struct flow_dissector flow_keys_bonding __read_mostly;
+
/*-------------------------- Forward declarations ---------------------------*/
static int bond_init(struct net_device *bond_dev);
@@ -1733,8 +1778,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
goto err_upper_unlink;
}
- bond->nest_level = dev_get_nest_level(bond_dev) + 1;
-
/* If the mode uses primary, then the following is handled by
* bond_change_active_slave().
*/
@@ -1816,7 +1859,8 @@ err_detach:
slave_disable_netpoll(new_slave);
err_close:
- slave_dev->priv_flags &= ~IFF_BONDING;
+ if (!netif_is_bond_master(slave_dev))
+ slave_dev->priv_flags &= ~IFF_BONDING;
dev_close(slave_dev);
err_restore_mac:
@@ -1956,9 +2000,6 @@ static int __bond_release_one(struct net_device *bond_dev,
if (!bond_has_slaves(bond)) {
bond_set_carrier(bond);
eth_hw_addr_random(bond_dev);
- bond->nest_level = SINGLE_DEPTH_NESTING;
- } else {
- bond->nest_level = dev_get_nest_level(bond_dev) + 1;
}
unblock_netpoll_tx();
@@ -2017,7 +2058,8 @@ static int __bond_release_one(struct net_device *bond_dev,
else
dev_set_mtu(slave_dev, slave->original_mtu);
- slave_dev->priv_flags &= ~IFF_BONDING;
+ if (!netif_is_bond_master(slave_dev))
+ slave_dev->priv_flags &= ~IFF_BONDING;
bond_free_slave(slave);
@@ -3263,10 +3305,14 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
const struct iphdr *iph;
int noff, proto = -1;
- if (bond->params.xmit_policy > BOND_XMIT_POLICY_LAYER23)
- return skb_flow_dissect_flow_keys(skb, fk, 0);
+ if (bond->params.xmit_policy > BOND_XMIT_POLICY_LAYER23) {
+ memset(fk, 0, sizeof(*fk));
+ return __skb_flow_dissect(NULL, skb, &flow_keys_bonding,
+ fk, NULL, 0, 0, 0, 0);
+ }
fk->ports.ports = 0;
+ memset(&fk->icmp, 0, sizeof(fk->icmp));
noff = skb_network_offset(skb);
if (skb->protocol == htons(ETH_P_IP)) {
if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph))))
@@ -3286,8 +3332,14 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
} else {
return false;
}
- if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34 && proto >= 0)
- fk->ports.ports = skb_flow_get_ports(skb, noff, proto);
+ if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34 && proto >= 0) {
+ if (proto == IPPROTO_ICMP || proto == IPPROTO_ICMPV6)
+ skb_flow_get_icmp_tci(skb, &fk->icmp, skb->data,
+ skb_transport_offset(skb),
+ skb_headlen(skb));
+ else
+ fk->ports.ports = skb_flow_get_ports(skb, noff, proto);
+ }
return true;
}
@@ -3314,10 +3366,14 @@ u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
return bond_eth_hash(skb);
if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23 ||
- bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23)
+ bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23) {
hash = bond_eth_hash(skb);
- else
- hash = (__force u32)flow.ports.ports;
+ } else {
+ if (flow.icmp.id)
+ memcpy(&hash, &flow.icmp, sizeof(hash));
+ else
+ memcpy(&hash, &flow.ports.ports, sizeof(hash));
+ }
hash ^= (__force u32)flow_get_u32_dst(&flow) ^
(__force u32)flow_get_u32_src(&flow);
hash ^= (hash >> 16);
@@ -3442,13 +3498,6 @@ static void bond_fold_stats(struct rtnl_link_stats64 *_res,
}
}
-static int bond_get_nest_level(struct net_device *bond_dev)
-{
- struct bonding *bond = netdev_priv(bond_dev);
-
- return bond->nest_level;
-}
-
static void bond_get_stats(struct net_device *bond_dev,
struct rtnl_link_stats64 *stats)
{
@@ -3457,7 +3506,7 @@ static void bond_get_stats(struct net_device *bond_dev,
struct list_head *iter;
struct slave *slave;
- spin_lock_nested(&bond->stats_lock, bond_get_nest_level(bond_dev));
+ spin_lock(&bond->stats_lock);
memcpy(stats, &bond->bond_stats, sizeof(*stats));
rcu_read_lock();
@@ -4039,7 +4088,7 @@ out:
* this to-be-skipped slave to send a packet out.
*/
old_arr = rtnl_dereference(bond->slave_arr);
- for (idx = 0; idx < old_arr->count; idx++) {
+ for (idx = 0; old_arr != NULL && idx < old_arr->count; idx++) {
if (skipslave == old_arr->arr[idx]) {
old_arr->arr[idx] =
old_arr->arr[old_arr->count-1];
@@ -4268,7 +4317,6 @@ static const struct net_device_ops bond_netdev_ops = {
.ndo_neigh_setup = bond_neigh_setup,
.ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid,
- .ndo_get_lock_subclass = bond_get_nest_level,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_netpoll_setup = bond_netpoll_setup,
.ndo_netpoll_cleanup = bond_netpoll_cleanup,
@@ -4296,7 +4344,6 @@ void bond_setup(struct net_device *bond_dev)
struct bonding *bond = netdev_priv(bond_dev);
spin_lock_init(&bond->mode_lock);
- spin_lock_init(&bond->stats_lock);
bond->params = bonding_defaults;
/* Initialize pointers */
@@ -4365,6 +4412,7 @@ static void bond_uninit(struct net_device *bond_dev)
list_del(&bond->bond_list);
+ lockdep_unregister_key(&bond->stats_lock_key);
bond_debug_unregister(bond);
}
@@ -4768,8 +4816,9 @@ static int bond_init(struct net_device *bond_dev)
if (!bond->wq)
return -ENOMEM;
- bond->nest_level = SINGLE_DEPTH_NESTING;
- netdev_lockdep_set_classes(bond_dev);
+ spin_lock_init(&bond->stats_lock);
+ lockdep_register_key(&bond->stats_lock_key);
+ lockdep_set_class(&bond->stats_lock, &bond->stats_lock_key);
list_add_tail(&bond->bond_list, &bn->dev_list);
@@ -4901,6 +4950,10 @@ static int __init bonding_init(void)
goto err;
}
+ skb_flow_dissector_init(&flow_keys_bonding,
+ flow_keys_bonding_keys,
+ ARRAY_SIZE(flow_keys_bonding_keys));
+
register_netdevice_notifier(&bond_netdev_notifier);
out:
return res;
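
Two independent improvements land in the bonding hunks above: a private flow dissector initialized with only the keys the transmit hash consumes (including the new ICMP key, so distinct ICMP flows hash on the ICMP id rather than stale port bytes), and an xor-fold of the dissected fields into the final hash. A standalone sketch of the fold; the field layout is illustrative, not struct flow_keys:

#include <stdint.h>
#include <string.h>

struct flow {
	uint32_t ports;		/* L4 ports, or 0 */
	uint32_t icmp;		/* ICMP id/type, used when present */
	uint32_t src, dst;	/* L3 addresses folded to 32 bits */
};

static uint32_t xmit_hash(const struct flow *fk)
{
	uint32_t hash;

	/* Prefer the ICMP id so one echo flow stays on one slave */
	memcpy(&hash, fk->icmp ? &fk->icmp : &fk->ports, sizeof(hash));
	hash ^= fk->src ^ fk->dst;
	hash ^= hash >> 16;
	hash ^= hash >> 8;
	return hash;
}
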
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
index 2b9a2f117113..96d7cef3289f 100644
--- a/drivers/net/caif/Kconfig
+++ b/drivers/net/caif/Kconfig
@@ -3,7 +3,13 @@
# CAIF physical drivers
#
-comment "CAIF transport drivers"
+menuconfig CAIF_DRIVERS
+ bool "CAIF transport drivers"
+ depends on CAIF
+ help
+ Enable this to see CAIF physical drivers.
+
+if CAIF_DRIVERS
config CAIF_TTY
tristate "CAIF TTY transport driver"
@@ -22,7 +28,7 @@ config CAIF_SPI_SLAVE
The CAIF Link layer SPI Protocol driver for Slave SPI interface.
This driver implements a platform driver to accommodate for a
platform specific SPI device. A sample CAIF SPI Platform device is
- provided in Documentation/networking/caif/spi_porting.txt
+ provided in <file:Documentation/networking/caif/spi_porting.txt>.
config CAIF_SPI_SYNC
bool "Next command and length in start of frame"
@@ -38,7 +44,7 @@ config CAIF_HSI
depends on CAIF
default n
---help---
- The caif low level driver for CAIF over HSI.
+ The CAIF low level driver for CAIF over HSI.
Be aware that if you enable this then you also need to
enable a low-level HSI driver.
@@ -50,8 +56,10 @@ config CAIF_VIRTIO
select GENERIC_ALLOCATOR
default n
---help---
- The caif driver for CAIF over Virtio.
+ The CAIF driver for CAIF over Virtio.
if CAIF_VIRTIO
source "drivers/vhost/Kconfig.vringh"
endif
+
+endif # CAIF_DRIVERS
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index f6232ce8481f..685e12b05a7c 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -77,6 +77,7 @@ config NET_DSA_REALTEK_SMI
config NET_DSA_SMSC_LAN9303
tristate
select NET_DSA_TAG_LAN9303
+ select REGMAP
---help---
This enables support for the SMSC/Microchip LAN9303 3 port ethernet
switch chips.
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 526ba2ab66f1..36828f210030 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -524,7 +524,7 @@ int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
if (!dsa_is_user_port(ds, port))
return 0;
- cpu_port = ds->ports[port].cpu_dp->index;
+ cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
if (dev->ops->irq_enable)
ret = dev->ops->irq_enable(dev, port);
@@ -1503,11 +1503,25 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
idx = 1;
}
- memset(&ent, 0, sizeof(ent));
- ent.port = port;
+	/* For a multicast address, the port is a bitmask and the validity
+	 * is determined by at least one port still being active
+ */
+ if (!is_multicast_ether_addr(addr)) {
+ ent.port = port;
+ ent.is_valid = is_valid;
+ } else {
+ if (is_valid)
+ ent.port |= BIT(port);
+ else
+ ent.port &= ~BIT(port);
+
+ ent.is_valid = !!(ent.port);
+ }
+
-	ent.is_valid = is_valid;
ent.vid = vid;
ent.is_static = true;
+ ent.is_age = false;
memcpy(ent.mac, addr, ETH_ALEN);
b53_arl_from_entry(&mac_vid, &fwd_entry, &ent);
@@ -1626,10 +1640,51 @@ int b53_fdb_dump(struct dsa_switch *ds, int port,
}
EXPORT_SYMBOL(b53_fdb_dump);
+int b53_mdb_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct b53_device *priv = ds->priv;
+
+ /* 5325 and 5365 require some more massaging, but could
+ * be supported eventually
+ */
+ if (is5325(priv) || is5365(priv))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+EXPORT_SYMBOL(b53_mdb_prepare);
+
+void b53_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct b53_device *priv = ds->priv;
+ int ret;
+
+ ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, true);
+ if (ret)
+ dev_err(ds->dev, "failed to add MDB entry\n");
+}
+EXPORT_SYMBOL(b53_mdb_add);
+
+int b53_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct b53_device *priv = ds->priv;
+ int ret;
+
+ ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, false);
+ if (ret)
+ dev_err(ds->dev, "failed to delete MDB entry\n");
+
+ return ret;
+}
+EXPORT_SYMBOL(b53_mdb_del);
+
int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br)
{
struct b53_device *dev = ds->priv;
- s8 cpu_port = ds->ports[port].cpu_dp->index;
+ s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
u16 pvlan, reg;
unsigned int i;
@@ -1675,7 +1730,7 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br)
{
struct b53_device *dev = ds->priv;
struct b53_vlan *vl = &dev->vlans[0];
- s8 cpu_port = ds->ports[port].cpu_dp->index;
+ s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
unsigned int i;
u16 pvlan, reg, pvid;
@@ -1845,7 +1900,6 @@ int b53_mirror_add(struct dsa_switch *ds, int port,
loc = B53_EG_MIR_CTL;
b53_read16(dev, B53_MGMT_PAGE, loc, &reg);
- reg &= ~MIRROR_MASK;
reg |= BIT(port);
b53_write16(dev, B53_MGMT_PAGE, loc, reg);
@@ -1995,6 +2049,9 @@ static const struct dsa_switch_ops b53_switch_ops = {
.port_fdb_del = b53_fdb_del,
.port_mirror_add = b53_mirror_add,
.port_mirror_del = b53_mirror_del,
+ .port_mdb_prepare = b53_mdb_prepare,
+ .port_mdb_add = b53_mdb_add,
+ .port_mdb_del = b53_mdb_del,
};
struct b53_chip_data {
@@ -2342,10 +2399,13 @@ struct b53_device *b53_switch_alloc(struct device *base,
struct dsa_switch *ds;
struct b53_device *dev;
- ds = dsa_switch_alloc(base, DSA_MAX_PORTS);
+ ds = devm_kzalloc(base, sizeof(*ds), GFP_KERNEL);
if (!ds)
return NULL;
+ ds->dev = base;
+ ds->num_ports = DSA_MAX_PORTS;
+
dev = devm_kzalloc(base, sizeof(*dev), GFP_KERNEL);
if (!dev)
return NULL;
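
For the MDB support added above, the ARL entry's port field becomes a member bitmask: a join sets the port's bit, a leave clears it, and the entry stays valid only while some bit survives (hence the widening of port from u8 to u16 in b53_priv.h below). The bookkeeping, as a self-contained sketch:

#include <stdbool.h>
#include <stdint.h>

struct arl_entry {
	uint16_t port;		/* bitmask of member ports for multicast */
	bool is_valid;
};

static void mdb_update(struct arl_entry *ent, int port, bool join)
{
	if (join)
		ent->port |= 1u << port;
	else
		ent->port &= ~(1u << port);

	/* valid while at least one port is still a member */
	ent->is_valid = ent->port != 0;
}
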
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index a7dd8acc281b..1877acf05081 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -250,7 +250,7 @@ b53_build_op(write48, u64);
b53_build_op(write64, u64);
struct b53_arl_entry {
- u8 port;
+ u16 port;
u8 mac[ETH_ALEN];
u16 vid;
u8 is_valid:1;
@@ -351,6 +351,12 @@ int b53_fdb_del(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid);
int b53_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data);
+int b53_mdb_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb);
+void b53_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb);
+int b53_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb);
int b53_mirror_add(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror, bool ingress);
enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port);
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 26509fa37a50..9ce5a0dbcde1 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -37,22 +37,11 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
unsigned int i;
u32 reg, offset;
- if (priv->type == BCM7445_DEVICE_ID)
- offset = CORE_STS_OVERRIDE_IMP;
- else
- offset = CORE_STS_OVERRIDE_IMP2;
-
/* Enable the port memories */
reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
reg &= ~P_TXQ_PSM_VDD(port);
core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
- /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
- reg = core_readl(priv, CORE_IMP_CTL);
- reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
- reg &= ~(RX_DIS | TX_DIS);
- core_writel(priv, reg, CORE_IMP_CTL);
-
/* Enable forwarding */
core_writel(priv, SW_FWDG_EN, CORE_SWMODE);
@@ -71,10 +60,27 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
b53_brcm_hdr_setup(ds, port);
- /* Force link status for IMP port */
- reg = core_readl(priv, offset);
- reg |= (MII_SW_OR | LINK_STS);
- core_writel(priv, reg, offset);
+ if (port == 8) {
+ if (priv->type == BCM7445_DEVICE_ID)
+ offset = CORE_STS_OVERRIDE_IMP;
+ else
+ offset = CORE_STS_OVERRIDE_IMP2;
+
+ /* Force link status for IMP port */
+ reg = core_readl(priv, offset);
+ reg |= (MII_SW_OR | LINK_STS);
+ core_writel(priv, reg, offset);
+
+ /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
+ reg = core_readl(priv, CORE_IMP_CTL);
+ reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
+ reg &= ~(RX_DIS | TX_DIS);
+ core_writel(priv, reg, CORE_IMP_CTL);
+ } else {
+ reg = core_readl(priv, CORE_G_PCTL_PORT(port));
+ reg &= ~(RX_DIS | TX_DIS);
+ core_writel(priv, reg, CORE_G_PCTL_PORT(port));
+ }
}
static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
@@ -344,6 +350,18 @@ static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
{
unsigned int timeout = 1000;
u32 reg;
+ int ret;
+
+	/* The watchdog reset does not work on 7278; we need to hit the
+	 * "external" reset line through the reset controller.
+ */
+ if (priv->type == BCM7278_DEVICE_ID && !IS_ERR(priv->rcdev)) {
+ ret = reset_control_assert(priv->rcdev);
+ if (ret)
+ return ret;
+
+ return reset_control_deassert(priv->rcdev);
+ }
reg = core_readl(priv, CORE_WATCHDOG_CTRL);
reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
@@ -375,8 +393,9 @@ static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
struct device_node *dn)
{
struct device_node *port;
- int mode;
unsigned int port_num;
+ phy_interface_t mode;
+ int err;
priv->moca_port = -1;
@@ -389,8 +408,8 @@ static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
* has completed, since they might be turned off at that
* time
*/
- mode = of_get_phy_mode(port);
- if (mode < 0)
+ err = of_get_phy_mode(port, &mode);
+ if (err)
continue;
if (mode == PHY_INTERFACE_MODE_INTERNAL)
@@ -662,7 +681,7 @@ static void bcm_sf2_sw_fixed_state(struct dsa_switch *ds, int port,
* state machine and make it go in PHY_FORCING state instead.
*/
if (!status->link)
- netif_carrier_off(ds->ports[port].slave);
+ netif_carrier_off(dsa_to_port(ds, port)->slave);
status->duplex = DUPLEX_FULL;
} else {
status->link = true;
@@ -728,7 +747,7 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds)
static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
struct ethtool_wolinfo *wol)
{
- struct net_device *p = ds->ports[port].cpu_dp->master;
+ struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
struct ethtool_wolinfo pwol = { };
@@ -752,9 +771,9 @@ static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
struct ethtool_wolinfo *wol)
{
- struct net_device *p = ds->ports[port].cpu_dp->master;
+ struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
- s8 cpu_port = ds->ports[port].cpu_dp->index;
+ s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
struct ethtool_wolinfo pwol = { };
if (p->ethtool_ops->get_wol)
@@ -968,6 +987,9 @@ static const struct dsa_switch_ops bcm_sf2_ops = {
.set_rxnfc = bcm_sf2_set_rxnfc,
.port_mirror_add = b53_mirror_add,
.port_mirror_del = b53_mirror_del,
+ .port_mdb_prepare = b53_mdb_prepare,
+ .port_mdb_add = b53_mdb_add,
+ .port_mdb_del = b53_mdb_del,
};
struct bcm_sf2_of_data {
@@ -1082,6 +1104,11 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
priv->core_reg_align = data->core_reg_align;
priv->num_cfp_rules = data->num_cfp_rules;
+ priv->rcdev = devm_reset_control_get_optional_exclusive(&pdev->dev,
+ "switch");
+ if (PTR_ERR(priv->rcdev) == -EPROBE_DEFER)
+ return PTR_ERR(priv->rcdev);
+
/* Auto-detection using standard registers will not work, so
* provide an indication of what kind of device we are for
* b53_common to work with
@@ -1214,6 +1241,8 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev)
/* Disable all ports and interrupts */
bcm_sf2_sw_suspend(priv->dev->ds);
bcm_sf2_mdio_unregister(priv);
+ if (priv->type == BCM7278_DEVICE_ID && !IS_ERR(priv->rcdev))
+ reset_control_assert(priv->rcdev);
return 0;
}
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
index 1df30ccec42d..de386dd96d66 100644
--- a/drivers/net/dsa/bcm_sf2.h
+++ b/drivers/net/dsa/bcm_sf2.h
@@ -18,6 +18,7 @@
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>
+#include <linux/reset.h>
#include <net/dsa.h>
@@ -64,6 +65,8 @@ struct bcm_sf2_priv {
void __iomem *fcb;
void __iomem *acb;
+ struct reset_control *rcdev;
+
/* Register offsets indirection tables */
u32 type;
const u16 *reg_offsets;
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index d264776a95a3..f3f0c3f07391 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -821,7 +821,7 @@ static int bcm_sf2_cfp_rule_insert(struct dsa_switch *ds, int port,
struct ethtool_rx_flow_spec *fs)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
- s8 cpu_port = ds->ports[port].cpu_dp->index;
+ s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
__u64 ring_cookie = fs->ring_cookie;
unsigned int queue_num, port_num;
int ret;
@@ -1049,7 +1049,7 @@ static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv,
int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
- struct net_device *p = ds->ports[port].cpu_dp->master;
+ struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
int ret = 0;
@@ -1092,7 +1092,7 @@ int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
struct ethtool_rxnfc *nfc)
{
- struct net_device *p = ds->ports[port].cpu_dp->master;
+ struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
int ret = 0;
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
index 925ed135a4d9..c8d7ef27fd72 100644
--- a/drivers/net/dsa/dsa_loop.c
+++ b/drivers/net/dsa/dsa_loop.c
@@ -286,10 +286,13 @@ static int dsa_loop_drv_probe(struct mdio_device *mdiodev)
dev_info(&mdiodev->dev, "%s: 0x%0x\n",
pdata->name, pdata->enabled_ports);
- ds = dsa_switch_alloc(&mdiodev->dev, DSA_MAX_PORTS);
+ ds = devm_kzalloc(&mdiodev->dev, sizeof(*ds), GFP_KERNEL);
if (!ds)
return -ENOMEM;
+ ds->dev = &mdiodev->dev;
+ ds->num_ports = DSA_MAX_PORTS;
+
ps = devm_kzalloc(&mdiodev->dev, sizeof(*ps), GFP_KERNEL);
if (!ps)
return -ENOMEM;
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index bbec86b9418e..e3c333a8f45d 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -1283,10 +1283,12 @@ static int lan9303_register_switch(struct lan9303 *chip)
{
int base;
- chip->ds = dsa_switch_alloc(chip->dev, LAN9303_NUM_PORTS);
+ chip->ds = devm_kzalloc(chip->dev, sizeof(*chip->ds), GFP_KERNEL);
if (!chip->ds)
return -ENOMEM;
+ chip->ds->dev = chip->dev;
+ chip->ds->num_ports = LAN9303_NUM_PORTS;
chip->ds->priv = chip;
chip->ds->ops = &lan9303_switch_ops;
base = chip->phy_addr_base;
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index a69c9b9878b7..955324968b74 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -1854,10 +1854,12 @@ static int gswip_probe(struct platform_device *pdev)
if (!priv->hw_info)
return -EINVAL;
- priv->ds = dsa_switch_alloc(dev, priv->hw_info->max_ports);
+ priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
if (!priv->ds)
return -ENOMEM;
+ priv->ds->dev = dev;
+ priv->ds->num_ports = priv->hw_info->max_ports;
priv->ds->priv = priv;
priv->ds->ops = &gswip_switch_ops;
priv->dev = dev;
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
index a23d3ffdf0c4..24a5e99f7fd5 100644
--- a/drivers/net/dsa/microchip/ksz8795.c
+++ b/drivers/net/dsa/microchip/ksz8795.c
@@ -1224,10 +1224,6 @@ static int ksz8795_switch_init(struct ksz_device *dev)
{
int i;
- mutex_init(&dev->stats_mutex);
- mutex_init(&dev->alu_mutex);
- mutex_init(&dev->vlan_mutex);
-
dev->ds->ops = &ksz8795_switch_ops;
for (i = 0; i < ARRAY_SIZE(ksz8795_switch_chips); i++) {
diff --git a/drivers/net/dsa/microchip/ksz8795_spi.c b/drivers/net/dsa/microchip/ksz8795_spi.c
index d0f8153e86b7..8b00f8e6c02f 100644
--- a/drivers/net/dsa/microchip/ksz8795_spi.c
+++ b/drivers/net/dsa/microchip/ksz8795_spi.c
@@ -25,6 +25,7 @@ KSZ_REGMAP_TABLE(ksz8795, 16, SPI_ADDR_SHIFT,
static int ksz8795_spi_probe(struct spi_device *spi)
{
+ struct regmap_config rc;
struct ksz_device *dev;
int i, ret;
@@ -33,9 +34,9 @@ static int ksz8795_spi_probe(struct spi_device *spi)
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(ksz8795_regmap_config); i++) {
- dev->regmap[i] = devm_regmap_init_spi(spi,
- &ksz8795_regmap_config
- [i]);
+ rc = ksz8795_regmap_config[i];
+ rc.lock_arg = &dev->regmap_mutex;
+ dev->regmap[i] = devm_regmap_init_spi(spi, &rc);
if (IS_ERR(dev->regmap[i])) {
ret = PTR_ERR(dev->regmap[i]);
dev_err(&spi->dev,
diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c
index 0b1e01f0873d..7d050fab0889 100644
--- a/drivers/net/dsa/microchip/ksz9477_i2c.c
+++ b/drivers/net/dsa/microchip/ksz9477_i2c.c
@@ -17,6 +17,7 @@ KSZ_REGMAP_TABLE(ksz9477, not_used, 16, 0, 0);
static int ksz9477_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *i2c_id)
{
+ struct regmap_config rc;
struct ksz_device *dev;
int i, ret;
@@ -25,8 +26,9 @@ static int ksz9477_i2c_probe(struct i2c_client *i2c,
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(ksz9477_regmap_config); i++) {
- dev->regmap[i] = devm_regmap_init_i2c(i2c,
- &ksz9477_regmap_config[i]);
+ rc = ksz9477_regmap_config[i];
+ rc.lock_arg = &dev->regmap_mutex;
+ dev->regmap[i] = devm_regmap_init_i2c(i2c, &rc);
if (IS_ERR(dev->regmap[i])) {
ret = PTR_ERR(dev->regmap[i]);
dev_err(&i2c->dev,
@@ -85,7 +87,6 @@ MODULE_DEVICE_TABLE(of, ksz9477_dt_ids);
static struct i2c_driver ksz9477_i2c_driver = {
.driver = {
.name = "ksz9477-switch",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(ksz9477_dt_ids),
},
.probe = ksz9477_i2c_probe,
diff --git a/drivers/net/dsa/microchip/ksz9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h
index 2938e892b631..16939f29faa5 100644
--- a/drivers/net/dsa/microchip/ksz9477_reg.h
+++ b/drivers/net/dsa/microchip/ksz9477_reg.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
* Microchip KSZ9477 register definitions
*
* Copyright (C) 2017-2018 Microchip Technology Inc.
diff --git a/drivers/net/dsa/microchip/ksz9477_spi.c b/drivers/net/dsa/microchip/ksz9477_spi.c
index f4198d6f72be..c5f64959a184 100644
--- a/drivers/net/dsa/microchip/ksz9477_spi.c
+++ b/drivers/net/dsa/microchip/ksz9477_spi.c
@@ -24,6 +24,7 @@ KSZ_REGMAP_TABLE(ksz9477, 32, SPI_ADDR_SHIFT,
static int ksz9477_spi_probe(struct spi_device *spi)
{
+ struct regmap_config rc;
struct ksz_device *dev;
int i, ret;
@@ -32,8 +33,9 @@ static int ksz9477_spi_probe(struct spi_device *spi)
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(ksz9477_regmap_config); i++) {
- dev->regmap[i] = devm_regmap_init_spi(spi,
- &ksz9477_regmap_config[i]);
+ rc = ksz9477_regmap_config[i];
+ rc.lock_arg = &dev->regmap_mutex;
+ dev->regmap[i] = devm_regmap_init_spi(spi, &rc);
if (IS_ERR(dev->regmap[i])) {
ret = PTR_ERR(dev->regmap[i]);
dev_err(&spi->dev,
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index b0b870f0c252..d8fda4a02640 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -398,10 +398,13 @@ struct ksz_device *ksz_switch_alloc(struct device *base, void *priv)
struct dsa_switch *ds;
struct ksz_device *swdev;
- ds = dsa_switch_alloc(base, DSA_MAX_PORTS);
+ ds = devm_kzalloc(base, sizeof(*ds), GFP_KERNEL);
if (!ds)
return NULL;
+ ds->dev = base;
+ ds->num_ports = DSA_MAX_PORTS;
+
swdev = devm_kzalloc(base, sizeof(*swdev), GFP_KERNEL);
if (!swdev)
return NULL;
@@ -419,6 +422,7 @@ EXPORT_SYMBOL(ksz_switch_alloc);
int ksz_switch_register(struct ksz_device *dev,
const struct ksz_dev_ops *ops)
{
+ phy_interface_t interface;
int ret;
if (dev->pdata)
@@ -436,7 +440,7 @@ int ksz_switch_register(struct ksz_device *dev,
}
mutex_init(&dev->dev_mutex);
- mutex_init(&dev->stats_mutex);
+ mutex_init(&dev->regmap_mutex);
mutex_init(&dev->alu_mutex);
mutex_init(&dev->vlan_mutex);
@@ -453,9 +457,9 @@ int ksz_switch_register(struct ksz_device *dev,
* device tree.
*/
if (dev->dev->of_node) {
- ret = of_get_phy_mode(dev->dev->of_node);
- if (ret >= 0)
- dev->interface = ret;
+ ret = of_get_phy_mode(dev->dev->of_node, &interface);
+ if (ret == 0)
+ dev->interface = interface;
dev->synclko_125 = of_property_read_bool(dev->dev->of_node,
"microchip,synclko-125");
}
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index dd60d0837fc6..a20ebb749377 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Microchip switch driver common header
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Microchip switch driver common header
*
* Copyright (C) 2017-2019 Microchip Technology Inc.
*/
@@ -47,7 +47,7 @@ struct ksz_device {
const char *name;
struct mutex dev_mutex; /* device access */
- struct mutex stats_mutex; /* status access */
+ struct mutex regmap_mutex; /* regmap access */
struct mutex alu_mutex; /* ALU access */
struct mutex vlan_mutex; /* vlan access */
const struct ksz_dev_ops *dev_ops;
@@ -290,6 +290,18 @@ static inline void ksz_pwrite32(struct ksz_device *dev, int port, int offset,
ksz_write32(dev, dev->dev_ops->get_port_addr(port, offset), data);
}
+static inline void ksz_regmap_lock(void *__mtx)
+{
+ struct mutex *mtx = __mtx;
+ mutex_lock(mtx);
+}
+
+static inline void ksz_regmap_unlock(void *__mtx)
+{
+ struct mutex *mtx = __mtx;
+ mutex_unlock(mtx);
+}
+
/* Regmap tables generation */
#define KSZ_SPI_OP_RD 3
#define KSZ_SPI_OP_WR 2
@@ -314,6 +326,8 @@ static inline void ksz_pwrite32(struct ksz_device *dev, int port, int offset,
.write_flag_mask = \
KSZ_SPI_OP_FLAG_MASK(KSZ_SPI_OP_WR, swp, \
regbits, regpad), \
+ .lock = ksz_regmap_lock, \
+ .unlock = ksz_regmap_unlock, \
.reg_format_endian = REGMAP_ENDIAN_BIG, \
.val_format_endian = REGMAP_ENDIAN_BIG \
}
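
The KSZ changes funnel every regmap of a device through one driver-owned mutex: KSZ_REGMAP_TABLE wires up the lock/unlock callbacks, and each probe copies the const config to the stack so it can point lock_arg at the device's regmap_mutex before registering. The callback shape, as a reduced standalone sketch (map_config is a hypothetical stand-in for regmap_config):

#include <pthread.h>

struct map_config {
	void (*lock)(void *arg);
	void (*unlock)(void *arg);
	void *lock_arg;
};

static void shared_lock(void *arg)	{ pthread_mutex_lock(arg); }
static void shared_unlock(void *arg)	{ pthread_mutex_unlock(arg); }

/* Template shared by all maps; lock_arg is filled in per device */
static const struct map_config config_template = {
	.lock	= shared_lock,
	.unlock	= shared_unlock,
};

static pthread_mutex_t regmap_mutex = PTHREAD_MUTEX_INITIALIZER;

static struct map_config make_config(void)
{
	struct map_config rc = config_template;	/* copy, as probe() does */

	rc.lock_arg = &regmap_mutex;
	return rc;
}
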
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 1d8d36de4d20..ed1ec10ec62b 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -862,7 +862,7 @@ mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port)
for (i = 0; i < MT7530_NUM_PORTS; i++) {
if (dsa_is_user_port(ds, i) &&
- dsa_port_is_vlan_filtering(&ds->ports[i])) {
+ dsa_port_is_vlan_filtering(dsa_to_port(ds, i))) {
all_user_ports_removed = false;
break;
}
@@ -922,7 +922,7 @@ mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
* other port is still a VLAN-aware port.
*/
if (dsa_is_user_port(ds, i) && i != port &&
- !dsa_port_is_vlan_filtering(&ds->ports[i])) {
+ !dsa_port_is_vlan_filtering(dsa_to_port(ds, i))) {
if (dsa_to_port(ds, i)->bridge_dev != bridge)
continue;
if (priv->ports[i].enable)
@@ -1165,7 +1165,7 @@ mt7530_port_vlan_add(struct dsa_switch *ds, int port,
	/* The port is kept VLAN-unaware if the bridge's vlan_filtering is
	 * not set.
*/
- if (!dsa_port_is_vlan_filtering(&ds->ports[port]))
+ if (!dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
return;
mutex_lock(&priv->reg_mutex);
@@ -1196,7 +1196,7 @@ mt7530_port_vlan_del(struct dsa_switch *ds, int port,
/* The port is kept as VLAN-unaware if bridge with vlan_filtering not
* being set.
*/
- if (!dsa_port_is_vlan_filtering(&ds->ports[port]))
+ if (!dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
return 0;
mutex_lock(&priv->reg_mutex);
@@ -1252,7 +1252,7 @@ mt7530_setup(struct dsa_switch *ds)
	 * controller is also the container for two GMAC nodes, represented
	 * as two netdev instances.
*/
- dn = ds->ports[MT7530_CPU_PORT].master->dev.of_node->parent;
+ dn = dsa_to_port(ds, MT7530_CPU_PORT)->master->dev.of_node->parent;
if (priv->id == ID_MT7530) {
priv->ethernet = syscon_node_to_regmap(dn);
@@ -1340,7 +1340,9 @@ mt7530_setup(struct dsa_switch *ds)
if (!dsa_is_unused_port(ds, 5)) {
priv->p5_intf_sel = P5_INTF_SEL_GMAC5;
- interface = of_get_phy_mode(ds->ports[5].dn);
+ ret = of_get_phy_mode(dsa_to_port(ds, 5)->dn, &interface);
+ if (ret && ret != -ENODEV)
+ return ret;
} else {
/* Scan the ethernet nodes. look for GMAC1, lookup used phy */
for_each_child_of_node(dn, mac_np) {
@@ -1354,7 +1356,9 @@ mt7530_setup(struct dsa_switch *ds)
phy_node = of_parse_phandle(mac_np, "phy-handle", 0);
if (phy_node->parent == priv->dev->of_node->parent) {
- interface = of_get_phy_mode(mac_np);
+ ret = of_get_phy_mode(mac_np, &interface);
+ if (ret && ret != -ENODEV)
+ return ret;
id = of_mdio_parse_addr(ds->dev, phy_node);
if (id == 0)
priv->p5_intf_sel = P5_INTF_SEL_PHY_P0;
@@ -1632,10 +1636,13 @@ mt7530_probe(struct mdio_device *mdiodev)
if (!priv)
return -ENOMEM;
- priv->ds = dsa_switch_alloc(&mdiodev->dev, DSA_MAX_PORTS);
+ priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL);
if (!priv->ds)
return -ENOMEM;
+ priv->ds->dev = &mdiodev->dev;
+ priv->ds->num_ports = DSA_MAX_PORTS;
+
	/* Use the mediatek,mcm property to distinguish hardware types that
	 * cause slight differences in the power-on sequence.
*/
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index 2a2489b5196d..a5a37f47b320 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -270,10 +270,12 @@ static int mv88e6060_probe(struct mdio_device *mdiodev)
dev_info(dev, "switch %s detected\n", name);
- ds = dsa_switch_alloc(dev, MV88E6060_PORTS);
+ ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
if (!ds)
return -ENOMEM;
+ ds->dev = dev;
+ ds->num_ports = MV88E6060_PORTS;
ds->priv = priv;
-	ds->dev = dev;
ds->ops = &mv88e6060_switch_ops;
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 6787d560e9e3..0dbe6c8b9dc0 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -1057,35 +1057,43 @@ static int mv88e6xxx_set_mac_eee(struct dsa_switch *ds, int port,
return 0;
}
+/* Mask of the local ports allowed to receive frames from a given fabric port */
static u16 mv88e6xxx_port_vlan(struct mv88e6xxx_chip *chip, int dev, int port)
{
- struct dsa_switch *ds = NULL;
+ struct dsa_switch *ds = chip->ds;
+ struct dsa_switch_tree *dst = ds->dst;
struct net_device *br;
+ struct dsa_port *dp;
+ bool found = false;
u16 pvlan;
- int i;
- if (dev < DSA_MAX_SWITCHES)
- ds = chip->ds->dst->ds[dev];
+ list_for_each_entry(dp, &dst->ports, list) {
+ if (dp->ds->index == dev && dp->index == port) {
+ found = true;
+ break;
+ }
+ }
/* Prevent frames from unknown switch or port */
- if (!ds || port >= ds->num_ports)
+ if (!found)
return 0;
/* Frames from DSA links and CPU ports can egress any local port */
- if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
+ if (dp->type == DSA_PORT_TYPE_CPU || dp->type == DSA_PORT_TYPE_DSA)
return mv88e6xxx_port_mask(chip);
- br = ds->ports[port].bridge_dev;
+ br = dp->bridge_dev;
pvlan = 0;
/* Frames from user ports can egress any local DSA links and CPU ports,
* as well as any local member of their bridge group.
*/
- for (i = 0; i < mv88e6xxx_num_ports(chip); ++i)
- if (dsa_is_cpu_port(chip->ds, i) ||
- dsa_is_dsa_port(chip->ds, i) ||
- (br && dsa_to_port(chip->ds, i)->bridge_dev == br))
- pvlan |= BIT(i);
+ list_for_each_entry(dp, &dst->ports, list)
+ if (dp->ds == ds &&
+ (dp->type == DSA_PORT_TYPE_CPU ||
+ dp->type == DSA_PORT_TYPE_DSA ||
+ (br && dp->bridge_dev == br)))
+ pvlan |= BIT(dp->index);
return pvlan;
}
@@ -1135,6 +1143,7 @@ static int mv88e6xxx_pri_setup(struct mv88e6xxx_chip *chip)
static int mv88e6xxx_devmap_setup(struct mv88e6xxx_chip *chip)
{
+ struct dsa_switch *ds = chip->ds;
int target, port;
int err;
@@ -1143,10 +1152,9 @@ static int mv88e6xxx_devmap_setup(struct mv88e6xxx_chip *chip)
/* Initialize the routing port to the 32 possible target devices */
for (target = 0; target < 32; target++) {
- port = 0x1f;
- if (target < DSA_MAX_SWITCHES)
- if (chip->ds->rtable[target] != DSA_RTABLE_NONE)
- port = chip->ds->rtable[target];
+ port = dsa_routing_port(ds, target);
+ if (port == ds->num_ports)
+ port = 0x1f;
err = mv88e6xxx_g2_device_mapping_write(chip, target, port);
if (err)
@@ -1253,7 +1261,7 @@ static int mv88e6xxx_pvt_map(struct mv88e6xxx_chip *chip, int dev, int port)
u16 pvlan = 0;
if (!mv88e6xxx_has_pvt(chip))
- return -EOPNOTSUPP;
+ return 0;
/* Skip the local source device, which uses in-chip port VLAN */
if (dev != chip->ds->index)
@@ -1370,6 +1378,22 @@ static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid)
return mv88e6xxx_g1_atu_flush(chip, *fid, true);
}
+static int mv88e6xxx_atu_get_hash(struct mv88e6xxx_chip *chip, u8 *hash)
+{
+ if (chip->info->ops->atu_get_hash)
+ return chip->info->ops->atu_get_hash(chip, hash);
+
+ return -EOPNOTSUPP;
+}
+
+static int mv88e6xxx_atu_set_hash(struct mv88e6xxx_chip *chip, u8 hash)
+{
+ if (chip->info->ops->atu_set_hash)
+ return chip->info->ops->atu_set_hash(chip, hash);
+
+ return -EOPNOTSUPP;
+}
+
static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
u16 vid_begin, u16 vid_end)
{
@@ -1402,7 +1426,7 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
if (dsa_is_dsa_port(ds, i) || dsa_is_cpu_port(ds, i))
continue;
- if (!ds->ports[i].slave)
+ if (!dsa_to_port(ds, i)->slave)
continue;
if (vlan.member[i] ==
@@ -1410,7 +1434,7 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
continue;
if (dsa_to_port(ds, i)->bridge_dev ==
- ds->ports[port].bridge_dev)
+ dsa_to_port(ds, port)->bridge_dev)
break; /* same bridge, check next VLAN */
if (!dsa_to_port(ds, i)->bridge_dev)
@@ -2035,32 +2059,26 @@ static int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
static int mv88e6xxx_bridge_map(struct mv88e6xxx_chip *chip,
struct net_device *br)
{
- struct dsa_switch *ds;
- int port;
- int dev;
+ struct dsa_switch *ds = chip->ds;
+ struct dsa_switch_tree *dst = ds->dst;
+ struct dsa_port *dp;
int err;
- /* Remap the Port VLAN of each local bridge group member */
- for (port = 0; port < mv88e6xxx_num_ports(chip); ++port) {
- if (chip->ds->ports[port].bridge_dev == br) {
- err = mv88e6xxx_port_vlan_map(chip, port);
- if (err)
- return err;
- }
- }
-
- if (!mv88e6xxx_has_pvt(chip))
- return 0;
-
- /* Remap the Port VLAN of each cross-chip bridge group member */
- for (dev = 0; dev < DSA_MAX_SWITCHES; ++dev) {
- ds = chip->ds->dst->ds[dev];
- if (!ds)
- break;
-
- for (port = 0; port < ds->num_ports; ++port) {
- if (ds->ports[port].bridge_dev == br) {
- err = mv88e6xxx_pvt_map(chip, dev, port);
+ list_for_each_entry(dp, &dst->ports, list) {
+ if (dp->bridge_dev == br) {
+ if (dp->ds == ds) {
+ /* This is a local bridge group member,
+ * remap its Port VLAN Map.
+ */
+ err = mv88e6xxx_port_vlan_map(chip, dp->index);
+ if (err)
+ return err;
+ } else {
+ /* This is an external bridge group member,
+ * remap its cross-chip Port VLAN Table entry.
+ */
+ err = mv88e6xxx_pvt_map(chip, dp->ds->index,
+ dp->index);
if (err)
return err;
}
@@ -2101,9 +2119,6 @@ static int mv88e6xxx_crosschip_bridge_join(struct dsa_switch *ds, int dev,
struct mv88e6xxx_chip *chip = ds->priv;
int err;
- if (!mv88e6xxx_has_pvt(chip))
- return 0;
-
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_pvt_map(chip, dev, port);
mv88e6xxx_reg_unlock(chip);
@@ -2116,9 +2131,6 @@ static void mv88e6xxx_crosschip_bridge_leave(struct dsa_switch *ds, int dev,
{
struct mv88e6xxx_chip *chip = ds->priv;
- if (!mv88e6xxx_has_pvt(chip))
- return;
-
mv88e6xxx_reg_lock(chip);
if (mv88e6xxx_pvt_map(chip, dev, port))
dev_err(ds->dev, "failed to remap cross-chip Port VLAN\n");
@@ -2641,6 +2653,248 @@ static int mv88e6390_setup_errata(struct mv88e6xxx_chip *chip)
return mv88e6xxx_software_reset(chip);
}
+enum mv88e6xxx_devlink_param_id {
+ MV88E6XXX_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH,
+};
+
+static int mv88e6xxx_devlink_param_get(struct dsa_switch *ds, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+
+ switch (id) {
+ case MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH:
+ err = mv88e6xxx_atu_get_hash(chip, &ctx->val.vu8);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+static int mv88e6xxx_devlink_param_set(struct dsa_switch *ds, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+
+ switch (id) {
+ case MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH:
+ err = mv88e6xxx_atu_set_hash(chip, ctx->val.vu8);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+static const struct devlink_param mv88e6xxx_devlink_params[] = {
+ DSA_DEVLINK_PARAM_DRIVER(MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH,
+ "ATU_hash", DEVLINK_PARAM_TYPE_U8,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME)),
+};
+
+static int mv88e6xxx_setup_devlink_params(struct dsa_switch *ds)
+{
+ return dsa_devlink_params_register(ds, mv88e6xxx_devlink_params,
+ ARRAY_SIZE(mv88e6xxx_devlink_params));
+}
+
+static void mv88e6xxx_teardown_devlink_params(struct dsa_switch *ds)
+{
+ dsa_devlink_params_unregister(ds, mv88e6xxx_devlink_params,
+ ARRAY_SIZE(mv88e6xxx_devlink_params));
+}
+
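For illustration, a sketch (not part of the patch) of how a value round-trips through the new callbacks: the devlink core hands the driver a devlink_param_gset_ctx, the driver fills ctx->val.vu8 under its register lock, and the core copies the result back to userspace.

    /* Usage sketch, assuming the callbacks above. */
    static int example_read_atu_hash(struct dsa_switch *ds, u8 *hash)
    {
            struct devlink_param_gset_ctx ctx = {};
            int err;

            err = mv88e6xxx_devlink_param_get(ds,
                                              MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH,
                                              &ctx);
            if (err)
                    return err;

            *hash = ctx.val.vu8;
            return 0;
    }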
+enum mv88e6xxx_devlink_resource_id {
+ MV88E6XXX_RESOURCE_ID_ATU,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_0,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_1,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_2,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_3,
+};
+
+static u64 mv88e6xxx_devlink_atu_bin_get(struct mv88e6xxx_chip *chip,
+ u16 bin)
+{
+ u16 occupancy = 0;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+
+ err = mv88e6xxx_g2_atu_stats_set(chip, MV88E6XXX_G2_ATU_STATS_MODE_ALL,
+ bin);
+ if (err) {
+ dev_err(chip->dev, "failed to set ATU stats kind/bin\n");
+ goto unlock;
+ }
+
+ err = mv88e6xxx_g1_atu_get_next(chip, 0);
+ if (err) {
+ dev_err(chip->dev, "failed to perform ATU get next\n");
+ goto unlock;
+ }
+
+ err = mv88e6xxx_g2_atu_stats_get(chip, &occupancy);
+ if (err) {
+ dev_err(chip->dev, "failed to get ATU stats\n");
+ goto unlock;
+ }
+
+unlock:
+ mv88e6xxx_reg_unlock(chip);
+
+ return occupancy;
+}
+
+static u64 mv88e6xxx_devlink_atu_bin_0_get(void *priv)
+{
+ struct mv88e6xxx_chip *chip = priv;
+
+ return mv88e6xxx_devlink_atu_bin_get(chip,
+ MV88E6XXX_G2_ATU_STATS_BIN_0);
+}
+
+static u64 mv88e6xxx_devlink_atu_bin_1_get(void *priv)
+{
+ struct mv88e6xxx_chip *chip = priv;
+
+ return mv88e6xxx_devlink_atu_bin_get(chip,
+ MV88E6XXX_G2_ATU_STATS_BIN_1);
+}
+
+static u64 mv88e6xxx_devlink_atu_bin_2_get(void *priv)
+{
+ struct mv88e6xxx_chip *chip = priv;
+
+ return mv88e6xxx_devlink_atu_bin_get(chip,
+ MV88E6XXX_G2_ATU_STATS_BIN_2);
+}
+
+static u64 mv88e6xxx_devlink_atu_bin_3_get(void *priv)
+{
+ struct mv88e6xxx_chip *chip = priv;
+
+ return mv88e6xxx_devlink_atu_bin_get(chip,
+ MV88E6XXX_G2_ATU_STATS_BIN_3);
+}
+
+static u64 mv88e6xxx_devlink_atu_get(void *priv)
+{
+ return mv88e6xxx_devlink_atu_bin_0_get(priv) +
+ mv88e6xxx_devlink_atu_bin_1_get(priv) +
+ mv88e6xxx_devlink_atu_bin_2_get(priv) +
+ mv88e6xxx_devlink_atu_bin_3_get(priv);
+}
+
+static int mv88e6xxx_setup_devlink_resources(struct dsa_switch *ds)
+{
+ struct devlink_resource_size_params size_params;
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ devlink_resource_size_params_init(&size_params,
+ mv88e6xxx_num_macs(chip),
+ mv88e6xxx_num_macs(chip),
+ 1, DEVLINK_RESOURCE_UNIT_ENTRY);
+
+ err = dsa_devlink_resource_register(ds, "ATU",
+ mv88e6xxx_num_macs(chip),
+ MV88E6XXX_RESOURCE_ID_ATU,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &size_params);
+ if (err)
+ goto out;
+
+ devlink_resource_size_params_init(&size_params,
+ mv88e6xxx_num_macs(chip) / 4,
+ mv88e6xxx_num_macs(chip) / 4,
+ 1, DEVLINK_RESOURCE_UNIT_ENTRY);
+
+ err = dsa_devlink_resource_register(ds, "ATU_bin_0",
+ mv88e6xxx_num_macs(chip) / 4,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_0,
+ MV88E6XXX_RESOURCE_ID_ATU,
+ &size_params);
+ if (err)
+ goto out;
+
+ err = dsa_devlink_resource_register(ds, "ATU_bin_1",
+ mv88e6xxx_num_macs(chip) / 4,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_1,
+ MV88E6XXX_RESOURCE_ID_ATU,
+ &size_params);
+ if (err)
+ goto out;
+
+ err = dsa_devlink_resource_register(ds, "ATU_bin_2",
+ mv88e6xxx_num_macs(chip) / 4,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_2,
+ MV88E6XXX_RESOURCE_ID_ATU,
+ &size_params);
+ if (err)
+ goto out;
+
+ err = dsa_devlink_resource_register(ds, "ATU_bin_3",
+ mv88e6xxx_num_macs(chip) / 4,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_3,
+ MV88E6XXX_RESOURCE_ID_ATU,
+ &size_params);
+ if (err)
+ goto out;
+
+ dsa_devlink_resource_occ_get_register(ds,
+ MV88E6XXX_RESOURCE_ID_ATU,
+ mv88e6xxx_devlink_atu_get,
+ chip);
+
+ dsa_devlink_resource_occ_get_register(ds,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_0,
+ mv88e6xxx_devlink_atu_bin_0_get,
+ chip);
+
+ dsa_devlink_resource_occ_get_register(ds,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_1,
+ mv88e6xxx_devlink_atu_bin_1_get,
+ chip);
+
+ dsa_devlink_resource_occ_get_register(ds,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_2,
+ mv88e6xxx_devlink_atu_bin_2_get,
+ chip);
+
+ dsa_devlink_resource_occ_get_register(ds,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_3,
+ mv88e6xxx_devlink_atu_bin_3_get,
+ chip);
+
+ return 0;
+
+out:
+ dsa_devlink_resources_unregister(ds);
+ return err;
+}
+
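Taken together, the registrations above describe a resource tree of this shape (sizes from mv88e6xxx_num_macs(); layout assumes devlink's usual parent/child reporting):

    ATU (num_macs entries; occupancy reported as the sum of the four bins)
        ATU_bin_0 (num_macs / 4)
        ATU_bin_1 (num_macs / 4)
        ATU_bin_2 (num_macs / 4)
        ATU_bin_3 (num_macs / 4)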
+static void mv88e6xxx_teardown(struct dsa_switch *ds)
+{
+ mv88e6xxx_teardown_devlink_params(ds);
+ dsa_devlink_resources_unregister(ds);
+}
+
static int mv88e6xxx_setup(struct dsa_switch *ds)
{
struct mv88e6xxx_chip *chip = ds->priv;
@@ -2757,6 +3011,22 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
unlock:
mv88e6xxx_reg_unlock(chip);
+ if (err)
+ return err;
+
+ /* These have to be called without holding the register lock, since
+ * they take the devlink lock, and we later take the two locks in
+ * reverse order when getting/setting parameters or reading
+ * resource occupancy.
+ */
+ err = mv88e6xxx_setup_devlink_resources(ds);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_setup_devlink_params(ds);
+ if (err)
+ dsa_devlink_resources_unregister(ds);
+
return err;
}
@@ -3117,6 +3387,8 @@ static const struct mv88e6xxx_ops mv88e6123_ops = {
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.phylink_validate = mv88e6185_phylink_validate,
@@ -3246,6 +3518,8 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.avb_ops = &mv88e6165_avb_ops,
@@ -3280,6 +3554,8 @@ static const struct mv88e6xxx_ops mv88e6165_ops = {
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.avb_ops = &mv88e6165_avb_ops,
@@ -3322,6 +3598,8 @@ static const struct mv88e6xxx_ops mv88e6171_ops = {
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.phylink_validate = mv88e6185_phylink_validate,
@@ -3366,6 +3644,8 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6352_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_get_lane = mv88e6352_serdes_get_lane,
@@ -3409,6 +3689,8 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.phylink_validate = mv88e6185_phylink_validate,
@@ -3453,6 +3735,8 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6352_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_get_lane = mv88e6352_serdes_get_lane,
@@ -3538,6 +3822,8 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
@@ -3587,6 +3873,8 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
@@ -3635,6 +3923,8 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
@@ -3686,6 +3976,8 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6352_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_get_lane = mv88e6352_serdes_get_lane,
@@ -3777,6 +4069,8 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
@@ -3963,6 +4257,8 @@ static const struct mv88e6xxx_ops mv88e6350_ops = {
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.phylink_validate = mv88e6185_phylink_validate,
@@ -4003,6 +4299,8 @@ static const struct mv88e6xxx_ops mv88e6351_ops = {
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.avb_ops = &mv88e6352_avb_ops,
@@ -4049,6 +4347,8 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6352_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_get_lane = mv88e6352_serdes_get_lane,
@@ -4105,6 +4405,8 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
@@ -4158,6 +4460,8 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
@@ -4177,6 +4481,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6097,
.name = "Marvell 88E6085",
.num_databases = 4096,
+ .num_macs = 8192,
.num_ports = 10,
.num_internal_phys = 5,
.max_vid = 4095,
@@ -4199,6 +4504,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6095,
.name = "Marvell 88E6095/88E6095F",
.num_databases = 256,
+ .num_macs = 8192,
.num_ports = 11,
.num_internal_phys = 0,
.max_vid = 4095,
@@ -4219,6 +4525,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6097,
.name = "Marvell 88E6097/88E6097F",
.num_databases = 4096,
+ .num_macs = 8192,
.num_ports = 11,
.num_internal_phys = 8,
.max_vid = 4095,
@@ -4241,6 +4548,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6165,
.name = "Marvell 88E6123",
.num_databases = 4096,
+ .num_macs = 1024,
.num_ports = 3,
.num_internal_phys = 5,
.max_vid = 4095,
@@ -4263,6 +4571,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6185,
.name = "Marvell 88E6131",
.num_databases = 256,
+ .num_macs = 8192,
.num_ports = 8,
.num_internal_phys = 0,
.max_vid = 4095,
@@ -4283,6 +4592,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6341,
.name = "Marvell 88E6141",
.num_databases = 4096,
+ .num_macs = 2048,
.num_ports = 6,
.num_internal_phys = 5,
.num_gpio = 11,
@@ -4306,6 +4616,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6165,
.name = "Marvell 88E6161",
.num_databases = 4096,
+ .num_macs = 1024,
.num_ports = 6,
.num_internal_phys = 5,
.max_vid = 4095,
@@ -4329,6 +4640,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6165,
.name = "Marvell 88E6165",
.num_databases = 4096,
+ .num_macs = 8192,
.num_ports = 6,
.num_internal_phys = 0,
.max_vid = 4095,
@@ -4352,6 +4664,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6351,
.name = "Marvell 88E6171",
.num_databases = 4096,
+ .num_macs = 8192,
.num_ports = 7,
.num_internal_phys = 5,
.max_vid = 4095,
@@ -4374,6 +4687,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6352,
.name = "Marvell 88E6172",
.num_databases = 4096,
+ .num_macs = 8192,
.num_ports = 7,
.num_internal_phys = 5,
.num_gpio = 15,
@@ -4397,6 +4711,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6351,
.name = "Marvell 88E6175",
.num_databases = 4096,
+ .num_macs = 8192,
.num_ports = 7,
.num_internal_phys = 5,
.max_vid = 4095,
@@ -4419,6 +4734,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6352,
.name = "Marvell 88E6176",
.num_databases = 4096,
+ .num_macs = 8192,
.num_ports = 7,
.num_internal_phys = 5,
.num_gpio = 15,
@@ -4442,6 +4758,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6185,
.name = "Marvell 88E6185",
.num_databases = 256,
+ .num_macs = 8192,
.num_ports = 10,
.num_internal_phys = 0,
.max_vid = 4095,
@@ -4462,6 +4779,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6390,
.name = "Marvell 88E6190",
.num_databases = 4096,
+ .num_macs = 16384,
.num_ports = 11, /* 10 + Z80 */
.num_internal_phys = 9,
.num_gpio = 16,
@@ -4485,6 +4803,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6390,
.name = "Marvell 88E6190X",
.num_databases = 4096,
+ .num_macs = 16384,
.num_ports = 11, /* 10 + Z80 */
.num_internal_phys = 9,
.num_gpio = 16,
@@ -4508,6 +4827,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6390,
.name = "Marvell 88E6191",
.num_databases = 4096,
+ .num_macs = 16384,
.num_ports = 11, /* 10 + Z80 */
.num_internal_phys = 9,
.max_vid = 8191,
@@ -4558,6 +4878,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6352,
.name = "Marvell 88E6240",
.num_databases = 4096,
+ .num_macs = 8192,
.num_ports = 7,
.num_internal_phys = 5,
.num_gpio = 15,
@@ -4628,6 +4949,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6320,
.name = "Marvell 88E6320",
.num_databases = 4096,
+ .num_macs = 8192,
.num_ports = 7,
.num_internal_phys = 5,
.num_gpio = 15,
@@ -4652,6 +4974,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6320,
.name = "Marvell 88E6321",
.num_databases = 4096,
+ .num_macs = 8192,
.num_ports = 7,
.num_internal_phys = 5,
.num_gpio = 15,
@@ -4675,6 +4998,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6341,
.name = "Marvell 88E6341",
.num_databases = 4096,
+ .num_macs = 2048,
.num_internal_phys = 5,
.num_ports = 6,
.num_gpio = 11,
@@ -4699,6 +5023,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6351,
.name = "Marvell 88E6350",
.num_databases = 4096,
+ .num_macs = 8192,
.num_ports = 7,
.num_internal_phys = 5,
.max_vid = 4095,
@@ -4721,6 +5046,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6351,
.name = "Marvell 88E6351",
.num_databases = 4096,
+ .num_macs = 8192,
.num_ports = 7,
.num_internal_phys = 5,
.max_vid = 4095,
@@ -4743,6 +5069,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6352,
.name = "Marvell 88E6352",
.num_databases = 4096,
+ .num_macs = 8192,
.num_ports = 7,
.num_internal_phys = 5,
.num_gpio = 15,
@@ -4766,6 +5093,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6390,
.name = "Marvell 88E6390",
.num_databases = 4096,
+ .num_macs = 16384,
.num_ports = 11, /* 10 + Z80 */
.num_internal_phys = 9,
.num_gpio = 16,
@@ -4789,6 +5117,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6390,
.name = "Marvell 88E6390X",
.num_databases = 4096,
+ .num_macs = 16384,
.num_ports = 11, /* 10 + Z80 */
.num_internal_phys = 9,
.num_gpio = 16,
@@ -4933,6 +5262,7 @@ static int mv88e6xxx_port_egress_floods(struct dsa_switch *ds, int port,
static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.get_tag_protocol = mv88e6xxx_get_tag_protocol,
.setup = mv88e6xxx_setup,
+ .teardown = mv88e6xxx_teardown,
.phylink_validate = mv88e6xxx_validate,
.phylink_mac_link_state = mv88e6xxx_link_state,
.phylink_mac_config = mv88e6xxx_mac_config,
@@ -4975,6 +5305,8 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.port_txtstamp = mv88e6xxx_port_txtstamp,
.port_rxtstamp = mv88e6xxx_port_rxtstamp,
.get_ts_info = mv88e6xxx_get_ts_info,
+ .devlink_param_get = mv88e6xxx_devlink_param_get,
+ .devlink_param_set = mv88e6xxx_devlink_param_set,
};
static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip)
@@ -4982,10 +5314,12 @@ static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip)
struct device *dev = chip->dev;
struct dsa_switch *ds;
- ds = dsa_switch_alloc(dev, mv88e6xxx_num_ports(chip));
+ ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
if (!ds)
return -ENOMEM;
+ ds->dev = dev;
+ ds->num_ports = mv88e6xxx_num_ports(chip);
ds->priv = chip;
- ds->dev = dev;
ds->ops = &mv88e6xxx_switch_ops;
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index e9b1a1ac9a8e..65ce09bdcbcf 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -94,6 +94,7 @@ struct mv88e6xxx_info {
u16 prod_num;
const char *name;
unsigned int num_databases;
+ unsigned int num_macs;
unsigned int num_ports;
unsigned int num_internal_phys;
unsigned int num_gpio;
@@ -497,6 +498,10 @@ struct mv88e6xxx_ops {
int (*serdes_get_stats)(struct mv88e6xxx_chip *chip, int port,
uint64_t *data);
+ /* Address Translation Unit operations */
+ int (*atu_get_hash)(struct mv88e6xxx_chip *chip, u8 *hash);
+ int (*atu_set_hash)(struct mv88e6xxx_chip *chip, u8 hash);
+
/* VLAN Translation Unit operations */
int (*vtu_getnext)(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry);
@@ -609,6 +614,11 @@ static inline unsigned int mv88e6xxx_num_databases(struct mv88e6xxx_chip *chip)
return chip->info->num_databases;
}
+static inline unsigned int mv88e6xxx_num_macs(struct mv88e6xxx_chip *chip)
+{
+ return chip->info->num_macs;
+}
+
static inline unsigned int mv88e6xxx_num_ports(struct mv88e6xxx_chip *chip)
{
return chip->info->num_ports;
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
index 0870fcc8bfc8..342172275841 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.h
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -109,6 +109,7 @@
/* Offset 0x0A: ATU Control Register */
#define MV88E6XXX_G1_ATU_CTL 0x0a
#define MV88E6XXX_G1_ATU_CTL_LEARN2ALL 0x0008
+#define MV88E6161_G1_ATU_CTL_HASH_MASK 0x0003
/* Offset 0x0B: ATU Operation Register */
#define MV88E6XXX_G1_ATU_OP 0x0b
@@ -318,6 +319,8 @@ int mv88e6xxx_g1_atu_remove(struct mv88e6xxx_chip *chip, u16 fid, int port,
bool all);
int mv88e6xxx_g1_atu_prob_irq_setup(struct mv88e6xxx_chip *chip);
void mv88e6xxx_g1_atu_prob_irq_free(struct mv88e6xxx_chip *chip);
+int mv88e6165_g1_atu_get_hash(struct mv88e6xxx_chip *chip, u8 *hash);
+int mv88e6165_g1_atu_set_hash(struct mv88e6xxx_chip *chip, u8 hash);
int mv88e6185_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry);
@@ -338,5 +341,6 @@ int mv88e6390_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
int mv88e6xxx_g1_vtu_flush(struct mv88e6xxx_chip *chip);
int mv88e6xxx_g1_vtu_prob_irq_setup(struct mv88e6xxx_chip *chip);
void mv88e6xxx_g1_vtu_prob_irq_free(struct mv88e6xxx_chip *chip);
+int mv88e6xxx_g1_atu_get_next(struct mv88e6xxx_chip *chip, u16 fid);
#endif /* _MV88E6XXX_GLOBAL1_H */
diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c
index 792a96ef418f..bdcd25560dd2 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_atu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c
@@ -73,6 +73,38 @@ int mv88e6xxx_g1_atu_set_age_time(struct mv88e6xxx_chip *chip,
return 0;
}
+int mv88e6165_g1_atu_get_hash(struct mv88e6xxx_chip *chip, u8 *hash)
+{
+ int err;
+ u16 val;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_CTL, &val);
+ if (err)
+ return err;
+
+ *hash = val & MV88E6161_G1_ATU_CTL_HASH_MASK;
+
+ return 0;
+}
+
+int mv88e6165_g1_atu_set_hash(struct mv88e6xxx_chip *chip, u8 hash)
+{
+ int err;
+ u16 val;
+
+ if (hash & ~MV88E6161_G1_ATU_CTL_HASH_MASK)
+ return -EINVAL;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_CTL, &val);
+ if (err)
+ return err;
+
+ val &= ~MV88E6161_G1_ATU_CTL_HASH_MASK;
+ val |= hash;
+
+ return mv88e6xxx_g1_write(chip, MV88E6XXX_G1_ATU_CTL, val);
+}
+
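Since MV88E6161_G1_ATU_CTL_HASH_MASK is 0x0003, only hash selectors 0-3 are representable: mv88e6165_g1_atu_set_hash(chip, 4) returns -EINVAL before touching the hardware, while values 0-3 are read-modify-written into the low two bits of the ATU Control register.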
/* Offset 0x0B: ATU Operation Register */
static int mv88e6xxx_g1_atu_op_wait(struct mv88e6xxx_chip *chip)
@@ -122,6 +154,11 @@ static int mv88e6xxx_g1_atu_op(struct mv88e6xxx_chip *chip, u16 fid, u16 op)
return mv88e6xxx_g1_atu_op_wait(chip);
}
+int mv88e6xxx_g1_atu_get_next(struct mv88e6xxx_chip *chip, u16 fid)
+{
+ return mv88e6xxx_g1_atu_op(chip, fid, MV88E6XXX_G1_ATU_OP_GET_NEXT_DB);
+}
+
/* Offset 0x0C: ATU Data Register */
static int mv88e6xxx_g1_atu_data_read(struct mv88e6xxx_chip *chip,
diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c
index bdbb72fc20ed..87bfe7c8c9cd 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.c
+++ b/drivers/net/dsa/mv88e6xxx/global2.c
@@ -280,6 +280,19 @@ int mv88e6xxx_g2_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr)
return err;
}
+/* Offset 0x0E: ATU Statistics */
+
+int mv88e6xxx_g2_atu_stats_set(struct mv88e6xxx_chip *chip, u16 kind, u16 bin)
+{
+ return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_ATU_STATS,
+ kind | bin);
+}
+
+int mv88e6xxx_g2_atu_stats_get(struct mv88e6xxx_chip *chip, u16 *stats)
+{
+ return mv88e6xxx_g2_read(chip, MV88E6XXX_G2_ATU_STATS, stats);
+}
+
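As a worked example of the kind | bin encoding (constants defined in global2.h below), selecting bin 2 in the all-entries mode writes (0x0 << 12) | (0x2 << 14) = 0x8000 to the ATU Stats register; a usage sketch with an illustrative function name:

    static int example_select_atu_bin2(struct mv88e6xxx_chip *chip)
    {
            /* (0x0 << 12) | (0x2 << 14) = 0x8000 */
            return mv88e6xxx_g2_atu_stats_set(chip,
                                              MV88E6XXX_G2_ATU_STATS_MODE_ALL,
                                              MV88E6XXX_G2_ATU_STATS_BIN_2);
    }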
/* Offset 0x0F: Priority Override Table */
static int mv88e6xxx_g2_pot_write(struct mv88e6xxx_chip *chip, int pointer,
diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
index 42da4bca73e8..d80ad203d126 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.h
+++ b/drivers/net/dsa/mv88e6xxx/global2.h
@@ -113,7 +113,16 @@
#define MV88E6XXX_G2_SWITCH_MAC_DATA_MASK 0x00ff
/* Offset 0x0E: ATU Stats Register */
-#define MV88E6XXX_G2_ATU_STATS 0x0e
+#define MV88E6XXX_G2_ATU_STATS 0x0e
+#define MV88E6XXX_G2_ATU_STATS_BIN_0 (0x0 << 14)
+#define MV88E6XXX_G2_ATU_STATS_BIN_1 (0x1 << 14)
+#define MV88E6XXX_G2_ATU_STATS_BIN_2 (0x2 << 14)
+#define MV88E6XXX_G2_ATU_STATS_BIN_3 (0x3 << 14)
+#define MV88E6XXX_G2_ATU_STATS_MODE_ALL (0x0 << 12)
+#define MV88E6XXX_G2_ATU_STATS_MODE_ALL_DYNAMIC (0x1 << 12)
+#define MV88E6XXX_G2_ATU_STATS_MODE_FID_ALL (0x2 << 12)
+#define MV88E6XXX_G2_ATU_STATS_MODE_FID_ALL_DYNAMIC (0x3 << 12)
+#define MV88E6XXX_G2_ATU_STATS_MASK 0x0fff
/* Offset 0x0F: Priority Override Table */
#define MV88E6XXX_G2_PRIO_OVERRIDE 0x0f
@@ -353,6 +362,8 @@ extern const struct mv88e6xxx_gpio_ops mv88e6352_gpio_ops;
int mv88e6xxx_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
bool external);
+int mv88e6xxx_g2_atu_stats_set(struct mv88e6xxx_chip *chip, u16 kind, u16 bin);
+int mv88e6xxx_g2_atu_stats_get(struct mv88e6xxx_chip *chip, u16 *stats);
#else /* !CONFIG_NET_DSA_MV88E6XXX_GLOBAL2 */
@@ -515,6 +526,17 @@ static inline int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip,
return -EOPNOTSUPP;
}
+static inline int mv88e6xxx_g2_atu_stats_set(struct mv88e6xxx_chip *chip,
+ u16 kind, u16 bin)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int mv88e6xxx_g2_atu_stats_get(struct mv88e6xxx_chip *chip,
+ u16 *stats)
+{
+ return -EOPNOTSUPP;
+}
+
#endif /* CONFIG_NET_DSA_MV88E6XXX_GLOBAL2 */
#endif /* _MV88E6XXX_GLOBAL2_H */
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index 684aa51684db..e548289df31e 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -639,7 +639,8 @@ static int
qca8k_setup(struct dsa_switch *ds)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- int ret, i, phy_mode = -1;
+ phy_interface_t phy_mode = PHY_INTERFACE_MODE_NA;
+ int ret, i;
u32 mask;
/* Make sure that port 0 is the cpu port */
@@ -661,10 +662,10 @@ qca8k_setup(struct dsa_switch *ds)
return ret;
/* Initialize CPU port pad mode (xMII type, delays...) */
- phy_mode = of_get_phy_mode(ds->ports[QCA8K_CPU_PORT].dn);
- if (phy_mode < 0) {
+ ret = of_get_phy_mode(dsa_to_port(ds, QCA8K_CPU_PORT)->dn, &phy_mode);
+ if (ret) {
pr_err("Can't find phy-mode for master device\n");
- return phy_mode;
+ return ret;
}
ret = qca8k_set_pad_ctrl(priv, QCA8K_CPU_PORT, phy_mode);
if (ret < 0)
@@ -705,7 +706,7 @@ qca8k_setup(struct dsa_switch *ds)
BIT(0) << QCA8K_GLOBAL_FW_CTRL1_UC_DP_S);
/* Setup connection between CPU port & user ports */
- for (i = 0; i < DSA_MAX_PORTS; i++) {
+ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
/* CPU port gets connected to all user ports of the switch */
if (dsa_is_cpu_port(ds, i)) {
qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(QCA8K_CPU_PORT),
@@ -1077,10 +1078,13 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
if (id != QCA8K_ID_QCA8337)
return -ENODEV;
- priv->ds = dsa_switch_alloc(&mdiodev->dev, DSA_MAX_PORTS);
+ priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds),
+ GFP_KERNEL);
if (!priv->ds)
return -ENOMEM;
+ priv->ds->dev = &mdiodev->dev;
+ priv->ds->num_ports = QCA8K_NUM_PORTS;
priv->ds->priv = priv;
priv->ops = qca8k_switch_ops;
priv->ds->ops = &priv->ops;
diff --git a/drivers/net/dsa/realtek-smi-core.c b/drivers/net/dsa/realtek-smi-core.c
index dc0509c02d29..fae188c60191 100644
--- a/drivers/net/dsa/realtek-smi-core.c
+++ b/drivers/net/dsa/realtek-smi-core.c
@@ -444,9 +444,12 @@ static int realtek_smi_probe(struct platform_device *pdev)
return ret;
}
- smi->ds = dsa_switch_alloc(dev, smi->num_ports);
+ smi->ds = devm_kzalloc(dev, sizeof(*smi->ds), GFP_KERNEL);
if (!smi->ds)
return -ENOMEM;
+
+ smi->ds->dev = dev;
+ smi->ds->num_ports = smi->num_ports;
smi->ds->priv = smi;
smi->ds->ops = var->ds_ops;
diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c
index ca3d17e43ed8..ac88caca5ad4 100644
--- a/drivers/net/dsa/rtl8366.c
+++ b/drivers/net/dsa/rtl8366.c
@@ -339,10 +339,12 @@ int rtl8366_vlan_prepare(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
struct realtek_smi *smi = ds->priv;
+ u16 vid;
int ret;
- if (!smi->ops->is_vlan_valid(smi, port))
- return -EINVAL;
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
+ if (!smi->ops->is_vlan_valid(smi, vid))
+ return -EINVAL;
dev_info(smi->dev, "prepare VLANs %04x..%04x\n",
vlan->vid_begin, vlan->vid_end);
@@ -370,8 +372,9 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port,
u16 vid;
int ret;
- if (!smi->ops->is_vlan_valid(smi, port))
- return;
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
+ if (!smi->ops->is_vlan_valid(smi, vid))
+ return;
dev_info(smi->dev, "add VLAN on port %d, %s, %s\n",
port,
diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/rtl8366rb.c
index a268085ffad2..f5cc8b0a7c74 100644
--- a/drivers/net/dsa/rtl8366rb.c
+++ b/drivers/net/dsa/rtl8366rb.c
@@ -507,7 +507,8 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
irq = of_irq_get(intc, 0);
if (irq <= 0) {
dev_err(smi->dev, "failed to get parent IRQ\n");
- return irq ? irq : -EINVAL;
+ ret = irq ? irq : -EINVAL;
+ goto out_put_node;
}
/* This clears the IRQ status register */
@@ -515,7 +516,7 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
&val);
if (ret) {
dev_err(smi->dev, "can't read interrupt status\n");
- return ret;
+ goto out_put_node;
}
/* Fetch IRQ edge information from the descriptor */
@@ -537,7 +538,7 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
val);
if (ret) {
dev_err(smi->dev, "could not configure IRQ polarity\n");
- return ret;
+ goto out_put_node;
}
ret = devm_request_threaded_irq(smi->dev, irq, NULL,
@@ -545,7 +546,7 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
"RTL8366RB", smi);
if (ret) {
dev_err(smi->dev, "unable to request irq: %d\n", ret);
- return ret;
+ goto out_put_node;
}
smi->irqdomain = irq_domain_add_linear(intc,
RTL8366RB_NUM_INTERRUPT,
@@ -553,12 +554,15 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
smi);
if (!smi->irqdomain) {
dev_err(smi->dev, "failed to create IRQ domain\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_put_node;
}
for (i = 0; i < smi->num_ports; i++)
irq_set_parent(irq_create_mapping(smi->irqdomain, i), irq);
- return 0;
+out_put_node:
+ of_node_put(intc);
+ return ret;
}
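With this change, every exit path of rtl8366rb_setup_cascaded_irq(), success or failure, funnels through out_put_node, so the reference held on the interrupt controller node (intc) is always dropped.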
static int rtl8366rb_set_addr(struct realtek_smi *smi)
diff --git a/drivers/net/dsa/sja1105/Kconfig b/drivers/net/dsa/sja1105/Kconfig
index f40b248f0b23..ffac0ea4e8d5 100644
--- a/drivers/net/dsa/sja1105/Kconfig
+++ b/drivers/net/dsa/sja1105/Kconfig
@@ -26,8 +26,8 @@ config NET_DSA_SJA1105_PTP
config NET_DSA_SJA1105_TAS
bool "Support for the Time-Aware Scheduler on NXP SJA1105"
- depends on NET_DSA_SJA1105
- depends on NET_SCH_TAPRIO
+ depends on NET_DSA_SJA1105 && NET_SCH_TAPRIO
+ depends on NET_SCH_TAPRIO=y || NET_DSA_SJA1105=m
help
This enables support for the TTEthernet-based egress scheduling
engine in the SJA1105 DSA driver, which is controlled using a
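The second depends line rules out the one combination that cannot link: NET_SCH_TAPRIO=m with NET_DSA_SJA1105=y, since built-in code cannot call into a module.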
diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h
index e53e494c22e0..91063ed3ef1b 100644
--- a/drivers/net/dsa/sja1105/sja1105.h
+++ b/drivers/net/dsa/sja1105/sja1105.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
* Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
*/
#ifndef _SJA1105_H
@@ -21,6 +21,7 @@
#define SJA1105_AGEING_TIME_MS(ms) ((ms) / 10)
#include "sja1105_tas.h"
+#include "sja1105_ptp.h"
/* Keeps the different addresses between E/T and P/Q/R/S */
struct sja1105_regs {
@@ -32,9 +33,8 @@ struct sja1105_regs {
u64 config;
u64 rmii_pll1;
u64 ptp_control;
- u64 ptpclk;
+ u64 ptpclkval;
u64 ptpclkrate;
- u64 ptptsclk;
u64 ptpegr_ts[SJA1105_NUM_PORTS];
u64 pad_mii_tx[SJA1105_NUM_PORTS];
u64 pad_mii_id[SJA1105_NUM_PORTS];
@@ -71,7 +71,8 @@ struct sja1105_info {
const struct sja1105_dynamic_table_ops *dyn_ops;
const struct sja1105_table_ops *static_ops;
const struct sja1105_regs *regs;
- int (*ptp_cmd)(const void *ctx, const void *data);
+ int (*ptp_cmd)(const struct dsa_switch *ds,
+ const struct sja1105_ptp_cmd *cmd);
int (*reset_cmd)(const void *ctx, const void *data);
int (*setup_rgmii_delay)(const void *ctx, int port);
/* Prototypes from include/net/dsa.h */
@@ -91,26 +92,16 @@ struct sja1105_private {
struct spi_device *spidev;
struct dsa_switch *ds;
struct sja1105_port ports[SJA1105_NUM_PORTS];
- struct ptp_clock_info ptp_caps;
- struct ptp_clock *clock;
- /* The cycle counter translates the PTP timestamps (based on
- * a free-running counter) into a software time domain.
- */
- struct cyclecounter tstamp_cc;
- struct timecounter tstamp_tc;
- struct delayed_work refresh_work;
- /* Serializes all operations on the cycle counter */
- struct mutex ptp_lock;
/* Serializes transmission of management frames so that
* the switch doesn't confuse them with one another.
*/
struct mutex mgmt_lock;
struct sja1105_tagger_data tagger_data;
+ struct sja1105_ptp_data ptp_data;
struct sja1105_tas_data tas_data;
};
#include "sja1105_dynamic_config.h"
-#include "sja1105_ptp.h"
struct sja1105_spi_message {
u64 access;
@@ -127,15 +118,13 @@ typedef enum {
int sja1105_static_config_reload(struct sja1105_private *priv);
/* From sja1105_spi.c */
-int sja1105_spi_send_packed_buf(const struct sja1105_private *priv,
- sja1105_spi_rw_mode_t rw, u64 reg_addr,
- void *packed_buf, size_t size_bytes);
-int sja1105_spi_send_int(const struct sja1105_private *priv,
- sja1105_spi_rw_mode_t rw, u64 reg_addr,
- u64 *value, u64 size_bytes);
-int sja1105_spi_send_long_packed_buf(const struct sja1105_private *priv,
- sja1105_spi_rw_mode_t rw, u64 base_addr,
- void *packed_buf, u64 buf_len);
+int sja1105_xfer_buf(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 reg_addr,
+ u8 *buf, size_t len);
+int sja1105_xfer_u32(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 reg_addr, u32 *value);
+int sja1105_xfer_u64(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 reg_addr, u64 *value);
int sja1105_static_config_upload(struct sja1105_private *priv);
int sja1105_inhibit_tx(const struct sja1105_private *priv,
unsigned long port_bitmap, bool tx_inhibited);
diff --git a/drivers/net/dsa/sja1105/sja1105_clocking.c b/drivers/net/dsa/sja1105/sja1105_clocking.c
index 608126a15d72..9082e52b55e9 100644
--- a/drivers/net/dsa/sja1105/sja1105_clocking.c
+++ b/drivers/net/dsa/sja1105/sja1105_clocking.c
@@ -118,9 +118,8 @@ static int sja1105_cgu_idiv_config(struct sja1105_private *priv, int port,
idiv.pd = enabled ? 0 : 1; /* Power down? */
sja1105_cgu_idiv_packing(packed_buf, &idiv, PACK);
- return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
- regs->cgu_idiv[port], packed_buf,
- SJA1105_SIZE_CGU_CMD);
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->cgu_idiv[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
}
static void
@@ -167,9 +166,8 @@ static int sja1105_cgu_mii_tx_clk_config(struct sja1105_private *priv,
mii_tx_clk.pd = 0; /* Power Down off => enabled */
sja1105_cgu_mii_control_packing(packed_buf, &mii_tx_clk, PACK);
- return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
- regs->mii_tx_clk[port], packed_buf,
- SJA1105_SIZE_CGU_CMD);
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->mii_tx_clk[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
}
static int
@@ -192,9 +190,8 @@ sja1105_cgu_mii_rx_clk_config(struct sja1105_private *priv, int port)
mii_rx_clk.pd = 0; /* Power Down off => enabled */
sja1105_cgu_mii_control_packing(packed_buf, &mii_rx_clk, PACK);
- return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
- regs->mii_rx_clk[port], packed_buf,
- SJA1105_SIZE_CGU_CMD);
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->mii_rx_clk[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
}
static int
@@ -217,9 +214,8 @@ sja1105_cgu_mii_ext_tx_clk_config(struct sja1105_private *priv, int port)
mii_ext_tx_clk.pd = 0; /* Power Down off => enabled */
sja1105_cgu_mii_control_packing(packed_buf, &mii_ext_tx_clk, PACK);
- return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
- regs->mii_ext_tx_clk[port],
- packed_buf, SJA1105_SIZE_CGU_CMD);
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->mii_ext_tx_clk[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
}
static int
@@ -242,9 +238,8 @@ sja1105_cgu_mii_ext_rx_clk_config(struct sja1105_private *priv, int port)
mii_ext_rx_clk.pd = 0; /* Power Down off => enabled */
sja1105_cgu_mii_control_packing(packed_buf, &mii_ext_rx_clk, PACK);
- return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
- regs->mii_ext_rx_clk[port],
- packed_buf, SJA1105_SIZE_CGU_CMD);
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->mii_ext_rx_clk[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
}
static int sja1105_mii_clocking_setup(struct sja1105_private *priv, int port,
@@ -337,9 +332,8 @@ static int sja1105_cgu_rgmii_tx_clk_config(struct sja1105_private *priv,
txc.pd = 0;
sja1105_cgu_mii_control_packing(packed_buf, &txc, PACK);
- return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
- regs->rgmii_tx_clk[port],
- packed_buf, SJA1105_SIZE_CGU_CMD);
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->rgmii_tx_clk[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
}
/* AGU */
@@ -383,9 +377,8 @@ static int sja1105_rgmii_cfg_pad_tx_config(struct sja1105_private *priv,
pad_mii_tx.clk_ipud = 2; /* TX_CLK input stage (default) */
sja1105_cfg_pad_mii_tx_packing(packed_buf, &pad_mii_tx, PACK);
- return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
- regs->pad_mii_tx[port],
- packed_buf, SJA1105_SIZE_CGU_CMD);
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_tx[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
}
static void
@@ -405,7 +398,7 @@ sja1105_cfg_pad_mii_id_packing(void *buf, struct sja1105_cfg_pad_mii_id *cmd,
}
/* Valid range in degrees is an integer between 73.8 and 101.7 */
-static inline u64 sja1105_rgmii_delay(u64 phase)
+static u64 sja1105_rgmii_delay(u64 phase)
{
/* UM11040.pdf: The delay in degree phase is 73.8 + delay_tune * 0.9.
* To avoid floating point operations we'll multiply by 10
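To make the fixed-point comment concrete: for a requested phase of 90 degrees, delay_tune = (90 * 10 - 738) / 9 = 18. A standalone sketch of that conversion, assuming the UM11040 formula quoted above (delay = 73.8 + delay_tune * 0.9):

    /* Sketch only: map a phase in whole degrees (74..101) to delay_tune,
     * scaled by 10 to stay in integer arithmetic.
     */
    static u64 rgmii_phase_to_delay_tune(u64 phase_degrees)
    {
            return (phase_degrees * 10 - 738) / 9;
    }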
@@ -442,9 +435,8 @@ int sja1105pqrs_setup_rgmii_delay(const void *ctx, int port)
pad_mii_id.txc_pd = 1;
sja1105_cfg_pad_mii_id_packing(packed_buf, &pad_mii_id, PACK);
- rc = sja1105_spi_send_packed_buf(priv, SPI_WRITE,
- regs->pad_mii_id[port],
- packed_buf, SJA1105_SIZE_CGU_CMD);
+ rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_id[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
if (rc < 0)
return rc;
@@ -459,9 +451,8 @@ int sja1105pqrs_setup_rgmii_delay(const void *ctx, int port)
}
sja1105_cfg_pad_mii_id_packing(packed_buf, &pad_mii_id, PACK);
- return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
- regs->pad_mii_id[port],
- packed_buf, SJA1105_SIZE_CGU_CMD);
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_id[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
}
static int sja1105_rgmii_clocking_setup(struct sja1105_private *priv, int port,
@@ -547,9 +538,8 @@ static int sja1105_cgu_rmii_ref_clk_config(struct sja1105_private *priv,
ref_clk.pd = 0; /* Power Down off => enabled */
sja1105_cgu_mii_control_packing(packed_buf, &ref_clk, PACK);
- return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
- regs->rmii_ref_clk[port],
- packed_buf, SJA1105_SIZE_CGU_CMD);
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->rmii_ref_clk[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
}
static int
@@ -565,9 +555,8 @@ sja1105_cgu_rmii_ext_tx_clk_config(struct sja1105_private *priv, int port)
ext_tx_clk.pd = 0; /* Power Down off => enabled */
sja1105_cgu_mii_control_packing(packed_buf, &ext_tx_clk, PACK);
- return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
- regs->rmii_ext_tx_clk[port],
- packed_buf, SJA1105_SIZE_CGU_CMD);
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->rmii_ext_tx_clk[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
}
static int sja1105_cgu_rmii_pll_config(struct sja1105_private *priv)
@@ -595,8 +584,8 @@ static int sja1105_cgu_rmii_pll_config(struct sja1105_private *priv)
pll.pd = 0x1;
sja1105_cgu_pll_control_packing(packed_buf, &pll, PACK);
- rc = sja1105_spi_send_packed_buf(priv, SPI_WRITE, regs->rmii_pll1,
- packed_buf, SJA1105_SIZE_CGU_CMD);
+ rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->rmii_pll1, packed_buf,
+ SJA1105_SIZE_CGU_CMD);
if (rc < 0) {
dev_err(dev, "failed to configure PLL1 for 50MHz\n");
return rc;
@@ -606,8 +595,8 @@ static int sja1105_cgu_rmii_pll_config(struct sja1105_private *priv)
pll.pd = 0x0;
sja1105_cgu_pll_control_packing(packed_buf, &pll, PACK);
- rc = sja1105_spi_send_packed_buf(priv, SPI_WRITE, regs->rmii_pll1,
- packed_buf, SJA1105_SIZE_CGU_CMD);
+ rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->rmii_pll1, packed_buf,
+ SJA1105_SIZE_CGU_CMD);
if (rc < 0) {
dev_err(dev, "failed to enable PLL1\n");
return rc;
diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
index 91da430045ff..25381bd65ed7 100644
--- a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
@@ -686,8 +686,8 @@ int sja1105_dynamic_config_read(struct sja1105_private *priv,
ops->entry_packing(packed_buf, entry, PACK);
/* Send SPI write operation: read config table entry */
- rc = sja1105_spi_send_packed_buf(priv, SPI_WRITE, ops->addr,
- packed_buf, ops->packed_size);
+ rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf,
+ ops->packed_size);
if (rc < 0)
return rc;
@@ -698,8 +698,8 @@ int sja1105_dynamic_config_read(struct sja1105_private *priv,
memset(packed_buf, 0, ops->packed_size);
/* Retrieve the read operation's result */
- rc = sja1105_spi_send_packed_buf(priv, SPI_READ, ops->addr,
- packed_buf, ops->packed_size);
+ rc = sja1105_xfer_buf(priv, SPI_READ, ops->addr, packed_buf,
+ ops->packed_size);
if (rc < 0)
return rc;
@@ -771,8 +771,8 @@ int sja1105_dynamic_config_write(struct sja1105_private *priv,
ops->entry_packing(packed_buf, entry, PACK);
/* Send SPI write operation: read config table entry */
- rc = sja1105_spi_send_packed_buf(priv, SPI_WRITE, ops->addr,
- packed_buf, ops->packed_size);
+ rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf,
+ ops->packed_size);
if (rc < 0)
return rc;
diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.h b/drivers/net/dsa/sja1105/sja1105_dynamic_config.h
index 740dadf43f01..1fc0d13dc623 100644
--- a/drivers/net/dsa/sja1105/sja1105_dynamic_config.h
+++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
*/
#ifndef _SJA1105_DYNAMIC_CONFIG_H
#define _SJA1105_DYNAMIC_CONFIG_H
diff --git a/drivers/net/dsa/sja1105/sja1105_ethtool.c b/drivers/net/dsa/sja1105/sja1105_ethtool.c
index ab581a28cd41..064301cc7d5b 100644
--- a/drivers/net/dsa/sja1105/sja1105_ethtool.c
+++ b/drivers/net/dsa/sja1105/sja1105_ethtool.c
@@ -167,8 +167,8 @@ static int sja1105_port_status_get_mac(struct sja1105_private *priv,
int rc;
/* MAC area */
- rc = sja1105_spi_send_packed_buf(priv, SPI_READ, regs->mac[port],
- packed_buf, SJA1105_SIZE_MAC_AREA);
+ rc = sja1105_xfer_buf(priv, SPI_READ, regs->mac[port], packed_buf,
+ SJA1105_SIZE_MAC_AREA);
if (rc < 0)
return rc;
@@ -185,8 +185,8 @@ static int sja1105_port_status_get_hl1(struct sja1105_private *priv,
u8 packed_buf[SJA1105_SIZE_HL1_AREA] = {0};
int rc;
- rc = sja1105_spi_send_packed_buf(priv, SPI_READ, regs->mac_hl1[port],
- packed_buf, SJA1105_SIZE_HL1_AREA);
+ rc = sja1105_xfer_buf(priv, SPI_READ, regs->mac_hl1[port], packed_buf,
+ SJA1105_SIZE_HL1_AREA);
if (rc < 0)
return rc;
@@ -203,8 +203,8 @@ static int sja1105_port_status_get_hl2(struct sja1105_private *priv,
u8 packed_buf[SJA1105_SIZE_QLEVEL_AREA] = {0};
int rc;
- rc = sja1105_spi_send_packed_buf(priv, SPI_READ, regs->mac_hl2[port],
- packed_buf, SJA1105_SIZE_HL2_AREA);
+ rc = sja1105_xfer_buf(priv, SPI_READ, regs->mac_hl2[port], packed_buf,
+ SJA1105_SIZE_HL2_AREA);
if (rc < 0)
return rc;
@@ -215,8 +215,8 @@ static int sja1105_port_status_get_hl2(struct sja1105_private *priv,
priv->info->device_id == SJA1105T_DEVICE_ID)
return 0;
- rc = sja1105_spi_send_packed_buf(priv, SPI_READ, regs->qlevel[port],
- packed_buf, SJA1105_SIZE_QLEVEL_AREA);
+ rc = sja1105_xfer_buf(priv, SPI_READ, regs->qlevel[port], packed_buf,
+ SJA1105_SIZE_QLEVEL_AREA);
if (rc < 0)
return rc;
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index b9def744bcb3..d5dfda335aa1 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -382,8 +382,8 @@ static int sja1105_init_l2_forwarding_params(struct sja1105_private *priv)
static int sja1105_init_general_params(struct sja1105_private *priv)
{
struct sja1105_general_params_entry default_general_params = {
- /* Disallow dynamic changing of the mirror port */
- .mirr_ptacu = 0,
+ /* Allow dynamic changing of the mirror port */
+ .mirr_ptacu = true,
.switchid = priv->ds->index,
/* Priority queue for link-local management frames
* (both ingress to and egress from CPU - PTP, STP etc)
@@ -403,8 +403,8 @@ static int sja1105_init_general_params(struct sja1105_private *priv)
* by installing a temporary 'management route'
*/
.host_port = dsa_upstream_port(priv->ds, 0),
- /* Same as host port */
- .mirr_port = dsa_upstream_port(priv->ds, 0),
+ /* Default to an invalid value */
+ .mirr_port = SJA1105_NUM_PORTS,
/* Link-local traffic received on casc_port will be forwarded
* to host_port without embedding the source port and device ID
* info in the destination MAC address (presumably because it
@@ -458,9 +458,8 @@ static int sja1105_init_general_params(struct sja1105_private *priv)
#define SJA1105_RATE_MBPS(speed) (((speed) * 64000) / 1000)
-static inline void
-sja1105_setup_policer(struct sja1105_l2_policing_entry *policing,
- int index)
+static void sja1105_setup_policer(struct sja1105_l2_policing_entry *policing,
+ int index)
{
policing[index].sharindx = index;
policing[index].smax = 65535; /* Burst size in bytes */
@@ -507,39 +506,6 @@ static int sja1105_init_l2_policing(struct sja1105_private *priv)
return 0;
}
-static int sja1105_init_avb_params(struct sja1105_private *priv,
- bool on)
-{
- struct sja1105_avb_params_entry *avb;
- struct sja1105_table *table;
-
- table = &priv->static_config.tables[BLK_IDX_AVB_PARAMS];
-
- /* Discard previous AVB Parameters Table */
- if (table->entry_count) {
- kfree(table->entries);
- table->entry_count = 0;
- }
-
- /* Configure the reception of meta frames only if requested */
- if (!on)
- return 0;
-
- table->entries = kcalloc(SJA1105_MAX_AVB_PARAMS_COUNT,
- table->ops->unpacked_entry_size, GFP_KERNEL);
- if (!table->entries)
- return -ENOMEM;
-
- table->entry_count = SJA1105_MAX_AVB_PARAMS_COUNT;
-
- avb = table->entries;
-
- avb->destmeta = SJA1105_META_DMAC;
- avb->srcmeta = SJA1105_META_SMAC;
-
- return 0;
-}
-
static int sja1105_static_config_load(struct sja1105_private *priv,
struct sja1105_dt_port *ports)
{
@@ -580,9 +546,6 @@ static int sja1105_static_config_load(struct sja1105_private *priv,
rc = sja1105_init_general_params(priv);
if (rc < 0)
return rc;
- rc = sja1105_init_avb_params(priv, false);
- if (rc < 0)
- return rc;
/* Send initial configuration to hardware via SPI */
return sja1105_static_config_upload(priv);
@@ -621,8 +584,9 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
for_each_child_of_node(ports_node, child) {
struct device_node *phy_node;
- int phy_mode;
+ phy_interface_t phy_mode;
u32 index;
+ int err;
/* Get switch port number from DT */
if (of_property_read_u32(child, "reg", &index) < 0) {
@@ -633,8 +597,8 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
}
/* Get PHY mode from DT */
- phy_mode = of_get_phy_mode(child);
- if (phy_mode < 0) {
+ err = of_get_phy_mode(child, &phy_mode);
+ if (err) {
dev_err(dev, "Failed to read phy-mode or "
"phy-interface-type property for port %d\n",
index);
@@ -951,7 +915,7 @@ sja1105_static_fdb_change(struct sja1105_private *priv, int port,
* For the placement of a newly learnt FDB entry, the switch selects the bin
* based on a hash function, and the way within that bin incrementally.
*/
-static inline int sja1105et_fdb_index(int bin, int way)
+static int sja1105et_fdb_index(int bin, int way)
{
return bin * SJA1105ET_FDB_BIN_SIZE + way;
}
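Concretely, assuming SJA1105ET_FDB_BIN_SIZE is 4 (four ways per bin), an entry hashed to bin 5 and stored in way 2 lands at index 5 * 4 + 2 = 22 of the L2 lookup table.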
@@ -1095,7 +1059,7 @@ int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port,
l2_lookup.vlanid = vid;
l2_lookup.iotag = SJA1105_S_TAG;
l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
- if (dsa_port_is_vlan_filtering(&ds->ports[port])) {
+ if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port))) {
l2_lookup.mask_vlanid = VLAN_VID_MASK;
l2_lookup.mask_iotag = BIT(0);
} else {
@@ -1158,7 +1122,7 @@ int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port,
l2_lookup.vlanid = vid;
l2_lookup.iotag = SJA1105_S_TAG;
l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
- if (dsa_port_is_vlan_filtering(&ds->ports[port])) {
+ if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port))) {
l2_lookup.mask_vlanid = VLAN_VID_MASK;
l2_lookup.mask_iotag = BIT(0);
} else {
@@ -1204,7 +1168,7 @@ static int sja1105_fdb_add(struct dsa_switch *ds, int port,
* for what gets printed in 'bridge fdb show'. In the case of zero,
* no VID gets printed at all.
*/
- if (!dsa_port_is_vlan_filtering(&ds->ports[port]))
+ if (!dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
vid = 0;
return priv->info->fdb_add_cmd(ds, port, addr, vid);
@@ -1215,7 +1179,7 @@ static int sja1105_fdb_del(struct dsa_switch *ds, int port,
{
struct sja1105_private *priv = ds->priv;
- if (!dsa_port_is_vlan_filtering(&ds->ports[port]))
+ if (!dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
vid = 0;
return priv->info->fdb_del_cmd(ds, port, addr, vid);
@@ -1254,7 +1218,7 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
u64_to_ether_addr(l2_lookup.macaddr, macaddr);
/* We need to hide the dsa_8021q VLANs from the user. */
- if (!dsa_port_is_vlan_filtering(&ds->ports[port]))
+ if (!dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
l2_lookup.vlanid = 0;
cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
}
@@ -1687,7 +1651,7 @@ static int sja1105_setup(struct dsa_switch *ds)
return rc;
}
- rc = sja1105_ptp_clock_register(priv);
+ rc = sja1105_ptp_clock_register(ds);
if (rc < 0) {
dev_err(ds->dev, "Failed to register PTP clock: %d\n", rc);
return rc;
@@ -1729,9 +1693,7 @@ static void sja1105_teardown(struct dsa_switch *ds)
struct sja1105_private *priv = ds->priv;
sja1105_tas_teardown(ds);
- cancel_work_sync(&priv->tagger_data.rxtstamp_work);
- skb_queue_purge(&priv->tagger_data.skb_rxtstamp_queue);
- sja1105_ptp_clock_unregister(priv);
+ sja1105_ptp_clock_unregister(ds);
sja1105_static_config_free(&priv->static_config);
}
@@ -1743,7 +1705,7 @@ static int sja1105_port_enable(struct dsa_switch *ds, int port,
if (!dsa_is_user_port(ds, port))
return 0;
- slave = ds->ports[port].slave;
+ slave = dsa_to_port(ds, port)->slave;
slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
@@ -1775,7 +1737,7 @@ static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
}
/* Transfer skb to the host port. */
- dsa_enqueue_skb(skb, ds->ports[port].slave);
+ dsa_enqueue_skb(skb, dsa_to_port(ds, port)->slave);
/* Wait until the switch has processed the frame */
do {
@@ -1817,11 +1779,8 @@ static netdev_tx_t sja1105_port_deferred_xmit(struct dsa_switch *ds, int port,
{
struct sja1105_private *priv = ds->priv;
struct sja1105_port *sp = &priv->ports[port];
- struct skb_shared_hwtstamps shwt = {0};
int slot = sp->mgmt_slot;
struct sk_buff *clone;
- u64 now, ts;
- int rc;
/* The tragic fact about the switch having 4x2 slots for installing
* management routes is that all of them except one are actually
@@ -1847,27 +1806,8 @@ static netdev_tx_t sja1105_port_deferred_xmit(struct dsa_switch *ds, int port,
if (!clone)
goto out;
- skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
-
- mutex_lock(&priv->ptp_lock);
-
- now = priv->tstamp_cc.read(&priv->tstamp_cc);
-
- rc = sja1105_ptpegr_ts_poll(priv, slot, &ts);
- if (rc < 0) {
- dev_err(ds->dev, "xmit: timed out polling for tstamp\n");
- kfree_skb(clone);
- goto out_unlock_ptp;
- }
-
- ts = sja1105_tstamp_reconstruct(priv, now, ts);
- ts = timecounter_cyc2time(&priv->tstamp_tc, ts);
-
- shwt.hwtstamp = ns_to_ktime(ts);
- skb_complete_tx_timestamp(clone, &shwt);
+ sja1105_ptp_txtstamp_skb(ds, slot, clone);
-out_unlock_ptp:
- mutex_unlock(&priv->ptp_lock);
out:
mutex_unlock(&priv->mgmt_lock);
return NETDEV_TX_OK;
@@ -1897,177 +1837,94 @@ static int sja1105_set_ageing_time(struct dsa_switch *ds,
return sja1105_static_config_reload(priv);
}
-/* Caller must hold priv->tagger_data.meta_lock */
-static int sja1105_change_rxtstamping(struct sja1105_private *priv,
- bool on)
+static int sja1105_port_setup_tc(struct dsa_switch *ds, int port,
+ enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_QDISC_TAPRIO:
+ return sja1105_setup_tc_taprio(ds, port, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/* We have a single mirror (@to) port, but can configure ingress and egress
+ * mirroring on all other (@from) ports.
+ * We need to allow mirroring rules only as long as the @to port is always the
+ * same, and we need to unset the @to port from mirr_port only when there is no
+ * mirroring rule that references it.
+ */
+static int sja1105_mirror_apply(struct sja1105_private *priv, int from, int to,
+ bool ingress, bool enabled)
{
struct sja1105_general_params_entry *general_params;
+ struct sja1105_mac_config_entry *mac;
struct sja1105_table *table;
+ bool already_enabled;
+ u64 new_mirr_port;
int rc;
table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
general_params = table->entries;
- general_params->send_meta1 = on;
- general_params->send_meta0 = on;
- rc = sja1105_init_avb_params(priv, on);
- if (rc < 0)
- return rc;
+ mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
- /* Initialize the meta state machine to a known state */
- if (priv->tagger_data.stampable_skb) {
- kfree_skb(priv->tagger_data.stampable_skb);
- priv->tagger_data.stampable_skb = NULL;
+ already_enabled = (general_params->mirr_port != SJA1105_NUM_PORTS);
+ if (already_enabled && enabled && general_params->mirr_port != to) {
+ dev_err(priv->ds->dev,
+ "Delete mirroring rules towards port %llu first\n",
+ general_params->mirr_port);
+ return -EBUSY;
}
- return sja1105_static_config_reload(priv);
-}
-
-static int sja1105_hwtstamp_set(struct dsa_switch *ds, int port,
- struct ifreq *ifr)
-{
- struct sja1105_private *priv = ds->priv;
- struct hwtstamp_config config;
- bool rx_on;
- int rc;
+ new_mirr_port = to;
+ if (!enabled) {
+ bool keep = false;
+ int port;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- switch (config.tx_type) {
- case HWTSTAMP_TX_OFF:
- priv->ports[port].hwts_tx_en = false;
- break;
- case HWTSTAMP_TX_ON:
- priv->ports[port].hwts_tx_en = true;
- break;
- default:
- return -ERANGE;
- }
-
- switch (config.rx_filter) {
- case HWTSTAMP_FILTER_NONE:
- rx_on = false;
- break;
- default:
- rx_on = true;
- break;
- }
-
- if (rx_on != priv->tagger_data.hwts_rx_en) {
- spin_lock(&priv->tagger_data.meta_lock);
- rc = sja1105_change_rxtstamping(priv, rx_on);
- spin_unlock(&priv->tagger_data.meta_lock);
- if (rc < 0) {
- dev_err(ds->dev,
- "Failed to change RX timestamping: %d\n", rc);
- return -EFAULT;
+ /* Anybody still referencing mirr_port? */
+ for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+ if (mac[port].ing_mirr || mac[port].egr_mirr) {
+ keep = true;
+ break;
+ }
}
- priv->tagger_data.hwts_rx_en = rx_on;
+ /* Unset already_enabled for next time */
+ if (!keep)
+ new_mirr_port = SJA1105_NUM_PORTS;
}
+ if (new_mirr_port != general_params->mirr_port) {
+ general_params->mirr_port = new_mirr_port;
- if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
- return -EFAULT;
- return 0;
-}
-
-static int sja1105_hwtstamp_get(struct dsa_switch *ds, int port,
- struct ifreq *ifr)
-{
- struct sja1105_private *priv = ds->priv;
- struct hwtstamp_config config;
-
- config.flags = 0;
- if (priv->ports[port].hwts_tx_en)
- config.tx_type = HWTSTAMP_TX_ON;
- else
- config.tx_type = HWTSTAMP_TX_OFF;
- if (priv->tagger_data.hwts_rx_en)
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
- else
- config.rx_filter = HWTSTAMP_FILTER_NONE;
-
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
-}
-
-#define to_tagger(d) \
- container_of((d), struct sja1105_tagger_data, rxtstamp_work)
-#define to_sja1105(d) \
- container_of((d), struct sja1105_private, tagger_data)
-
-static void sja1105_rxtstamp_work(struct work_struct *work)
-{
- struct sja1105_tagger_data *data = to_tagger(work);
- struct sja1105_private *priv = to_sja1105(data);
- struct sk_buff *skb;
- u64 now;
-
- mutex_lock(&priv->ptp_lock);
-
- now = priv->tstamp_cc.read(&priv->tstamp_cc);
-
- while ((skb = skb_dequeue(&data->skb_rxtstamp_queue)) != NULL) {
- struct skb_shared_hwtstamps *shwt = skb_hwtstamps(skb);
- u64 ts;
-
- *shwt = (struct skb_shared_hwtstamps) {0};
-
- ts = SJA1105_SKB_CB(skb)->meta_tstamp;
- ts = sja1105_tstamp_reconstruct(priv, now, ts);
- ts = timecounter_cyc2time(&priv->tstamp_tc, ts);
-
- shwt->hwtstamp = ns_to_ktime(ts);
- netif_rx_ni(skb);
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_GENERAL_PARAMS,
+ 0, general_params, true);
+ if (rc < 0)
+ return rc;
}
- mutex_unlock(&priv->ptp_lock);
-}
-
-/* Called from dsa_skb_defer_rx_timestamp */
-static bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
- struct sk_buff *skb, unsigned int type)
-{
- struct sja1105_private *priv = ds->priv;
- struct sja1105_tagger_data *data = &priv->tagger_data;
-
- if (!data->hwts_rx_en)
- return false;
+ if (ingress)
+ mac[from].ing_mirr = enabled;
+ else
+ mac[from].egr_mirr = enabled;
- /* We need to read the full PTP clock to reconstruct the Rx
- * timestamp. For that we need a sleepable context.
- */
- skb_queue_tail(&data->skb_rxtstamp_queue, skb);
- schedule_work(&data->rxtstamp_work);
- return true;
+ return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, from,
+ &mac[from], true);
}
-/* Called from dsa_skb_tx_timestamp. This callback is just to make DSA clone
- * the skb and have it available in DSA_SKB_CB in the .port_deferred_xmit
- * callback, where we will timestamp it synchronously.
- */
-static bool sja1105_port_txtstamp(struct dsa_switch *ds, int port,
- struct sk_buff *skb, unsigned int type)
+static int sja1105_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress)
{
- struct sja1105_private *priv = ds->priv;
- struct sja1105_port *sp = &priv->ports[port];
-
- if (!sp->hwts_tx_en)
- return false;
-
- return true;
+ return sja1105_mirror_apply(ds->priv, port, mirror->to_local_port,
+ ingress, true);
}
-static int sja1105_port_setup_tc(struct dsa_switch *ds, int port,
- enum tc_setup_type type,
- void *type_data)
+static void sja1105_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
{
- switch (type) {
- case TC_SETUP_QDISC_TAPRIO:
- return sja1105_setup_tc_taprio(ds, port, type_data);
- default:
- return -EOPNOTSUPP;
- }
+ sja1105_mirror_apply(ds->priv, port, mirror->to_local_port,
+ mirror->ingress, false);
}
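
The invariant described in the comment above (one global mirror destination, released only when the last rule referencing it is deleted) can be modeled in isolation. Below is a minimal standalone sketch, assuming 5 ports and using the port count as the "disabled" sentinel just like the driver does; it is an illustration of the bookkeeping, not driver code:

    #include <stdbool.h>

    #define NUM_PORTS 5                          /* stands in for SJA1105_NUM_PORTS */

    static int mirr_port = NUM_PORTS;            /* sentinel: mirroring disabled */
    static bool ing[NUM_PORTS], egr[NUM_PORTS];  /* per-port mirror flags */

    static int mirror_apply(int from, int to, bool ingress, bool enabled)
    {
            int i;

            /* Only one destination at a time: reject a different @to port */
            if (mirr_port != NUM_PORTS && enabled && mirr_port != to)
                    return -1;                   /* -EBUSY in the driver */

            if (ingress)
                    ing[from] = enabled;
            else
                    egr[from] = enabled;

            if (enabled) {
                    mirr_port = to;
            } else {
                    /* Release the destination only when nothing references it */
                    mirr_port = NUM_PORTS;
                    for (i = 0; i < NUM_PORTS; i++)
                            if (ing[i] || egr[i])
                                    mirr_port = to;
            }
            return 0;
    }

From userspace, this path is typically exercised with a tc matchall mirred rule, e.g. "tc filter add dev swp1 ingress matchall skip_sw action mirred egress mirror dev swp3" (the port names here are illustrative).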
static const struct dsa_switch_ops sja1105_switch_ops = {
@@ -2103,6 +1960,8 @@ static const struct dsa_switch_ops sja1105_switch_ops = {
.port_rxtstamp = sja1105_port_rxtstamp,
.port_txtstamp = sja1105_port_txtstamp,
.port_setup_tc = sja1105_port_setup_tc,
+ .port_mirror_add = sja1105_mirror_add,
+ .port_mirror_del = sja1105_mirror_del,
};
static int sja1105_check_device_id(struct sja1105_private *priv)
@@ -2110,23 +1969,22 @@ static int sja1105_check_device_id(struct sja1105_private *priv)
const struct sja1105_regs *regs = priv->info->regs;
u8 prod_id[SJA1105_SIZE_DEVICE_ID] = {0};
struct device *dev = &priv->spidev->dev;
- u64 device_id;
+ u32 device_id;
u64 part_no;
int rc;
- rc = sja1105_spi_send_int(priv, SPI_READ, regs->device_id,
- &device_id, SJA1105_SIZE_DEVICE_ID);
+ rc = sja1105_xfer_u32(priv, SPI_READ, regs->device_id, &device_id);
if (rc < 0)
return rc;
if (device_id != priv->info->device_id) {
- dev_err(dev, "Expected device ID 0x%llx but read 0x%llx\n",
+ dev_err(dev, "Expected device ID 0x%llx but read 0x%x\n",
priv->info->device_id, device_id);
return -ENODEV;
}
- rc = sja1105_spi_send_packed_buf(priv, SPI_READ, regs->prod_id,
- prod_id, SJA1105_SIZE_DEVICE_ID);
+ rc = sja1105_xfer_buf(priv, SPI_READ, regs->prod_id, prod_id,
+ SJA1105_SIZE_DEVICE_ID);
if (rc < 0)
return rc;
@@ -2190,31 +2048,37 @@ static int sja1105_probe(struct spi_device *spi)
dev_info(dev, "Probed switch chip: %s\n", priv->info->name);
- ds = dsa_switch_alloc(dev, SJA1105_NUM_PORTS);
+ ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
if (!ds)
return -ENOMEM;
+ ds->dev = dev;
+ ds->num_ports = SJA1105_NUM_PORTS;
ds->ops = &sja1105_switch_ops;
ds->priv = priv;
priv->ds = ds;
tagger_data = &priv->tagger_data;
- skb_queue_head_init(&tagger_data->skb_rxtstamp_queue);
- INIT_WORK(&tagger_data->rxtstamp_work, sja1105_rxtstamp_work);
+
+ mutex_init(&priv->ptp_data.lock);
+ mutex_init(&priv->mgmt_lock);
+
+ sja1105_tas_setup(ds);
+
+ rc = dsa_register_switch(priv->ds);
+ if (rc)
+ return rc;
/* Connections between dsa_port and sja1105_port */
for (i = 0; i < SJA1105_NUM_PORTS; i++) {
struct sja1105_port *sp = &priv->ports[i];
- ds->ports[i].priv = sp;
- sp->dp = &ds->ports[i];
+ dsa_to_port(ds, i)->priv = sp;
+ sp->dp = dsa_to_port(ds, i);
sp->data = tagger_data;
}
- mutex_init(&priv->mgmt_lock);
- sja1105_tas_setup(ds);
-
- return dsa_register_switch(priv->ds);
+ return 0;
}
static int sja1105_remove(struct spi_device *spi)
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c
index d8e8dd59f3d1..783100397f8a 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.c
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.c
@@ -13,24 +13,6 @@
#define SJA1105_MAX_ADJ_PPB 32000000
#define SJA1105_SIZE_PTP_CMD 4
-/* Timestamps are in units of 8 ns clock ticks (equivalent to a fixed
- * 125 MHz clock) so the scale factor (MULT / SHIFT) needs to be 8.
- * Furthermore, wisely pick SHIFT as 28 bits, which translates
- * MULT into 2^31 (0x80000000). This is the same value around which
- * the hardware PTPCLKRATE is centered, so the same ppb conversion
- * arithmetic can be reused.
- */
-#define SJA1105_CC_SHIFT 28
-#define SJA1105_CC_MULT (8 << SJA1105_CC_SHIFT)
-
-/* Having 33 bits of cycle counter left until a 64-bit overflow during delta
- * conversion, we multiply this by the 8 ns counter resolution and arrive at
- * a comfortable 68.71 second refresh interval until the delta would cause
- * an integer overflow, in absence of any other readout.
- * Approximate to 1 minute.
- */
-#define SJA1105_REFRESH_INTERVAL (HZ * 60)
-
/* This range is actually +/- SJA1105_MAX_ADJ_PPB
* divided by 1000 (ppb -> ppm) and with a 16-bit
* "fractional" part (actually fixed point).
@@ -41,7 +23,7 @@
*
* This forgoes a "ppb" numeric representation (up to NSEC_PER_SEC)
* and defines the scaling factor between scaled_ppm and the actual
- * frequency adjustments (both cycle counter and hardware).
+ * frequency adjustments of the PHC.
*
* ptpclkrate = scaled_ppm * 2^31 / (10^6 * 2^16)
* simplifies to
@@ -49,22 +31,154 @@
*/
#define SJA1105_CC_MULT_NUM (1 << 9)
#define SJA1105_CC_MULT_DEM 15625
+#define SJA1105_CC_MULT 0x80000000
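
The conversion above is easy to sanity-check numerically. Here is a self-contained sketch, reusing the two scaling defines from this file; the computed value is what .adjfine would write to PTPCLKRATE for a +1 ppm request (scaled_ppm carries a 16-bit fractional part, so 1 ppm == 65536):

    #include <stdint.h>
    #include <stdio.h>

    #define SJA1105_CC_MULT_NUM (1 << 9)
    #define SJA1105_CC_MULT_DEM 15625
    #define SJA1105_CC_MULT     0x80000000u

    int main(void)
    {
            int64_t scaled_ppm = 65536;     /* +1 ppm */
            int64_t clkrate = scaled_ppm * SJA1105_CC_MULT_NUM / SJA1105_CC_MULT_DEM;

            /* Re-center around 2^31, the hardware's "no adjustment" point */
            printf("ptpclkrate = 0x%08x\n",
                   (unsigned int)(SJA1105_CC_MULT + clkrate));
            return 0;       /* prints 0x80000863: 2^31 + 2147, i.e. ~1 ppm of 2^31 */
    }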
-#define ptp_to_sja1105(d) container_of((d), struct sja1105_private, ptp_caps)
-#define cc_to_sja1105(d) container_of((d), struct sja1105_private, tstamp_cc)
-#define dw_to_sja1105(d) container_of((d), struct sja1105_private, refresh_work)
-
-struct sja1105_ptp_cmd {
- u64 resptp; /* reset */
+enum sja1105_ptp_clk_mode {
+ PTP_ADD_MODE = 1,
+ PTP_SET_MODE = 0,
};
+#define ptp_caps_to_data(d) \
+ container_of((d), struct sja1105_ptp_data, caps)
+#define ptp_data_to_sja1105(d) \
+ container_of((d), struct sja1105_private, ptp_data)
+
+static int sja1105_init_avb_params(struct sja1105_private *priv,
+ bool on)
+{
+ struct sja1105_avb_params_entry *avb;
+ struct sja1105_table *table;
+
+ table = &priv->static_config.tables[BLK_IDX_AVB_PARAMS];
+
+ /* Discard previous AVB Parameters Table */
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ /* Configure the reception of meta frames only if requested */
+ if (!on)
+ return 0;
+
+ table->entries = kcalloc(SJA1105_MAX_AVB_PARAMS_COUNT,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = SJA1105_MAX_AVB_PARAMS_COUNT;
+
+ avb = table->entries;
+
+ avb->destmeta = SJA1105_META_DMAC;
+ avb->srcmeta = SJA1105_META_SMAC;
+
+ return 0;
+}
+
+/* Must be called only with priv->tagger_data.state bit
+ * SJA1105_HWTS_RX_EN cleared
+ */
+static int sja1105_change_rxtstamping(struct sja1105_private *priv,
+ bool on)
+{
+ struct sja1105_general_params_entry *general_params;
+ struct sja1105_table *table;
+ int rc;
+
+ table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
+ general_params = table->entries;
+ general_params->send_meta1 = on;
+ general_params->send_meta0 = on;
+
+ rc = sja1105_init_avb_params(priv, on);
+ if (rc < 0)
+ return rc;
+
+ /* Initialize the meta state machine to a known state */
+ if (priv->tagger_data.stampable_skb) {
+ kfree_skb(priv->tagger_data.stampable_skb);
+ priv->tagger_data.stampable_skb = NULL;
+ }
+
+ return sja1105_static_config_reload(priv);
+}
+
+int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct hwtstamp_config config;
+ bool rx_on;
+ int rc;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ priv->ports[port].hwts_tx_en = false;
+ break;
+ case HWTSTAMP_TX_ON:
+ priv->ports[port].hwts_tx_en = true;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ rx_on = false;
+ break;
+ default:
+ rx_on = true;
+ break;
+ }
+
+ if (rx_on != test_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state)) {
+ clear_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state);
+
+ rc = sja1105_change_rxtstamping(priv, rx_on);
+ if (rc < 0) {
+ dev_err(ds->dev,
+ "Failed to change RX timestamping: %d\n", rc);
+ return rc;
+ }
+ if (rx_on)
+ set_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state);
+ }
+
+ if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
+ return -EFAULT;
+ return 0;
+}
+
+int sja1105_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct hwtstamp_config config;
+
+ config.flags = 0;
+ if (priv->ports[port].hwts_tx_en)
+ config.tx_type = HWTSTAMP_TX_ON;
+ else
+ config.tx_type = HWTSTAMP_TX_OFF;
+ if (test_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state))
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ else
+ config.rx_filter = HWTSTAMP_FILTER_NONE;
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
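
These two handlers back the SIOCSHWTSTAMP/SIOCGHWTSTAMP ioctls that DSA forwards from the slave netdev. A hedged userspace sketch of driving them follows; the interface name "swp2" is an assumption for illustration:

    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/net_tstamp.h>
    #include <linux/sockios.h>

    int main(void)
    {
            struct hwtstamp_config cfg = {
                    .tx_type   = HWTSTAMP_TX_ON,
                    .rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT,
            };
            struct ifreq ifr;
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, "swp2", IFNAMSIZ - 1);
            ifr.ifr_data = (void *)&cfg;

            if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
                    perror("SIOCSHWTSTAMP");

            /* On return, cfg reflects what the driver actually enabled */
            printf("rx_filter: %d\n", cfg.rx_filter);
            return 0;
    }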
+
int sja1105_get_ts_info(struct dsa_switch *ds, int port,
struct ethtool_ts_info *info)
{
struct sja1105_private *priv = ds->priv;
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
/* Called during cleanup */
- if (!priv->clock)
+ if (!ptp_data->clock)
return -ENODEV;
info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
@@ -74,14 +188,14 @@ int sja1105_get_ts_info(struct dsa_switch *ds, int port,
(1 << HWTSTAMP_TX_ON);
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT);
- info->phc_index = ptp_clock_index(priv->clock);
+ info->phc_index = ptp_clock_index(ptp_data->clock);
return 0;
}
-int sja1105et_ptp_cmd(const void *ctx, const void *data)
+int sja1105et_ptp_cmd(const struct dsa_switch *ds,
+ const struct sja1105_ptp_cmd *cmd)
{
- const struct sja1105_ptp_cmd *cmd = data;
- const struct sja1105_private *priv = ctx;
+ const struct sja1105_private *priv = ds->priv;
const struct sja1105_regs *regs = priv->info->regs;
const int size = SJA1105_SIZE_PTP_CMD;
u8 buf[SJA1105_SIZE_PTP_CMD] = {0};
@@ -90,15 +204,17 @@ int sja1105et_ptp_cmd(const void *ctx, const void *data)
sja1105_pack(buf, &valid, 31, 31, size);
sja1105_pack(buf, &cmd->resptp, 2, 2, size);
+ sja1105_pack(buf, &cmd->corrclk4ts, 1, 1, size);
+ sja1105_pack(buf, &cmd->ptpclkadd, 0, 0, size);
- return sja1105_spi_send_packed_buf(priv, SPI_WRITE, regs->ptp_control,
- buf, SJA1105_SIZE_PTP_CMD);
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->ptp_control, buf,
+ SJA1105_SIZE_PTP_CMD);
}
-int sja1105pqrs_ptp_cmd(const void *ctx, const void *data)
+int sja1105pqrs_ptp_cmd(const struct dsa_switch *ds,
+ const struct sja1105_ptp_cmd *cmd)
{
- const struct sja1105_ptp_cmd *cmd = data;
- const struct sja1105_private *priv = ctx;
+ const struct sja1105_private *priv = ds->priv;
const struct sja1105_regs *regs = priv->info->regs;
const int size = SJA1105_SIZE_PTP_CMD;
u8 buf[SJA1105_SIZE_PTP_CMD] = {0};
@@ -107,9 +223,11 @@ int sja1105pqrs_ptp_cmd(const void *ctx, const void *data)
sja1105_pack(buf, &valid, 31, 31, size);
sja1105_pack(buf, &cmd->resptp, 3, 3, size);
+ sja1105_pack(buf, &cmd->corrclk4ts, 2, 2, size);
+ sja1105_pack(buf, &cmd->ptpclkadd, 0, 0, size);
- return sja1105_spi_send_packed_buf(priv, SPI_WRITE, regs->ptp_control,
- buf, SJA1105_SIZE_PTP_CMD);
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->ptp_control, buf,
+ SJA1105_SIZE_PTP_CMD);
}
/* The switch returns partial timestamps (24 bits for SJA1105 E/T, which wrap
@@ -126,9 +244,10 @@ int sja1105pqrs_ptp_cmd(const void *ctx, const void *data)
* Must be called within one wraparound period of the partial timestamp since
* it was generated by the MAC.
*/
-u64 sja1105_tstamp_reconstruct(struct sja1105_private *priv, u64 now,
- u64 ts_partial)
+static u64 sja1105_tstamp_reconstruct(struct dsa_switch *ds, u64 now,
+ u64 ts_partial)
{
+ struct sja1105_private *priv = ds->priv;
u64 partial_tstamp_mask = CYCLECOUNTER_MASK(priv->info->ptp_ts_bits);
u64 ts_reconstructed;
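
Based on the comment above, here is a minimal standalone sketch of one way to splice a partial timestamp back into the full counter, assuming the call happens within one wraparound period of the stamp's generation (ts_bits would be 24 on E/T and 32 on P/Q/R/S):

    #include <stdint.h>

    static uint64_t reconstruct(uint64_t now, uint64_t ts_partial, int ts_bits)
    {
            uint64_t mask = (1ull << ts_bits) - 1;
            /* Upper bits from the current counter, lower bits from the stamp */
            uint64_t ts = (now & ~mask) | ts_partial;

            /* If that lands in the future, the low bits wrapped since the
             * stamp was taken: step back one wraparound period.
             */
            if (ts_partial > (now & mask))
                    ts -= mask + 1;

            return ts;
    }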
@@ -170,8 +289,9 @@ u64 sja1105_tstamp_reconstruct(struct sja1105_private *priv, u64 now,
* To have common code for E/T and P/Q/R/S for reading the timestamp,
* we need to juggle with the offset and the bit indices.
*/
-int sja1105_ptpegr_ts_poll(struct sja1105_private *priv, int port, u64 *ts)
+static int sja1105_ptpegr_ts_poll(struct dsa_switch *ds, int port, u64 *ts)
{
+ struct sja1105_private *priv = ds->priv;
const struct sja1105_regs *regs = priv->info->regs;
int tstamp_bit_start, tstamp_bit_end;
int timeout = 10;
@@ -180,10 +300,8 @@ int sja1105_ptpegr_ts_poll(struct sja1105_private *priv, int port, u64 *ts)
int rc;
do {
- rc = sja1105_spi_send_packed_buf(priv, SPI_READ,
- regs->ptpegr_ts[port],
- packed_buf,
- priv->info->ptpegr_ts_bytes);
+ rc = sja1105_xfer_buf(priv, SPI_READ, regs->ptpegr_ts[port],
+ packed_buf, priv->info->ptpegr_ts_bytes);
if (rc < 0)
return rc;
@@ -216,22 +334,109 @@ int sja1105_ptpegr_ts_poll(struct sja1105_private *priv, int port, u64 *ts)
return 0;
}
-int sja1105_ptp_reset(struct sja1105_private *priv)
+/* Caller must hold ptp_data->lock */
+static int sja1105_ptpclkval_read(struct sja1105_private *priv, u64 *ticks)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+
+ return sja1105_xfer_u64(priv, SPI_READ, regs->ptpclkval, ticks);
+}
+
+/* Caller must hold ptp_data->lock */
+static int sja1105_ptpclkval_write(struct sja1105_private *priv, u64 ticks)
{
+ const struct sja1105_regs *regs = priv->info->regs;
+
+ return sja1105_xfer_u64(priv, SPI_WRITE, regs->ptpclkval, &ticks);
+}
+
+#define rxtstamp_to_tagger(d) \
+ container_of((d), struct sja1105_tagger_data, rxtstamp_work)
+#define tagger_to_sja1105(d) \
+ container_of((d), struct sja1105_private, tagger_data)
+
+static void sja1105_rxtstamp_work(struct work_struct *work)
+{
+ struct sja1105_tagger_data *tagger_data = rxtstamp_to_tagger(work);
+ struct sja1105_private *priv = tagger_to_sja1105(tagger_data);
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
struct dsa_switch *ds = priv->ds;
- struct sja1105_ptp_cmd cmd = {0};
+ struct sk_buff *skb;
+
+ mutex_lock(&ptp_data->lock);
+
+ while ((skb = skb_dequeue(&tagger_data->skb_rxtstamp_queue)) != NULL) {
+ struct skb_shared_hwtstamps *shwt = skb_hwtstamps(skb);
+ u64 ticks, ts;
+ int rc;
+
+ rc = sja1105_ptpclkval_read(priv, &ticks);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to read PTP clock: %d\n", rc);
+ kfree_skb(skb);
+ continue;
+ }
+
+ *shwt = (struct skb_shared_hwtstamps) {0};
+
+ ts = SJA1105_SKB_CB(skb)->meta_tstamp;
+ ts = sja1105_tstamp_reconstruct(ds, ticks, ts);
+
+ shwt->hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(ts));
+ netif_rx_ni(skb);
+ }
+
+ mutex_unlock(&ptp_data->lock);
+}
+
+/* Called from dsa_skb_defer_rx_timestamp */
+bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *skb, unsigned int type)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_tagger_data *tagger_data = &priv->tagger_data;
+
+ if (!test_bit(SJA1105_HWTS_RX_EN, &tagger_data->state))
+ return false;
+
+ /* We need to read the full PTP clock to reconstruct the Rx
+ * timestamp. For that we need a sleepable context.
+ */
+ skb_queue_tail(&tagger_data->skb_rxtstamp_queue, skb);
+ schedule_work(&tagger_data->rxtstamp_work);
+ return true;
+}
+
+/* Called from dsa_skb_tx_timestamp. This callback is just to make DSA clone
+ * the skb and have it available in DSA_SKB_CB in the .port_deferred_xmit
+ * callback, where we will timestamp it synchronously.
+ */
+bool sja1105_port_txtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *skb, unsigned int type)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_port *sp = &priv->ports[port];
+
+ if (!sp->hwts_tx_en)
+ return false;
+
+ return true;
+}
+
+int sja1105_ptp_reset(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+ struct sja1105_ptp_cmd cmd = ptp_data->cmd;
int rc;
- mutex_lock(&priv->ptp_lock);
+ mutex_lock(&ptp_data->lock);
cmd.resptp = 1;
dev_dbg(ds->dev, "Resetting PTP clock\n");
- rc = priv->info->ptp_cmd(priv, &cmd);
-
- timecounter_init(&priv->tstamp_tc, &priv->tstamp_cc,
- ktime_to_ns(ktime_get_real()));
+ rc = priv->info->ptp_cmd(ds, &cmd);
- mutex_unlock(&priv->ptp_lock);
+ mutex_unlock(&ptp_data->lock);
return rc;
}
@@ -239,154 +444,185 @@ int sja1105_ptp_reset(struct sja1105_private *priv)
static int sja1105_ptp_gettime(struct ptp_clock_info *ptp,
struct timespec64 *ts)
{
- struct sja1105_private *priv = ptp_to_sja1105(ptp);
- u64 ns;
+ struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
+ u64 ticks = 0;
+ int rc;
- mutex_lock(&priv->ptp_lock);
- ns = timecounter_read(&priv->tstamp_tc);
- mutex_unlock(&priv->ptp_lock);
+ mutex_lock(&ptp_data->lock);
- *ts = ns_to_timespec64(ns);
+ rc = sja1105_ptpclkval_read(priv, &ticks);
+ *ts = ns_to_timespec64(sja1105_ticks_to_ns(ticks));
- return 0;
+ mutex_unlock(&ptp_data->lock);
+
+ return rc;
+}
+
+/* Caller must hold ptp_data->lock */
+static int sja1105_ptp_mode_set(struct sja1105_private *priv,
+ enum sja1105_ptp_clk_mode mode)
+{
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+
+ if (ptp_data->cmd.ptpclkadd == mode)
+ return 0;
+
+ ptp_data->cmd.ptpclkadd = mode;
+
+ return priv->info->ptp_cmd(priv->ds, &ptp_data->cmd);
}
+/* Write to PTPCLKVAL while PTPCLKADD is 0 */
static int sja1105_ptp_settime(struct ptp_clock_info *ptp,
const struct timespec64 *ts)
{
- struct sja1105_private *priv = ptp_to_sja1105(ptp);
- u64 ns = timespec64_to_ns(ts);
+ struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
+ u64 ticks = ns_to_sja1105_ticks(timespec64_to_ns(ts));
+ int rc;
- mutex_lock(&priv->ptp_lock);
- timecounter_init(&priv->tstamp_tc, &priv->tstamp_cc, ns);
- mutex_unlock(&priv->ptp_lock);
+ mutex_lock(&ptp_data->lock);
- return 0;
+ rc = sja1105_ptp_mode_set(priv, PTP_SET_MODE);
+ if (rc < 0) {
+ dev_err(priv->ds->dev, "Failed to put PTPCLK in set mode\n");
+ goto out;
+ }
+
+ rc = sja1105_ptpclkval_write(priv, ticks);
+out:
+ mutex_unlock(&ptp_data->lock);
+
+ return rc;
}
static int sja1105_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
- struct sja1105_private *priv = ptp_to_sja1105(ptp);
+ struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
+ const struct sja1105_regs *regs = priv->info->regs;
+ u32 clkrate32;
s64 clkrate;
+ int rc;
clkrate = (s64)scaled_ppm * SJA1105_CC_MULT_NUM;
clkrate = div_s64(clkrate, SJA1105_CC_MULT_DEM);
- mutex_lock(&priv->ptp_lock);
-
- /* Force a readout to update the timer *before* changing its frequency.
- *
- * This way, its corrected time curve can at all times be modeled
- * as a linear "A * x + B" function, where:
- *
- * - B are past frequency adjustments and offset shifts, all
- * accumulated into the cycle_last variable.
- *
- * - A is the new frequency adjustments we're just about to set.
- *
- * Reading now makes B accumulate the correct amount of time,
- * corrected at the old rate, before changing it.
- *
- * Hardware timestamps then become simple points on the curve and
- * are approximated using the above function. This is still better
- * than letting the switch take the timestamps using the hardware
- * rate-corrected clock (PTPCLKVAL) - the comparison in this case would
- * be that we're shifting the ruler at the same time as we're taking
- * measurements with it.
- *
- * The disadvantage is that it's possible to receive timestamps when
- * a frequency adjustment took place in the near past.
- * In this case they will be approximated using the new ppb value
- * instead of a compound function made of two segments (one at the old
- * and the other at the new rate) - introducing some inaccuracy.
- */
- timecounter_read(&priv->tstamp_tc);
+ /* Take a +/- value and re-center it around 2^31. */
+ clkrate = SJA1105_CC_MULT + clkrate;
+ WARN_ON(abs(clkrate) >= GENMASK_ULL(31, 0));
+ clkrate32 = clkrate;
- priv->tstamp_cc.mult = SJA1105_CC_MULT + clkrate;
+ mutex_lock(&ptp_data->lock);
- mutex_unlock(&priv->ptp_lock);
+ rc = sja1105_xfer_u32(priv, SPI_WRITE, regs->ptpclkrate, &clkrate32);
- return 0;
+ mutex_unlock(&ptp_data->lock);
+
+ return rc;
}
+/* Write to PTPCLKVAL while PTPCLKADD is 1 */
static int sja1105_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
- struct sja1105_private *priv = ptp_to_sja1105(ptp);
+ struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
+ s64 ticks = ns_to_sja1105_ticks(delta);
+ int rc;
- mutex_lock(&priv->ptp_lock);
- timecounter_adjtime(&priv->tstamp_tc, delta);
- mutex_unlock(&priv->ptp_lock);
+ mutex_lock(&ptp_data->lock);
- return 0;
+ rc = sja1105_ptp_mode_set(priv, PTP_ADD_MODE);
+ if (rc < 0) {
+ dev_err(priv->ds->dev, "Failed to put PTPCLK in add mode\n");
+ goto out;
+ }
+
+ rc = sja1105_ptpclkval_write(priv, ticks);
+
+out:
+ mutex_unlock(&ptp_data->lock);
+
+ return rc;
}
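
The hunk comments above ("Write to PTPCLKVAL while PTPCLKADD is 0/1") describe the same register acting either as an absolute set or as a signed accumulator. An illustrative software model of that semantics, under stated assumptions (a sketch, not hardware-accurate register behavior):

    #include <stdint.h>

    static uint64_t ptpclkval;      /* free-running PHC, in 8 ns ticks */

    static void ptpclkval_write(uint64_t val, int ptpclkadd)
    {
            if (ptpclkadd)                          /* PTP_ADD_MODE: .adjtime */
                    ptpclkval += (int64_t)val;      /* two's complement delta */
            else                                    /* PTP_SET_MODE: .settime */
                    ptpclkval = val;
    }

This is why sja1105_ptp_mode_set() is called before each write: the meaning of the next PTPCLKVAL write depends on the mode latched by the last PTP command.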
-static u64 sja1105_ptptsclk_read(const struct cyclecounter *cc)
+int sja1105_ptp_clock_register(struct dsa_switch *ds)
{
- struct sja1105_private *priv = cc_to_sja1105(cc);
- const struct sja1105_regs *regs = priv->info->regs;
- u64 ptptsclk = 0;
- int rc;
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_tagger_data *tagger_data = &priv->tagger_data;
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+
+ ptp_data->caps = (struct ptp_clock_info) {
+ .owner = THIS_MODULE,
+ .name = "SJA1105 PHC",
+ .adjfine = sja1105_ptp_adjfine,
+ .adjtime = sja1105_ptp_adjtime,
+ .gettime64 = sja1105_ptp_gettime,
+ .settime64 = sja1105_ptp_settime,
+ .max_adj = SJA1105_MAX_ADJ_PPB,
+ };
- rc = sja1105_spi_send_int(priv, SPI_READ, regs->ptptsclk,
- &ptptsclk, 8);
- if (rc < 0)
- dev_err_ratelimited(priv->ds->dev,
- "failed to read ptp cycle counter: %d\n",
- rc);
- return ptptsclk;
+ skb_queue_head_init(&tagger_data->skb_rxtstamp_queue);
+ INIT_WORK(&tagger_data->rxtstamp_work, sja1105_rxtstamp_work);
+ spin_lock_init(&tagger_data->meta_lock);
+
+ ptp_data->clock = ptp_clock_register(&ptp_data->caps, ds->dev);
+ if (IS_ERR_OR_NULL(ptp_data->clock))
+ return PTR_ERR(ptp_data->clock);
+
+ ptp_data->cmd.corrclk4ts = true;
+ ptp_data->cmd.ptpclkadd = PTP_SET_MODE;
+
+ return sja1105_ptp_reset(ds);
}
-static void sja1105_ptp_overflow_check(struct work_struct *work)
+void sja1105_ptp_clock_unregister(struct dsa_switch *ds)
{
- struct delayed_work *dw = to_delayed_work(work);
- struct sja1105_private *priv = dw_to_sja1105(dw);
- struct timespec64 ts;
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
- sja1105_ptp_gettime(&priv->ptp_caps, &ts);
+ if (IS_ERR_OR_NULL(ptp_data->clock))
+ return;
- schedule_delayed_work(&priv->refresh_work, SJA1105_REFRESH_INTERVAL);
+ cancel_work_sync(&priv->tagger_data.rxtstamp_work);
+ skb_queue_purge(&priv->tagger_data.skb_rxtstamp_queue);
+ ptp_clock_unregister(ptp_data->clock);
+ ptp_data->clock = NULL;
}
-static const struct ptp_clock_info sja1105_ptp_caps = {
- .owner = THIS_MODULE,
- .name = "SJA1105 PHC",
- .adjfine = sja1105_ptp_adjfine,
- .adjtime = sja1105_ptp_adjtime,
- .gettime64 = sja1105_ptp_gettime,
- .settime64 = sja1105_ptp_settime,
- .max_adj = SJA1105_MAX_ADJ_PPB,
-};
-
-int sja1105_ptp_clock_register(struct sja1105_private *priv)
+void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int slot,
+ struct sk_buff *skb)
{
- struct dsa_switch *ds = priv->ds;
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+ struct skb_shared_hwtstamps shwt = {0};
+ u64 ticks, ts;
+ int rc;
- /* Set up the cycle counter */
- priv->tstamp_cc = (struct cyclecounter) {
- .read = sja1105_ptptsclk_read,
- .mask = CYCLECOUNTER_MASK(64),
- .shift = SJA1105_CC_SHIFT,
- .mult = SJA1105_CC_MULT,
- };
- mutex_init(&priv->ptp_lock);
- priv->ptp_caps = sja1105_ptp_caps;
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- priv->clock = ptp_clock_register(&priv->ptp_caps, ds->dev);
- if (IS_ERR_OR_NULL(priv->clock))
- return PTR_ERR(priv->clock);
+ mutex_lock(&ptp_data->lock);
- INIT_DELAYED_WORK(&priv->refresh_work, sja1105_ptp_overflow_check);
- schedule_delayed_work(&priv->refresh_work, SJA1105_REFRESH_INTERVAL);
+ rc = sja1105_ptpclkval_read(priv, &ticks);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to read PTP clock: %d\n", rc);
+ kfree_skb(skb);
+ goto out;
+ }
- return sja1105_ptp_reset(priv);
-}
+ rc = sja1105_ptpegr_ts_poll(ds, slot, &ts);
+ if (rc < 0) {
+ dev_err(ds->dev, "timed out polling for tstamp\n");
+ kfree_skb(skb);
+ goto out;
+ }
-void sja1105_ptp_clock_unregister(struct sja1105_private *priv)
-{
- if (IS_ERR_OR_NULL(priv->clock))
- return;
+ ts = sja1105_tstamp_reconstruct(ds, ticks, ts);
+
+ shwt.hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(ts));
+ skb_complete_tx_timestamp(skb, &shwt);
- cancel_delayed_work_sync(&priv->refresh_work);
- ptp_clock_unregister(priv->clock);
- priv->clock = NULL;
+out:
+ mutex_unlock(&ptp_data->lock);
}
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.h b/drivers/net/dsa/sja1105/sja1105_ptp.h
index af456b0a4d27..243f130374d2 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.h
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.h
@@ -1,54 +1,93 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
*/
#ifndef _SJA1105_PTP_H
#define _SJA1105_PTP_H
#if IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP)
-int sja1105_ptp_clock_register(struct sja1105_private *priv);
+/* Timestamps are in units of 8 ns clock ticks (equivalent to
+ * a fixed 125 MHz clock).
+ */
+#define SJA1105_TICK_NS 8
+
+static inline s64 ns_to_sja1105_ticks(s64 ns)
+{
+ return ns / SJA1105_TICK_NS;
+}
+
+static inline s64 sja1105_ticks_to_ns(s64 ticks)
+{
+ return ticks * SJA1105_TICK_NS;
+}
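
A quick sanity check of the two helpers above, assuming the fixed 8 ns (125 MHz) tick:

    #include <assert.h>

    #define SJA1105_TICK_NS 8

    int main(void)
    {
            /* One second is 125,000,000 ticks, and the conversions invert */
            assert(1000000000ll / SJA1105_TICK_NS == 125000000ll);
            assert(125000000ll * SJA1105_TICK_NS == 1000000000ll);
            return 0;
    }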
-void sja1105_ptp_clock_unregister(struct sja1105_private *priv);
+struct sja1105_ptp_cmd {
+ u64 resptp; /* reset */
+ u64 corrclk4ts; /* use the corrected clock for timestamps */
+ u64 ptpclkadd; /* enum sja1105_ptp_clk_mode */
+};
-int sja1105_ptpegr_ts_poll(struct sja1105_private *priv, int port, u64 *ts);
+struct sja1105_ptp_data {
+ struct ptp_clock_info caps;
+ struct ptp_clock *clock;
+ struct sja1105_ptp_cmd cmd;
+ /* Serializes all operations on the PTP hardware clock */
+ struct mutex lock;
+};
-int sja1105et_ptp_cmd(const void *ctx, const void *data);
+int sja1105_ptp_clock_register(struct dsa_switch *ds);
-int sja1105pqrs_ptp_cmd(const void *ctx, const void *data);
+void sja1105_ptp_clock_unregister(struct dsa_switch *ds);
+
+int sja1105et_ptp_cmd(const struct dsa_switch *ds,
+ const struct sja1105_ptp_cmd *cmd);
+
+int sja1105pqrs_ptp_cmd(const struct dsa_switch *ds,
+ const struct sja1105_ptp_cmd *cmd);
int sja1105_get_ts_info(struct dsa_switch *ds, int port,
struct ethtool_ts_info *ts);
-u64 sja1105_tstamp_reconstruct(struct sja1105_private *priv, u64 now,
- u64 ts_partial);
+void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int slot,
+ struct sk_buff *clone);
+
+int sja1105_ptp_reset(struct dsa_switch *ds);
+
+bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *skb, unsigned int type);
+
+bool sja1105_port_txtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *skb, unsigned int type);
+
+int sja1105_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr);
-int sja1105_ptp_reset(struct sja1105_private *priv);
+int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr);
#else
-static inline int sja1105_ptp_clock_register(struct sja1105_private *priv)
-{
- return 0;
-}
+struct sja1105_ptp_cmd;
-static inline void sja1105_ptp_clock_unregister(struct sja1105_private *priv)
-{
- return;
-}
+/* Structures cannot be empty in C. Bah!
+ * Keep the mutex as the only element, which is a bit more difficult to
+ * refactor out of sja1105_main.c anyway.
+ */
+struct sja1105_ptp_data {
+ struct mutex lock;
+};
-static inline int
-sja1105_ptpegr_ts_poll(struct sja1105_private *priv, int port, u64 *ts)
+static inline int sja1105_ptp_clock_register(struct dsa_switch *ds)
{
return 0;
}
-static inline u64 sja1105_tstamp_reconstruct(struct sja1105_private *priv,
- u64 now, u64 ts_partial)
+static inline void sja1105_ptp_clock_unregister(struct dsa_switch *ds) { }
+
+static inline void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int slot,
+ struct sk_buff *clone)
{
- return 0;
}
-static inline int sja1105_ptp_reset(struct sja1105_private *priv)
+static inline int sja1105_ptp_reset(struct dsa_switch *ds)
{
return 0;
}
@@ -59,6 +98,14 @@ static inline int sja1105_ptp_reset(struct sja1105_private *priv)
#define sja1105_get_ts_info NULL
+#define sja1105_port_rxtstamp NULL
+
+#define sja1105_port_txtstamp NULL
+
+#define sja1105_hwtstamp_get NULL
+
+#define sja1105_hwtstamp_set NULL
+
#endif /* IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP) */
#endif /* _SJA1105_PTP_H */
diff --git a/drivers/net/dsa/sja1105/sja1105_spi.c b/drivers/net/dsa/sja1105/sja1105_spi.c
index 84dc603138cf..ed02410a9366 100644
--- a/drivers/net/dsa/sja1105/sja1105_spi.c
+++ b/drivers/net/dsa/sja1105/sja1105_spi.c
@@ -7,42 +7,15 @@
#include <linux/packing.h>
#include "sja1105.h"
-#define SJA1105_SIZE_PORT_CTRL 4
#define SJA1105_SIZE_RESET_CMD 4
#define SJA1105_SIZE_SPI_MSG_HEADER 4
#define SJA1105_SIZE_SPI_MSG_MAXLEN (64 * 4)
-#define SJA1105_SIZE_SPI_TRANSFER_MAX \
- (SJA1105_SIZE_SPI_MSG_HEADER + SJA1105_SIZE_SPI_MSG_MAXLEN)
-static int sja1105_spi_transfer(const struct sja1105_private *priv,
- const void *tx, void *rx, int size)
-{
- struct spi_device *spi = priv->spidev;
- struct spi_transfer transfer = {
- .tx_buf = tx,
- .rx_buf = rx,
- .len = size,
- };
- struct spi_message msg;
- int rc;
-
- if (size > SJA1105_SIZE_SPI_TRANSFER_MAX) {
- dev_err(&spi->dev, "SPI message (%d) longer than max of %d\n",
- size, SJA1105_SIZE_SPI_TRANSFER_MAX);
- return -EMSGSIZE;
- }
-
- spi_message_init(&msg);
- spi_message_add_tail(&transfer, &msg);
-
- rc = spi_sync(spi, &msg);
- if (rc < 0) {
- dev_err(&spi->dev, "SPI transfer failed: %d\n", rc);
- return rc;
- }
-
- return rc;
-}
+struct sja1105_chunk {
+ u8 *buf;
+ size_t len;
+ u64 reg_addr;
+};
static void
sja1105_spi_message_pack(void *buf, const struct sja1105_spi_message *msg)
@@ -56,121 +29,150 @@ sja1105_spi_message_pack(void *buf, const struct sja1105_spi_message *msg)
sja1105_pack(buf, &msg->address, 24, 4, size);
}
+#define sja1105_hdr_xfer(xfers, chunk) \
+ ((xfers) + 2 * (chunk))
+#define sja1105_chunk_xfer(xfers, chunk) \
+ ((xfers) + 2 * (chunk) + 1)
+#define sja1105_hdr_buf(hdr_bufs, chunk) \
+ ((hdr_bufs) + (chunk) * SJA1105_SIZE_SPI_MSG_HEADER)
+
/* If @rw is:
* - SPI_WRITE: creates and sends an SPI write message at absolute
- * address reg_addr, taking size_bytes from *packed_buf
+ * address reg_addr, taking @len bytes from *buf
* - SPI_READ: creates and sends an SPI read message from absolute
- * address reg_addr, writing size_bytes into *packed_buf
- *
- * This function should only be called if it is priorly known that
- * @size_bytes is smaller than SIZE_SPI_MSG_MAXLEN. Larger packed buffers
- * are chunked in smaller pieces by sja1105_spi_send_long_packed_buf below.
+ * address reg_addr, writing @len bytes into *buf
*/
-int sja1105_spi_send_packed_buf(const struct sja1105_private *priv,
- sja1105_spi_rw_mode_t rw, u64 reg_addr,
- void *packed_buf, size_t size_bytes)
+int sja1105_xfer_buf(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 reg_addr,
+ u8 *buf, size_t len)
{
- u8 tx_buf[SJA1105_SIZE_SPI_TRANSFER_MAX] = {0};
- u8 rx_buf[SJA1105_SIZE_SPI_TRANSFER_MAX] = {0};
- const int msg_len = size_bytes + SJA1105_SIZE_SPI_MSG_HEADER;
- struct sja1105_spi_message msg = {0};
- int rc;
+ struct sja1105_chunk chunk = {
+ .len = min_t(size_t, len, SJA1105_SIZE_SPI_MSG_MAXLEN),
+ .reg_addr = reg_addr,
+ .buf = buf,
+ };
+ struct spi_device *spi = priv->spidev;
+ struct spi_transfer *xfers;
+ int num_chunks;
+ int rc, i = 0;
+ u8 *hdr_bufs;
- if (msg_len > SJA1105_SIZE_SPI_TRANSFER_MAX)
- return -ERANGE;
+ num_chunks = DIV_ROUND_UP(len, SJA1105_SIZE_SPI_MSG_MAXLEN);
- msg.access = rw;
- msg.address = reg_addr;
- if (rw == SPI_READ)
- msg.read_count = size_bytes / 4;
+ /* One transfer for each message header, one for each message
+ * payload (chunk).
+ */
+ xfers = kcalloc(2 * num_chunks, sizeof(struct spi_transfer),
+ GFP_KERNEL);
+ if (!xfers)
+ return -ENOMEM;
- sja1105_spi_message_pack(tx_buf, &msg);
+ /* Packed buffers for the num_chunks SPI message headers,
+ * stored as a contiguous array
+ */
+ hdr_bufs = kcalloc(num_chunks, SJA1105_SIZE_SPI_MSG_HEADER,
+ GFP_KERNEL);
+ if (!hdr_bufs) {
+ kfree(xfers);
+ return -ENOMEM;
+ }
- if (rw == SPI_WRITE)
- memcpy(tx_buf + SJA1105_SIZE_SPI_MSG_HEADER,
- packed_buf, size_bytes);
+ for (i = 0; i < num_chunks; i++) {
+ struct spi_transfer *chunk_xfer = sja1105_chunk_xfer(xfers, i);
+ struct spi_transfer *hdr_xfer = sja1105_hdr_xfer(xfers, i);
+ u8 *hdr_buf = sja1105_hdr_buf(hdr_bufs, i);
+ struct sja1105_spi_message msg;
+
+ /* Populate the transfer's header buffer */
+ msg.address = chunk.reg_addr;
+ msg.access = rw;
+ if (rw == SPI_READ)
+ msg.read_count = chunk.len / 4;
+ else
+ /* Ignored */
+ msg.read_count = 0;
+ sja1105_spi_message_pack(hdr_buf, &msg);
+ hdr_xfer->tx_buf = hdr_buf;
+ hdr_xfer->len = SJA1105_SIZE_SPI_MSG_HEADER;
+
+ /* Populate the transfer's data buffer */
+ if (rw == SPI_READ)
+ chunk_xfer->rx_buf = chunk.buf;
+ else
+ chunk_xfer->tx_buf = chunk.buf;
+ chunk_xfer->len = chunk.len;
+
+ /* Calculate next chunk */
+ chunk.buf += chunk.len;
+ chunk.reg_addr += chunk.len / 4;
+ chunk.len = min_t(size_t, (ptrdiff_t)(buf + len - chunk.buf),
+ SJA1105_SIZE_SPI_MSG_MAXLEN);
+
+ /* De-assert the chip select after each chunk. */
+ if (chunk.len)
+ chunk_xfer->cs_change = 1;
+ }
- rc = sja1105_spi_transfer(priv, tx_buf, rx_buf, msg_len);
+ rc = spi_sync_transfer(spi, xfers, 2 * num_chunks);
if (rc < 0)
- return rc;
+ dev_err(&spi->dev, "SPI transfer failed: %d\n", rc);
- if (rw == SPI_READ)
- memcpy(packed_buf, rx_buf + SJA1105_SIZE_SPI_MSG_HEADER,
- size_bytes);
+ kfree(hdr_bufs);
+ kfree(xfers);
- return 0;
+ return rc;
}
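
The chunk arithmetic above is worth a worked example. A standalone sketch, assuming the 256-byte payload limit implied by SJA1105_SIZE_SPI_MSG_MAXLEN (64 * 4) and a hypothetical 300-byte transfer at an illustrative register address; note the address advances by len / 4 because registers are word-addressed:

    #include <stdio.h>

    #define MSG_MAXLEN 256  /* SJA1105_SIZE_SPI_MSG_MAXLEN */

    int main(void)
    {
            unsigned int len = 300, reg_addr = 0x20000, off = 0;  /* illustrative */

            /* DIV_ROUND_UP(300, 256) == 2 chunks: 256 bytes, then 44 bytes */
            while (off < len) {
                    unsigned int n = len - off < MSG_MAXLEN ? len - off : MSG_MAXLEN;

                    printf("chunk: %3u bytes at reg 0x%x\n", n, reg_addr + off / 4);
                    off += n;
            }
            return 0;
    }

Each chunk is paired with its own 4-byte header transfer, and cs_change de-asserts the chip select between chunks so the device sees separate SPI messages.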
/* If @rw is:
* - SPI_WRITE: creates and sends an SPI write message at absolute
- * address reg_addr, taking size_bytes from *packed_buf
+ * address reg_addr
* - SPI_READ: creates and sends an SPI read message from absolute
- * address reg_addr, writing size_bytes into *packed_buf
+ * address reg_addr
*
* The u64 *value is unpacked, meaning that it's stored in the native
* CPU endianness and directly usable by software running on the core.
- *
- * This is a wrapper around sja1105_spi_send_packed_buf().
*/
-int sja1105_spi_send_int(const struct sja1105_private *priv,
- sja1105_spi_rw_mode_t rw, u64 reg_addr,
- u64 *value, u64 size_bytes)
+int sja1105_xfer_u64(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 reg_addr, u64 *value)
{
- u8 packed_buf[SJA1105_SIZE_SPI_MSG_MAXLEN];
+ u8 packed_buf[8];
int rc;
- if (size_bytes > SJA1105_SIZE_SPI_MSG_MAXLEN)
- return -ERANGE;
-
if (rw == SPI_WRITE)
- sja1105_pack(packed_buf, value, 8 * size_bytes - 1, 0,
- size_bytes);
+ sja1105_pack(packed_buf, value, 63, 0, 8);
- rc = sja1105_spi_send_packed_buf(priv, rw, reg_addr, packed_buf,
- size_bytes);
+ rc = sja1105_xfer_buf(priv, rw, reg_addr, packed_buf, 8);
if (rw == SPI_READ)
- sja1105_unpack(packed_buf, value, 8 * size_bytes - 1, 0,
- size_bytes);
+ sja1105_unpack(packed_buf, value, 63, 0, 8);
return rc;
}
-/* Should be used if a @packed_buf larger than SJA1105_SIZE_SPI_MSG_MAXLEN
- * must be sent/received. Splitting the buffer into chunks and assembling
- * those into SPI messages is done automatically by this function.
- */
-int sja1105_spi_send_long_packed_buf(const struct sja1105_private *priv,
- sja1105_spi_rw_mode_t rw, u64 base_addr,
- void *packed_buf, u64 buf_len)
+/* Same as above, but transfers only a 4-byte word */
+int sja1105_xfer_u32(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 reg_addr, u32 *value)
{
- struct chunk {
- void *buf_ptr;
- int len;
- u64 spi_address;
- } chunk;
- int distance_to_end;
+ u8 packed_buf[4];
+ u64 tmp;
int rc;
- /* Initialize chunk */
- chunk.buf_ptr = packed_buf;
- chunk.spi_address = base_addr;
- chunk.len = min_t(int, buf_len, SJA1105_SIZE_SPI_MSG_MAXLEN);
+ if (rw == SPI_WRITE) {
+ /* The packing API only supports u64 as CPU word size,
+ * so we need to convert.
+ */
+ tmp = *value;
+ sja1105_pack(packed_buf, &tmp, 31, 0, 4);
+ }
- while (chunk.len) {
- rc = sja1105_spi_send_packed_buf(priv, rw, chunk.spi_address,
- chunk.buf_ptr, chunk.len);
- if (rc < 0)
- return rc;
+ rc = sja1105_xfer_buf(priv, rw, reg_addr, packed_buf, 4);
- chunk.buf_ptr += chunk.len;
- chunk.spi_address += chunk.len / 4;
- distance_to_end = (uintptr_t)(packed_buf + buf_len -
- chunk.buf_ptr);
- chunk.len = min(distance_to_end, SJA1105_SIZE_SPI_MSG_MAXLEN);
+ if (rw == SPI_READ) {
+ sja1105_unpack(packed_buf, &tmp, 31, 0, 4);
+ *value = tmp;
}
- return 0;
+ return rc;
}
/* Back-ported structure from UM11040 Table 112.
@@ -241,8 +243,8 @@ static int sja1105et_reset_cmd(const void *ctx, const void *data)
sja1105et_reset_cmd_pack(packed_buf, reset);
- return sja1105_spi_send_packed_buf(priv, SPI_WRITE, regs->rgu,
- packed_buf, SJA1105_SIZE_RESET_CMD);
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->rgu, packed_buf,
+ SJA1105_SIZE_RESET_CMD);
}
static int sja1105pqrs_reset_cmd(const void *ctx, const void *data)
@@ -271,8 +273,8 @@ static int sja1105pqrs_reset_cmd(const void *ctx, const void *data)
sja1105pqrs_reset_cmd_pack(packed_buf, reset);
- return sja1105_spi_send_packed_buf(priv, SPI_WRITE, regs->rgu,
- packed_buf, SJA1105_SIZE_RESET_CMD);
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->rgu, packed_buf,
+ SJA1105_SIZE_RESET_CMD);
}
static int sja1105_cold_reset(const struct sja1105_private *priv)
@@ -287,11 +289,11 @@ int sja1105_inhibit_tx(const struct sja1105_private *priv,
unsigned long port_bitmap, bool tx_inhibited)
{
const struct sja1105_regs *regs = priv->info->regs;
- u64 inhibit_cmd;
+ u32 inhibit_cmd;
int rc;
- rc = sja1105_spi_send_int(priv, SPI_READ, regs->port_control,
- &inhibit_cmd, SJA1105_SIZE_PORT_CTRL);
+ rc = sja1105_xfer_u32(priv, SPI_READ, regs->port_control,
+ &inhibit_cmd);
if (rc < 0)
return rc;
@@ -300,8 +302,8 @@ int sja1105_inhibit_tx(const struct sja1105_private *priv,
else
inhibit_cmd &= ~port_bitmap;
- return sja1105_spi_send_int(priv, SPI_WRITE, regs->port_control,
- &inhibit_cmd, SJA1105_SIZE_PORT_CTRL);
+ return sja1105_xfer_u32(priv, SPI_WRITE, regs->port_control,
+ &inhibit_cmd);
}
struct sja1105_status {
@@ -339,9 +341,7 @@ static int sja1105_status_get(struct sja1105_private *priv,
u8 packed_buf[4];
int rc;
- rc = sja1105_spi_send_packed_buf(priv, SPI_READ,
- regs->status,
- packed_buf, 4);
+ rc = sja1105_xfer_buf(priv, SPI_READ, regs->status, packed_buf, 4);
if (rc < 0)
return rc;
@@ -409,7 +409,8 @@ int sja1105_static_config_upload(struct sja1105_private *priv)
rc = static_config_buf_prepare_for_upload(priv, config_buf, buf_len);
if (rc < 0) {
dev_err(dev, "Invalid config, cannot upload\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto out;
}
/* Prevent PHY jabbering during switch reset by inhibiting
* Tx on all ports and waiting for current packet to drain.
@@ -418,7 +419,8 @@ int sja1105_static_config_upload(struct sja1105_private *priv)
rc = sja1105_inhibit_tx(priv, port_bitmap, true);
if (rc < 0) {
dev_err(dev, "Failed to inhibit Tx on ports\n");
- return -ENXIO;
+ rc = -ENXIO;
+ goto out;
}
/* Wait for an eventual egress packet to finish transmission
* (reach IFG). It is guaranteed that a second one will not
@@ -435,9 +437,8 @@ int sja1105_static_config_upload(struct sja1105_private *priv)
/* Wait for the switch to come out of reset */
usleep_range(1000, 5000);
/* Upload the static config to the device */
- rc = sja1105_spi_send_long_packed_buf(priv, SPI_WRITE,
- regs->config,
- config_buf, buf_len);
+ rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->config,
+ config_buf, buf_len);
if (rc < 0) {
dev_err(dev, "Failed to upload config, retrying...\n");
continue;
@@ -480,7 +481,7 @@ int sja1105_static_config_upload(struct sja1105_private *priv)
dev_info(dev, "Succeeded after %d tried\n", RETRIES - retries);
}
- rc = sja1105_ptp_reset(priv);
+ rc = sja1105_ptp_reset(priv->ds);
if (rc < 0)
dev_err(dev, "Failed to reset PTP clock: %d\n", rc);
@@ -515,9 +516,8 @@ static struct sja1105_regs sja1105et_regs = {
.rmii_ext_tx_clk = {0x100018, 0x10001F, 0x100026, 0x10002D, 0x100034},
.ptpegr_ts = {0xC0, 0xC2, 0xC4, 0xC6, 0xC8},
.ptp_control = 0x17,
- .ptpclk = 0x18, /* Spans 0x18 to 0x19 */
+ .ptpclkval = 0x18, /* Spans 0x18 to 0x19 */
.ptpclkrate = 0x1A,
- .ptptsclk = 0x1B, /* Spans 0x1B to 0x1C */
};
static struct sja1105_regs sja1105pqrs_regs = {
@@ -546,9 +546,8 @@ static struct sja1105_regs sja1105pqrs_regs = {
.qlevel = {0x604, 0x614, 0x624, 0x634, 0x644},
.ptpegr_ts = {0xC0, 0xC4, 0xC8, 0xCC, 0xD0},
.ptp_control = 0x18,
- .ptpclk = 0x19,
+ .ptpclkval = 0x19,
.ptpclkrate = 0x1B,
- .ptptsclk = 0x1C,
};
struct sja1105_info sja1105e_info = {
diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.h b/drivers/net/dsa/sja1105/sja1105_static_config.h
index 7f87022a2d61..f4a5c5c04311 100644
--- a/drivers/net/dsa/sja1105/sja1105_static_config.h
+++ b/drivers/net/dsa/sja1105/sja1105_static_config.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright (c) 2016-2018, NXP Semiconductors
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2016-2018, NXP Semiconductors
* Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
*/
#ifndef _SJA1105_STATIC_CONFIG_H
diff --git a/drivers/net/dsa/sja1105/sja1105_tas.h b/drivers/net/dsa/sja1105/sja1105_tas.h
index 0b803c30e640..0aad212d88b2 100644
--- a/drivers/net/dsa/sja1105/sja1105_tas.h
+++ b/drivers/net/dsa/sja1105/sja1105_tas.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
*/
#ifndef _SJA1105_TAS_H
#define _SJA1105_TAS_H
diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c
index 614377ef7956..42c1574d45f2 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-core.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-core.c
@@ -1178,9 +1178,12 @@ int vsc73xx_probe(struct vsc73xx *vsc)
* We allocate 8 ports and avoid access to the nonexistent
* ports.
*/
- vsc->ds = dsa_switch_alloc(dev, 8);
+ vsc->ds = devm_kzalloc(dev, sizeof(*vsc->ds), GFP_KERNEL);
if (!vsc->ds)
return -ENOMEM;
+
+ vsc->ds->dev = dev;
+ vsc->ds->num_ports = 8;
vsc->ds->priv = vsc;
vsc->ds->ops = &vsc73xx_ds_ops;
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index e8e9c166185d..4ded81b27d0a 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -78,7 +78,6 @@ source "drivers/net/ethernet/freescale/Kconfig"
source "drivers/net/ethernet/fujitsu/Kconfig"
source "drivers/net/ethernet/google/Kconfig"
source "drivers/net/ethernet/hisilicon/Kconfig"
-source "drivers/net/ethernet/hp/Kconfig"
source "drivers/net/ethernet/huawei/Kconfig"
source "drivers/net/ethernet/i825xx/Kconfig"
source "drivers/net/ethernet/ibm/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 05abebc17804..f8f38dcb5f8a 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -41,7 +41,6 @@ obj-$(CONFIG_NET_VENDOR_FREESCALE) += freescale/
obj-$(CONFIG_NET_VENDOR_FUJITSU) += fujitsu/
obj-$(CONFIG_NET_VENDOR_GOOGLE) += google/
obj-$(CONFIG_NET_VENDOR_HISILICON) += hisilicon/
-obj-$(CONFIG_NET_VENDOR_HP) += hp/
obj-$(CONFIG_NET_VENDOR_HUAWEI) += huawei/
obj-$(CONFIG_NET_VENDOR_IBM) += ibm/
obj-$(CONFIG_NET_VENDOR_INTEL) += intel/
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index bb032be7fe31..4cd53fc338b5 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -730,12 +730,12 @@ static int altera_tse_phy_get_addr_mdio_create(struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
struct device_node *np = priv->device->of_node;
- int ret = 0;
+ int ret;
- priv->phy_iface = of_get_phy_mode(np);
+ ret = of_get_phy_mode(np, &priv->phy_iface);
/* Avoid getting the PHY address and creating the MDIO bus if no PHY is present */
- if (!priv->phy_iface)
+ if (ret)
return 0;
/* try to get PHY address from device tree, use PHY autodetection if
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 16553d92fad2..a3250dcf7d53 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -133,7 +133,7 @@ static void ena_queue_stats(struct ena_adapter *adapter, u64 **data)
u64 *ptr;
int i, j;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues; i++) {
/* Tx stats */
ring = &adapter->tx_ring[i];
@@ -205,7 +205,7 @@ int ena_get_sset_count(struct net_device *netdev, int sset)
if (sset != ETH_SS_STATS)
return -EOPNOTSUPP;
- return adapter->num_queues * (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX)
+ return adapter->num_io_queues * (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX)
+ ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENA_COM;
}
@@ -214,7 +214,7 @@ static void ena_queue_strings(struct ena_adapter *adapter, u8 **data)
const struct ena_stats *ena_stats;
int i, j;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues; i++) {
/* Tx stats */
for (j = 0; j < ENA_STATS_ARRAY_TX; j++) {
ena_stats = &ena_stats_tx_strings[j];
@@ -333,7 +333,7 @@ static void ena_update_tx_rings_intr_moderation(struct ena_adapter *adapter)
val = ena_com_get_nonadaptive_moderation_interval_tx(adapter->ena_dev);
- for (i = 0; i < adapter->num_queues; i++)
+ for (i = 0; i < adapter->num_io_queues; i++)
adapter->tx_ring[i].smoothed_interval = val;
}
@@ -344,7 +344,7 @@ static void ena_update_rx_rings_intr_moderation(struct ena_adapter *adapter)
val = ena_com_get_nonadaptive_moderation_interval_rx(adapter->ena_dev);
- for (i = 0; i < adapter->num_queues; i++)
+ for (i = 0; i < adapter->num_io_queues; i++)
adapter->rx_ring[i].smoothed_interval = val;
}
@@ -612,7 +612,7 @@ static int ena_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
switch (info->cmd) {
case ETHTOOL_GRXRINGS:
- info->data = adapter->num_queues;
+ info->data = adapter->num_io_queues;
rc = 0;
break;
case ETHTOOL_GRXFH:
@@ -734,14 +734,20 @@ static void ena_get_channels(struct net_device *netdev,
{
struct ena_adapter *adapter = netdev_priv(netdev);
- channels->max_rx = adapter->num_queues;
- channels->max_tx = adapter->num_queues;
- channels->max_other = 0;
- channels->max_combined = 0;
- channels->rx_count = adapter->num_queues;
- channels->tx_count = adapter->num_queues;
- channels->other_count = 0;
- channels->combined_count = 0;
+ channels->max_combined = adapter->max_num_io_queues;
+ channels->combined_count = adapter->num_io_queues;
+}
+
+static int ena_set_channels(struct net_device *netdev,
+ struct ethtool_channels *channels)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ u32 count = channels->combined_count;
+ /* The check for max value is already done in ethtool */
+ if (count < ENA_MIN_NUM_IO_QUEUES)
+ return -EINVAL;
+
+ return ena_update_queue_count(adapter, count);
}
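
With .set_channels wired up, the IO queue count becomes runtime-configurable, e.g. via "ethtool -L eth0 combined 4". A hedged C equivalent using the ETHTOOL_SCHANNELS ioctl; the interface name is an assumption:

    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    int main(void)
    {
            struct ethtool_channels ch = {
                    .cmd = ETHTOOL_SCHANNELS,
                    .combined_count = 4,    /* requested IO queue count */
            };
            struct ifreq ifr;
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
            ifr.ifr_data = (void *)&ch;

            if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
                    perror("ETHTOOL_SCHANNELS");
            return 0;
    }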
static int ena_get_tunable(struct net_device *netdev,
@@ -807,6 +813,7 @@ static const struct ethtool_ops ena_ethtool_ops = {
.get_rxfh = ena_get_rxfh,
.set_rxfh = ena_set_rxfh,
.get_channels = ena_get_channels,
+ .set_channels = ena_set_channels,
.get_tunable = ena_get_tunable,
.set_tunable = ena_set_tunable,
};
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index c487d2a7d6dd..d46a912002ff 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -101,7 +101,7 @@ static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu)
{
int i;
- for (i = 0; i < adapter->num_queues; i++)
+ for (i = 0; i < adapter->num_io_queues; i++)
adapter->rx_ring[i].mtu = mtu;
}
@@ -129,10 +129,10 @@ static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
u32 i;
int rc;
- adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_queues);
+ adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_io_queues);
if (!adapter->netdev->rx_cpu_rmap)
return -ENOMEM;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues; i++) {
int irq_idx = ENA_IO_IRQ_IDX(i);
rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
@@ -172,7 +172,7 @@ static void ena_init_io_rings(struct ena_adapter *adapter)
ena_dev = adapter->ena_dev;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues; i++) {
txr = &adapter->tx_ring[i];
rxr = &adapter->rx_ring[i];
@@ -294,7 +294,7 @@ static int ena_setup_all_tx_resources(struct ena_adapter *adapter)
{
int i, rc = 0;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues; i++) {
rc = ena_setup_tx_resources(adapter, i);
if (rc)
goto err_setup_tx;
@@ -322,7 +322,7 @@ static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
{
int i;
- for (i = 0; i < adapter->num_queues; i++)
+ for (i = 0; i < adapter->num_io_queues; i++)
ena_free_tx_resources(adapter, i);
}
@@ -428,7 +428,7 @@ static int ena_setup_all_rx_resources(struct ena_adapter *adapter)
{
int i, rc = 0;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues; i++) {
rc = ena_setup_rx_resources(adapter, i);
if (rc)
goto err_setup_rx;
@@ -456,7 +456,7 @@ static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
{
int i;
- for (i = 0; i < adapter->num_queues; i++)
+ for (i = 0; i < adapter->num_io_queues; i++)
ena_free_rx_resources(adapter, i);
}
@@ -600,7 +600,7 @@ static void ena_refill_all_rx_bufs(struct ena_adapter *adapter)
struct ena_ring *rx_ring;
int i, rc, bufs_num;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues; i++) {
rx_ring = &adapter->rx_ring[i];
bufs_num = rx_ring->ring_size - 1;
rc = ena_refill_rx_bufs(rx_ring, bufs_num);
@@ -616,7 +616,7 @@ static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
{
int i;
- for (i = 0; i < adapter->num_queues; i++)
+ for (i = 0; i < adapter->num_io_queues; i++)
ena_free_rx_bufs(adapter, i);
}
@@ -688,7 +688,7 @@ static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
struct ena_ring *tx_ring;
int i;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues; i++) {
tx_ring = &adapter->tx_ring[i];
ena_free_tx_bufs(tx_ring);
}
@@ -699,7 +699,7 @@ static void ena_destroy_all_tx_queues(struct ena_adapter *adapter)
u16 ena_qid;
int i;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues; i++) {
ena_qid = ENA_IO_TXQ_IDX(i);
ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
}
@@ -710,7 +710,7 @@ static void ena_destroy_all_rx_queues(struct ena_adapter *adapter)
u16 ena_qid;
int i;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues; i++) {
ena_qid = ENA_IO_RXQ_IDX(i);
cancel_work_sync(&adapter->ena_napi[i].dim.work);
ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
@@ -1331,7 +1331,7 @@ static irqreturn_t ena_intr_msix_io(int irq, void *data)
* the number of potential io queues is the minimum of what the device
* supports and the number of vCPUs.
*/
-static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
+static int ena_enable_msix(struct ena_adapter *adapter)
{
int msix_vecs, irq_cnt;
@@ -1342,7 +1342,7 @@ static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
}
 /* Reserve the max msix vectors we might need */
- msix_vecs = ENA_MAX_MSIX_VEC(num_queues);
+ msix_vecs = ENA_MAX_MSIX_VEC(adapter->num_io_queues);
netif_dbg(adapter, probe, adapter->netdev,
"trying to enable MSI-X, vectors %d\n", msix_vecs);
@@ -1359,7 +1359,7 @@ static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
netif_notice(adapter, probe, adapter->netdev,
"enable only %d MSI-X (out of %d), reduce the number of queues\n",
irq_cnt, msix_vecs);
- adapter->num_queues = irq_cnt - ENA_ADMIN_MSIX_VEC;
+ adapter->num_io_queues = irq_cnt - ENA_ADMIN_MSIX_VEC;
}
if (ena_init_rx_cpu_rmap(adapter))
@@ -1397,7 +1397,7 @@ static void ena_setup_io_intr(struct ena_adapter *adapter)
netdev = adapter->netdev;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues; i++) {
irq_idx = ENA_IO_IRQ_IDX(i);
cpu = i % num_online_cpus();
@@ -1529,7 +1529,7 @@ static void ena_del_napi(struct ena_adapter *adapter)
{
int i;
- for (i = 0; i < adapter->num_queues; i++)
+ for (i = 0; i < adapter->num_io_queues; i++)
netif_napi_del(&adapter->ena_napi[i].napi);
}
@@ -1538,7 +1538,7 @@ static void ena_init_napi(struct ena_adapter *adapter)
struct ena_napi *napi;
int i;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues; i++) {
napi = &adapter->ena_napi[i];
netif_napi_add(adapter->netdev,
@@ -1555,7 +1555,7 @@ static void ena_napi_disable_all(struct ena_adapter *adapter)
{
int i;
- for (i = 0; i < adapter->num_queues; i++)
+ for (i = 0; i < adapter->num_io_queues; i++)
napi_disable(&adapter->ena_napi[i].napi);
}
@@ -1563,7 +1563,7 @@ static void ena_napi_enable_all(struct ena_adapter *adapter)
{
int i;
- for (i = 0; i < adapter->num_queues; i++)
+ for (i = 0; i < adapter->num_io_queues; i++)
napi_enable(&adapter->ena_napi[i].napi);
}
@@ -1673,7 +1673,7 @@ static int ena_create_all_io_tx_queues(struct ena_adapter *adapter)
struct ena_com_dev *ena_dev = adapter->ena_dev;
int rc, i;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues; i++) {
rc = ena_create_io_tx_queue(adapter, i);
if (rc)
goto create_err;
@@ -1741,7 +1741,7 @@ static int ena_create_all_io_rx_queues(struct ena_adapter *adapter)
struct ena_com_dev *ena_dev = adapter->ena_dev;
int rc, i;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues; i++) {
rc = ena_create_io_rx_queue(adapter, i);
if (rc)
goto create_err;
@@ -1764,7 +1764,7 @@ static void set_io_rings_size(struct ena_adapter *adapter,
{
int i;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues; i++) {
adapter->tx_ring[i].ring_size = new_tx_size;
adapter->rx_ring[i].ring_size = new_rx_size;
}
@@ -1902,14 +1902,14 @@ static int ena_up(struct ena_adapter *adapter)
set_bit(ENA_FLAG_DEV_UP, &adapter->flags);
/* Enable completion queues interrupt */
- for (i = 0; i < adapter->num_queues; i++)
+ for (i = 0; i < adapter->num_io_queues; i++)
ena_unmask_interrupt(&adapter->tx_ring[i],
&adapter->rx_ring[i]);
/* schedule napi in case we had pending packets
* from the last time we disable napi
*/
- for (i = 0; i < adapter->num_queues; i++)
+ for (i = 0; i < adapter->num_io_queues; i++)
napi_schedule(&adapter->ena_napi[i].napi);
return rc;
@@ -1984,13 +1984,13 @@ static int ena_open(struct net_device *netdev)
int rc;
/* Notify the stack of the actual queue counts. */
- rc = netif_set_real_num_tx_queues(netdev, adapter->num_queues);
+ rc = netif_set_real_num_tx_queues(netdev, adapter->num_io_queues);
if (rc) {
netif_err(adapter, ifup, netdev, "Can't set num tx queues\n");
return rc;
}
- rc = netif_set_real_num_rx_queues(netdev, adapter->num_queues);
+ rc = netif_set_real_num_rx_queues(netdev, adapter->num_io_queues);
if (rc) {
netif_err(adapter, ifup, netdev, "Can't set num rx queues\n");
return rc;
@@ -2043,14 +2043,30 @@ int ena_update_queue_sizes(struct ena_adapter *adapter,
u32 new_tx_size,
u32 new_rx_size)
{
- bool dev_up;
+ bool dev_was_up;
- dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
+ dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
ena_close(adapter->netdev);
adapter->requested_tx_ring_size = new_tx_size;
adapter->requested_rx_ring_size = new_rx_size;
ena_init_io_rings(adapter);
- return dev_up ? ena_up(adapter) : 0;
+ return dev_was_up ? ena_up(adapter) : 0;
+}
+
+int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count)
+{
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
+ bool dev_was_up;
+
+ dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
+ ena_close(adapter->netdev);
+ adapter->num_io_queues = new_channel_count;
+ /* We need to destroy the rss table so that the indirection
+ * table will be reinitialized by ena_up()
+ */
+ ena_com_rss_destroy(ena_dev);
+ ena_init_io_rings(adapter);
+ return dev_was_up ? ena_open(adapter->netdev) : 0;
}
static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct sk_buff *skb)
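
Note that the reopen goes through ena_open() rather than ena_up(): after the channel count changes, ena_open() republishes the real Tx/Rx queue counts to the stack via netif_set_real_num_tx_queues()/netif_set_real_num_rx_queues() (see the ena_open() hunk above) before bringing the datapath back up, whereas ena_up() alone would leave the stack with stale counts.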
@@ -2495,7 +2511,7 @@ static void ena_get_stats64(struct net_device *netdev,
if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
return;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues; i++) {
u64 bytes, packets;
tx_ring = &adapter->tx_ring[i];
@@ -2682,14 +2698,13 @@ err_mmio_read_less:
return rc;
}
-static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter,
- int io_vectors)
+static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter)
{
struct ena_com_dev *ena_dev = adapter->ena_dev;
struct device *dev = &adapter->pdev->dev;
int rc;
- rc = ena_enable_msix(adapter, io_vectors);
+ rc = ena_enable_msix(adapter);
if (rc) {
dev_err(dev, "Can not reserve msix vectors\n");
return rc;
@@ -2782,8 +2797,7 @@ static int ena_restore_device(struct ena_adapter *adapter)
goto err_device_destroy;
}
- rc = ena_enable_msix_and_set_admin_interrupts(adapter,
- adapter->num_queues);
+ rc = ena_enable_msix_and_set_admin_interrupts(adapter);
if (rc) {
dev_err(&pdev->dev, "Enable MSI-X failed\n");
goto err_device_destroy;
@@ -2948,7 +2962,7 @@ static void check_for_missing_completions(struct ena_adapter *adapter)
budget = ENA_MONITORED_TX_QUEUES;
- for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) {
+ for (i = adapter->last_monitored_tx_qid; i < adapter->num_io_queues; i++) {
tx_ring = &adapter->tx_ring[i];
rx_ring = &adapter->rx_ring[i];
@@ -2965,7 +2979,7 @@ static void check_for_missing_completions(struct ena_adapter *adapter)
break;
}
- adapter->last_monitored_tx_qid = i % adapter->num_queues;
+ adapter->last_monitored_tx_qid = i % adapter->num_io_queues;
}
/* trigger napi schedule after 2 consecutive detections */
@@ -2995,7 +3009,7 @@ static void check_for_empty_rx_ring(struct ena_adapter *adapter)
if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
return;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues; i++) {
rx_ring = &adapter->rx_ring[i];
refill_required =
@@ -3137,16 +3151,16 @@ static void ena_timer_service(struct timer_list *t)
mod_timer(&adapter->timer_service, jiffies + HZ);
}
-static int ena_calc_io_queue_num(struct pci_dev *pdev,
- struct ena_com_dev *ena_dev,
- struct ena_com_dev_get_features_ctx *get_feat_ctx)
+static int ena_calc_max_io_queue_num(struct pci_dev *pdev,
+ struct ena_com_dev *ena_dev,
+ struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
- int io_tx_sq_num, io_tx_cq_num, io_rx_num, io_queue_num;
+ int io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues;
if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
struct ena_admin_queue_ext_feature_fields *max_queue_ext =
&get_feat_ctx->max_queue_ext.max_queue_ext;
- io_rx_num = min_t(int, max_queue_ext->max_rx_sq_num,
+ io_rx_num = min_t(u32, max_queue_ext->max_rx_sq_num,
max_queue_ext->max_rx_cq_num);
io_tx_sq_num = max_queue_ext->max_tx_sq_num;
@@ -3156,25 +3170,25 @@ static int ena_calc_io_queue_num(struct pci_dev *pdev,
&get_feat_ctx->max_queues;
io_tx_sq_num = max_queues->max_sq_num;
io_tx_cq_num = max_queues->max_cq_num;
- io_rx_num = min_t(int, io_tx_sq_num, io_tx_cq_num);
+ io_rx_num = min_t(u32, io_tx_sq_num, io_tx_cq_num);
}
/* In case of LLQ use the llq fields for the tx SQ/CQ */
if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
io_tx_sq_num = get_feat_ctx->llq.max_llq_num;
- io_queue_num = min_t(int, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES);
- io_queue_num = min_t(int, io_queue_num, io_rx_num);
- io_queue_num = min_t(int, io_queue_num, io_tx_sq_num);
- io_queue_num = min_t(int, io_queue_num, io_tx_cq_num);
+ max_num_io_queues = min_t(u32, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES);
+ max_num_io_queues = min_t(u32, max_num_io_queues, io_rx_num);
+ max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_sq_num);
+ max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_cq_num);
 /* 1 IRQ for mgmnt and 1 IRQ for each IO direction */
- io_queue_num = min_t(int, io_queue_num, pci_msix_vec_count(pdev) - 1);
- if (unlikely(!io_queue_num)) {
+ max_num_io_queues = min_t(u32, max_num_io_queues, pci_msix_vec_count(pdev) - 1);
+ if (unlikely(!max_num_io_queues)) {
dev_err(&pdev->dev, "The device doesn't have io queues\n");
return -EFAULT;
}
- return io_queue_num;
+ return max_num_io_queues;
}
static int ena_set_queues_placement_policy(struct pci_dev *pdev,
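
Worked example (illustrative numbers, not taken from the patch): on an 8-vCPU instance whose device reports 32 Rx queues, 32 Tx SQs, 32 Tx CQs and 9 MSI-X vectors, the chain of min_t() calls yields min(8, 32, 32, 32, 9 - 1) = 8 IO queues. The result now feeds max_num_io_queues, a ceiling for runtime reconfiguration, instead of directly fixing the queue count at probe time.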
@@ -3302,7 +3316,7 @@ static int ena_rss_init_default(struct ena_adapter *adapter)
}
for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
- val = ethtool_rxfh_indir_default(i, adapter->num_queues);
+ val = ethtool_rxfh_indir_default(i, adapter->num_io_queues);
rc = ena_com_indirect_table_fill_entry(ena_dev, i,
ENA_IO_RXQ_IDX(val));
if (unlikely(rc && (rc != -EOPNOTSUPP))) {
@@ -3349,7 +3363,7 @@ static void set_default_llq_configurations(struct ena_llq_configurations *llq_co
llq_config->llq_ring_entry_size_value = 128;
}
-static int ena_calc_queue_size(struct ena_calc_queue_size_ctx *ctx)
+static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
{
struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
struct ena_com_dev *ena_dev = ctx->ena_dev;
@@ -3358,7 +3372,7 @@ static int ena_calc_queue_size(struct ena_calc_queue_size_ctx *ctx)
u32 max_tx_queue_size;
u32 max_rx_queue_size;
- if (ctx->ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
+ if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
struct ena_admin_queue_ext_feature_fields *max_queue_ext =
&ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
max_rx_queue_size = min_t(u32, max_queue_ext->max_rx_cq_depth,
@@ -3432,11 +3446,12 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct ena_llq_configurations llq_config;
struct ena_com_dev *ena_dev = NULL;
struct ena_adapter *adapter;
- int io_queue_num, bars, rc;
struct net_device *netdev;
static int adapters_found;
+ u32 max_num_io_queues;
char *queue_type_str;
bool wd_state;
+ int bars, rc;
dev_dbg(&pdev->dev, "%s\n", __func__);
@@ -3497,27 +3512,20 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
calc_queue_ctx.pdev = pdev;
/* Initial Tx and RX interrupt delay. Assumes 1 usec granularity.
- * Updated during device initialization with the real granularity
- */
+ * Updated during device initialization with the real granularity
+ */
ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS;
ena_dev->intr_moder_rx_interval = ENA_INTR_INITIAL_RX_INTERVAL_USECS;
ena_dev->intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
- io_queue_num = ena_calc_io_queue_num(pdev, ena_dev, &get_feat_ctx);
- rc = ena_calc_queue_size(&calc_queue_ctx);
- if (rc || io_queue_num <= 0) {
+ max_num_io_queues = ena_calc_max_io_queue_num(pdev, ena_dev, &get_feat_ctx);
+ rc = ena_calc_io_queue_size(&calc_queue_ctx);
+ if (rc || !max_num_io_queues) {
rc = -EFAULT;
goto err_device_destroy;
}
- dev_info(&pdev->dev, "creating %d io queues. rx queue size: %d tx queue size. %d LLQ is %s\n",
- io_queue_num,
- calc_queue_ctx.rx_queue_size,
- calc_queue_ctx.tx_queue_size,
- (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) ?
- "ENABLED" : "DISABLED");
-
/* dev zeroed in init_etherdev */
- netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), io_queue_num);
+ netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), max_num_io_queues);
if (!netdev) {
dev_err(&pdev->dev, "alloc_etherdev_mq failed\n");
rc = -ENOMEM;
@@ -3545,7 +3553,9 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;
- adapter->num_queues = io_queue_num;
+ adapter->num_io_queues = max_num_io_queues;
+ adapter->max_num_io_queues = max_num_io_queues;
+
adapter->last_monitored_tx_qid = 0;
adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK;
@@ -3569,7 +3579,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
u64_stats_init(&adapter->syncp);
- rc = ena_enable_msix_and_set_admin_interrupts(adapter, io_queue_num);
+ rc = ena_enable_msix_and_set_admin_interrupts(adapter);
if (rc) {
dev_err(&pdev->dev,
"Failed to enable and set the admin interrupts\n");
@@ -3611,9 +3621,9 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
queue_type_str = "Low Latency";
dev_info(&pdev->dev,
- "%s found at mem %lx, mac addr %pM Queues %d, Placement policy: %s\n",
+ "%s found at mem %lx, mac addr %pM, Placement policy: %s\n",
DEVICE_NAME, (long)pci_resource_start(pdev, 0),
- netdev->dev_addr, io_queue_num, queue_type_str);
+ netdev->dev_addr, queue_type_str);
set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 72ee51a82ec7..bffd778f2ce3 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -82,6 +82,8 @@
#define ENA_DEFAULT_RING_SIZE (1024)
#define ENA_MIN_RING_SIZE (256)
+#define ENA_MIN_NUM_IO_QUEUES (1)
+
#define ENA_TX_WAKEUP_THRESH (MAX_SKB_FRAGS + 2)
#define ENA_DEFAULT_RX_COPYBREAK (256 - NET_IP_ALIGN)
@@ -161,10 +163,10 @@ struct ena_calc_queue_size_ctx {
struct ena_com_dev_get_features_ctx *get_feat_ctx;
struct ena_com_dev *ena_dev;
struct pci_dev *pdev;
- u16 tx_queue_size;
- u16 rx_queue_size;
- u16 max_tx_queue_size;
- u16 max_rx_queue_size;
+ u32 tx_queue_size;
+ u32 rx_queue_size;
+ u32 max_tx_queue_size;
+ u32 max_rx_queue_size;
u16 max_tx_sgl_size;
u16 max_rx_sgl_size;
};
@@ -324,7 +326,8 @@ struct ena_adapter {
u32 rx_copybreak;
u32 max_mtu;
- int num_queues;
+ u32 num_io_queues;
+ u32 max_num_io_queues;
int msix_vecs;
@@ -387,6 +390,7 @@ void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf);
int ena_update_queue_sizes(struct ena_adapter *adapter,
u32 new_tx_size,
u32 new_rx_size);
+int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count);
int ena_get_sset_count(struct net_device *netdev, int sset);
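
With get_channels/set_channels both wired up, the queue count becomes a runtime knob: `ethtool -l <dev>` reports the current num_io_queues (and, presumably from the new max_num_io_queues field, the ceiling), and `ethtool -L <dev> combined N` lands in ena_update_queue_count() through the new .set_channels hook.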
diff --git a/drivers/net/ethernet/aquantia/atlantic/Makefile b/drivers/net/ethernet/aquantia/atlantic/Makefile
index 131cab855be7..0020726db204 100644
--- a/drivers/net/ethernet/aquantia/atlantic/Makefile
+++ b/drivers/net/ethernet/aquantia/atlantic/Makefile
@@ -24,8 +24,11 @@ atlantic-objs := aq_main.o \
aq_ethtool.o \
aq_drvinfo.o \
aq_filters.o \
+ aq_phy.o \
hw_atl/hw_atl_a0.o \
hw_atl/hw_atl_b0.o \
hw_atl/hw_atl_utils.o \
hw_atl/hw_atl_utils_fw2x.o \
hw_atl/hw_atl_llh.o
+
+atlantic-$(CONFIG_PTP_1588_CLOCK) += aq_ptp.o
\ No newline at end of file
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index 02f1b70c4e25..8c633caf79d2 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File aq_cfg.h: Definition of configuration parameters and constants. */
@@ -27,7 +27,7 @@
#define AQ_CFG_INTERRUPT_MODERATION_USEC_MAX (0x1FF * 2)
-#define AQ_CFG_IRQ_MASK 0x1FFU
+#define AQ_CFG_IRQ_MASK 0x3FFU
#define AQ_CFG_VECS_MAX 8U
#define AQ_CFG_TCS_MAX 8U
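
The interrupt mask widens by one bit (0x1FF to 0x3FF) to cover the extra per-device vector that the driver now reserves for PTP; see the `numvecs += 1` change in aq_pci_func.c further down.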
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index 24df132384fb..1ae8aabcc41a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File aq_ethtool.c: Definition of ethtool related functions. */
@@ -9,8 +9,11 @@
#include "aq_ethtool.h"
#include "aq_nic.h"
#include "aq_vec.h"
+#include "aq_ptp.h"
#include "aq_filters.h"
+#include <linux/ptp_clock_kernel.h>
+
static void aq_ethtool_get_regs(struct net_device *ndev,
struct ethtool_regs *regs, void *p)
{
@@ -377,6 +380,35 @@ static int aq_ethtool_set_wol(struct net_device *ndev,
return err;
}
+static int aq_ethtool_get_ts_info(struct net_device *ndev,
+ struct ethtool_ts_info *info)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ ethtool_op_get_ts_info(ndev, info);
+
+ if (!aq_nic->aq_ptp)
+ return 0;
+
+ info->so_timestamping |=
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON);
+
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE);
+
+ info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+ info->phc_index = ptp_clock_index(aq_ptp_get_ptp_clock(aq_nic->aq_ptp));
+
+ return 0;
+}
+
static enum hw_atl_fw2x_rate eee_mask_to_ethtool_mask(u32 speed)
{
u32 rate = 0;
@@ -604,4 +636,5 @@ const struct ethtool_ops aq_ethtool_ops = {
.set_link_ksettings = aq_ethtool_set_link_ksettings,
.get_coalesce = aq_ethtool_get_coalesce,
.set_coalesce = aq_ethtool_set_coalesce,
+ .get_ts_info = aq_ethtool_get_ts_info,
};
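
Userspace reads this back with `ethtool -T <dev>` (--show-time-stamping): the SOF_TIMESTAMPING capabilities, tx-types, rx-filters and PHC index printed there are exactly what this callback fills in, and the PHC index is in turn what linuxptp tools such as ptp4l/phc2sys use to locate the clock device.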
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
index aee827f07c16..6102251bb909 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (C) 2014-2017 aQuantia Corporation. */
+/* Copyright (C) 2014-2019 aQuantia Corporation. */
/* File aq_filters.c: RX filters related functions. */
@@ -89,12 +89,14 @@ static int aq_check_approve_fl3l4(struct aq_nic_s *aq_nic,
struct aq_hw_rx_fltrs_s *rx_fltrs,
struct ethtool_rx_flow_spec *fsp)
{
+ u32 last_location = AQ_RX_LAST_LOC_FL3L4 -
+ aq_nic->aq_hw_rx_fltrs.fl3l4.reserved_count;
+
if (fsp->location < AQ_RX_FIRST_LOC_FL3L4 ||
- fsp->location > AQ_RX_LAST_LOC_FL3L4) {
+ fsp->location > last_location) {
netdev_err(aq_nic->ndev,
"ethtool: location must be in range [%d, %d]",
- AQ_RX_FIRST_LOC_FL3L4,
- AQ_RX_LAST_LOC_FL3L4);
+ AQ_RX_FIRST_LOC_FL3L4, last_location);
return -EINVAL;
}
if (rx_fltrs->fl3l4.is_ipv6 && rx_fltrs->fl3l4.active_ipv4) {
@@ -124,12 +126,15 @@ aq_check_approve_fl2(struct aq_nic_s *aq_nic,
struct aq_hw_rx_fltrs_s *rx_fltrs,
struct ethtool_rx_flow_spec *fsp)
{
+ u32 last_location = AQ_RX_LAST_LOC_FETHERT -
+ aq_nic->aq_hw_rx_fltrs.fet_reserved_count;
+
if (fsp->location < AQ_RX_FIRST_LOC_FETHERT ||
- fsp->location > AQ_RX_LAST_LOC_FETHERT) {
+ fsp->location > last_location) {
netdev_err(aq_nic->ndev,
"ethtool: location must be in range [%d, %d]",
AQ_RX_FIRST_LOC_FETHERT,
- AQ_RX_LAST_LOC_FETHERT);
+ last_location);
return -EINVAL;
}
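
Both range checks now subtract the driver's reserved_count from the top of the bank, so locations above last_location stay out of reach of user-configured rules; the matching reservation bookkeeping (for the PTP L3L4 and ethertype filters) is added to aq_nic.c below.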
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index 53d7478689a0..5246cf44ce51 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File aq_hw.h: Declaration of abstract interface for NIC hardware specific
@@ -15,6 +15,9 @@
#include "aq_rss.h"
#include "hw_atl/hw_atl_utils.h"
+#define AQ_HW_MAC_COUNTER_HZ 312500000ll
+#define AQ_HW_PHY_COUNTER_HZ 160000000ll
+
#define AQ_RX_FIRST_LOC_FVLANID 0U
#define AQ_RX_LAST_LOC_FVLANID 15U
#define AQ_RX_FIRST_LOC_FETHERT 16U
@@ -94,6 +97,7 @@ struct aq_stats_s {
#define AQ_HW_FLAG_STOPPING 0x00000008U
#define AQ_HW_FLAG_RESETTING 0x00000010U
#define AQ_HW_FLAG_CLOSING 0x00000020U
+#define AQ_HW_PTP_AVAILABLE 0x01000000U
#define AQ_HW_LINK_DOWN 0x04000000U
#define AQ_HW_FLAG_ERR_UNPLUG 0x40000000U
#define AQ_HW_FLAG_ERR_HW 0x80000000U
@@ -135,6 +139,8 @@ struct aq_hw_s {
u32 rpc_addr;
u32 rpc_tid;
struct hw_atl_utils_fw_rpc rpc;
+ s64 ptp_clk_offset;
+ u16 phy_id;
};
struct aq_ring_s;
@@ -235,6 +241,40 @@ struct aq_hw_ops {
int (*hw_set_offload)(struct aq_hw_s *self,
struct aq_nic_cfg_s *aq_nic_cfg);
+ int (*hw_tx_tc_mode_get)(struct aq_hw_s *self, u32 *tc_mode);
+
+ int (*hw_rx_tc_mode_get)(struct aq_hw_s *self, u32 *tc_mode);
+
+ int (*hw_ring_hwts_rx_fill)(struct aq_hw_s *self,
+ struct aq_ring_s *aq_ring);
+
+ int (*hw_ring_hwts_rx_receive)(struct aq_hw_s *self,
+ struct aq_ring_s *ring);
+
+ void (*hw_get_ptp_ts)(struct aq_hw_s *self, u64 *stamp);
+
+ int (*hw_adj_clock_freq)(struct aq_hw_s *self, s32 delta);
+
+ int (*hw_adj_sys_clock)(struct aq_hw_s *self, s64 delta);
+
+ int (*hw_set_sys_clock)(struct aq_hw_s *self, u64 time, u64 ts);
+
+ int (*hw_ts_to_sys_clock)(struct aq_hw_s *self, u64 ts, u64 *time);
+
+ int (*hw_gpio_pulse)(struct aq_hw_s *self, u32 index, u64 start,
+ u32 period);
+
+ int (*hw_extts_gpio_enable)(struct aq_hw_s *self, u32 index,
+ u32 enable);
+
+ int (*hw_get_sync_ts)(struct aq_hw_s *self, u64 *ts);
+
+ u16 (*rx_extract_ts)(struct aq_hw_s *self, u8 *p, unsigned int len,
+ u64 *timestamp);
+
+ int (*extract_hwts)(struct aq_hw_s *self, u8 *p, unsigned int len,
+ u64 *timestamp);
+
int (*hw_set_fc)(struct aq_hw_s *self, u32 fc, u32 tc);
};
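
These new aq_hw_ops entries form the hardware abstraction that aq_ptp.c (added below) is written against: hw_get_ptp_ts/hw_adj_sys_clock/hw_adj_clock_freq back the gettime/adjtime/adjfine clock ops, hw_gpio_pulse and hw_extts_gpio_enable back the pin requests, and rx_extract_ts/extract_hwts pull timestamps out of received descriptors.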
@@ -267,6 +307,12 @@ struct aq_fw_ops {
int (*set_power)(struct aq_hw_s *self, unsigned int power_state,
u8 *mac);
+ int (*send_fw_request)(struct aq_hw_s *self,
+ const struct hw_fw_request_iface *fw_req,
+ size_t size);
+
+ void (*enable_ptp)(struct aq_hw_s *self, int enable);
+
int (*set_eee_rate)(struct aq_hw_s *self, u32 speed);
int (*get_eee_rate)(struct aq_hw_s *self, u32 *rate,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index b4a0fb281e69..a26d4a69efad 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File aq_main.c: Main file for aQuantia Linux driver. */
@@ -10,10 +10,13 @@
#include "aq_nic.h"
#include "aq_pci_func.h"
#include "aq_ethtool.h"
+#include "aq_ptp.h"
#include "aq_filters.h"
#include <linux/netdevice.h>
#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
MODULE_LICENSE("GPL v2");
MODULE_VERSION(AQ_CFG_DRV_VERSION);
@@ -93,6 +96,24 @@ static int aq_ndev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ if (unlikely(aq_utils_obj_test(&aq_nic->flags, AQ_NIC_PTP_DPATH_UP))) {
+ /* Hardware adds the Timestamp for PTPv2 802.1AS
+ * and PTPv2 IPv4 UDP.
+ * We have to push even general 320 port messages to the ptp
+ * queue explicitly. This is a limitation of current firmware
+ * and hardware PTP design of the chip. Otherwise the ptp stream
+ * will fail to sync.
+ */
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ||
+ unlikely((ip_hdr(skb)->version == 4) &&
+ (ip_hdr(skb)->protocol == IPPROTO_UDP) &&
+ ((udp_hdr(skb)->dest == htons(319)) ||
+ (udp_hdr(skb)->dest == htons(320)))) ||
+ unlikely(eth_hdr(skb)->h_proto == htons(ETH_P_1588)))
+ return aq_ptp_xmit(aq_nic, skb);
+ }
+
+ skb_tx_timestamp(skb);
return aq_nic_xmit(aq_nic, skb);
}
@@ -194,9 +215,88 @@ static void aq_ndev_set_multicast_settings(struct net_device *ndev)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- aq_nic_set_packet_filter(aq_nic, ndev->flags);
+ (void)aq_nic_set_multicast_list(aq_nic, ndev);
+}
+
+static int aq_ndev_config_hwtstamp(struct aq_nic_s *aq_nic,
+ struct hwtstamp_config *config)
+{
+ if (config->flags)
+ return -EINVAL;
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ return aq_ptp_hwtstamp_config_set(aq_nic->aq_ptp, config);
+}
+
+static int aq_ndev_hwtstamp_set(struct aq_nic_s *aq_nic, struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+ int ret_val;
+
+ if (!aq_nic->aq_ptp)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ ret_val = aq_ndev_config_hwtstamp(aq_nic, &config);
+ if (ret_val)
+ return ret_val;
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+static int aq_ndev_hwtstamp_get(struct aq_nic_s *aq_nic, struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+
+ if (!aq_nic->aq_ptp)
+ return -EOPNOTSUPP;
+
+ aq_ptp_hwtstamp_config_get(aq_nic->aq_ptp, &config);
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+static int aq_ndev_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(netdev);
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return aq_ndev_hwtstamp_set(aq_nic, ifr);
+
+ case SIOCGHWTSTAMP:
+ return aq_ndev_hwtstamp_get(aq_nic, ifr);
+ }
- aq_nic_set_multicast_list(aq_nic, ndev);
+ return -EOPNOTSUPP;
}
static int aq_ndo_vlan_rx_add_vid(struct net_device *ndev, __be16 proto,
@@ -236,6 +336,7 @@ static const struct net_device_ops aq_ndev_ops = {
.ndo_change_mtu = aq_ndev_change_mtu,
.ndo_set_mac_address = aq_ndev_set_mac_address,
.ndo_set_features = aq_ndev_set_features,
+ .ndo_do_ioctl = aq_ndev_ioctl,
.ndo_vlan_rx_add_vid = aq_ndo_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = aq_ndo_vlan_rx_kill_vid,
};
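
For reference, a hypothetical userspace snippet that exercises the new ioctl path; this is standard SIOCSHWTSTAMP usage, with `sock` any AF_INET datagram socket and all names coming from uapi headers, not from this driver:

	#include <string.h>
	#include <sys/ioctl.h>
	#include <net/if.h>
	#include <linux/sockios.h>
	#include <linux/net_tstamp.h>

	static int enable_hw_tstamps(int sock, const char *ifname)
	{
		struct hwtstamp_config cfg = {
			.tx_type   = HWTSTAMP_TX_ON,
			.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
		};
		struct ifreq ifr;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (char *)&cfg;

		/* routed to aq_ndev_ioctl() -> aq_ndev_hwtstamp_set() above */
		return ioctl(sock, SIOCSHWTSTAMP, &ifr);
	}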
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 8f66e7817811..433adc099e44 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File aq_nic.c: Definition of common code for NIC. */
@@ -12,6 +12,9 @@
#include "aq_hw.h"
#include "aq_pci_func.h"
#include "aq_main.h"
+#include "aq_phy.h"
+#include "aq_ptp.h"
+#include "aq_filters.h"
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
@@ -145,6 +148,13 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
self->aq_hw->aq_link_status.mbps);
aq_nic_update_interrupt_moderation_settings(self);
+ if (self->aq_ptp) {
+ aq_ptp_clock_init(self);
+ aq_ptp_tm_offset_set(self,
+ self->aq_hw->aq_link_status.mbps);
+ aq_ptp_link_change(self);
+ }
+
/* Driver has to update flow control settings on RX block
* on any link event.
* We should query FW whether it negotiated FC.
@@ -192,6 +202,8 @@ static void aq_nic_service_task(struct work_struct *work)
service_task);
int err;
+ aq_ptp_service_task(self);
+
if (aq_utils_obj_test(&self->flags, AQ_NIC_FLAGS_IS_NOT_READY))
return;
@@ -327,10 +339,27 @@ int aq_nic_init(struct aq_nic_s *self)
if (err < 0)
goto err_exit;
+ if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_TP) {
+ self->aq_hw->phy_id = HW_ATL_PHY_ID_MAX;
+ err = aq_phy_init(self->aq_hw);
+ }
+
for (i = 0U, aq_vec = self->aq_vec[0];
self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
aq_vec_init(aq_vec, self->aq_hw_ops, self->aq_hw);
+ err = aq_ptp_init(self, self->irqvecs - 1);
+ if (err < 0)
+ goto err_exit;
+
+ err = aq_ptp_ring_alloc(self);
+ if (err < 0)
+ goto err_exit;
+
+ err = aq_ptp_ring_init(self);
+ if (err < 0)
+ goto err_exit;
+
netif_carrier_off(self->ndev);
err_exit:
@@ -361,6 +390,10 @@ int aq_nic_start(struct aq_nic_s *self)
goto err_exit;
}
+ err = aq_ptp_ring_start(self);
+ if (err < 0)
+ goto err_exit;
+
err = self->aq_hw_ops->hw_start(self->aq_hw);
if (err < 0)
goto err_exit;
@@ -388,6 +421,10 @@ int aq_nic_start(struct aq_nic_s *self)
goto err_exit;
}
+ err = aq_ptp_irq_alloc(self);
+ if (err < 0)
+ goto err_exit;
+
if (self->aq_nic_cfg.link_irq_vec) {
int irqvec = pci_irq_vector(self->pdev,
self->aq_nic_cfg.link_irq_vec);
@@ -420,9 +457,8 @@ err_exit:
return err;
}
-static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
- struct sk_buff *skb,
- struct aq_ring_s *ring)
+unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
+ struct aq_ring_s *ring)
{
unsigned int ret = 0U;
unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
@@ -631,9 +667,12 @@ err_exit:
int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
{
- unsigned int packet_filter = self->packet_filter;
+ const struct aq_hw_ops *hw_ops = self->aq_hw_ops;
+ struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+ unsigned int packet_filter = ndev->flags;
struct netdev_hw_addr *ha = NULL;
unsigned int i = 0U;
+ int err = 0;
self->mc_list.count = 0;
if (netdev_uc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
@@ -641,29 +680,28 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
} else {
netdev_for_each_uc_addr(ha, ndev) {
ether_addr_copy(self->mc_list.ar[i++], ha->addr);
-
- if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
- break;
}
}
- if (i + netdev_mc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
- packet_filter |= IFF_ALLMULTI;
- } else {
- netdev_for_each_mc_addr(ha, ndev) {
- ether_addr_copy(self->mc_list.ar[i++], ha->addr);
-
- if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
- break;
+ cfg->is_mc_list_enabled = !!(packet_filter & IFF_MULTICAST);
+ if (cfg->is_mc_list_enabled) {
+ if (i + netdev_mc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
+ packet_filter |= IFF_ALLMULTI;
+ } else {
+ netdev_for_each_mc_addr(ha, ndev) {
+ ether_addr_copy(self->mc_list.ar[i++],
+ ha->addr);
+ }
}
}
if (i > 0 && i <= AQ_HW_MULTICAST_ADDRESS_MAX) {
- packet_filter |= IFF_MULTICAST;
self->mc_list.count = i;
- self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
- self->mc_list.ar,
- self->mc_list.count);
+ err = hw_ops->hw_multicast_list_set(self->aq_hw,
+ self->mc_list.ar,
+ self->mc_list.count);
+ if (err < 0)
+ return err;
}
return aq_nic_set_packet_filter(self, packet_filter);
}
@@ -951,10 +989,14 @@ int aq_nic_stop(struct aq_nic_s *self)
else
aq_pci_func_free_irqs(self);
+ aq_ptp_irq_free(self);
+
for (i = 0U, aq_vec = self->aq_vec[0];
self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
aq_vec_stop(aq_vec);
+ aq_ptp_ring_stop(self);
+
return self->aq_hw_ops->hw_stop(self->aq_hw);
}
@@ -970,6 +1012,11 @@ void aq_nic_deinit(struct aq_nic_s *self)
self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
aq_vec_deinit(aq_vec);
+ aq_ptp_unregister(self);
+ aq_ptp_ring_deinit(self);
+ aq_ptp_ring_free(self);
+ aq_ptp_free(self);
+
if (likely(self->aq_fw_ops->deinit)) {
mutex_lock(&self->fwreq_mutex);
self->aq_fw_ops->deinit(self->aq_hw);
@@ -1066,3 +1113,46 @@ void aq_nic_shutdown(struct aq_nic_s *self)
err_exit:
rtnl_unlock();
}
+
+u8 aq_nic_reserve_filter(struct aq_nic_s *self, enum aq_rx_filter_type type)
+{
+ u8 location = 0xFF;
+ u32 fltr_cnt;
+ u32 n_bit;
+
+ switch (type) {
+ case aq_rx_filter_ethertype:
+ location = AQ_RX_LAST_LOC_FETHERT - AQ_RX_FIRST_LOC_FETHERT -
+ self->aq_hw_rx_fltrs.fet_reserved_count;
+ self->aq_hw_rx_fltrs.fet_reserved_count++;
+ break;
+ case aq_rx_filter_l3l4:
+ fltr_cnt = AQ_RX_LAST_LOC_FL3L4 - AQ_RX_FIRST_LOC_FL3L4;
+ n_bit = fltr_cnt - self->aq_hw_rx_fltrs.fl3l4.reserved_count;
+
+ self->aq_hw_rx_fltrs.fl3l4.active_ipv4 |= BIT(n_bit);
+ self->aq_hw_rx_fltrs.fl3l4.reserved_count++;
+ location = n_bit;
+ break;
+ default:
+ break;
+ }
+
+ return location;
+}
+
+void aq_nic_release_filter(struct aq_nic_s *self, enum aq_rx_filter_type type,
+ u32 location)
+{
+ switch (type) {
+ case aq_rx_filter_ethertype:
+ self->aq_hw_rx_fltrs.fet_reserved_count--;
+ break;
+ case aq_rx_filter_l3l4:
+ self->aq_hw_rx_fltrs.fl3l4.reserved_count--;
+ self->aq_hw_rx_fltrs.fl3l4.active_ipv4 &= ~BIT(location);
+ break;
+ default:
+ break;
+ }
+}
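
aq_nic_reserve_filter() hands out slots from the top of each bank downward, tracked only by a per-type counter (so reserve/release is effectively LIFO); this is what keeps the driver-owned PTP filters clear of the user-visible location window that aq_filters.c now clamps.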
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index 255b54a6ae07..c2513b79b9e9 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File aq_nic.h: Declaration of common code for NIC. */
@@ -17,6 +17,8 @@ struct aq_ring_s;
struct aq_hw_ops;
struct aq_fw_s;
struct aq_vec_s;
+struct aq_ptp_s;
+enum aq_rx_filter_type;
struct aq_nic_cfg_s {
const struct aq_hw_caps_s *aq_hw_caps;
@@ -53,6 +55,7 @@ struct aq_nic_cfg_s {
#define AQ_NIC_FLAG_STOPPING 0x00000008U
#define AQ_NIC_FLAG_RESETTING 0x00000010U
#define AQ_NIC_FLAG_CLOSING 0x00000020U
+#define AQ_NIC_PTP_DPATH_UP 0x02000000U
#define AQ_NIC_LINK_DOWN 0x04000000U
#define AQ_NIC_FLAG_ERR_UNPLUG 0x40000000U
#define AQ_NIC_FLAG_ERR_HW 0x80000000U
@@ -70,6 +73,7 @@ struct aq_hw_rx_fl3l4 {
u8 active_ipv4;
u8 active_ipv6:2;
u8 is_ipv6;
+ u8 reserved_count;
};
struct aq_hw_rx_fltrs_s {
@@ -77,6 +81,8 @@ struct aq_hw_rx_fltrs_s {
u16 active_filters;
struct aq_hw_rx_fl2 fl2;
struct aq_hw_rx_fl3l4 fl3l4;
+ /* filter ether type */
+ u8 fet_reserved_count;
};
struct aq_nic_s {
@@ -108,6 +114,8 @@ struct aq_nic_s {
u32 irqvecs;
/* mutex to serialize FW interface access operations */
struct mutex fwreq_mutex;
+ /* PTP support */
+ struct aq_ptp_s *aq_ptp;
struct aq_hw_rx_fltrs_s aq_hw_rx_fltrs;
};
@@ -126,6 +134,8 @@ void aq_nic_cfg_start(struct aq_nic_s *self);
int aq_nic_ndev_register(struct aq_nic_s *self);
void aq_nic_ndev_free(struct aq_nic_s *self);
int aq_nic_start(struct aq_nic_s *self);
+unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
+ struct aq_ring_s *ring);
int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb);
int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p);
int aq_nic_get_regs_count(struct aq_nic_s *self);
@@ -148,5 +158,7 @@ u32 aq_nic_get_fw_version(struct aq_nic_s *self);
int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg);
int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self);
void aq_nic_shutdown(struct aq_nic_s *self);
-
+u8 aq_nic_reserve_filter(struct aq_nic_s *self, enum aq_rx_filter_type type);
+void aq_nic_release_filter(struct aq_nic_s *self, enum aq_rx_filter_type type,
+ u32 location);
#endif /* AQ_NIC_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 74b9f3f1da81..e82c96b50373 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File aq_pci_func.c: Definition of PCI functions. */
@@ -269,6 +269,9 @@ static int aq_pci_probe(struct pci_dev *pdev,
numvecs = min((u8)AQ_CFG_VECS_DEF,
aq_nic_get_cfg(self)->aq_hw_caps->msix_irqs);
numvecs = min(numvecs, num_online_cpus());
+ /* Request IRQ vector for PTP */
+ numvecs += 1;
+
numvecs += AQ_HW_SERVICE_IRQS;
 /* enable interrupts */
#if !AQ_CFG_FORCE_LEGACY_INT
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_phy.c b/drivers/net/ethernet/aquantia/atlantic/aq_phy.c
new file mode 100644
index 000000000000..51ae921e3e1f
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_phy.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* aQuantia Corporation Network Driver
+ * Copyright (C) 2018-2019 aQuantia Corporation. All rights reserved
+ */
+
+#include "aq_phy.h"
+
+bool aq_mdio_busy_wait(struct aq_hw_s *aq_hw)
+{
+ int err = 0;
+ u32 val;
+
+ err = readx_poll_timeout_atomic(hw_atl_mdio_busy_get, aq_hw,
+ val, val == 0U, 10U, 100000U);
+
+ if (err < 0)
+ return false;
+
+ return true;
+}
+
+u16 aq_mdio_read_word(struct aq_hw_s *aq_hw, u16 mmd, u16 addr)
+{
+ u16 phy_addr = aq_hw->phy_id << 5 | mmd;
+
+ /* Set Address register. */
+ hw_atl_glb_mdio_iface4_set(aq_hw, (addr & HW_ATL_MDIO_ADDRESS_MSK) <<
+ HW_ATL_MDIO_ADDRESS_SHIFT);
+ /* Send Address command. */
+ hw_atl_glb_mdio_iface2_set(aq_hw, HW_ATL_MDIO_EXECUTE_OPERATION_MSK |
+ (3 << HW_ATL_MDIO_OP_MODE_SHIFT) |
+ ((phy_addr & HW_ATL_MDIO_PHY_ADDRESS_MSK) <<
+ HW_ATL_MDIO_PHY_ADDRESS_SHIFT));
+
+ aq_mdio_busy_wait(aq_hw);
+
+ /* Send Read command. */
+ hw_atl_glb_mdio_iface2_set(aq_hw, HW_ATL_MDIO_EXECUTE_OPERATION_MSK |
+ (1 << HW_ATL_MDIO_OP_MODE_SHIFT) |
+ ((phy_addr & HW_ATL_MDIO_PHY_ADDRESS_MSK) <<
+ HW_ATL_MDIO_PHY_ADDRESS_SHIFT));
+ /* Read result. */
+ aq_mdio_busy_wait(aq_hw);
+
+ return (u16)hw_atl_glb_mdio_iface5_get(aq_hw);
+}
+
+void aq_mdio_write_word(struct aq_hw_s *aq_hw, u16 mmd, u16 addr, u16 data)
+{
+ u16 phy_addr = aq_hw->phy_id << 5 | mmd;
+
+ /* Set Address register. */
+ hw_atl_glb_mdio_iface4_set(aq_hw, (addr & HW_ATL_MDIO_ADDRESS_MSK) <<
+ HW_ATL_MDIO_ADDRESS_SHIFT);
+ /* Send Address command. */
+ hw_atl_glb_mdio_iface2_set(aq_hw, HW_ATL_MDIO_EXECUTE_OPERATION_MSK |
+ (3 << HW_ATL_MDIO_OP_MODE_SHIFT) |
+ ((phy_addr & HW_ATL_MDIO_PHY_ADDRESS_MSK) <<
+ HW_ATL_MDIO_PHY_ADDRESS_SHIFT));
+
+ aq_mdio_busy_wait(aq_hw);
+
+ hw_atl_glb_mdio_iface3_set(aq_hw, (data & HW_ATL_MDIO_WRITE_DATA_MSK) <<
+ HW_ATL_MDIO_WRITE_DATA_SHIFT);
+ /* Send Write command. */
+ hw_atl_glb_mdio_iface2_set(aq_hw, HW_ATL_MDIO_EXECUTE_OPERATION_MSK |
+ (2 << HW_ATL_MDIO_OP_MODE_SHIFT) |
+ ((phy_addr & HW_ATL_MDIO_PHY_ADDRESS_MSK) <<
+ HW_ATL_MDIO_PHY_ADDRESS_SHIFT));
+
+ aq_mdio_busy_wait(aq_hw);
+}
+
+u16 aq_phy_read_reg(struct aq_hw_s *aq_hw, u16 mmd, u16 address)
+{
+ int err = 0;
+ u32 val;
+
+ err = readx_poll_timeout_atomic(hw_atl_sem_mdio_get, aq_hw,
+ val, val == 1U, 10U, 100000U);
+
+ if (err < 0) {
+ err = 0xffff;
+ goto err_exit;
+ }
+
+ err = aq_mdio_read_word(aq_hw, mmd, address);
+
+ hw_atl_reg_glb_cpu_sem_set(aq_hw, 1U, HW_ATL_FW_SM_MDIO);
+
+err_exit:
+ return err;
+}
+
+void aq_phy_write_reg(struct aq_hw_s *aq_hw, u16 mmd, u16 address, u16 data)
+{
+ int err = 0;
+ u32 val;
+
+ err = readx_poll_timeout_atomic(hw_atl_sem_mdio_get, aq_hw,
+ val, val == 1U, 10U, 100000U);
+ if (err < 0)
+ return;
+
+ aq_mdio_write_word(aq_hw, mmd, address, data);
+ hw_atl_reg_glb_cpu_sem_set(aq_hw, 1U, HW_ATL_FW_SM_MDIO);
+}
+
+bool aq_phy_init_phy_id(struct aq_hw_s *aq_hw)
+{
+ u16 val;
+
+ for (aq_hw->phy_id = 0; aq_hw->phy_id < HW_ATL_PHY_ID_MAX;
+ ++aq_hw->phy_id) {
+ /* PMA Standard Device Identifier 2: Address 1.3 */
+ val = aq_phy_read_reg(aq_hw, MDIO_MMD_PMAPMD, 3);
+
+ if (val != 0xffff)
+ return true;
+ }
+
+ return false;
+}
+
+bool aq_phy_init(struct aq_hw_s *aq_hw)
+{
+ u32 dev_id;
+
+ if (aq_hw->phy_id == HW_ATL_PHY_ID_MAX)
+ if (!aq_phy_init_phy_id(aq_hw))
+ return false;
+
+ /* PMA Standard Device Identifier:
+ * Address 1.2 = MSW,
+ * Address 1.3 = LSW
+ */
+ dev_id = aq_phy_read_reg(aq_hw, MDIO_MMD_PMAPMD, 2);
+ dev_id <<= 16;
+ dev_id |= aq_phy_read_reg(aq_hw, MDIO_MMD_PMAPMD, 3);
+
+ if (dev_id == 0xffffffff) {
+ aq_hw->phy_id = HW_ATL_PHY_ID_MAX;
+ return false;
+ }
+
+ return true;
+}
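
As a usage sketch mirroring what aq_phy_init() itself does (the helper name aq_phy_read_dev_id is illustrative, not part of the patch):

	/* Read the 32-bit PMA/PMD device identifier over Clause 45 MDIO:
	 * register 1.2 holds the MSW, register 1.3 the LSW.
	 */
	static u32 aq_phy_read_dev_id(struct aq_hw_s *aq_hw)
	{
		u32 dev_id;

		dev_id = aq_phy_read_reg(aq_hw, MDIO_MMD_PMAPMD, 2);
		dev_id <<= 16;
		dev_id |= aq_phy_read_reg(aq_hw, MDIO_MMD_PMAPMD, 3);

		return dev_id; /* 0xffffffff: no PHY answered */
	}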
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_phy.h b/drivers/net/ethernet/aquantia/atlantic/aq_phy.h
new file mode 100644
index 000000000000..84b72ad04a4a
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_phy.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* aQuantia Corporation Network Driver
+ * Copyright (C) 2018-2019 aQuantia Corporation. All rights reserved
+ */
+
+#ifndef AQ_PHY_H
+#define AQ_PHY_H
+
+#include <linux/mdio.h>
+
+#include "hw_atl/hw_atl_llh.h"
+#include "hw_atl/hw_atl_llh_internal.h"
+#include "aq_hw_utils.h"
+#include "aq_hw.h"
+
+#define HW_ATL_PHY_ID_MAX 32U
+
+bool aq_mdio_busy_wait(struct aq_hw_s *aq_hw);
+
+u16 aq_mdio_read_word(struct aq_hw_s *aq_hw, u16 mmd, u16 addr);
+
+void aq_mdio_write_word(struct aq_hw_s *aq_hw, u16 mmd, u16 addr, u16 data);
+
+u16 aq_phy_read_reg(struct aq_hw_s *aq_hw, u16 mmd, u16 address);
+
+void aq_phy_write_reg(struct aq_hw_s *aq_hw, u16 mmd, u16 address, u16 data);
+
+bool aq_phy_init_phy_id(struct aq_hw_s *aq_hw);
+
+bool aq_phy_init(struct aq_hw_s *aq_hw);
+
+#endif /* AQ_PHY_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
new file mode 100644
index 000000000000..8175513e48c9
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
@@ -0,0 +1,1392 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Aquantia Corporation Network Driver
+ * Copyright (C) 2014-2019 Aquantia Corporation. All rights reserved
+ */
+
+/* File aq_ptp.c:
+ * Definition of functions for Linux PTP support.
+ */
+
+#include <linux/ptp_clock_kernel.h>
+#include <linux/ptp_classify.h>
+#include <linux/interrupt.h>
+#include <linux/clocksource.h>
+
+#include "aq_nic.h"
+#include "aq_ptp.h"
+#include "aq_ring.h"
+#include "aq_phy.h"
+#include "aq_filters.h"
+
+#define AQ_PTP_TX_TIMEOUT (HZ * 10)
+
+#define POLL_SYNC_TIMER_MS 15
+
+enum ptp_speed_offsets {
+ ptp_offset_idx_10 = 0,
+ ptp_offset_idx_100,
+ ptp_offset_idx_1000,
+ ptp_offset_idx_2500,
+ ptp_offset_idx_5000,
+ ptp_offset_idx_10000,
+};
+
+struct ptp_skb_ring {
+ struct sk_buff **buff;
+ spinlock_t lock;
+ unsigned int size;
+ unsigned int head;
+ unsigned int tail;
+};
+
+struct ptp_tx_timeout {
+ spinlock_t lock;
+ bool active;
+ unsigned long tx_start;
+};
+
+struct aq_ptp_s {
+ struct aq_nic_s *aq_nic;
+ struct hwtstamp_config hwtstamp_config;
+ spinlock_t ptp_lock;
+ spinlock_t ptp_ring_lock;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_info;
+
+ atomic_t offset_egress;
+ atomic_t offset_ingress;
+
+ struct aq_ring_param_s ptp_ring_param;
+
+ struct ptp_tx_timeout ptp_tx_timeout;
+
+ unsigned int idx_vector;
+ struct napi_struct napi;
+
+ struct aq_ring_s ptp_tx;
+ struct aq_ring_s ptp_rx;
+ struct aq_ring_s hwts_rx;
+
+ struct ptp_skb_ring skb_ring;
+
+ struct aq_rx_filter_l3l4 udp_filter;
+ struct aq_rx_filter_l2 eth_type_filter;
+
+ struct delayed_work poll_sync;
+ u32 poll_timeout_ms;
+
+ bool extts_pin_enabled;
+ u64 last_sync1588_ts;
+};
+
+struct ptp_tm_offset {
+ unsigned int mbps;
+ int egress;
+ int ingress;
+};
+
+static struct ptp_tm_offset ptp_offset[6];
+
+void aq_ptp_tm_offset_set(struct aq_nic_s *aq_nic, unsigned int mbps)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+ int i, egress, ingress;
+
+ if (!aq_ptp)
+ return;
+
+ egress = 0;
+ ingress = 0;
+
+ for (i = 0; i < ARRAY_SIZE(ptp_offset); i++) {
+ if (mbps == ptp_offset[i].mbps) {
+ egress = ptp_offset[i].egress;
+ ingress = ptp_offset[i].ingress;
+ break;
+ }
+ }
+
+ atomic_set(&aq_ptp->offset_egress, egress);
+ atomic_set(&aq_ptp->offset_ingress, ingress);
+}
+
+static int __aq_ptp_skb_put(struct ptp_skb_ring *ring, struct sk_buff *skb)
+{
+ unsigned int next_head = (ring->head + 1) % ring->size;
+
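+ /* keep one slot free so a full ring is distinguishable from an empty one */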
+ if (next_head == ring->tail)
+ return -ENOMEM;
+
+ ring->buff[ring->head] = skb_get(skb);
+ ring->head = next_head;
+
+ return 0;
+}
+
+static int aq_ptp_skb_put(struct ptp_skb_ring *ring, struct sk_buff *skb)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&ring->lock, flags);
+ ret = __aq_ptp_skb_put(ring, skb);
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ return ret;
+}
+
+static struct sk_buff *__aq_ptp_skb_get(struct ptp_skb_ring *ring)
+{
+ struct sk_buff *skb;
+
+ if (ring->tail == ring->head)
+ return NULL;
+
+ skb = ring->buff[ring->tail];
+ ring->tail = (ring->tail + 1) % ring->size;
+
+ return skb;
+}
+
+static struct sk_buff *aq_ptp_skb_get(struct ptp_skb_ring *ring)
+{
+ unsigned long flags;
+ struct sk_buff *skb;
+
+ spin_lock_irqsave(&ring->lock, flags);
+ skb = __aq_ptp_skb_get(ring);
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ return skb;
+}
+
+static unsigned int aq_ptp_skb_buf_len(struct ptp_skb_ring *ring)
+{
+ unsigned long flags;
+ unsigned int len;
+
+ spin_lock_irqsave(&ring->lock, flags);
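+ /* head >= tail: entries are contiguous; otherwise the ring has wrapped */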
+ len = (ring->head >= ring->tail) ?
+ ring->head - ring->tail :
+ ring->size - ring->tail + ring->head;
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ return len;
+}
+
+static int aq_ptp_skb_ring_init(struct ptp_skb_ring *ring, unsigned int size)
+{
+ struct sk_buff **buff = kmalloc(sizeof(*buff) * size, GFP_KERNEL);
+
+ if (!buff)
+ return -ENOMEM;
+
+ spin_lock_init(&ring->lock);
+
+ ring->buff = buff;
+ ring->size = size;
+ ring->head = 0;
+ ring->tail = 0;
+
+ return 0;
+}
+
+static void aq_ptp_skb_ring_clean(struct ptp_skb_ring *ring)
+{
+ struct sk_buff *skb;
+
+ while ((skb = aq_ptp_skb_get(ring)) != NULL)
+ dev_kfree_skb_any(skb);
+}
+
+static void aq_ptp_skb_ring_release(struct ptp_skb_ring *ring)
+{
+ if (ring->buff) {
+ aq_ptp_skb_ring_clean(ring);
+ kfree(ring->buff);
+ ring->buff = NULL;
+ }
+}
+
+static void aq_ptp_tx_timeout_init(struct ptp_tx_timeout *timeout)
+{
+ spin_lock_init(&timeout->lock);
+ timeout->active = false;
+}
+
+static void aq_ptp_tx_timeout_start(struct aq_ptp_s *aq_ptp)
+{
+ struct ptp_tx_timeout *timeout = &aq_ptp->ptp_tx_timeout;
+ unsigned long flags;
+
+ spin_lock_irqsave(&timeout->lock, flags);
+ timeout->active = true;
+ timeout->tx_start = jiffies;
+ spin_unlock_irqrestore(&timeout->lock, flags);
+}
+
+static void aq_ptp_tx_timeout_update(struct aq_ptp_s *aq_ptp)
+{
+ if (!aq_ptp_skb_buf_len(&aq_ptp->skb_ring)) {
+ struct ptp_tx_timeout *timeout = &aq_ptp->ptp_tx_timeout;
+ unsigned long flags;
+
+ spin_lock_irqsave(&timeout->lock, flags);
+ timeout->active = false;
+ spin_unlock_irqrestore(&timeout->lock, flags);
+ }
+}
+
+static void aq_ptp_tx_timeout_check(struct aq_ptp_s *aq_ptp)
+{
+ struct ptp_tx_timeout *timeout = &aq_ptp->ptp_tx_timeout;
+ unsigned long flags;
+ bool timeout_flag;
+
+ timeout_flag = false;
+
+ spin_lock_irqsave(&timeout->lock, flags);
+ if (timeout->active) {
+ timeout_flag = time_is_before_jiffies(timeout->tx_start +
+ AQ_PTP_TX_TIMEOUT);
+ /* reset active flag if timeout detected */
+ if (timeout_flag)
+ timeout->active = false;
+ }
+ spin_unlock_irqrestore(&timeout->lock, flags);
+
+ if (timeout_flag) {
+ aq_ptp_skb_ring_clean(&aq_ptp->skb_ring);
+ netdev_err(aq_ptp->aq_nic->ndev,
+ "PTP Timeout. Clearing Tx Timestamp SKBs\n");
+ }
+}
+
+/* aq_ptp_adjfine
+ * @ptp: the ptp clock structure
+ * @scaled_ppm: scaled parts per million adjustment from base
+ *
+ * adjust the frequency of the ptp cycle counter by the
+ * indicated scaled_ppm from the base frequency.
+ */
+static int aq_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
+ struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
+
+ mutex_lock(&aq_nic->fwreq_mutex);
+ aq_nic->aq_hw_ops->hw_adj_clock_freq(aq_nic->aq_hw,
+ scaled_ppm_to_ppb(scaled_ppm));
+ mutex_unlock(&aq_nic->fwreq_mutex);
+
+ return 0;
+}
+
+/* aq_ptp_adjtime
+ * @ptp: the ptp clock structure
+ * @delta: offset to adjust the cycle counter by
+ *
+ * adjust the timer by resetting the timecounter structure.
+ */
+static int aq_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
+ struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
+ unsigned long flags;
+
+ spin_lock_irqsave(&aq_ptp->ptp_lock, flags);
+ aq_nic->aq_hw_ops->hw_adj_sys_clock(aq_nic->aq_hw, delta);
+ spin_unlock_irqrestore(&aq_ptp->ptp_lock, flags);
+
+ return 0;
+}
+
+/* aq_ptp_gettime
+ * @ptp: the ptp clock structure
+ * @ts: timespec structure to hold the current time value
+ *
+ * read the timecounter and return the correct value in ns,
+ * after converting it into a struct timespec64.
+ */
+static int aq_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
+ struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
+ unsigned long flags;
+ u64 ns;
+
+ spin_lock_irqsave(&aq_ptp->ptp_lock, flags);
+ aq_nic->aq_hw_ops->hw_get_ptp_ts(aq_nic->aq_hw, &ns);
+ spin_unlock_irqrestore(&aq_ptp->ptp_lock, flags);
+
+ *ts = ns_to_timespec64(ns);
+
+ return 0;
+}
+
+/* aq_ptp_settime
+ * @ptp: the ptp clock structure
+ * @ts: the timespec containing the new time for the cycle counter
+ *
+ * reset the timecounter to use a new base value instead of the kernel
+ * wall timer value.
+ */
+static int aq_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
+ struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
+ unsigned long flags;
+ u64 ns = timespec64_to_ns(ts);
+ u64 now;
+
+ spin_lock_irqsave(&aq_ptp->ptp_lock, flags);
+ aq_nic->aq_hw_ops->hw_get_ptp_ts(aq_nic->aq_hw, &now);
+ aq_nic->aq_hw_ops->hw_adj_sys_clock(aq_nic->aq_hw, (s64)ns - (s64)now);
+
+ spin_unlock_irqrestore(&aq_ptp->ptp_lock, flags);
+
+ return 0;
+}
+
+static void aq_ptp_convert_to_hwtstamp(struct aq_ptp_s *aq_ptp,
+ struct skb_shared_hwtstamps *hwtstamp,
+ u64 timestamp)
+{
+ memset(hwtstamp, 0, sizeof(*hwtstamp));
+ hwtstamp->hwtstamp = ns_to_ktime(timestamp);
+}
+
+static int aq_ptp_hw_pin_conf(struct aq_nic_s *aq_nic, u32 pin_index, u64 start,
+ u64 period)
+{
+ if (period)
+ netdev_dbg(aq_nic->ndev,
+ "Enable GPIO %d pulsing, start time %llu, period %u\n",
+ pin_index, start, (u32)period);
+ else
+ netdev_dbg(aq_nic->ndev,
+ "Disable GPIO %d pulsing, start time %llu, period %u\n",
+ pin_index, start, (u32)period);
+
+ /* Notify hardware of the request to begin sending pulses.
+ * If period is ZERO then pulsing is disabled.
+ */
+ mutex_lock(&aq_nic->fwreq_mutex);
+ aq_nic->aq_hw_ops->hw_gpio_pulse(aq_nic->aq_hw, pin_index,
+ start, (u32)period);
+ mutex_unlock(&aq_nic->fwreq_mutex);
+
+ return 0;
+}
+
+static int aq_ptp_perout_pin_configure(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
+ struct ptp_clock_time *t = &rq->perout.period;
+ struct ptp_clock_time *s = &rq->perout.start;
+ struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
+ u64 start, period;
+ u32 pin_index = rq->perout.index;
+
+ /* verify the request channel is there */
+ if (pin_index >= ptp->n_per_out)
+ return -EINVAL;
+
+ /* we cannot support periods greater
+ * than 4 seconds due to reg limit
+ */
+ if (t->sec > 4 || t->sec < 0)
+ return -ERANGE;
+
+ /* convert to unsigned 64b ns,
+ * verify we can put it in a 32b register
+ */
+ period = on ? t->sec * NSEC_PER_SEC + t->nsec : 0;
+
+ /* verify the value is in range supported by hardware */
+ if (period > U32_MAX)
+ return -ERANGE;
+ /* convert to unsigned 64b ns */
+ /* TODO convert to AQ time */
+ start = on ? s->sec * NSEC_PER_SEC + s->nsec : 0;
+
+ aq_ptp_hw_pin_conf(aq_nic, pin_index, start, period);
+
+ return 0;
+}
+
+static int aq_ptp_pps_pin_configure(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
+ struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
+ u64 start, period;
+ u32 pin_index = 0;
+ u32 rest = 0;
+
+ /* verify the request channel is there */
+ if (pin_index >= ptp->n_per_out)
+ return -EINVAL;
+
+ aq_nic->aq_hw_ops->hw_get_ptp_ts(aq_nic->aq_hw, &start);
+ div_u64_rem(start, NSEC_PER_SEC, &rest);
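+ /* Align the first pulse to the next full second boundary; if
+ * fewer than 10 ms remain in the current second (rest above
+ * 990000000 ns), aim for the second after next instead,
+ * presumably to leave margin for arming the pulse.
+ */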
+ period = on ? NSEC_PER_SEC : 0; /* PPS - pulse per second */
+ start = on ? start - rest + NSEC_PER_SEC *
+ (rest > 990000000LL ? 2 : 1) : 0;
+
+ aq_ptp_hw_pin_conf(aq_nic, pin_index, start, period);
+
+ return 0;
+}
+
+static void aq_ptp_extts_pin_ctrl(struct aq_ptp_s *aq_ptp)
+{
+ struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
+ u32 enable = aq_ptp->extts_pin_enabled;
+
+ if (aq_nic->aq_hw_ops->hw_extts_gpio_enable)
+ aq_nic->aq_hw_ops->hw_extts_gpio_enable(aq_nic->aq_hw, 0,
+ enable);
+}
+
+static int aq_ptp_extts_pin_configure(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
+
+ u32 pin_index = rq->extts.index;
+
+ if (pin_index >= ptp->n_ext_ts)
+ return -EINVAL;
+
+ aq_ptp->extts_pin_enabled = !!on;
+ if (on) {
+ aq_ptp->poll_timeout_ms = POLL_SYNC_TIMER_MS;
+ cancel_delayed_work_sync(&aq_ptp->poll_sync);
+ schedule_delayed_work(&aq_ptp->poll_sync,
+ msecs_to_jiffies(aq_ptp->poll_timeout_ms));
+ }
+
+ aq_ptp_extts_pin_ctrl(aq_ptp);
+ return 0;
+}
+
+/* aq_ptp_gpio_feature_enable
+ * @ptp: the ptp clock structure
+ * @rq: the requested feature to change
+ * @on: whether to enable or disable the feature
+ */
+static int aq_ptp_gpio_feature_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ return aq_ptp_extts_pin_configure(ptp, rq, on);
+ case PTP_CLK_REQ_PEROUT:
+ return aq_ptp_perout_pin_configure(ptp, rq, on);
+ case PTP_CLK_REQ_PPS:
+ return aq_ptp_pps_pin_configure(ptp, rq, on);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/* aq_ptp_verify
+ * @ptp: the ptp clock structure
+ * @pin: index of the pin in question
+ * @func: the desired function to use
+ * @chan: the function channel index to use
+ */
+static int aq_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ /* verify the requested pin is there */
+ if (!ptp->pin_config || pin >= ptp->n_pins)
+ return -EINVAL;
+
+ /* enforce locked channels, no changing them */
+ if (chan != ptp->pin_config[pin].chan)
+ return -EINVAL;
+
+ /* we want to keep the functions locked as well */
+ if (func != ptp->pin_config[pin].func)
+ return -EINVAL;
+
+ return 0;
+}
+
+/* aq_ptp_tx_hwtstamp - utility function which checks for TX time stamp
+ * @aq_nic: the private adapter struct
+ * @timestamp: hardware Tx timestamp, in ns
+ *
+ * if the timestamp is valid, we convert it into the timecounter ns
+ * value, then store that result into the hwtstamps structure which
+ * is passed up the network stack
+ */
+void aq_ptp_tx_hwtstamp(struct aq_nic_s *aq_nic, u64 timestamp)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+ struct sk_buff *skb = aq_ptp_skb_get(&aq_ptp->skb_ring);
+ struct skb_shared_hwtstamps hwtstamp;
+
+ if (!skb) {
+ netdev_err(aq_nic->ndev, "have timestamp but tx_queues empty\n");
+ return;
+ }
+
+ timestamp += atomic_read(&aq_ptp->offset_egress);
+ aq_ptp_convert_to_hwtstamp(aq_ptp, &hwtstamp, timestamp);
+ skb_tstamp_tx(skb, &hwtstamp);
+ dev_kfree_skb_any(skb);
+
+ aq_ptp_tx_timeout_update(aq_ptp);
+}
+
+/* aq_ptp_rx_hwtstamp - utility function which checks for RX time stamp
+ * @aq_ptp: pointer to the PTP private struct
+ * @skb: particular skb to send timestamp with
+ * @timestamp: the raw hardware timestamp
+ *
+ * if the timestamp is valid, we convert it into the timecounter ns
+ * value, then store that result into the hwtstamps structure which
+ * is passed up the network stack
+ */
+static void aq_ptp_rx_hwtstamp(struct aq_ptp_s *aq_ptp, struct sk_buff *skb,
+ u64 timestamp)
+{
+ timestamp -= atomic_read(&aq_ptp->offset_ingress);
+ aq_ptp_convert_to_hwtstamp(aq_ptp, skb_hwtstamps(skb), timestamp);
+}
+
+void aq_ptp_hwtstamp_config_get(struct aq_ptp_s *aq_ptp,
+ struct hwtstamp_config *config)
+{
+ *config = aq_ptp->hwtstamp_config;
+}
+
+static void aq_ptp_prepare_filters(struct aq_ptp_s *aq_ptp)
+{
+ aq_ptp->udp_filter.cmd = HW_ATL_RX_ENABLE_FLTR_L3L4 |
+ HW_ATL_RX_ENABLE_CMP_PROT_L4 |
+ HW_ATL_RX_UDP |
+ HW_ATL_RX_ENABLE_CMP_DEST_PORT_L4 |
+ HW_ATL_RX_HOST << HW_ATL_RX_ACTION_FL3F4_SHIFT |
+ HW_ATL_RX_ENABLE_QUEUE_L3L4 |
+ aq_ptp->ptp_rx.idx << HW_ATL_RX_QUEUE_FL3L4_SHIFT;
+ aq_ptp->udp_filter.p_dst = PTP_EV_PORT;
+
+ aq_ptp->eth_type_filter.ethertype = ETH_P_1588;
+ aq_ptp->eth_type_filter.queue = aq_ptp->ptp_rx.idx;
+}
+
+int aq_ptp_hwtstamp_config_set(struct aq_ptp_s *aq_ptp,
+ struct hwtstamp_config *config)
+{
+ struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
+ const struct aq_hw_ops *hw_ops;
+ int err = 0;
+
+ hw_ops = aq_nic->aq_hw_ops;
+ if (config->tx_type == HWTSTAMP_TX_ON ||
+ config->rx_filter == HWTSTAMP_FILTER_PTP_V2_EVENT) {
+ aq_ptp_prepare_filters(aq_ptp);
+ if (hw_ops->hw_filter_l3l4_set) {
+ err = hw_ops->hw_filter_l3l4_set(aq_nic->aq_hw,
+ &aq_ptp->udp_filter);
+ }
+ if (!err && hw_ops->hw_filter_l2_set) {
+ err = hw_ops->hw_filter_l2_set(aq_nic->aq_hw,
+ &aq_ptp->eth_type_filter);
+ }
+ aq_utils_obj_set(&aq_nic->flags, AQ_NIC_PTP_DPATH_UP);
+ } else {
+ aq_ptp->udp_filter.cmd &= ~HW_ATL_RX_ENABLE_FLTR_L3L4;
+ if (hw_ops->hw_filter_l3l4_set) {
+ err = hw_ops->hw_filter_l3l4_set(aq_nic->aq_hw,
+ &aq_ptp->udp_filter);
+ }
+ if (!err && hw_ops->hw_filter_l2_clear) {
+ err = hw_ops->hw_filter_l2_clear(aq_nic->aq_hw,
+ &aq_ptp->eth_type_filter);
+ }
+ aq_utils_obj_clear(&aq_nic->flags, AQ_NIC_PTP_DPATH_UP);
+ }
+
+ if (err)
+ return -EREMOTEIO;
+
+ aq_ptp->hwtstamp_config = *config;
+
+ return 0;
+}
+
+bool aq_ptp_ring(struct aq_nic_s *aq_nic, struct aq_ring_s *ring)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+
+ if (!aq_ptp)
+ return false;
+
+ return &aq_ptp->ptp_tx == ring ||
+ &aq_ptp->ptp_rx == ring || &aq_ptp->hwts_rx == ring;
+}
+
+u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct sk_buff *skb, u8 *p,
+ unsigned int len)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+ u64 timestamp = 0;
+ u16 ret = aq_nic->aq_hw_ops->rx_extract_ts(aq_nic->aq_hw,
+ p, len, &timestamp);
+
+ if (ret > 0)
+ aq_ptp_rx_hwtstamp(aq_ptp, skb, timestamp);
+
+ return ret;
+}
+
+static int aq_ptp_poll(struct napi_struct *napi, int budget)
+{
+ struct aq_ptp_s *aq_ptp = container_of(napi, struct aq_ptp_s, napi);
+ struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
+ bool was_cleaned = false;
+ int work_done = 0;
+ int err;
+
+ /* Processing PTP TX traffic */
+ err = aq_nic->aq_hw_ops->hw_ring_tx_head_update(aq_nic->aq_hw,
+ &aq_ptp->ptp_tx);
+ if (err < 0)
+ goto err_exit;
+
+ if (aq_ptp->ptp_tx.sw_head != aq_ptp->ptp_tx.hw_head) {
+ aq_ring_tx_clean(&aq_ptp->ptp_tx);
+
+ was_cleaned = true;
+ }
+
+ /* Processing HW_TIMESTAMP RX traffic */
+ err = aq_nic->aq_hw_ops->hw_ring_hwts_rx_receive(aq_nic->aq_hw,
+ &aq_ptp->hwts_rx);
+ if (err < 0)
+ goto err_exit;
+
+ if (aq_ptp->hwts_rx.sw_head != aq_ptp->hwts_rx.hw_head) {
+ aq_ring_hwts_rx_clean(&aq_ptp->hwts_rx, aq_nic);
+
+ err = aq_nic->aq_hw_ops->hw_ring_hwts_rx_fill(aq_nic->aq_hw,
+ &aq_ptp->hwts_rx);
+ if (err < 0)
+ goto err_exit;
+
+ was_cleaned = true;
+ }
+
+ /* Processing PTP RX traffic */
+ err = aq_nic->aq_hw_ops->hw_ring_rx_receive(aq_nic->aq_hw,
+ &aq_ptp->ptp_rx);
+ if (err < 0)
+ goto err_exit;
+
+ if (aq_ptp->ptp_rx.sw_head != aq_ptp->ptp_rx.hw_head) {
+ unsigned int sw_tail_old;
+
+ err = aq_ring_rx_clean(&aq_ptp->ptp_rx, napi, &work_done, budget);
+ if (err < 0)
+ goto err_exit;
+
+ sw_tail_old = aq_ptp->ptp_rx.sw_tail;
+ err = aq_ring_rx_fill(&aq_ptp->ptp_rx);
+ if (err < 0)
+ goto err_exit;
+
+ err = aq_nic->aq_hw_ops->hw_ring_rx_fill(aq_nic->aq_hw,
+ &aq_ptp->ptp_rx,
+ sw_tail_old);
+ if (err < 0)
+ goto err_exit;
+ }
+
+ if (was_cleaned)
+ work_done = budget;
+
+ if (work_done < budget) {
+ napi_complete_done(napi, work_done);
+ aq_nic->aq_hw_ops->hw_irq_enable(aq_nic->aq_hw,
+ BIT_ULL(aq_ptp->ptp_ring_param.vec_idx));
+ }
+
+err_exit:
+ return work_done;
+}
+
+static irqreturn_t aq_ptp_isr(int irq, void *private)
+{
+ struct aq_ptp_s *aq_ptp = private;
+ int err = 0;
+
+ if (!aq_ptp) {
+ err = -EINVAL;
+ goto err_exit;
+ }
+ napi_schedule(&aq_ptp->napi);
+
+err_exit:
+ return err >= 0 ? IRQ_HANDLED : IRQ_NONE;
+}
+
+int aq_ptp_xmit(struct aq_nic_s *aq_nic, struct sk_buff *skb)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+ struct aq_ring_s *ring = &aq_ptp->ptp_tx;
+ unsigned long irq_flags;
+ int err = NETDEV_TX_OK;
+ unsigned int frags;
+
+	if (!skb->len) {
+ dev_kfree_skb_any(skb);
+ goto err_exit;
+ }
+
+ frags = skb_shinfo(skb)->nr_frags + 1;
+	/* Frags cannot be bigger than 16KB because PTP
+	 * usually works without jumbo frames, even in the background
+	 */
+ if (frags > AQ_CFG_SKB_FRAGS_MAX || frags > aq_ring_avail_dx(ring)) {
+		/* Drop packet because it doesn't make sense to delay it */
+ dev_kfree_skb_any(skb);
+ goto err_exit;
+ }
+
+ err = aq_ptp_skb_put(&aq_ptp->skb_ring, skb);
+ if (err) {
+ netdev_err(aq_nic->ndev, "SKB Ring is overflow (%u)!\n",
+ ring->size);
+ return NETDEV_TX_BUSY;
+ }
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ aq_ptp_tx_timeout_start(aq_ptp);
+ skb_tx_timestamp(skb);
+
+ spin_lock_irqsave(&aq_nic->aq_ptp->ptp_ring_lock, irq_flags);
+ frags = aq_nic_map_skb(aq_nic, skb, ring);
+
+ if (likely(frags)) {
+ err = aq_nic->aq_hw_ops->hw_ring_tx_xmit(aq_nic->aq_hw,
+ ring, frags);
+ if (err >= 0) {
+ ++ring->stats.tx.packets;
+ ring->stats.tx.bytes += skb->len;
+ }
+ } else {
+ err = NETDEV_TX_BUSY;
+ }
+ spin_unlock_irqrestore(&aq_nic->aq_ptp->ptp_ring_lock, irq_flags);
+
+err_exit:
+ return err;
+}
+
+void aq_ptp_service_task(struct aq_nic_s *aq_nic)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+
+ if (!aq_ptp)
+ return;
+
+ aq_ptp_tx_timeout_check(aq_ptp);
+}
+
+int aq_ptp_irq_alloc(struct aq_nic_s *aq_nic)
+{
+ struct pci_dev *pdev = aq_nic->pdev;
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+ int err = 0;
+
+ if (!aq_ptp)
+ return 0;
+
+ if (pdev->msix_enabled || pdev->msi_enabled) {
+ err = request_irq(pci_irq_vector(pdev, aq_ptp->idx_vector),
+ aq_ptp_isr, 0, aq_nic->ndev->name, aq_ptp);
+ } else {
+ err = -EINVAL;
+ goto err_exit;
+ }
+
+err_exit:
+ return err;
+}
+
+void aq_ptp_irq_free(struct aq_nic_s *aq_nic)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+ struct pci_dev *pdev = aq_nic->pdev;
+
+ if (!aq_ptp)
+ return;
+
+ free_irq(pci_irq_vector(pdev, aq_ptp->idx_vector), aq_ptp);
+}
+
+int aq_ptp_ring_init(struct aq_nic_s *aq_nic)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+ int err = 0;
+
+ if (!aq_ptp)
+ return 0;
+
+ err = aq_ring_init(&aq_ptp->ptp_tx);
+ if (err < 0)
+ goto err_exit;
+ err = aq_nic->aq_hw_ops->hw_ring_tx_init(aq_nic->aq_hw,
+ &aq_ptp->ptp_tx,
+ &aq_ptp->ptp_ring_param);
+ if (err < 0)
+ goto err_exit;
+
+ err = aq_ring_init(&aq_ptp->ptp_rx);
+ if (err < 0)
+ goto err_exit;
+ err = aq_nic->aq_hw_ops->hw_ring_rx_init(aq_nic->aq_hw,
+ &aq_ptp->ptp_rx,
+ &aq_ptp->ptp_ring_param);
+ if (err < 0)
+ goto err_exit;
+
+ err = aq_ring_rx_fill(&aq_ptp->ptp_rx);
+ if (err < 0)
+ goto err_rx_free;
+ err = aq_nic->aq_hw_ops->hw_ring_rx_fill(aq_nic->aq_hw,
+ &aq_ptp->ptp_rx,
+ 0U);
+ if (err < 0)
+ goto err_rx_free;
+
+ err = aq_ring_init(&aq_ptp->hwts_rx);
+ if (err < 0)
+ goto err_rx_free;
+ err = aq_nic->aq_hw_ops->hw_ring_rx_init(aq_nic->aq_hw,
+ &aq_ptp->hwts_rx,
+ &aq_ptp->ptp_ring_param);
+ if (err < 0)
+ goto err_exit;
+ err = aq_nic->aq_hw_ops->hw_ring_hwts_rx_fill(aq_nic->aq_hw,
+ &aq_ptp->hwts_rx);
+ if (err < 0)
+ goto err_exit;
+
+ return err;
+
+err_rx_free:
+ aq_ring_rx_deinit(&aq_ptp->ptp_rx);
+err_exit:
+ return err;
+}
+
+int aq_ptp_ring_start(struct aq_nic_s *aq_nic)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+ int err = 0;
+
+ if (!aq_ptp)
+ return 0;
+
+ err = aq_nic->aq_hw_ops->hw_ring_tx_start(aq_nic->aq_hw, &aq_ptp->ptp_tx);
+ if (err < 0)
+ goto err_exit;
+
+ err = aq_nic->aq_hw_ops->hw_ring_rx_start(aq_nic->aq_hw, &aq_ptp->ptp_rx);
+ if (err < 0)
+ goto err_exit;
+
+ err = aq_nic->aq_hw_ops->hw_ring_rx_start(aq_nic->aq_hw,
+ &aq_ptp->hwts_rx);
+ if (err < 0)
+ goto err_exit;
+
+ napi_enable(&aq_ptp->napi);
+
+err_exit:
+ return err;
+}
+
+void aq_ptp_ring_stop(struct aq_nic_s *aq_nic)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+
+ if (!aq_ptp)
+ return;
+
+ aq_nic->aq_hw_ops->hw_ring_tx_stop(aq_nic->aq_hw, &aq_ptp->ptp_tx);
+ aq_nic->aq_hw_ops->hw_ring_rx_stop(aq_nic->aq_hw, &aq_ptp->ptp_rx);
+
+ aq_nic->aq_hw_ops->hw_ring_rx_stop(aq_nic->aq_hw, &aq_ptp->hwts_rx);
+
+ napi_disable(&aq_ptp->napi);
+}
+
+void aq_ptp_ring_deinit(struct aq_nic_s *aq_nic)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+
+ if (!aq_ptp || !aq_ptp->ptp_tx.aq_nic || !aq_ptp->ptp_rx.aq_nic)
+ return;
+
+ aq_ring_tx_clean(&aq_ptp->ptp_tx);
+ aq_ring_rx_deinit(&aq_ptp->ptp_rx);
+}
+
+#define PTP_8TC_RING_IDX 8
+#define PTP_4TC_RING_IDX 16
+#define PTP_HWST_RING_IDX 31
+
+int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+ unsigned int tx_ring_idx, rx_ring_idx;
+ struct aq_ring_s *hwts;
+ u32 tx_tc_mode, rx_tc_mode;
+ struct aq_ring_s *ring;
+ int err;
+
+ if (!aq_ptp)
+ return 0;
+
+	/* Index must be 8 (8 TCs) or 16 (4 TCs),
+	 * depending on the Traffic Class mode.
+	 */
+ aq_nic->aq_hw_ops->hw_tx_tc_mode_get(aq_nic->aq_hw, &tx_tc_mode);
+ if (tx_tc_mode == 0)
+ tx_ring_idx = PTP_8TC_RING_IDX;
+ else
+ tx_ring_idx = PTP_4TC_RING_IDX;
+
+ ring = aq_ring_tx_alloc(&aq_ptp->ptp_tx, aq_nic,
+ tx_ring_idx, &aq_nic->aq_nic_cfg);
+ if (!ring) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
+ aq_nic->aq_hw_ops->hw_rx_tc_mode_get(aq_nic->aq_hw, &rx_tc_mode);
+ if (rx_tc_mode == 0)
+ rx_ring_idx = PTP_8TC_RING_IDX;
+ else
+ rx_ring_idx = PTP_4TC_RING_IDX;
+
+ ring = aq_ring_rx_alloc(&aq_ptp->ptp_rx, aq_nic,
+ rx_ring_idx, &aq_nic->aq_nic_cfg);
+ if (!ring) {
+ err = -ENOMEM;
+ goto err_exit_ptp_tx;
+ }
+
+ hwts = aq_ring_hwts_rx_alloc(&aq_ptp->hwts_rx, aq_nic, PTP_HWST_RING_IDX,
+ aq_nic->aq_nic_cfg.rxds,
+ aq_nic->aq_nic_cfg.aq_hw_caps->rxd_size);
+ if (!hwts) {
+ err = -ENOMEM;
+ goto err_exit_ptp_rx;
+ }
+
+ err = aq_ptp_skb_ring_init(&aq_ptp->skb_ring, aq_nic->aq_nic_cfg.rxds);
+ if (err != 0) {
+ err = -ENOMEM;
+ goto err_exit_hwts_rx;
+ }
+
+ aq_ptp->ptp_ring_param.vec_idx = aq_ptp->idx_vector;
+ aq_ptp->ptp_ring_param.cpu = aq_ptp->ptp_ring_param.vec_idx +
+ aq_nic_get_cfg(aq_nic)->aq_rss.base_cpu_number;
+ cpumask_set_cpu(aq_ptp->ptp_ring_param.cpu,
+ &aq_ptp->ptp_ring_param.affinity_mask);
+
+ return 0;
+
+err_exit_hwts_rx:
+ aq_ring_free(&aq_ptp->hwts_rx);
+err_exit_ptp_rx:
+ aq_ring_free(&aq_ptp->ptp_rx);
+err_exit_ptp_tx:
+ aq_ring_free(&aq_ptp->ptp_tx);
+err_exit:
+ return err;
+}
+
+void aq_ptp_ring_free(struct aq_nic_s *aq_nic)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+
+ if (!aq_ptp)
+ return;
+
+ aq_ring_free(&aq_ptp->ptp_tx);
+ aq_ring_free(&aq_ptp->ptp_rx);
+ aq_ring_free(&aq_ptp->hwts_rx);
+
+ aq_ptp_skb_ring_release(&aq_ptp->skb_ring);
+}
+
+#define MAX_PTP_GPIO_COUNT 4
+
+static struct ptp_clock_info aq_ptp_clock = {
+ .owner = THIS_MODULE,
+ .name = "atlantic ptp",
+ .max_adj = 999999999,
+ .n_ext_ts = 0,
+ .pps = 0,
+ .adjfine = aq_ptp_adjfine,
+ .adjtime = aq_ptp_adjtime,
+ .gettime64 = aq_ptp_gettime,
+ .settime64 = aq_ptp_settime,
+ .n_per_out = 0,
+ .enable = aq_ptp_gpio_feature_enable,
+ .n_pins = 0,
+ .verify = aq_ptp_verify,
+ .pin_config = NULL,
+};
+
+#define ptp_offset_init(__idx, __mbps, __egress, __ingress) do { \
+ ptp_offset[__idx].mbps = (__mbps); \
+ ptp_offset[__idx].egress = (__egress); \
+ ptp_offset[__idx].ingress = (__ingress); } \
+ while (0)
+
+static void aq_ptp_offset_init_from_fw(const struct hw_aq_ptp_offset *offsets)
+{
+ int i;
+
+ /* Load offsets for PTP */
+ for (i = 0; i < ARRAY_SIZE(ptp_offset); i++) {
+ switch (i) {
+ /* 100M */
+ case ptp_offset_idx_100:
+ ptp_offset_init(i, 100,
+ offsets->egress_100,
+ offsets->ingress_100);
+ break;
+ /* 1G */
+ case ptp_offset_idx_1000:
+ ptp_offset_init(i, 1000,
+ offsets->egress_1000,
+ offsets->ingress_1000);
+ break;
+ /* 2.5G */
+ case ptp_offset_idx_2500:
+ ptp_offset_init(i, 2500,
+ offsets->egress_2500,
+ offsets->ingress_2500);
+ break;
+ /* 5G */
+ case ptp_offset_idx_5000:
+ ptp_offset_init(i, 5000,
+ offsets->egress_5000,
+ offsets->ingress_5000);
+ break;
+ /* 10G */
+ case ptp_offset_idx_10000:
+ ptp_offset_init(i, 10000,
+ offsets->egress_10000,
+ offsets->ingress_10000);
+ break;
+ }
+ }
+}
+
+static void aq_ptp_offset_init(const struct hw_aq_ptp_offset *offsets)
+{
+ memset(ptp_offset, 0, sizeof(ptp_offset));
+
+ aq_ptp_offset_init_from_fw(offsets);
+}
+
+static void aq_ptp_gpio_init(struct ptp_clock_info *info,
+ struct hw_aq_info *hw_info)
+{
+ struct ptp_pin_desc pin_desc[MAX_PTP_GPIO_COUNT];
+ u32 extts_pin_cnt = 0;
+ u32 out_pin_cnt = 0;
+ u32 i;
+
+ memset(pin_desc, 0, sizeof(pin_desc));
+
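+	/* Up to MAX_PTP_GPIO_COUNT - 1 GPIOs are advertised as PEROUT
+	 * pins; the remaining slot is reserved for the Sync1588
+	 * external-timestamp input, added below only when the PHY
+	 * reports CAPS_EX_PHY_CTRL_TS_PIN.
+	 */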
+ for (i = 0; i < MAX_PTP_GPIO_COUNT - 1; i++) {
+ if (hw_info->gpio_pin[i] ==
+ (GPIO_PIN_FUNCTION_PTP0 + out_pin_cnt)) {
+ snprintf(pin_desc[out_pin_cnt].name,
+ sizeof(pin_desc[out_pin_cnt].name),
+ "AQ_GPIO%d", i);
+ pin_desc[out_pin_cnt].index = out_pin_cnt;
+ pin_desc[out_pin_cnt].chan = out_pin_cnt;
+ pin_desc[out_pin_cnt++].func = PTP_PF_PEROUT;
+ }
+ }
+
+ info->n_per_out = out_pin_cnt;
+
+ if (hw_info->caps_ex & BIT(CAPS_EX_PHY_CTRL_TS_PIN)) {
+ extts_pin_cnt += 1;
+
+ snprintf(pin_desc[out_pin_cnt].name,
+ sizeof(pin_desc[out_pin_cnt].name),
+ "AQ_GPIO%d", out_pin_cnt);
+ pin_desc[out_pin_cnt].index = out_pin_cnt;
+ pin_desc[out_pin_cnt].chan = 0;
+ pin_desc[out_pin_cnt].func = PTP_PF_EXTTS;
+ }
+
+ info->n_pins = out_pin_cnt + extts_pin_cnt;
+ info->n_ext_ts = extts_pin_cnt;
+
+ if (!info->n_pins)
+ return;
+
+ info->pin_config = kcalloc(info->n_pins, sizeof(struct ptp_pin_desc),
+ GFP_KERNEL);
+
+ if (!info->pin_config)
+ return;
+
+ memcpy(info->pin_config, &pin_desc,
+ sizeof(struct ptp_pin_desc) * info->n_pins);
+}
+
+void aq_ptp_clock_init(struct aq_nic_s *aq_nic)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+ struct timespec64 ts;
+
+ ktime_get_real_ts64(&ts);
+ aq_ptp_settime(&aq_ptp->ptp_info, &ts);
+}
+
+static void aq_ptp_poll_sync_work_cb(struct work_struct *w);
+
+int aq_ptp_init(struct aq_nic_s *aq_nic, unsigned int idx_vec)
+{
+ struct hw_atl_utils_mbox mbox;
+ struct ptp_clock *clock;
+ struct aq_ptp_s *aq_ptp;
+ int err = 0;
+
+ if (!aq_nic->aq_hw_ops->hw_get_ptp_ts) {
+ aq_nic->aq_ptp = NULL;
+ return 0;
+ }
+
+ if (!aq_nic->aq_fw_ops->enable_ptp) {
+ aq_nic->aq_ptp = NULL;
+ return 0;
+ }
+
+ hw_atl_utils_mpi_read_stats(aq_nic->aq_hw, &mbox);
+
+ if (!(mbox.info.caps_ex & BIT(CAPS_EX_PHY_PTP_EN))) {
+ aq_nic->aq_ptp = NULL;
+ return 0;
+ }
+
+ aq_ptp_offset_init(&mbox.info.ptp_offset);
+
+ aq_ptp = kzalloc(sizeof(*aq_ptp), GFP_KERNEL);
+ if (!aq_ptp) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
+ aq_ptp->aq_nic = aq_nic;
+
+ spin_lock_init(&aq_ptp->ptp_lock);
+ spin_lock_init(&aq_ptp->ptp_ring_lock);
+
+ aq_ptp->ptp_info = aq_ptp_clock;
+ aq_ptp_gpio_init(&aq_ptp->ptp_info, &mbox.info);
+ clock = ptp_clock_register(&aq_ptp->ptp_info, &aq_nic->ndev->dev);
+	if (IS_ERR_OR_NULL(clock)) {
+		netdev_err(aq_nic->ndev, "ptp_clock_register failed\n");
+		err = clock ? PTR_ERR(clock) : -ENOMEM;
+ goto err_exit;
+ }
+ aq_ptp->ptp_clock = clock;
+ aq_ptp_tx_timeout_init(&aq_ptp->ptp_tx_timeout);
+
+ atomic_set(&aq_ptp->offset_egress, 0);
+ atomic_set(&aq_ptp->offset_ingress, 0);
+
+ netif_napi_add(aq_nic_get_ndev(aq_nic), &aq_ptp->napi,
+ aq_ptp_poll, AQ_CFG_NAPI_WEIGHT);
+
+ aq_ptp->idx_vector = idx_vec;
+
+ aq_nic->aq_ptp = aq_ptp;
+
+ /* enable ptp counter */
+ aq_utils_obj_set(&aq_nic->aq_hw->flags, AQ_HW_PTP_AVAILABLE);
+ mutex_lock(&aq_nic->fwreq_mutex);
+ aq_nic->aq_fw_ops->enable_ptp(aq_nic->aq_hw, 1);
+ aq_ptp_clock_init(aq_nic);
+ mutex_unlock(&aq_nic->fwreq_mutex);
+
+ INIT_DELAYED_WORK(&aq_ptp->poll_sync, &aq_ptp_poll_sync_work_cb);
+ aq_ptp->eth_type_filter.location =
+ aq_nic_reserve_filter(aq_nic, aq_rx_filter_ethertype);
+ aq_ptp->udp_filter.location =
+ aq_nic_reserve_filter(aq_nic, aq_rx_filter_l3l4);
+
+ return 0;
+
+err_exit:
+ if (aq_ptp)
+ kfree(aq_ptp->ptp_info.pin_config);
+ kfree(aq_ptp);
+ aq_nic->aq_ptp = NULL;
+ return err;
+}
+
+void aq_ptp_unregister(struct aq_nic_s *aq_nic)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+
+ if (!aq_ptp)
+ return;
+
+ ptp_clock_unregister(aq_ptp->ptp_clock);
+}
+
+void aq_ptp_free(struct aq_nic_s *aq_nic)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+
+ if (!aq_ptp)
+ return;
+
+ aq_nic_release_filter(aq_nic, aq_rx_filter_ethertype,
+ aq_ptp->eth_type_filter.location);
+ aq_nic_release_filter(aq_nic, aq_rx_filter_l3l4,
+ aq_ptp->udp_filter.location);
+ cancel_delayed_work_sync(&aq_ptp->poll_sync);
+ /* disable ptp */
+ mutex_lock(&aq_nic->fwreq_mutex);
+ aq_nic->aq_fw_ops->enable_ptp(aq_nic->aq_hw, 0);
+ mutex_unlock(&aq_nic->fwreq_mutex);
+
+ kfree(aq_ptp->ptp_info.pin_config);
+
+ netif_napi_del(&aq_ptp->napi);
+ kfree(aq_ptp);
+ aq_nic->aq_ptp = NULL;
+}
+
+struct ptp_clock *aq_ptp_get_ptp_clock(struct aq_ptp_s *aq_ptp)
+{
+ return aq_ptp->ptp_clock;
+}
+
+/* PTP external GPIO nanoseconds count */
+static uint64_t aq_ptp_get_sync1588_ts(struct aq_nic_s *aq_nic)
+{
+ u64 ts = 0;
+
+ if (aq_nic->aq_hw_ops->hw_get_sync_ts)
+ aq_nic->aq_hw_ops->hw_get_sync_ts(aq_nic->aq_hw, &ts);
+
+ return ts;
+}
+
+static void aq_ptp_start_work(struct aq_ptp_s *aq_ptp)
+{
+ if (aq_ptp->extts_pin_enabled) {
+ aq_ptp->poll_timeout_ms = POLL_SYNC_TIMER_MS;
+ aq_ptp->last_sync1588_ts =
+ aq_ptp_get_sync1588_ts(aq_ptp->aq_nic);
+ schedule_delayed_work(&aq_ptp->poll_sync,
+ msecs_to_jiffies(aq_ptp->poll_timeout_ms));
+ }
+}
+
+int aq_ptp_link_change(struct aq_nic_s *aq_nic)
+{
+ struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+
+ if (!aq_ptp)
+ return 0;
+
+ if (aq_nic->aq_hw->aq_link_status.mbps)
+ aq_ptp_start_work(aq_ptp);
+ else
+ cancel_delayed_work_sync(&aq_ptp->poll_sync);
+
+ return 0;
+}
+
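+/* The Sync1588 counter is read non-atomically, so when a change is seen
+ * the value is re-read until two consecutive reads agree (three reads at
+ * most); if they never do, 0 is reported instead of a torn timestamp.
+ */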
+static bool aq_ptp_sync_ts_updated(struct aq_ptp_s *aq_ptp, u64 *new_ts)
+{
+ struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
+ u64 sync_ts2;
+ u64 sync_ts;
+
+ sync_ts = aq_ptp_get_sync1588_ts(aq_nic);
+
+ if (sync_ts != aq_ptp->last_sync1588_ts) {
+ sync_ts2 = aq_ptp_get_sync1588_ts(aq_nic);
+ if (sync_ts != sync_ts2) {
+ sync_ts = sync_ts2;
+ sync_ts2 = aq_ptp_get_sync1588_ts(aq_nic);
+ if (sync_ts != sync_ts2) {
+				netdev_err(aq_nic->ndev,
+					   "%s: Unable to get correct GPIO TS\n",
+					   __func__);
+ sync_ts = 0;
+ }
+ }
+
+ *new_ts = sync_ts;
+ return true;
+ }
+ return false;
+}
+
+static int aq_ptp_check_sync1588(struct aq_ptp_s *aq_ptp)
+{
+ struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
+ u64 sync_ts;
+
+ /* Sync1588 pin was triggered */
+ if (aq_ptp_sync_ts_updated(aq_ptp, &sync_ts)) {
+ if (aq_ptp->extts_pin_enabled) {
+ struct ptp_clock_event ptp_event;
+ u64 time = 0;
+
+ aq_nic->aq_hw_ops->hw_ts_to_sys_clock(aq_nic->aq_hw,
+ sync_ts, &time);
+ ptp_event.index = aq_ptp->ptp_info.n_pins - 1;
+ ptp_event.timestamp = time;
+
+ ptp_event.type = PTP_CLOCK_EXTTS;
+ ptp_clock_event(aq_ptp->ptp_clock, &ptp_event);
+ }
+
+ aq_ptp->last_sync1588_ts = sync_ts;
+ }
+
+ return 0;
+}
+
+static void aq_ptp_poll_sync_work_cb(struct work_struct *w)
+{
+ struct delayed_work *dw = to_delayed_work(w);
+ struct aq_ptp_s *aq_ptp = container_of(dw, struct aq_ptp_s, poll_sync);
+
+ aq_ptp_check_sync1588(aq_ptp);
+
+ if (aq_ptp->extts_pin_enabled) {
+ unsigned long timeout = msecs_to_jiffies(aq_ptp->poll_timeout_ms);
+
+ schedule_delayed_work(&aq_ptp->poll_sync, timeout);
+ }
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h
new file mode 100644
index 000000000000..231906431a48
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Aquantia Corporation Network Driver
+ * Copyright (C) 2014-2019 Aquantia Corporation. All rights reserved
+ */
+
+/* File aq_ptp.h: Declaration of PTP functions.
+ */
+#ifndef AQ_PTP_H
+#define AQ_PTP_H
+
+#include <linux/net_tstamp.h>
+
+#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
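+/* IS_REACHABLE() rather than IS_ENABLED(): fall back to the stubs below
+ * when the PTP core is built as a module but this driver is built in.
+ */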
+
+/* Common functions */
+int aq_ptp_init(struct aq_nic_s *aq_nic, unsigned int idx_vec);
+
+void aq_ptp_unregister(struct aq_nic_s *aq_nic);
+void aq_ptp_free(struct aq_nic_s *aq_nic);
+
+int aq_ptp_irq_alloc(struct aq_nic_s *aq_nic);
+void aq_ptp_irq_free(struct aq_nic_s *aq_nic);
+
+int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic);
+void aq_ptp_ring_free(struct aq_nic_s *aq_nic);
+
+int aq_ptp_ring_init(struct aq_nic_s *aq_nic);
+int aq_ptp_ring_start(struct aq_nic_s *aq_nic);
+void aq_ptp_ring_stop(struct aq_nic_s *aq_nic);
+void aq_ptp_ring_deinit(struct aq_nic_s *aq_nic);
+
+void aq_ptp_service_task(struct aq_nic_s *aq_nic);
+
+void aq_ptp_tm_offset_set(struct aq_nic_s *aq_nic, unsigned int mbps);
+
+void aq_ptp_clock_init(struct aq_nic_s *aq_nic);
+
+/* Traffic processing functions */
+int aq_ptp_xmit(struct aq_nic_s *aq_nic, struct sk_buff *skb);
+void aq_ptp_tx_hwtstamp(struct aq_nic_s *aq_nic, u64 timestamp);
+
+/* The caller must check that PTP is available before calling these */
+void aq_ptp_hwtstamp_config_get(struct aq_ptp_s *aq_ptp,
+ struct hwtstamp_config *config);
+int aq_ptp_hwtstamp_config_set(struct aq_ptp_s *aq_ptp,
+ struct hwtstamp_config *config);
+
+/* Return whether the ring belongs to PTP or not */
+bool aq_ptp_ring(struct aq_nic_s *aq_nic, struct aq_ring_s *ring);
+
+u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct sk_buff *skb, u8 *p,
+ unsigned int len);
+
+struct ptp_clock *aq_ptp_get_ptp_clock(struct aq_ptp_s *aq_ptp);
+
+int aq_ptp_link_change(struct aq_nic_s *aq_nic);
+
+#else
+
+static inline int aq_ptp_init(struct aq_nic_s *aq_nic, unsigned int idx_vec)
+{
+ return 0;
+}
+
+static inline void aq_ptp_unregister(struct aq_nic_s *aq_nic) {}
+
+static inline void aq_ptp_free(struct aq_nic_s *aq_nic)
+{
+}
+
+static inline int aq_ptp_irq_alloc(struct aq_nic_s *aq_nic)
+{
+ return 0;
+}
+
+static inline void aq_ptp_irq_free(struct aq_nic_s *aq_nic)
+{
+}
+
+static inline int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic)
+{
+ return 0;
+}
+
+static inline void aq_ptp_ring_free(struct aq_nic_s *aq_nic) {}
+
+static inline int aq_ptp_ring_init(struct aq_nic_s *aq_nic)
+{
+ return 0;
+}
+
+static inline int aq_ptp_ring_start(struct aq_nic_s *aq_nic)
+{
+ return 0;
+}
+
+static inline void aq_ptp_ring_stop(struct aq_nic_s *aq_nic) {}
+static inline void aq_ptp_ring_deinit(struct aq_nic_s *aq_nic) {}
+static inline void aq_ptp_service_task(struct aq_nic_s *aq_nic) {}
+static inline void aq_ptp_tm_offset_set(struct aq_nic_s *aq_nic,
+ unsigned int mbps) {}
+static inline void aq_ptp_clock_init(struct aq_nic_s *aq_nic) {}
+static inline int aq_ptp_xmit(struct aq_nic_s *aq_nic, struct sk_buff *skb)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void aq_ptp_tx_hwtstamp(struct aq_nic_s *aq_nic, u64 timestamp) {}
+static inline void aq_ptp_hwtstamp_config_get(struct aq_ptp_s *aq_ptp,
+ struct hwtstamp_config *config) {}
+static inline int aq_ptp_hwtstamp_config_set(struct aq_ptp_s *aq_ptp,
+ struct hwtstamp_config *config)
+{
+ return 0;
+}
+
+static inline bool aq_ptp_ring(struct aq_nic_s *aq_nic, struct aq_ring_s *ring)
+{
+ return false;
+}
+
+static inline u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic,
+ struct sk_buff *skb, u8 *p,
+ unsigned int len)
+{
+ return 0;
+}
+
+static inline struct ptp_clock *aq_ptp_get_ptp_clock(struct aq_ptp_s *aq_ptp)
+{
+ return NULL;
+}
+
+static inline int aq_ptp_link_change(struct aq_nic_s *aq_nic)
+{
+ return 0;
+}
+#endif
+
+#endif /* AQ_PTP_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 3901d7994ca1..f756cc0bbdf0 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File aq_ring.c: Definition of functions for Rx/Tx rings. */
@@ -10,6 +10,7 @@
#include "aq_nic.h"
#include "aq_hw.h"
#include "aq_hw_utils.h"
+#include "aq_ptp.h"
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -177,6 +178,30 @@ err_exit:
return self;
}
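+/* Ring for hardware-timestamp writebacks.  The descriptor area is
+ * over-allocated by AQ_CFG_RXDS_DEF bytes, which appears to serve as a
+ * small shared scratch buffer that every HWTS descriptor points at (see
+ * hw_atl_b0_hw_ring_hwts_rx_fill); the timestamps themselves come from
+ * the writeback descriptors, not from packet data.
+ */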
+struct aq_ring_s *
+aq_ring_hwts_rx_alloc(struct aq_ring_s *self, struct aq_nic_s *aq_nic,
+ unsigned int idx, unsigned int size, unsigned int dx_size)
+{
+ struct device *dev = aq_nic_get_dev(aq_nic);
+ size_t sz = size * dx_size + AQ_CFG_RXDS_DEF;
+
+ memset(self, 0, sizeof(*self));
+
+ self->aq_nic = aq_nic;
+ self->idx = idx;
+ self->size = size;
+ self->dx_size = dx_size;
+
+ self->dx_ring = dma_alloc_coherent(dev, sz, &self->dx_ring_pa,
+ GFP_KERNEL);
+ if (!self->dx_ring) {
+ aq_ring_free(self);
+ return NULL;
+ }
+
+ return self;
+}
+
int aq_ring_init(struct aq_ring_s *self)
{
self->hw_head = 0;
@@ -290,6 +315,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
self->sw_head = aq_ring_next_dx(self, self->sw_head),
--budget, ++(*work_done)) {
struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
+ bool is_ptp_ring = aq_ptp_ring(self->aq_nic, self);
struct aq_ring_buff_s *buff_ = NULL;
struct sk_buff *skb = NULL;
unsigned int next_ = 0U;
@@ -313,6 +339,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
break;
buff->is_error |= buff_->is_error;
+ buff->is_cso_err |= buff_->is_cso_err;
} while (!buff_->is_eop);
@@ -320,7 +347,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
err = 0;
goto err_exit;
}
- if (buff->is_error) {
+ if (buff->is_error || buff->is_cso_err) {
buff_ = buff;
do {
next_ = buff_->next,
@@ -353,6 +380,11 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
err = -ENOMEM;
goto err_exit;
}
+ if (is_ptp_ring)
+ buff->len -=
+ aq_ptp_extract_ts(self->aq_nic, skb,
+ aq_buf_vaddr(&buff->rxdata),
+ buff->len);
skb_put(skb, buff->len);
page_ref_inc(buff->rxdata.page);
} else {
@@ -361,6 +393,11 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
err = -ENOMEM;
goto err_exit;
}
+ if (is_ptp_ring)
+ buff->len -=
+ aq_ptp_extract_ts(self->aq_nic, skb,
+ aq_buf_vaddr(&buff->rxdata),
+ buff->len);
hdr_len = buff->len;
if (hdr_len > AQ_CFG_RX_HDR_SIZE)
@@ -420,8 +457,8 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
skb_set_hash(skb, buff->rss_hash,
buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
PKT_HASH_TYPE_NONE);
-
- skb_record_rx_queue(skb, self->idx);
+	/* Send all PTP traffic to queue 0 */
+ skb_record_rx_queue(skb, is_ptp_ring ? 0 : self->idx);
++self->stats.rx.packets;
self->stats.rx.bytes += skb->len;
@@ -433,6 +470,21 @@ err_exit:
return err;
}
+void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic)
+{
+ while (self->sw_head != self->hw_head) {
+ u64 ns;
+
+ aq_nic->aq_hw_ops->extract_hwts(aq_nic->aq_hw,
+ self->dx_ring +
+ (self->sw_head * self->dx_size),
+ self->dx_size, &ns);
+ aq_ptp_tx_hwtstamp(aq_nic, ns);
+
+ self->sw_head = aq_ring_next_dx(self, self->sw_head);
+ }
+}
+
int aq_ring_rx_fill(struct aq_ring_s *self)
{
unsigned int page_order = self->page_order;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
index 47abd09d06c2..be3702a4dcc9 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File aq_ring.h: Declaration of functions for Rx/Tx rings. */
@@ -174,4 +174,9 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
int budget);
int aq_ring_rx_fill(struct aq_ring_s *self);
+struct aq_ring_s *aq_ring_hwts_rx_alloc(struct aq_ring_s *self,
+ struct aq_nic_s *aq_nic, unsigned int idx,
+ unsigned int size, unsigned int dx_size);
+void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic);
+
#endif /* AQ_RING_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 30f7fc4c97ff..c7297ca03624 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File hw_atl_b0.c: Definition of Atlantic hardware specific functions. */
@@ -10,6 +10,7 @@
#include "../aq_hw_utils.h"
#include "../aq_ring.h"
#include "../aq_nic.h"
+#include "../aq_phy.h"
#include "hw_atl_b0.h"
#include "hw_atl_utils.h"
#include "hw_atl_llh.h"
@@ -49,6 +50,8 @@
.mac_regs_count = 88, \
.hw_alive_check_addr = 0x10U
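+/* Fractional nanoseconds are kept in 32.32 fixed point: one nanosecond
+ * equals FRAC_PER_NS (2^32) fractional units.
+ */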
+#define FRAC_PER_NS 0x100000000LL
+
const struct aq_hw_caps_s hw_atl_b0_caps_aqc100 = {
DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
.media_type = AQ_HW_MEDIA_TYPE_FIBRE,
@@ -124,13 +127,16 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
- hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
- hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
- hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
- hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);
+ tc = 0;
+
+ /* TX Packet Scheduler Data TC0 */
+ hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, tc);
+ hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, tc);
+ hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, tc);
+ hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, tc);
- /* Tx buf size */
- buff_size = HW_ATL_B0_TXBUF_MAX;
+ /* Tx buf size TC0 */
+ buff_size = HW_ATL_B0_TXBUF_MAX - HW_ATL_B0_PTP_TXBUF_SIZE;
hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self,
@@ -141,10 +147,15 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
(buff_size *
(1024 / 32U) * 50U) /
100U, tc);
+ /* Init TC2 for PTP_TX */
+ tc = 2;
+
+ hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, HW_ATL_B0_PTP_TXBUF_SIZE,
+ tc);
/* QoS Rx buf size per TC */
tc = 0;
- buff_size = HW_ATL_B0_RXBUF_MAX;
+ buff_size = HW_ATL_B0_RXBUF_MAX - HW_ATL_B0_PTP_RXBUF_SIZE;
hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self,
@@ -158,6 +169,14 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
hw_atl_b0_set_fc(self, self->aq_nic_cfg->flow_control, tc);
+ /* Init TC2 for PTP_RX */
+ tc = 2;
+
+ hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, HW_ATL_B0_PTP_RXBUF_SIZE,
+ tc);
+ /* No flow control for PTP */
+ hw_atl_rpb_rx_xoff_en_per_tc_set(self, 0U, tc);
+
/* QoS 802.1p priority -> TC mapping */
for (i_priority = 8U; i_priority--;)
hw_atl_rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);
@@ -664,6 +683,46 @@ static int hw_atl_b0_hw_ring_rx_fill(struct aq_hw_s *self,
return aq_hw_err_from_flags(self);
}
+static int hw_atl_b0_hw_ring_hwts_rx_fill(struct aq_hw_s *self,
+ struct aq_ring_s *ring)
+{
+ unsigned int i;
+
+ for (i = aq_ring_avail_dx(ring); i--;
+ ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail)) {
+ struct hw_atl_rxd_s *rxd =
+ (struct hw_atl_rxd_s *)
+ &ring->dx_ring[ring->sw_tail * HW_ATL_B0_RXD_SIZE];
+
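+		/* Every descriptor points at the shared scratch area right
+		 * past the descriptor ring; no per-descriptor data buffer
+		 * is needed since the timestamp arrives in the writeback
+		 * descriptor itself.
+		 */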
+ rxd->buf_addr = ring->dx_ring_pa + ring->size * ring->dx_size;
+ rxd->hdr_addr = 0U;
+ }
+	/* Make sure descriptors are updated before bumping the tail */
+ wmb();
+
+ hw_atl_reg_rx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_ring_hwts_rx_receive(struct aq_hw_s *self,
+ struct aq_ring_s *ring)
+{
+ while (ring->hw_head != ring->sw_tail) {
+ struct hw_atl_rxd_hwts_wb_s *hwts_wb =
+ (struct hw_atl_rxd_hwts_wb_s *)
+ (ring->dx_ring + (ring->hw_head * HW_ATL_B0_RXD_SIZE));
+
+ /* RxD is not done */
+ if (!(hwts_wb->sec_lw0 & 0x1U))
+ break;
+
+ ring->hw_head = aq_ring_next_dx(ring, ring->hw_head);
+ }
+
+ return aq_hw_err_from_flags(self);
+}
+
static int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
@@ -818,14 +877,15 @@ static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
cfg->is_vlan_force_promisc);
hw_atl_rpfl2multicast_flr_en_set(self,
- IS_FILTER_ENABLED(IFF_ALLMULTI), 0);
+ IS_FILTER_ENABLED(IFF_ALLMULTI) &&
+ IS_FILTER_ENABLED(IFF_MULTICAST), 0);
hw_atl_rpfl2_accept_all_mc_packets_set(self,
- IS_FILTER_ENABLED(IFF_ALLMULTI));
+ IS_FILTER_ENABLED(IFF_ALLMULTI) &&
+ IS_FILTER_ENABLED(IFF_MULTICAST));
hw_atl_rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
- cfg->is_mc_list_enabled = IS_FILTER_ENABLED(IFF_MULTICAST);
for (i = HW_ATL_B0_MAC_MIN; i < HW_ATL_B0_MAC_MAX; ++i)
hw_atl_rpfl2_uc_flr_en_set(self,
@@ -968,14 +1028,26 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
static int hw_atl_b0_hw_stop(struct aq_hw_s *self)
{
+ int err;
+ u32 val;
+
hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK);
/* Invalidate Descriptor Cache to prevent writing to the cached
* descriptors and to the data pointer of those descriptors
*/
- hw_atl_rdm_rx_dma_desc_cache_init_set(self, 1);
+ hw_atl_rdm_rx_dma_desc_cache_init_tgl(self);
- return aq_hw_err_from_flags(self);
+ err = aq_hw_err_from_flags(self);
+
+ if (err)
+ goto err_exit;
+
+ readx_poll_timeout_atomic(hw_atl_rdm_rx_dma_desc_cache_init_done_get,
+ self, val, val == 1, 1000U, 10000U);
+
+err_exit:
+ return err;
}
static int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self,
@@ -992,6 +1064,227 @@ static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self,
return aq_hw_err_from_flags(self);
}
+static int hw_atl_b0_tx_tc_mode_get(struct aq_hw_s *self, u32 *tc_mode)
+{
+ *tc_mode = hw_atl_rpb_tps_tx_tc_mode_get(self);
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_rx_tc_mode_get(struct aq_hw_s *self, u32 *tc_mode)
+{
+ *tc_mode = hw_atl_rpb_rpf_rx_traf_class_mode_get(self);
+ return aq_hw_err_from_flags(self);
+}
+
+#define get_ptp_ts_val_u64(self, indx) \
+ ((u64)(hw_atl_pcs_ptp_clock_get(self, indx) & 0xffff))
+
+static void hw_atl_b0_get_ptp_ts(struct aq_hw_s *self, u64 *stamp)
+{
+ u64 ns;
+
+ hw_atl_pcs_ptp_clock_read_enable(self, 1);
+ hw_atl_pcs_ptp_clock_read_enable(self, 0);
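+	/* Latched counter words: indices 0..1 hold seconds (2 x 16 bit),
+	 * indices 3..4 hold nanoseconds (2 x 16 bit).
+	 */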
+ ns = (get_ptp_ts_val_u64(self, 0) +
+ (get_ptp_ts_val_u64(self, 1) << 16)) * NSEC_PER_SEC +
+ (get_ptp_ts_val_u64(self, 3) +
+ (get_ptp_ts_val_u64(self, 4) << 16));
+
+ *stamp = ns + self->ptp_clk_offset;
+}
+
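+/* Split the (ppb-adjusted) counter period into integer nanoseconds and
+ * 32.32 fixed-point fractional nanoseconds.  For illustration, with a
+ * hypothetical 156250000 Hz counter and adj = 0 the period is 6.4 ns,
+ * i.e. *ns = 6 and *fns = 0.4 * FRAC_PER_NS.
+ */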
+static void hw_atl_b0_adj_params_get(u64 freq, s64 adj, u32 *ns, u32 *fns)
+{
+	/* The intermediate value is scaled up to preserve precision */
+ s64 base_ns = ((adj + NSEC_PER_SEC) * NSEC_PER_SEC);
+ u64 nsi_frac = 0;
+ u64 nsi;
+
+ base_ns = div64_s64(base_ns, freq);
+ nsi = div64_u64(base_ns, NSEC_PER_SEC);
+
+ if (base_ns != nsi * NSEC_PER_SEC) {
+ s64 divisor = div64_s64((s64)NSEC_PER_SEC * NSEC_PER_SEC,
+ base_ns - nsi * NSEC_PER_SEC);
+ nsi_frac = div64_s64(FRAC_PER_NS * NSEC_PER_SEC, divisor);
+ }
+
+ *ns = (u32)nsi;
+ *fns = (u32)nsi_frac;
+}
+
+static void
+hw_atl_b0_mac_adj_param_calc(struct hw_fw_request_ptp_adj_freq *ptp_adj_freq,
+ u64 phyfreq, u64 macfreq)
+{
+ s64 adj_fns_val;
+ s64 fns_in_sec_phy = phyfreq * (ptp_adj_freq->fns_phy +
+ FRAC_PER_NS * ptp_adj_freq->ns_phy);
+ s64 fns_in_sec_mac = macfreq * (ptp_adj_freq->fns_mac +
+ FRAC_PER_NS * ptp_adj_freq->ns_mac);
+ s64 fault_in_sec_phy = FRAC_PER_NS * NSEC_PER_SEC - fns_in_sec_phy;
+ s64 fault_in_sec_mac = FRAC_PER_NS * NSEC_PER_SEC - fns_in_sec_mac;
+ /* MAC MCP counter freq is macfreq / 4 */
+ s64 diff_in_mcp_overflow = (fault_in_sec_mac - fault_in_sec_phy) *
+ 4 * FRAC_PER_NS;
+
+ diff_in_mcp_overflow = div64_s64(diff_in_mcp_overflow,
+ AQ_HW_MAC_COUNTER_HZ);
+ adj_fns_val = (ptp_adj_freq->fns_mac + FRAC_PER_NS *
+ ptp_adj_freq->ns_mac) + diff_in_mcp_overflow;
+
+ ptp_adj_freq->mac_ns_adj = div64_s64(adj_fns_val, FRAC_PER_NS);
+ ptp_adj_freq->mac_fns_adj = adj_fns_val - ptp_adj_freq->mac_ns_adj *
+ FRAC_PER_NS;
+}
+
+static int hw_atl_b0_adj_sys_clock(struct aq_hw_s *self, s64 delta)
+{
+ self->ptp_clk_offset += delta;
+
+ return 0;
+}
+
+static int hw_atl_b0_set_sys_clock(struct aq_hw_s *self, u64 time, u64 ts)
+{
+ s64 delta = time - (self->ptp_clk_offset + ts);
+
+ return hw_atl_b0_adj_sys_clock(self, delta);
+}
+
+static int hw_atl_b0_ts_to_sys_clock(struct aq_hw_s *self, u64 ts, u64 *time)
+{
+ *time = self->ptp_clk_offset + ts;
+ return 0;
+}
+
+static int hw_atl_b0_adj_clock_freq(struct aq_hw_s *self, s32 ppb)
+{
+ struct hw_fw_request_iface fwreq;
+ size_t size;
+
+ memset(&fwreq, 0, sizeof(fwreq));
+
+ fwreq.msg_id = HW_AQ_FW_REQUEST_PTP_ADJ_FREQ;
+ hw_atl_b0_adj_params_get(AQ_HW_MAC_COUNTER_HZ, ppb,
+ &fwreq.ptp_adj_freq.ns_mac,
+ &fwreq.ptp_adj_freq.fns_mac);
+ hw_atl_b0_adj_params_get(AQ_HW_PHY_COUNTER_HZ, ppb,
+ &fwreq.ptp_adj_freq.ns_phy,
+ &fwreq.ptp_adj_freq.fns_phy);
+ hw_atl_b0_mac_adj_param_calc(&fwreq.ptp_adj_freq,
+ AQ_HW_PHY_COUNTER_HZ,
+ AQ_HW_MAC_COUNTER_HZ);
+
+ size = sizeof(fwreq.msg_id) + sizeof(fwreq.ptp_adj_freq);
+ return self->aq_fw_ops->send_fw_request(self, &fwreq, size);
+}
+
+static int hw_atl_b0_gpio_pulse(struct aq_hw_s *self, u32 index,
+ u64 start, u32 period)
+{
+ struct hw_fw_request_iface fwreq;
+ size_t size;
+
+ memset(&fwreq, 0, sizeof(fwreq));
+
+ fwreq.msg_id = HW_AQ_FW_REQUEST_PTP_GPIO_CTRL;
+ fwreq.ptp_gpio_ctrl.index = index;
+ fwreq.ptp_gpio_ctrl.period = period;
+ /* Apply time offset */
+ fwreq.ptp_gpio_ctrl.start = start - self->ptp_clk_offset;
+
+ size = sizeof(fwreq.msg_id) + sizeof(fwreq.ptp_gpio_ctrl);
+ return self->aq_fw_ops->send_fw_request(self, &fwreq, size);
+}
+
+static int hw_atl_b0_extts_gpio_enable(struct aq_hw_s *self, u32 index,
+ u32 enable)
+{
+ /* Enable/disable Sync1588 GPIO Timestamping */
+ aq_phy_write_reg(self, MDIO_MMD_PCS, 0xc611, enable ? 0x71 : 0);
+
+ return 0;
+}
+
+static int hw_atl_b0_get_sync_ts(struct aq_hw_s *self, u64 *ts)
+{
+ u64 sec_l;
+ u64 sec_h;
+ u64 nsec_l;
+ u64 nsec_h;
+
+	if (!ts)
+		return -EINVAL;
+
+ /* PTP external GPIO clock seconds count 15:0 */
+ sec_l = aq_phy_read_reg(self, MDIO_MMD_PCS, 0xc914);
+ /* PTP external GPIO clock seconds count 31:16 */
+ sec_h = aq_phy_read_reg(self, MDIO_MMD_PCS, 0xc915);
+ /* PTP external GPIO clock nanoseconds count 15:0 */
+ nsec_l = aq_phy_read_reg(self, MDIO_MMD_PCS, 0xc916);
+ /* PTP external GPIO clock nanoseconds count 31:16 */
+ nsec_h = aq_phy_read_reg(self, MDIO_MMD_PCS, 0xc917);
+
+ *ts = (nsec_h << 16) + nsec_l + ((sec_h << 16) + sec_l) * NSEC_PER_SEC;
+
+ return 0;
+}
+
+static u16 hw_atl_b0_rx_extract_ts(struct aq_hw_s *self, u8 *p,
+ unsigned int len, u64 *timestamp)
+{
+ unsigned int offset = 14;
+ struct ethhdr *eth;
+ __be64 sec;
+ __be32 ns;
+ u8 *ptr;
+
+ if (len <= offset || !timestamp)
+ return 0;
+
+	/* The TIMESTAMP at the end of the packet has the following format:
+ * (big-endian)
+ * struct {
+ * uint64_t sec;
+ * uint32_t ns;
+ * uint16_t stream_id;
+ * };
+ */
+ ptr = p + (len - offset);
+ memcpy(&sec, ptr, sizeof(sec));
+ ptr += sizeof(sec);
+ memcpy(&ns, ptr, sizeof(ns));
+
+ *timestamp = (be64_to_cpu(sec) & 0xffffffffffffllu) * NSEC_PER_SEC +
+ be32_to_cpu(ns) + self->ptp_clk_offset;
+
+ eth = (struct ethhdr *)p;
+
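+	/* Report how many trailing bytes to strip from the frame: the full
+	 * 14-byte trailer normally, but only 12 for L2 PTP (ETH_P_1588)
+	 * frames, apparently leaving the 2-byte stream_id in place.
+	 */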
+ return (eth->h_proto == htons(ETH_P_1588)) ? 12 : 14;
+}
+
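+/* The 48-bit seconds value in the HWTS writeback descriptor is scattered
+ * across three words: bits [9:0] live in sec_lw0[11:2], bits [25:10] in
+ * sec_lw1[31:16], bits [37:26] in sec_hw[11:0] and bits [47:38] in
+ * sec_hw[31:22]; reassemble before converting to nanoseconds.
+ */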
+static int hw_atl_b0_extract_hwts(struct aq_hw_s *self, u8 *p, unsigned int len,
+ u64 *timestamp)
+{
+ struct hw_atl_rxd_hwts_wb_s *hwts_wb = (struct hw_atl_rxd_hwts_wb_s *)p;
+ u64 tmp, sec, ns;
+
+ sec = 0;
+ tmp = (hwts_wb->sec_lw0 >> 2) & 0x3ff;
+ sec += tmp;
+ tmp = (u64)((hwts_wb->sec_lw1 >> 16) & 0xffff) << 10;
+ sec += tmp;
+ tmp = (u64)(hwts_wb->sec_hw & 0xfff) << 26;
+ sec += tmp;
+ tmp = (u64)((hwts_wb->sec_hw >> 22) & 0x3ff) << 38;
+ sec += tmp;
+ ns = sec * NSEC_PER_SEC + hwts_wb->ns;
+ if (timestamp)
+ *timestamp = ns + self->ptp_clk_offset;
+ return 0;
+}
+
static int hw_atl_b0_hw_fl3l4_clear(struct aq_hw_s *self,
struct aq_rx_filter_l3l4 *data)
{
@@ -1025,7 +1318,8 @@ static int hw_atl_b0_hw_fl3l4_set(struct aq_hw_s *self,
hw_atl_b0_hw_fl3l4_clear(self, data);
- if (data->cmd) {
+ if (data->cmd & (HW_ATL_RX_ENABLE_CMP_DEST_ADDR_L3 |
+ HW_ATL_RX_ENABLE_CMP_SRC_ADDR_L3)) {
if (!data->is_ipv6) {
hw_atl_rpfl3l4_ipv4_dest_addr_set(self,
location,
@@ -1042,8 +1336,13 @@ static int hw_atl_b0_hw_fl3l4_set(struct aq_hw_s *self,
data->ip_src);
}
}
- hw_atl_rpf_l4_dpd_set(self, data->p_dst, location);
- hw_atl_rpf_l4_spd_set(self, data->p_src, location);
+
+ if (data->cmd & (HW_ATL_RX_ENABLE_CMP_DEST_PORT_L4 |
+ HW_ATL_RX_ENABLE_CMP_SRC_PORT_L4)) {
+ hw_atl_rpf_l4_dpd_set(self, data->p_dst, location);
+ hw_atl_rpf_l4_spd_set(self, data->p_src, location);
+ }
+
hw_atl_rpfl3l4_cmd_set(self, location, data->cmd);
return aq_hw_err_from_flags(self);
@@ -1164,6 +1463,23 @@ const struct aq_hw_ops hw_atl_ops_b0 = {
.hw_get_regs = hw_atl_utils_hw_get_regs,
.hw_get_hw_stats = hw_atl_utils_get_hw_stats,
.hw_get_fw_version = hw_atl_utils_get_fw_version,
- .hw_set_offload = hw_atl_b0_hw_offload_set,
+
+ .hw_tx_tc_mode_get = hw_atl_b0_tx_tc_mode_get,
+ .hw_rx_tc_mode_get = hw_atl_b0_rx_tc_mode_get,
+
+ .hw_ring_hwts_rx_fill = hw_atl_b0_hw_ring_hwts_rx_fill,
+ .hw_ring_hwts_rx_receive = hw_atl_b0_hw_ring_hwts_rx_receive,
+
+ .hw_get_ptp_ts = hw_atl_b0_get_ptp_ts,
+ .hw_adj_sys_clock = hw_atl_b0_adj_sys_clock,
+ .hw_set_sys_clock = hw_atl_b0_set_sys_clock,
+ .hw_ts_to_sys_clock = hw_atl_b0_ts_to_sys_clock,
+ .hw_adj_clock_freq = hw_atl_b0_adj_clock_freq,
+ .hw_gpio_pulse = hw_atl_b0_gpio_pulse,
+ .hw_extts_gpio_enable = hw_atl_b0_extts_gpio_enable,
+ .hw_get_sync_ts = hw_atl_b0_get_sync_ts,
+ .rx_extract_ts = hw_atl_b0_rx_extract_ts,
+ .extract_hwts = hw_atl_b0_extract_hwts,
+ .hw_set_offload = hw_atl_b0_hw_offload_set,
.hw_set_fc = hw_atl_b0_set_fc,
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
index 808d8cd4252a..7ab23a1751d3 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File hw_atl_b0_internal.h: Definition of Atlantic B0 chip specific
@@ -64,8 +64,11 @@
#define HW_ATL_B0_MPI_SPEED_MSK 0xFFFFU
#define HW_ATL_B0_MPI_SPEED_SHIFT 16U
-#define HW_ATL_B0_TXBUF_MAX 160U
-#define HW_ATL_B0_RXBUF_MAX 320U
+#define HW_ATL_B0_TXBUF_MAX 160U
+#define HW_ATL_B0_PTP_TXBUF_SIZE 8U
+
+#define HW_ATL_B0_RXBUF_MAX 320U
+#define HW_ATL_B0_PTP_RXBUF_SIZE 16U
#define HW_ATL_B0_RSS_REDIRECTION_MAX 64U
#define HW_ATL_B0_RSS_REDIRECTION_BITS 3U
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
index 1149812ae463..6cadc9054544 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File hw_atl_llh.c: Definitions of bitfield and register access functions for
@@ -572,6 +572,13 @@ void hw_atl_rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
rx_traf_class_mode);
}
+u32 hw_atl_rpb_rpf_rx_traf_class_mode_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_RPB_RPF_RX_TC_MODE_ADR,
+ HW_ATL_RPB_RPF_RX_TC_MODE_MSK,
+ HW_ATL_RPB_RPF_RX_TC_MODE_SHIFT);
+}
+
void hw_atl_rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RX_BUF_EN_ADR,
@@ -606,12 +613,25 @@ void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode
HW_ATL_RPB_RX_FC_MODE_SHIFT, rx_flow_ctl_mode);
}
-void hw_atl_rdm_rx_dma_desc_cache_init_set(struct aq_hw_s *aq_hw, u32 init)
+void hw_atl_rdm_rx_dma_desc_cache_init_tgl(struct aq_hw_s *aq_hw)
{
+ u32 val;
+
+ val = aq_hw_read_reg_bit(aq_hw, HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_ADR,
+ HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSK,
+ HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_SHIFT);
+
aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_ADR,
HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSK,
HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_SHIFT,
- init);
+ val ^ 1);
+}
+
+u32 hw_atl_rdm_rx_dma_desc_cache_init_done_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, RDM_RX_DMA_DESC_CACHE_INIT_DONE_ADR,
+ RDM_RX_DMA_DESC_CACHE_INIT_DONE_MSK,
+ RDM_RX_DMA_DESC_CACHE_INIT_DONE_SHIFT);
}
void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
@@ -623,8 +643,8 @@ void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
rx_pkt_buff_size_per_tc);
}
-void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
- u32 buffer)
+void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_xoff_en_per_tc, u32 buffer)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPB_RXBXOFF_EN_ADR(buffer),
HW_ATL_RPB_RXBXOFF_EN_MSK,
@@ -1277,6 +1297,13 @@ void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en)
HW_ATL_TPB_TX_BUF_EN_SHIFT, tx_buff_en);
}
+u32 hw_atl_rpb_tps_tx_tc_mode_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_TPB_TX_TC_MODE_ADDR,
+ HW_ATL_TPB_TX_TC_MODE_MSK,
+ HW_ATL_TPB_TX_TC_MODE_SHIFT);
+}
+
void hw_atl_rpb_tps_tx_tc_mode_set(struct aq_hw_s *aq_hw,
u32 tx_traf_class_mode)
{
@@ -1513,6 +1540,20 @@ void hw_atl_reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw,
glb_cpu_scratch_scp);
}
+void hw_atl_pcs_ptp_clock_read_enable(struct aq_hw_s *aq_hw,
+ u32 ptp_clock_read_enable)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_ADR,
+ HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_MSK,
+ HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_SHIFT,
+ ptp_clock_read_enable);
+}
+
+u32 hw_atl_pcs_ptp_clock_get(struct aq_hw_s *aq_hw, u32 index)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL_PCS_PTP_TS_VAL_ADDR(index));
+}
+
void hw_atl_mcp_up_force_intr_set(struct aq_hw_s *aq_hw, u32 up_force_intr)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_MCP_UP_FORCE_INTERRUPT_ADR,
@@ -1603,6 +1644,11 @@ u32 hw_atl_sem_ram_get(struct aq_hw_s *self)
return hw_atl_reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM);
}
+u32 hw_atl_sem_mdio_get(struct aq_hw_s *self)
+{
+ return hw_atl_reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_MDIO);
+}
+
u32 hw_atl_scrpad_get(struct aq_hw_s *aq_hw, u32 scratch_scp)
{
return aq_hw_read_reg(aq_hw,
@@ -1618,3 +1664,60 @@ u32 hw_atl_scrpad25_get(struct aq_hw_s *self)
{
return hw_atl_scrpad_get(self, 0x18);
}
+
+void hw_atl_glb_mdio_iface1_set(struct aq_hw_s *aq_hw, u32 value)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_GLB_MDIO_IFACE_N_ADR(1), value);
+}
+
+u32 hw_atl_glb_mdio_iface1_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL_GLB_MDIO_IFACE_N_ADR(1));
+}
+
+void hw_atl_glb_mdio_iface2_set(struct aq_hw_s *aq_hw, u32 value)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_GLB_MDIO_IFACE_N_ADR(2), value);
+}
+
+u32 hw_atl_glb_mdio_iface2_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL_GLB_MDIO_IFACE_N_ADR(2));
+}
+
+void hw_atl_glb_mdio_iface3_set(struct aq_hw_s *aq_hw, u32 value)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_GLB_MDIO_IFACE_N_ADR(3), value);
+}
+
+u32 hw_atl_glb_mdio_iface3_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL_GLB_MDIO_IFACE_N_ADR(3));
+}
+
+void hw_atl_glb_mdio_iface4_set(struct aq_hw_s *aq_hw, u32 value)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_GLB_MDIO_IFACE_N_ADR(4), value);
+}
+
+u32 hw_atl_glb_mdio_iface4_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL_GLB_MDIO_IFACE_N_ADR(4));
+}
+
+void hw_atl_glb_mdio_iface5_set(struct aq_hw_s *aq_hw, u32 value)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_GLB_MDIO_IFACE_N_ADR(5), value);
+}
+
+u32 hw_atl_glb_mdio_iface5_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL_GLB_MDIO_IFACE_N_ADR(5));
+}
+
+u32 hw_atl_mdio_busy_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_MDIO_BUSY_ADR,
+ HW_ATL_MDIO_BUSY_MSK,
+ HW_ATL_MDIO_BUSY_SHIFT);
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
index 0c37abbabca5..5750b0c9cae7 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File hw_atl_llh.h: Declarations of bitfield and register access functions for
@@ -292,6 +292,9 @@ void hw_atl_rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk);
void hw_atl_rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
u32 rx_traf_class_mode);
+/* get rx traffic class mode */
+u32 hw_atl_rpb_rpf_rx_traf_class_mode_get(struct aq_hw_s *aq_hw);
+
/* set rx buffer enable */
void hw_atl_rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en);
@@ -306,18 +309,23 @@ void hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
u32 buffer);
/* set rx flow control mode */
-void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode);
+void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw,
+ u32 rx_flow_ctl_mode);
/* set rx packet buffer size (per tc) */
void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
u32 rx_pkt_buff_size_per_tc,
u32 buffer);
-/* set rdm rx dma descriptor cache init */
-void hw_atl_rdm_rx_dma_desc_cache_init_set(struct aq_hw_s *aq_hw, u32 init);
+/* toggle rdm rx dma descriptor cache init */
+void hw_atl_rdm_rx_dma_desc_cache_init_tgl(struct aq_hw_s *aq_hw);
+
+/* get rdm rx dma descriptor cache init done */
+u32 hw_atl_rdm_rx_dma_desc_cache_init_done_get(struct aq_hw_s *aq_hw);
/* set rx xoff enable (per tc) */
-void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
+void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_xoff_en_per_tc,
u32 buffer);
/* rpf */
@@ -602,6 +610,9 @@ void hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
void hw_atl_rpb_tps_tx_tc_mode_set(struct aq_hw_s *aq_hw,
u32 tx_traf_class_mode);
+/* get TX Traffic Class Mode */
+u32 hw_atl_rpb_tps_tx_tc_mode_get(struct aq_hw_s *aq_hw);
+
/* set tx buffer enable */
void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en);
@@ -620,7 +631,8 @@ void hw_atl_tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_
/* set tx packet buffer size (per tc) */
void hw_atl_tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
- u32 tx_pkt_buff_size_per_tc, u32 buffer);
+ u32 tx_pkt_buff_size_per_tc,
+ u32 buffer);
/* set tx path pad insert enable */
void hw_atl_tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw, u32 tx_path_scp_ins_en);
@@ -712,6 +724,12 @@ void hw_atl_msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe);
/* set pci register reset disable */
void hw_atl_pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis);
+/* pcs */
+void hw_atl_pcs_ptp_clock_read_enable(struct aq_hw_s *aq_hw,
+ u32 ptp_clock_read_enable);
+
+u32 hw_atl_pcs_ptp_clock_get(struct aq_hw_s *aq_hw, u32 index);
+
/* set uP Force Interrupt */
void hw_atl_mcp_up_force_intr_set(struct aq_hw_s *aq_hw, u32 up_force_intr);
@@ -749,9 +767,44 @@ void hw_atl_rpfl3l4_ipv6_src_addr_set(struct aq_hw_s *aq_hw, u8 location,
void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
u32 *ipv6_dest);
+/* set Global MDIO Interface 1 */
+void hw_atl_glb_mdio_iface1_set(struct aq_hw_s *hw, u32 value);
+
+/* get Global MDIO Interface 1 */
+u32 hw_atl_glb_mdio_iface1_get(struct aq_hw_s *hw);
+
+/* set Global MDIO Interface 2 */
+void hw_atl_glb_mdio_iface2_set(struct aq_hw_s *hw, u32 value);
+
+/* get Global MDIO Interface 2 */
+u32 hw_atl_glb_mdio_iface2_get(struct aq_hw_s *hw);
+
+/* set Global MDIO Interface 3 */
+void hw_atl_glb_mdio_iface3_set(struct aq_hw_s *hw, u32 value);
+
+/* get Global MDIO Interface 3 */
+u32 hw_atl_glb_mdio_iface3_get(struct aq_hw_s *hw);
+
+/* set Global MDIO Interface 4 */
+void hw_atl_glb_mdio_iface4_set(struct aq_hw_s *hw, u32 value);
+
+/* get Global MDIO Interface 4 */
+u32 hw_atl_glb_mdio_iface4_get(struct aq_hw_s *hw);
+
+/* set Global MDIO Interface 5 */
+void hw_atl_glb_mdio_iface5_set(struct aq_hw_s *hw, u32 value);
+
+/* get Global MDIO Interface 5 */
+u32 hw_atl_glb_mdio_iface5_get(struct aq_hw_s *hw);
+
+u32 hw_atl_mdio_busy_get(struct aq_hw_s *aq_hw);
+
/* get global microprocessor ram semaphore */
u32 hw_atl_sem_ram_get(struct aq_hw_s *self);
+/* get global microprocessor mdio semaphore */
+u32 hw_atl_sem_mdio_get(struct aq_hw_s *self);
+
/* get global microprocessor scratch pad register */
u32 hw_atl_scrpad_get(struct aq_hw_s *aq_hw, u32 scratch_scp);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
index c3febcdfa92e..ec3bcdcefc4d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File hw_atl_llh_internal.h: Preprocessor definitions
@@ -318,6 +318,25 @@
/* default value of bitfield rdm_desc_init_i */
#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_DEFAULT 0x0
+/* rdm_desc_init_done_i bitfield definitions
+ * preprocessor definitions for the bitfield rdm_desc_init_done_i.
+ * port="pif_rdm_desc_init_done_i"
+ */
+
+/* register address for bitfield rdm_desc_init_done_i */
+#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_ADR 0x00005a10
+/* bitmask for bitfield rdm_desc_init_done_i */
+#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_MSK 0x00000001U
+/* inverted bitmask for bitfield rdm_desc_init_done_i */
+#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_MSKN 0xfffffffe
+/* lower bit position of bitfield rdm_desc_init_done_i */
+#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_SHIFT 0U
+/* width of bitfield rdm_desc_init_done_i */
+#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_WIDTH 1
+/* default value of bitfield rdm_desc_init_done_i */
+#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_DEFAULT 0x0
+
/* rx int_desc_wrb_en bitfield definitions
* preprocessor definitions for the bitfield "int_desc_wrb_en".
* port="pif_rdm_int_desc_wrb_en_i"
@@ -1289,6 +1308,52 @@
/* default value of bitfield et_val{f}[f:0] */
#define HW_ATL_RPF_ET_VALF_DEFAULT 0x0
+/* RX l3_l4_en{F} Bitfield Definitions
+ * Preprocessor definitions for the bitfield "l3_l4_en{F}".
+ * Parameter: filter {F} | stride size 0x4 | range [0, 7]
+ * PORT="pif_rpf_l3_l4_en_i[0]"
+ */
+
+#define HW_ATL_RPF_L3_REG_CTRL_ADR(filter) (0x00005380 + (filter) * 0x4)
+
+/* RX rpf_l3_sa{D}[1F:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "l3_sa{D}[1F:0]".
+ * Parameter: location {D} | stride size 0x4 | range [0, 7]
+ * PORT="pif_rpf_l3_sa0_i[31:0]"
+ */
+
+/* Register address for bitfield pif_rpf_l3_sa0_i[31:0] */
+#define HW_ATL_RPF_L3_SRCA_ADR(filter) (0x000053B0 + (filter) * 0x4)
+/* Bitmask for bitfield l3_sa0[1F:0] */
+#define HW_ATL_RPF_L3_SRCA_MSK 0xFFFFFFFFu
+/* Inverted bitmask for bitfield l3_sa0[1F:0] */
+#define HW_ATL_RPF_L3_SRCA_MSKN 0xFFFFFFFFu
+/* Lower bit position of bitfield l3_sa0[1F:0] */
+#define HW_ATL_RPF_L3_SRCA_SHIFT 0
+/* Width of bitfield l3_sa0[1F:0] */
+#define HW_ATL_RPF_L3_SRCA_WIDTH 32
+/* Default value of bitfield l3_sa0[1F:0] */
+#define HW_ATL_RPF_L3_SRCA_DEFAULT 0x0
+
+/* RX rpf_l3_da{D}[1F:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "l3_da{D}[1F:0]".
+ * Parameter: location {D} | stride size 0x4 | range [0, 7]
+ * PORT="pif_rpf_l3_da0_i[31:0]"
+ */
+
+/* Register address for bitfield pif_rpf_l3_da0_i[31:0] */
+#define HW_ATL_RPF_L3_DSTA_ADR(filter) (0x000053B0 + (filter) * 0x4)
+/* Bitmask for bitfield l3_da0[1F:0] */
+#define HW_ATL_RPF_L3_DSTA_MSK 0xFFFFFFFFu
+/* Inverted bitmask for bitfield l3_da0[1F:0] */
+#define HW_ATL_RPF_L3_DSTA_MSKN 0xFFFFFFFFu
+/* Lower bit position of bitfield l3_da0[1F:0] */
+#define HW_ATL_RPF_L3_DSTA_SHIFT 0
+/* Width of bitfield l3_da0[1F:0] */
+#define HW_ATL_RPF_L3_DSTA_WIDTH 32
+/* Default value of bitfield l3_da0[1F:0] */
+#define HW_ATL_RPF_L3_DSTA_DEFAULT 0x0
+
/* RX l4_sp{D}[F:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "l4_sp{D}[F:0]".
* Parameter: srcport {D} | stride size 0x4 | range [0, 7]
@@ -2421,6 +2486,22 @@
/* default value of bitfield register write strobe */
#define HW_ATL_MSM_REG_WR_STROBE_DEFAULT 0x0
+/* register address for bitfield PTP Digital Clock Read Enable */
+#define HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_ADR 0x00004628
+/* bitmask for bitfield PTP Digital Clock Read Enable */
+#define HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_MSK 0x00000010
+/* inverted bitmask for bitfield PTP Digital Clock Read Enable */
+#define HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_MSKN 0xFFFFFFEF
+/* lower bit position of bitfield PTP Digital Clock Read Enable */
+#define HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_SHIFT 4
+/* width of bitfield PTP Digital Clock Read Enable */
+#define HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_WIDTH 1
+/* default value of bitfield PTP Digital Clock Read Enable */
+#define HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_DEFAULT 0x0
+
+/* register address for ptp counter reading */
+#define HW_ATL_PCS_PTP_TS_VAL_ADDR(index) (0x00004900 + (index) * 0x4)
+
/* mif soft reset bitfield definitions
* preprocessor definitions for the bitfield "soft reset".
* port="pif_glb_res_i"
@@ -2513,50 +2594,121 @@
/* default value of bitfield uP Force Interrupt */
#define HW_ATL_MCP_UP_FORCE_INTERRUPT_DEFAULT 0x0
-#define HW_ATL_RX_CTRL_ADDR_BEGIN_FL3L4 0x00005380
-#define HW_ATL_RX_SRCA_ADDR_BEGIN_FL3L4 0x000053B0
-#define HW_ATL_RX_DESTA_ADDR_BEGIN_FL3L4 0x000053D0
-
-#define HW_ATL_RPF_L3_REG_CTRL_ADR(location) (0x00005380 + (location) * 0x4)
-
-/* RX rpf_l3_sa{D}[1F:0] Bitfield Definitions
- * Preprocessor definitions for the bitfield "l3_sa{D}[1F:0]".
- * Parameter: location {D} | stride size 0x4 | range [0, 7]
- * PORT="pif_rpf_l3_sa0_i[31:0]"
- */
-
-/* Register address for bitfield pif_rpf_l3_sa0_i[31:0] */
-#define HW_ATL_RPF_L3_SRCA_ADR(location) (0x000053B0 + (location) * 0x4)
-/* Bitmask for bitfield l3_sa0[1F:0] */
-#define HW_ATL_RPF_L3_SRCA_MSK 0xFFFFFFFFu
-/* Inverted bitmask for bitfield l3_sa0[1F:0] */
-#define HW_ATL_RPF_L3_SRCA_MSKN 0xFFFFFFFFu
-/* Lower bit position of bitfield l3_sa0[1F:0] */
-#define HW_ATL_RPF_L3_SRCA_SHIFT 0
-/* Width of bitfield l3_sa0[1F:0] */
-#define HW_ATL_RPF_L3_SRCA_WIDTH 32
-/* Default value of bitfield l3_sa0[1F:0] */
-#define HW_ATL_RPF_L3_SRCA_DEFAULT 0x0
-
-/* RX rpf_l3_da{D}[1F:0] Bitfield Definitions
- * Preprocessor definitions for the bitfield "l3_da{D}[1F:0]".
- * Parameter: location {D} | stride size 0x4 | range [0, 7]
- * PORT="pif_rpf_l3_da0_i[31:0]"
- */
-
- /* Register address for bitfield pif_rpf_l3_da0_i[31:0] */
-#define HW_ATL_RPF_L3_DSTA_ADR(location) (0x000053B0 + (location) * 0x4)
-/* Bitmask for bitfield l3_da0[1F:0] */
-#define HW_ATL_RPF_L3_DSTA_MSK 0xFFFFFFFFu
-/* Inverted bitmask for bitfield l3_da0[1F:0] */
-#define HW_ATL_RPF_L3_DSTA_MSKN 0xFFFFFFFFu
-/* Lower bit position of bitfield l3_da0[1F:0] */
-#define HW_ATL_RPF_L3_DSTA_SHIFT 0
-/* Width of bitfield l3_da0[1F:0] */
-#define HW_ATL_RPF_L3_DSTA_WIDTH 32
-/* Default value of bitfield l3_da0[1F:0] */
-#define HW_ATL_RPF_L3_DSTA_DEFAULT 0x0
-
+/* Preprocessor definitions for Global MDIO Interfaces
+ * Address: 0x00000280 + 0x4 * (interface number - 1)
+ */
+#define HW_ATL_GLB_MDIO_IFACE_ADDR_BEGIN 0x00000280u
+
+#define HW_ATL_GLB_MDIO_IFACE_N_ADR(number) \
+ (HW_ATL_GLB_MDIO_IFACE_ADDR_BEGIN + (((number) - 1) * 0x4))
+
+/* MIF MDIO Busy Bitfield Definitions
+ * Preprocessor definitions for the bitfield "MDIO Busy".
+ * PORT="mdio_pif_busy_o"
+ */
+
+/* Register address for bitfield MDIO Busy */
+#define HW_ATL_MDIO_BUSY_ADR 0x00000284
+/* Bitmask for bitfield MDIO Busy */
+#define HW_ATL_MDIO_BUSY_MSK 0x80000000
+/* Inverted bitmask for bitfield MDIO Busy */
+#define HW_ATL_MDIO_BUSY_MSKN 0x7FFFFFFF
+/* Lower bit position of bitfield MDIO Busy */
+#define HW_ATL_MDIO_BUSY_SHIFT 31
+/* Width of bitfield MDIO Busy */
+#define HW_ATL_MDIO_BUSY_WIDTH 1
+
+/* MIF MDIO Execute Operation Bitfield Definitions
+ * Preprocessor definitions for the bitfield "MDIO Execute Operation".
+ * PORT="pif_mdio_op_start_i"
+ */
+
+/* Register address for bitfield MDIO Execute Operation */
+#define HW_ATL_MDIO_EXECUTE_OPERATION_ADR 0x00000284
+/* Bitmask for bitfield MDIO Execute Operation */
+#define HW_ATL_MDIO_EXECUTE_OPERATION_MSK 0x00008000
+/* Inverted bitmask for bitfield MDIO Execute Operation */
+#define HW_ATL_MDIO_EXECUTE_OPERATION_MSKN 0xFFFF7FFF
+/* Lower bit position of bitfield MDIO Execute Operation */
+#define HW_ATL_MDIO_EXECUTE_OPERATION_SHIFT 15
+/* Width of bitfield MDIO Execute Operation */
+#define HW_ATL_MDIO_EXECUTE_OPERATION_WIDTH 1
+/* Default value of bitfield MDIO Execute Operation */
+#define HW_ATL_MDIO_EXECUTE_OPERATION_DEFAULT 0x0
+
+/* MIF Op Mode [1:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "Op Mode [1:0]".
+ * PORT="pif_mdio_mode_i[1:0]"
+ */
+
+/* Register address for bitfield Op Mode [1:0] */
+#define HW_ATL_MDIO_OP_MODE_ADR 0x00000284
+/* Bitmask for bitfield Op Mode [1:0] */
+#define HW_ATL_MDIO_OP_MODE_MSK 0x00003000
+/* Inverted bitmask for bitfield Op Mode [1:0] */
+#define HW_ATL_MDIO_OP_MODE_MSKN 0xFFFFCFFF
+/* Lower bit position of bitfield Op Mode [1:0] */
+#define HW_ATL_MDIO_OP_MODE_SHIFT 12
+/* Width of bitfield Op Mode [1:0] */
+#define HW_ATL_MDIO_OP_MODE_WIDTH 2
+/* Default value of bitfield Op Mode [1:0] */
+#define HW_ATL_MDIO_OP_MODE_DEFAULT 0x0
+
+/* MIF PHY address Bitfield Definitions
+ * Preprocessor definitions for the bitfield "PHY address".
+ * PORT="pif_mdio_phy_addr_i[9:0]"
+ */
+
+/* Register address for bitfield PHY address */
+#define HW_ATL_MDIO_PHY_ADDRESS_ADR 0x00000284
+/* Bitmask for bitfield PHY address */
+#define HW_ATL_MDIO_PHY_ADDRESS_MSK 0x000003FF
+/* Inverted bitmask for bitfield PHY address */
+#define HW_ATL_MDIO_PHY_ADDRESS_MSKN 0xFFFFFC00
+/* Lower bit position of bitfield PHY address */
+#define HW_ATL_MDIO_PHY_ADDRESS_SHIFT 0
+/* Width of bitfield PHY address */
+#define HW_ATL_MDIO_PHY_ADDRESS_WIDTH 10
+/* Default value of bitfield PHY address */
+#define HW_ATL_MDIO_PHY_ADDRESS_DEFAULT 0x0
+
+/* MIF MDIO WriteData [F:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "MDIO WriteData [F:0]".
+ * PORT="pif_mdio_wdata_i[15:0]"
+ */
+
+/* Register address for bitfield MDIO WriteData [F:0] */
+#define HW_ATL_MDIO_WRITE_DATA_ADR 0x00000288
+/* Bitmask for bitfield MDIO WriteData [F:0] */
+#define HW_ATL_MDIO_WRITE_DATA_MSK 0x0000FFFF
+/* Inverted bitmask for bitfield MDIO WriteData [F:0] */
+#define HW_ATL_MDIO_WRITE_DATA_MSKN 0xFFFF0000
+/* Lower bit position of bitfield MDIO WriteData [F:0] */
+#define HW_ATL_MDIO_WRITE_DATA_SHIFT 0
+/* Width of bitfield MDIO WriteData [F:0] */
+#define HW_ATL_MDIO_WRITE_DATA_WIDTH 16
+/* Default value of bitfield MDIO WriteData [F:0] */
+#define HW_ATL_MDIO_WRITE_DATA_DEFAULT 0x0
+
+/* MIF MDIO Address [F:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "MDIO Address [F:0]".
+ * PORT="pif_mdio_addr_i[15:0]"
+ */
+
+/* Register address for bitfield MDIO Address [F:0] */
+#define HW_ATL_MDIO_ADDRESS_ADR 0x0000028C
+/* Bitmask for bitfield MDIO Address [F:0] */
+#define HW_ATL_MDIO_ADDRESS_MSK 0x0000FFFF
+/* Inverted bitmask for bitfield MDIO Address [F:0] */
+#define HW_ATL_MDIO_ADDRESS_MSKN 0xFFFF0000
+/* Lower bit position of bitfield MDIO Address [F:0] */
+#define HW_ATL_MDIO_ADDRESS_SHIFT 0
+/* Width of bitfield MDIO Address [F:0] */
+#define HW_ATL_MDIO_ADDRESS_WIDTH 16
+/* Default value of bitfield MDIO Address [F:0] */
+#define HW_ATL_MDIO_ADDRESS_DEFAULT 0x0
+
+#define HW_ATL_FW_SM_MDIO 0x0U
#define HW_ATL_FW_SM_RAM 0x2U
#endif /* HW_ATL_LLH_INTERNAL_H */
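The MDIO and PTP definitions added above are consumed by hw_atl_llh.c and by
the PTP code elsewhere in this series. As orientation, a minimal sketch of how
they combine (illustrative only: the example_* helpers are not part of the
patch, the MDIO semaphore handshake via hw_atl_sem_mdio_get()/HW_ATL_FW_SM_MDIO
is omitted, and the PTP latch sequence is an assumption):

/* Poll the "MDIO Busy" flag (bit 31 of 0x284) until the interface is idle;
 * hw_atl_mdio_busy_get() is the accessor declared in hw_atl_llh.h above.
 */
static int example_wait_mdio_idle(struct aq_hw_s *aq_hw)
{
	u32 val;

	return readx_poll_timeout_atomic(hw_atl_mdio_busy_get, aq_hw,
					 val, !val, 10U, 100000U);
}

/* Latch the free-running PTP counter, then read it back through the new
 * PCS definitions; treating index 0/1 as the low/high dword is an
 * assumption here.
 */
static u64 example_read_ptp_counter(struct aq_hw_s *aq_hw)
{
	aq_hw_write_reg_bit(aq_hw, HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_ADR,
			    HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_MSK,
			    HW_ATL_PCS_PTP_CLOCK_READ_ENABLE_SHIFT, 1U);

	return (u64)aq_hw_read_reg(aq_hw, HW_ATL_PCS_PTP_TS_VAL_ADDR(0)) |
	       ((u64)aq_hw_read_reg(aq_hw, HW_ATL_PCS_PTP_TS_VAL_ADDR(1)) << 32);
}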
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index 52646855495e..6fc5640065bd 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File hw_atl_utils.c: Definition of common functions for Atlantic hardware
@@ -327,8 +327,7 @@ err_exit:
return err;
}
-static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p,
- u32 cnt)
+int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p, u32 cnt)
{
u32 val;
int err = 0;
@@ -964,4 +963,6 @@ const struct aq_fw_ops aq_fw_1x_ops = {
.set_eee_rate = NULL,
.get_eee_rate = NULL,
.set_flow_control = NULL,
+ .send_fw_request = NULL,
+ .enable_ptp = NULL,
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index 692bed70e104..ee11b107f0a5 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File hw_atl_utils.h: Declaration of common functions for Atlantic hardware
@@ -41,7 +41,15 @@ struct __packed hw_atl_rxd_wb_s {
u16 status;
u16 pkt_len;
u16 next_desc_ptr;
- u16 vlan;
+ __le16 vlan;
+};
+
+/* Hardware rx HW TIMESTAMP writeback */
+struct __packed hw_atl_rxd_hwts_wb_s {
+ u32 sec_hw;
+ u32 ns;
+ u32 sec_lw0;
+ u32 sec_lw1;
};
struct __packed hw_atl_stats_s {
@@ -168,6 +176,34 @@ struct __packed hw_atl_utils_mbox_header {
u32 error;
};
+struct __packed hw_aq_ptp_offset {
+ u16 ingress_100;
+ u16 egress_100;
+ u16 ingress_1000;
+ u16 egress_1000;
+ u16 ingress_2500;
+ u16 egress_2500;
+ u16 ingress_5000;
+ u16 egress_5000;
+ u16 ingress_10000;
+ u16 egress_10000;
+};
+
+enum gpio_pin_function {
+ GPIO_PIN_FUNCTION_NC,
+ GPIO_PIN_FUNCTION_VAUX_ENABLE,
+ GPIO_PIN_FUNCTION_EFUSE_BURN_ENABLE,
+ GPIO_PIN_FUNCTION_SFP_PLUS_DETECT,
+ GPIO_PIN_FUNCTION_TX_DISABLE,
+ GPIO_PIN_FUNCTION_RATE_SEL_0,
+ GPIO_PIN_FUNCTION_RATE_SEL_1,
+ GPIO_PIN_FUNCTION_TX_FAULT,
+ GPIO_PIN_FUNCTION_PTP0,
+ GPIO_PIN_FUNCTION_PTP1,
+ GPIO_PIN_FUNCTION_PTP2,
+ GPIO_PIN_FUNCTION_SIZE
+};
+
struct __packed hw_aq_info {
u8 reserved[6];
u16 phy_fault_code;
@@ -175,9 +211,23 @@ struct __packed hw_aq_info {
u8 cable_len;
u8 reserved1;
u32 cable_diag_data[4];
- u8 reserved2[32];
+ struct hw_aq_ptp_offset ptp_offset;
+ u8 reserved2[12];
u32 caps_lo;
u32 caps_hi;
+ u32 reserved_datapath;
+ u32 reserved3[7];
+ u32 reserved_simpleresp[3];
+ u32 reserved_linkstat[7];
+ u32 reserved_wakes_count;
+ u32 reserved_eee_stat[12];
+ u32 tx_stuck_cnt;
+ u32 setting_address;
+ u32 setting_length;
+ u32 caps_ex;
+ enum gpio_pin_function gpio_pin[3];
+ u32 pcie_aer_dump[18];
+ u16 snr_margin[4];
};
struct __packed hw_atl_utils_mbox {
@@ -237,6 +287,42 @@ struct __packed offload_info {
u8 buf[0];
};
+/* Mailbox FW Request interface */
+struct __packed hw_fw_request_ptp_gpio_ctrl {
+ u32 index;
+ u32 period;
+ u64 start;
+};
+
+struct __packed hw_fw_request_ptp_adj_freq {
+ u32 ns_mac;
+ u32 fns_mac;
+ u32 ns_phy;
+ u32 fns_phy;
+ u32 mac_ns_adj;
+ u32 mac_fns_adj;
+};
+
+struct __packed hw_fw_request_ptp_adj_clock {
+ u32 ns;
+ u32 sec;
+ int sign;
+};
+
+#define HW_AQ_FW_REQUEST_PTP_GPIO_CTRL 0x11
+#define HW_AQ_FW_REQUEST_PTP_ADJ_FREQ 0x12
+#define HW_AQ_FW_REQUEST_PTP_ADJ_CLOCK 0x13
+
+struct __packed hw_fw_request_iface {
+ u32 msg_id;
+ union {
+ /* PTP FW Request */
+ struct hw_fw_request_ptp_gpio_ctrl ptp_gpio_ctrl;
+ struct hw_fw_request_ptp_adj_freq ptp_adj_freq;
+ struct hw_fw_request_ptp_adj_clock ptp_adj_clock;
+ };
+};
+
enum hw_atl_rx_action_with_traffic {
HW_ATL_RX_DISCARD,
HW_ATL_RX_HOST,
@@ -344,91 +430,135 @@ enum hw_atl_fw2x_rate {
FW2X_RATE_10G = 0x800,
};
+/* 0x370
+ * Link capabilities resolution register
+ */
enum hw_atl_fw2x_caps_lo {
- CAPS_LO_10BASET_HD = 0x00,
+ CAPS_LO_10BASET_HD = 0,
CAPS_LO_10BASET_FD,
CAPS_LO_100BASETX_HD,
CAPS_LO_100BASET4_HD,
CAPS_LO_100BASET2_HD,
- CAPS_LO_100BASETX_FD,
+ CAPS_LO_100BASETX_FD = 5,
CAPS_LO_100BASET2_FD,
CAPS_LO_1000BASET_HD,
CAPS_LO_1000BASET_FD,
CAPS_LO_2P5GBASET_FD,
- CAPS_LO_5GBASET_FD,
+ CAPS_LO_5GBASET_FD = 10,
CAPS_LO_10GBASET_FD,
};
+/* 0x374
+ * Status register
+ */
enum hw_atl_fw2x_caps_hi {
- CAPS_HI_RESERVED1 = 0x00,
+ CAPS_HI_RESERVED1 = 0,
CAPS_HI_10BASET_EEE,
CAPS_HI_RESERVED2,
CAPS_HI_PAUSE,
CAPS_HI_ASYMMETRIC_PAUSE,
- CAPS_HI_100BASETX_EEE,
+ CAPS_HI_100BASETX_EEE = 5,
CAPS_HI_RESERVED3,
CAPS_HI_RESERVED4,
CAPS_HI_1000BASET_FD_EEE,
CAPS_HI_2P5GBASET_FD_EEE,
- CAPS_HI_5GBASET_FD_EEE,
+ CAPS_HI_5GBASET_FD_EEE = 10,
CAPS_HI_10GBASET_FD_EEE,
- CAPS_HI_RESERVED5,
+ CAPS_HI_FW_REQUEST,
CAPS_HI_RESERVED6,
CAPS_HI_RESERVED7,
- CAPS_HI_RESERVED8,
+ CAPS_HI_RESERVED8 = 15,
CAPS_HI_RESERVED9,
CAPS_HI_CABLE_DIAG,
CAPS_HI_TEMPERATURE,
CAPS_HI_DOWNSHIFT,
- CAPS_HI_PTP_AVB_EN,
+ CAPS_HI_PTP_AVB_EN_FW2X = 20,
CAPS_HI_MEDIA_DETECT,
CAPS_HI_LINK_DROP,
CAPS_HI_SLEEP_PROXY,
CAPS_HI_WOL,
- CAPS_HI_MAC_STOP,
+ CAPS_HI_MAC_STOP = 25,
CAPS_HI_EXT_LOOPBACK,
CAPS_HI_INT_LOOPBACK,
CAPS_HI_EFUSE_AGENT,
CAPS_HI_WOL_TIMER,
- CAPS_HI_STATISTICS,
+ CAPS_HI_STATISTICS = 30,
CAPS_HI_TRANSACTION_ID,
};
+/* 0x36C
+ * Control register
+ */
enum hw_atl_fw2x_ctrl {
- CTRL_RESERVED1 = 0x00,
+ CTRL_RESERVED1 = 0,
CTRL_RESERVED2,
CTRL_RESERVED3,
CTRL_PAUSE,
CTRL_ASYMMETRIC_PAUSE,
- CTRL_RESERVED4,
+ CTRL_RESERVED4 = 5,
CTRL_RESERVED5,
CTRL_RESERVED6,
CTRL_1GBASET_FD_EEE,
CTRL_2P5GBASET_FD_EEE,
- CTRL_5GBASET_FD_EEE,
+ CTRL_5GBASET_FD_EEE = 10,
CTRL_10GBASET_FD_EEE,
CTRL_THERMAL_SHUTDOWN,
CTRL_PHY_LOGS,
CTRL_EEE_AUTO_DISABLE,
- CTRL_PFC,
+ CTRL_PFC = 15,
CTRL_WAKE_ON_LINK,
CTRL_CABLE_DIAG,
CTRL_TEMPERATURE,
CTRL_DOWNSHIFT,
- CTRL_PTP_AVB,
+ CTRL_PTP_AVB = 20,
CTRL_RESERVED7,
CTRL_LINK_DROP,
CTRL_SLEEP_PROXY,
CTRL_WOL,
- CTRL_MAC_STOP,
+ CTRL_MAC_STOP = 25,
CTRL_EXT_LOOPBACK,
CTRL_INT_LOOPBACK,
CTRL_RESERVED8,
CTRL_WOL_TIMER,
- CTRL_STATISTICS,
+ CTRL_STATISTICS = 30,
CTRL_FORCE_RECONNECT,
};
+enum hw_atl_caps_ex {
+ CAPS_EX_LED_CONTROL = 0,
+ CAPS_EX_LED0_MODE_LO,
+ CAPS_EX_LED0_MODE_HI,
+ CAPS_EX_LED1_MODE_LO,
+ CAPS_EX_LED1_MODE_HI,
+ CAPS_EX_LED2_MODE_LO = 5,
+ CAPS_EX_LED2_MODE_HI,
+ CAPS_EX_RESERVED07,
+ CAPS_EX_RESERVED08,
+ CAPS_EX_RESERVED09,
+ CAPS_EX_RESERVED10 = 10,
+ CAPS_EX_RESERVED11,
+ CAPS_EX_RESERVED12,
+ CAPS_EX_RESERVED13,
+ CAPS_EX_RESERVED14,
+ CAPS_EX_RESERVED15 = 15,
+ CAPS_EX_PHY_PTP_EN,
+ CAPS_EX_MAC_PTP_EN,
+ CAPS_EX_EXT_CLK_EN,
+ CAPS_EX_SCHED_DMA_EN,
+ CAPS_EX_PTP_GPIO_EN = 20,
+ CAPS_EX_UPDATE_SETTINGS,
+ CAPS_EX_PHY_CTRL_TS_PIN,
+ CAPS_EX_SNR_OPERATING_MARGIN,
+ CAPS_EX_RESERVED24,
+ CAPS_EX_RESERVED25 = 25,
+ CAPS_EX_RESERVED26,
+ CAPS_EX_RESERVED27,
+ CAPS_EX_RESERVED28,
+ CAPS_EX_RESERVED29,
+ CAPS_EX_RESERVED30 = 30,
+ CAPS_EX_RESERVED31
+};
+
struct aq_hw_s;
struct aq_fw_ops;
struct aq_hw_caps_s;
@@ -475,6 +605,8 @@ struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self);
int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
u32 *p, u32 cnt);
+int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p, u32 cnt);
+
int hw_atl_utils_fw_set_wol(struct aq_hw_s *self, bool wol_enabled, u8 *mac);
int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
index da726489e3c8..f649ac949d06 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
*/
/* File hw_atl_utils_fw2x.c: Definition of firmware 2.x functions for
@@ -17,14 +17,17 @@
#include "hw_atl_utils.h"
#include "hw_atl_llh.h"
-#define HW_ATL_FW2X_MPI_RPC_ADDR 0x334
+#define HW_ATL_FW2X_MPI_RPC_ADDR 0x334
-#define HW_ATL_FW2X_MPI_MBOX_ADDR 0x360
-#define HW_ATL_FW2X_MPI_EFUSE_ADDR 0x364
-#define HW_ATL_FW2X_MPI_CONTROL_ADDR 0x368
-#define HW_ATL_FW2X_MPI_CONTROL2_ADDR 0x36C
-#define HW_ATL_FW2X_MPI_STATE_ADDR 0x370
-#define HW_ATL_FW2X_MPI_STATE2_ADDR 0x374
+#define HW_ATL_FW2X_MPI_MBOX_ADDR 0x360
+#define HW_ATL_FW2X_MPI_EFUSE_ADDR 0x364
+#define HW_ATL_FW2X_MPI_CONTROL_ADDR 0x368
+#define HW_ATL_FW2X_MPI_CONTROL2_ADDR 0x36C
+#define HW_ATL_FW2X_MPI_STATE_ADDR 0x370
+#define HW_ATL_FW2X_MPI_STATE2_ADDR 0x374
+
+#define HW_ATL_FW3X_EXT_CONTROL_ADDR 0x378
+#define HW_ATL_FW3X_EXT_STATE_ADDR 0x37c
#define HW_ATL_FW2X_CAP_PAUSE BIT(CAPS_HI_PAUSE)
#define HW_ATL_FW2X_CAP_ASYM_PAUSE BIT(CAPS_HI_ASYMMETRIC_PAUSE)
@@ -337,7 +340,7 @@ static int aq_fw2x_get_phy_temp(struct aq_hw_s *self, int *temp)
/* Convert PHY temperature from 1/256 degree Celsius
* to 1/1000 degree Celsius.
*/
- *temp = temp_res * 1000 / 256;
+ *temp = (temp_res & 0xFFFF) * 1000 / 256;
return 0;
}
@@ -444,6 +447,54 @@ err_exit:
return err;
}
+static int aq_fw2x_send_fw_request(struct aq_hw_s *self,
+ const struct hw_fw_request_iface *fw_req,
+ size_t size)
+{
+ u32 ctrl2, orig_ctrl2;
+ u32 dword_cnt;
+ int err = 0;
+ u32 val;
+
+ /* Write data to drvIface Mailbox */
+ dword_cnt = size / sizeof(u32);
+ if (size % sizeof(u32))
+ dword_cnt++;
+ err = hw_atl_utils_fw_upload_dwords(self, aq_fw2x_rpc_get(self),
+ (void *)fw_req, dword_cnt);
+ if (err < 0)
+ goto err_exit;
+
+ /* Toggle the FW_REQUEST bit so firmware picks up the new request */
+ ctrl2 = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+ orig_ctrl2 = ctrl2 & BIT(CAPS_HI_FW_REQUEST);
+ ctrl2 = ctrl2 ^ BIT(CAPS_HI_FW_REQUEST);
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, ctrl2);
+
+ /* Wait for FW to report back */
+ err = readx_poll_timeout_atomic(aq_fw2x_state2_get, self, val,
+ orig_ctrl2 != (val &
+ BIT(CAPS_HI_FW_REQUEST)),
+ 1U, 10000U);
+
+err_exit:
+ return err;
+}
+
+static void aq_fw3x_enable_ptp(struct aq_hw_s *self, int enable)
+{
+ u32 ptp_opts = aq_hw_read_reg(self, HW_ATL_FW3X_EXT_STATE_ADDR);
+ u32 all_ptp_features = BIT(CAPS_EX_PHY_PTP_EN) |
+ BIT(CAPS_EX_PTP_GPIO_EN);
+
+ if (enable)
+ ptp_opts |= all_ptp_features;
+ else
+ ptp_opts &= ~all_ptp_features;
+
+ aq_hw_write_reg(self, HW_ATL_FW3X_EXT_CONTROL_ADDR, ptp_opts);
+}
+
static int aq_fw2x_set_eee_rate(struct aq_hw_s *self, u32 speed)
{
u32 mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
@@ -534,19 +585,21 @@ static u32 aq_fw2x_state2_get(struct aq_hw_s *self)
}
const struct aq_fw_ops aq_fw_2x_ops = {
- .init = aq_fw2x_init,
- .deinit = aq_fw2x_deinit,
- .reset = NULL,
- .renegotiate = aq_fw2x_renegotiate,
- .get_mac_permanent = aq_fw2x_get_mac_permanent,
- .set_link_speed = aq_fw2x_set_link_speed,
- .set_state = aq_fw2x_set_state,
+ .init = aq_fw2x_init,
+ .deinit = aq_fw2x_deinit,
+ .reset = NULL,
+ .renegotiate = aq_fw2x_renegotiate,
+ .get_mac_permanent = aq_fw2x_get_mac_permanent,
+ .set_link_speed = aq_fw2x_set_link_speed,
+ .set_state = aq_fw2x_set_state,
.update_link_status = aq_fw2x_update_link_status,
- .update_stats = aq_fw2x_update_stats,
- .get_phy_temp = aq_fw2x_get_phy_temp,
- .set_power = aq_fw2x_set_power,
- .set_eee_rate = aq_fw2x_set_eee_rate,
- .get_eee_rate = aq_fw2x_get_eee_rate,
- .set_flow_control = aq_fw2x_set_flow_control,
- .get_flow_control = aq_fw2x_get_flow_control
+ .update_stats = aq_fw2x_update_stats,
+ .get_phy_temp = aq_fw2x_get_phy_temp,
+ .set_power = aq_fw2x_set_power,
+ .set_eee_rate = aq_fw2x_set_eee_rate,
+ .get_eee_rate = aq_fw2x_get_eee_rate,
+ .set_flow_control = aq_fw2x_set_flow_control,
+ .get_flow_control = aq_fw2x_get_flow_control,
+ .send_fw_request = aq_fw2x_send_fw_request,
+ .enable_ptp = aq_fw3x_enable_ptp,
};
diff --git a/drivers/net/ethernet/arc/emac_arc.c b/drivers/net/ethernet/arc/emac_arc.c
index 78e52d217e56..539166112993 100644
--- a/drivers/net/ethernet/arc/emac_arc.c
+++ b/drivers/net/ethernet/arc/emac_arc.c
@@ -20,9 +20,10 @@
static int emac_arc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct net_device *ndev;
struct arc_emac_priv *priv;
- int interface, err;
+ phy_interface_t interface;
+ struct net_device *ndev;
+ int err;
if (!dev->of_node)
return -ENODEV;
@@ -37,9 +38,13 @@ static int emac_arc_probe(struct platform_device *pdev)
priv->drv_name = DRV_NAME;
priv->drv_version = DRV_VERSION;
- interface = of_get_phy_mode(dev->of_node);
- if (interface < 0)
- interface = PHY_INTERFACE_MODE_MII;
+ err = of_get_phy_mode(dev->of_node, &interface);
+ if (err) {
+ if (err == -ENODEV)
+ interface = PHY_INTERFACE_MODE_MII;
+ else
+ goto out_netdev;
+ }
priv->clk = devm_clk_get(dev, "hclk");
if (IS_ERR(priv->clk)) {
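This driver and the ones converted below (emac_rockchip, ag71xx, nb8800,
bcmsysport) all move to the same new of_get_phy_mode() calling convention: the
function now returns an error code and fills the mode through a pointer
instead of encoding both in one int. The common pattern, sketched (the
example_* helper is illustrative, not from the patch):

#include <linux/of_net.h>

static int example_get_phy_mode(struct device_node *np,
				phy_interface_t *interface)
{
	int err;

	err = of_get_phy_mode(np, interface);
	if (err == -ENODEV) {
		/* Property absent: fall back to a driver default */
		*interface = PHY_INTERFACE_MODE_MII;
		return 0;
	}

	return err;	/* 0 on success, or e.g. -EINVAL for a bad value */
}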
diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c
index 42d2e1b02c44..aae231c5224f 100644
--- a/drivers/net/ethernet/arc/emac_rockchip.c
+++ b/drivers/net/ethernet/arc/emac_rockchip.c
@@ -97,8 +97,9 @@ static int emac_rockchip_probe(struct platform_device *pdev)
struct net_device *ndev;
struct rockchip_priv_data *priv;
const struct of_device_id *match;
+ phy_interface_t interface;
u32 data;
- int err, interface;
+ int err;
if (!pdev->dev.of_node)
return -ENODEV;
@@ -114,7 +115,9 @@ static int emac_rockchip_probe(struct platform_device *pdev)
priv->emac.drv_version = DRV_VERSION;
priv->emac.set_mac_speed = emac_rockchip_set_mac_speed;
- interface = of_get_phy_mode(dev->of_node);
+ err = of_get_phy_mode(dev->of_node, &interface);
+ if (err)
+ goto out_netdev;
/* RK3036/RK3066/RK3188 SoCs only support RMII */
if (interface != PHY_INTERFACE_MODE_RMII) {
@@ -256,6 +259,9 @@ static int emac_rockchip_remove(struct platform_device *pdev)
if (priv->regulator)
regulator_disable(priv->regulator);
+ if (priv->soc_data->need_div_macclk)
+ clk_disable_unprepare(priv->macclk);
+
free_netdev(ndev);
return err;
}
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index 7548247455d7..8f5021091eee 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -526,7 +526,7 @@ static int ag71xx_mdio_probe(struct ag71xx *ag)
struct device *dev = &ag->pdev->dev;
struct net_device *ndev = ag->ndev;
static struct mii_bus *mii_bus;
- struct device_node *np;
+ struct device_node *np, *mnp;
int err;
np = dev->of_node;
@@ -571,7 +571,9 @@ static int ag71xx_mdio_probe(struct ag71xx *ag)
msleep(200);
}
- err = of_mdiobus_register(mii_bus, np);
+ mnp = of_get_child_by_name(np, "mdio");
+ err = of_mdiobus_register(mii_bus, mnp);
+ of_node_put(mnp);
if (err)
goto mdio_err_put_clk;
@@ -1742,10 +1744,9 @@ static int ag71xx_probe(struct platform_device *pdev)
eth_random_addr(ndev->dev_addr);
}
- ag->phy_if_mode = of_get_phy_mode(np);
- if (ag->phy_if_mode < 0) {
+ err = of_get_phy_mode(np, &ag->phy_if_mode);
+ if (err) {
netif_err(ag, probe, ndev, "missing phy-mode property in DT\n");
- err = ag->phy_if_mode;
goto err_free;
}
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
index 37752d9514e7..30b455013bf3 100644
--- a/drivers/net/ethernet/aurora/nb8800.c
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -1371,8 +1371,8 @@ static int nb8800_probe(struct platform_device *pdev)
priv = netdev_priv(dev);
priv->base = base;
- priv->phy_mode = of_get_phy_mode(pdev->dev.of_node);
- if (priv->phy_mode < 0)
+ ret = of_get_phy_mode(pdev->dev.of_node, &priv->phy_mode);
+ if (ret)
priv->phy_mode = PHY_INTERFACE_MODE_RGMII;
priv->clk = devm_clk_get(&pdev->dev, NULL);
diff --git a/drivers/net/ethernet/aurora/nb8800.h b/drivers/net/ethernet/aurora/nb8800.h
index aacc3cce2cc0..40941fb6065b 100644
--- a/drivers/net/ethernet/aurora/nb8800.h
+++ b/drivers/net/ethernet/aurora/nb8800.h
@@ -287,7 +287,7 @@ struct nb8800_priv {
struct device_node *phy_node;
/* PHY connection type from DT */
- int phy_mode;
+ phy_interface_t phy_mode;
/* Current link status */
int speed;
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index e24f5d2b6afe..53055ce5dfd6 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -8,7 +8,6 @@ config NET_VENDOR_BROADCOM
default y
depends on (SSB_POSSIBLE && HAS_DMA) || PCI || BCM63XX || \
SIBYTE_SB1xxx_SOC
- select DIMLIB
---help---
If you have a network (Ethernet) chipset belonging to this class,
say Y.
@@ -69,6 +68,7 @@ config BCMGENET
select FIXED_PHY
select BCM7XXX_PHY
select MDIO_BCM_UNIMAC
+ select DIMLIB
help
This driver supports the built-in Ethernet MACs found in the
Broadcom BCM7xxx Set Top Box family chipset.
@@ -188,6 +188,7 @@ config SYSTEMPORT
select MII
select PHYLIB
select FIXED_PHY
+ select DIMLIB
help
This driver supports the built-in Ethernet MACs found in the
Broadcom BCM7xxx Set Top Box family chipset using an internal
@@ -200,6 +201,7 @@ config BNXT
select LIBCRC32C
select NET_DEVLINK
select PAGE_POOL
+ select DIMLIB
---help---
This driver supports Broadcom NetXtreme-C/E 10/25/40/50 gigabit
Ethernet cards. To compile this driver as a module, choose M here:
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 97ab0dd25552..035dbb1b2c98 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -511,9 +511,6 @@ static void b44_stats_update(struct b44 *bp)
*val++ += br32(bp, reg);
}
- /* Pad */
- reg += 8*4UL;
-
for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
*val++ += br32(bp, reg);
}
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index a977a459bd20..825af709708e 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -2479,9 +2479,9 @@ static int bcm_sysport_probe(struct platform_device *pdev)
priv->netdev = dev;
priv->pdev = pdev;
- priv->phy_interface = of_get_phy_mode(dn);
+ ret = of_get_phy_mode(dn, &priv->phy_interface);
/* Default to GMII interface mode */
- if ((int)priv->phy_interface < 0)
+ if (ret)
priv->phy_interface = PHY_INTERFACE_MODE_GMII;
/* In the case of a fixed PHY, the DT node associated
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index d10b421ed1f1..5e037a305b83 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1934,7 +1934,8 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
}
/* select a non-FCoE queue */
- return netdev_pick_tx(dev, skb, NULL) % (BNX2X_NUM_ETH_QUEUES(bp));
+ return netdev_pick_tx(dev, skb, NULL) %
+ (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
}
void bnx2x_set_num_queues(struct bnx2x *bp)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index 226ab29f4cb6..3f8435208bf4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -32,31 +32,31 @@
* IRO[142].m2) + ((sbId) * IRO[142].m3))
#define CSTORM_IGU_MODE_OFFSET (IRO[161].base)
#define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
- (IRO[323].base + ((pfId) * IRO[323].m1))
-#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
(IRO[324].base + ((pfId) * IRO[324].m1))
+#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
+ (IRO[325].base + ((pfId) * IRO[325].m1))
#define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \
- (IRO[316].base + ((pfId) * IRO[316].m1) + ((iscsiEqId) * IRO[316].m2))
+ (IRO[317].base + ((pfId) * IRO[317].m1) + ((iscsiEqId) * IRO[317].m2))
#define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \
- (IRO[318].base + ((pfId) * IRO[318].m1) + ((iscsiEqId) * IRO[318].m2))
+ (IRO[319].base + ((pfId) * IRO[319].m1) + ((iscsiEqId) * IRO[319].m2))
#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \
- (IRO[317].base + ((pfId) * IRO[317].m1) + ((iscsiEqId) * IRO[317].m2))
+ (IRO[318].base + ((pfId) * IRO[318].m1) + ((iscsiEqId) * IRO[318].m2))
#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \
- (IRO[319].base + ((pfId) * IRO[319].m1) + ((iscsiEqId) * IRO[319].m2))
+ (IRO[320].base + ((pfId) * IRO[320].m1) + ((iscsiEqId) * IRO[320].m2))
#define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \
- (IRO[315].base + ((pfId) * IRO[315].m1) + ((iscsiEqId) * IRO[315].m2))
+ (IRO[316].base + ((pfId) * IRO[316].m1) + ((iscsiEqId) * IRO[316].m2))
#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \
- (IRO[321].base + ((pfId) * IRO[321].m1) + ((iscsiEqId) * IRO[321].m2))
+ (IRO[322].base + ((pfId) * IRO[322].m1) + ((iscsiEqId) * IRO[322].m2))
#define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \
- (IRO[320].base + ((pfId) * IRO[320].m1) + ((iscsiEqId) * IRO[320].m2))
+ (IRO[321].base + ((pfId) * IRO[321].m1) + ((iscsiEqId) * IRO[321].m2))
#define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
- (IRO[322].base + ((pfId) * IRO[322].m1))
+ (IRO[323].base + ((pfId) * IRO[323].m1))
#define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
- (IRO[314].base + ((pfId) * IRO[314].m1))
+ (IRO[315].base + ((pfId) * IRO[315].m1))
#define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
- (IRO[313].base + ((pfId) * IRO[313].m1))
+ (IRO[314].base + ((pfId) * IRO[314].m1))
#define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
- (IRO[312].base + ((pfId) * IRO[312].m1))
+ (IRO[313].base + ((pfId) * IRO[313].m1))
#define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
(IRO[155].base + ((funcId) * IRO[155].m1))
#define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \
@@ -99,81 +99,81 @@
#define TSTORM_FUNC_EN_OFFSET(funcId) \
(IRO[107].base + ((funcId) * IRO[107].m1))
#define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
- (IRO[278].base + ((pfId) * IRO[278].m1))
-#define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \
(IRO[279].base + ((pfId) * IRO[279].m1))
-#define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \
+#define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \
(IRO[280].base + ((pfId) * IRO[280].m1))
-#define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \
+#define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \
(IRO[281].base + ((pfId) * IRO[281].m1))
+#define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \
+ (IRO[282].base + ((pfId) * IRO[282].m1))
#define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
- (IRO[277].base + ((pfId) * IRO[277].m1))
+ (IRO[278].base + ((pfId) * IRO[278].m1))
#define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
- (IRO[276].base + ((pfId) * IRO[276].m1))
+ (IRO[277].base + ((pfId) * IRO[277].m1))
#define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
- (IRO[275].base + ((pfId) * IRO[275].m1))
+ (IRO[276].base + ((pfId) * IRO[276].m1))
#define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
- (IRO[274].base + ((pfId) * IRO[274].m1))
+ (IRO[275].base + ((pfId) * IRO[275].m1))
#define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \
- (IRO[284].base + ((pfId) * IRO[284].m1))
+ (IRO[285].base + ((pfId) * IRO[285].m1))
#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
- (IRO[270].base + ((pfId) * IRO[270].m1))
-#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
(IRO[271].base + ((pfId) * IRO[271].m1))
-#define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \
+#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
(IRO[272].base + ((pfId) * IRO[272].m1))
-#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
+#define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \
(IRO[273].base + ((pfId) * IRO[273].m1))
+#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
+ (IRO[274].base + ((pfId) * IRO[274].m1))
#define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \
(IRO[206].base + ((pfId) * IRO[206].m1))
#define TSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
(IRO[109].base + ((funcId) * IRO[109].m1))
#define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \
- (IRO[223].base + ((pfId) * IRO[223].m1))
+ (IRO[224].base + ((pfId) * IRO[224].m1))
#define TSTORM_VF_TO_PF_OFFSET(funcId) \
(IRO[108].base + ((funcId) * IRO[108].m1))
-#define USTORM_AGG_DATA_OFFSET (IRO[212].base)
-#define USTORM_AGG_DATA_SIZE (IRO[212].size)
+#define USTORM_AGG_DATA_OFFSET (IRO[213].base)
+#define USTORM_AGG_DATA_SIZE (IRO[213].size)
#define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[181].base)
#define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \
(IRO[180].base + ((assertListEntry) * IRO[180].m1))
#define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \
(IRO[187].base + ((portId) * IRO[187].m1))
#define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \
- (IRO[325].base + ((pfId) * IRO[325].m1))
+ (IRO[326].base + ((pfId) * IRO[326].m1))
#define USTORM_FUNC_EN_OFFSET(funcId) \
(IRO[182].base + ((funcId) * IRO[182].m1))
#define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
- (IRO[289].base + ((pfId) * IRO[289].m1))
-#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
(IRO[290].base + ((pfId) * IRO[290].m1))
+#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
+ (IRO[291].base + ((pfId) * IRO[291].m1))
#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
- (IRO[294].base + ((pfId) * IRO[294].m1))
+ (IRO[295].base + ((pfId) * IRO[295].m1))
#define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \
- (IRO[291].base + ((pfId) * IRO[291].m1))
+ (IRO[292].base + ((pfId) * IRO[292].m1))
#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
- (IRO[287].base + ((pfId) * IRO[287].m1))
+ (IRO[288].base + ((pfId) * IRO[288].m1))
#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
- (IRO[286].base + ((pfId) * IRO[286].m1))
+ (IRO[287].base + ((pfId) * IRO[287].m1))
#define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
- (IRO[285].base + ((pfId) * IRO[285].m1))
+ (IRO[286].base + ((pfId) * IRO[286].m1))
#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
- (IRO[288].base + ((pfId) * IRO[288].m1))
+ (IRO[289].base + ((pfId) * IRO[289].m1))
#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \
- (IRO[292].base + ((pfId) * IRO[292].m1))
-#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
(IRO[293].base + ((pfId) * IRO[293].m1))
+#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
+ (IRO[294].base + ((pfId) * IRO[294].m1))
#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \
(IRO[186].base + ((pfId) * IRO[186].m1))
#define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
(IRO[184].base + ((funcId) * IRO[184].m1))
#define USTORM_RX_PRODS_E1X_OFFSET(portId, clientId) \
- (IRO[215].base + ((portId) * IRO[215].m1) + ((clientId) * \
- IRO[215].m2))
+ (IRO[216].base + ((portId) * IRO[216].m1) + ((clientId) * \
+ IRO[216].m2))
#define USTORM_RX_PRODS_E2_OFFSET(qzoneId) \
- (IRO[216].base + ((qzoneId) * IRO[216].m1))
-#define USTORM_TPA_BTR_OFFSET (IRO[213].base)
-#define USTORM_TPA_BTR_SIZE (IRO[213].size)
+ (IRO[217].base + ((qzoneId) * IRO[217].m1))
+#define USTORM_TPA_BTR_OFFSET (IRO[214].base)
+#define USTORM_TPA_BTR_SIZE (IRO[214].size)
#define USTORM_VF_TO_PF_OFFSET(funcId) \
(IRO[183].base + ((funcId) * IRO[183].m1))
#define XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE (IRO[67].base)
@@ -188,39 +188,39 @@
#define XSTORM_FUNC_EN_OFFSET(funcId) \
(IRO[47].base + ((funcId) * IRO[47].m1))
#define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
- (IRO[302].base + ((pfId) * IRO[302].m1))
+ (IRO[303].base + ((pfId) * IRO[303].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \
- (IRO[305].base + ((pfId) * IRO[305].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
(IRO[306].base + ((pfId) * IRO[306].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
(IRO[307].base + ((pfId) * IRO[307].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
(IRO[308].base + ((pfId) * IRO[308].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
(IRO[309].base + ((pfId) * IRO[309].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
(IRO[310].base + ((pfId) * IRO[310].m1))
-#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
(IRO[311].base + ((pfId) * IRO[311].m1))
+#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
+ (IRO[312].base + ((pfId) * IRO[312].m1))
#define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
- (IRO[301].base + ((pfId) * IRO[301].m1))
+ (IRO[302].base + ((pfId) * IRO[302].m1))
#define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
- (IRO[300].base + ((pfId) * IRO[300].m1))
+ (IRO[301].base + ((pfId) * IRO[301].m1))
#define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
- (IRO[299].base + ((pfId) * IRO[299].m1))
+ (IRO[300].base + ((pfId) * IRO[300].m1))
#define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
- (IRO[304].base + ((pfId) * IRO[304].m1))
+ (IRO[305].base + ((pfId) * IRO[305].m1))
#define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \
- (IRO[303].base + ((pfId) * IRO[303].m1))
+ (IRO[304].base + ((pfId) * IRO[304].m1))
#define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \
- (IRO[298].base + ((pfId) * IRO[298].m1))
+ (IRO[299].base + ((pfId) * IRO[299].m1))
#define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
- (IRO[297].base + ((pfId) * IRO[297].m1))
+ (IRO[298].base + ((pfId) * IRO[298].m1))
#define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \
- (IRO[296].base + ((pfId) * IRO[296].m1))
+ (IRO[297].base + ((pfId) * IRO[297].m1))
#define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \
- (IRO[295].base + ((pfId) * IRO[295].m1))
+ (IRO[296].base + ((pfId) * IRO[296].m1))
#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \
(IRO[44].base + ((pfId) * IRO[44].m1))
#define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
@@ -233,12 +233,12 @@
#define XSTORM_SPQ_PROD_OFFSET(funcId) \
(IRO[31].base + ((funcId) * IRO[31].m1))
#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(portId) \
- (IRO[217].base + ((portId) * IRO[217].m1))
-#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(portId) \
(IRO[218].base + ((portId) * IRO[218].m1))
+#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(portId) \
+ (IRO[219].base + ((portId) * IRO[219].m1))
#define XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfId) \
- (IRO[220].base + (((pfId)>>1) * IRO[220].m1) + (((pfId)&1) * \
- IRO[220].m2))
+ (IRO[221].base + (((pfId)>>1) * IRO[221].m1) + (((pfId)&1) * \
+ IRO[221].m2))
#define XSTORM_VF_TO_PF_OFFSET(funcId) \
(IRO[48].base + ((funcId) * IRO[48].m1))
#define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 78326a6c0aba..622fadc50316 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -3024,7 +3024,7 @@ struct afex_stats {
#define BCM_5710_FW_MAJOR_VERSION 7
#define BCM_5710_FW_MINOR_VERSION 13
-#define BCM_5710_FW_REVISION_VERSION 11
+#define BCM_5710_FW_REVISION_VERSION 15
#define BCM_5710_FW_ENGINEERING_VERSION 0
#define BCM_5710_FW_COMPILE_FLAGS 1
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 0edbb0a76847..5097a44686b3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -2397,15 +2397,21 @@ static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable)
/* send the ramrod on all the queues of the PF */
for_each_eth_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
+ int tx_idx;
/* Set the appropriate Queue object */
q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
- /* Update the Queue state */
- rc = bnx2x_queue_state_change(bp, &q_params);
- if (rc) {
- BNX2X_ERR("Failed to configure Tx switching\n");
- return rc;
+ for (tx_idx = FIRST_TX_COS_INDEX;
+ tx_idx < fp->max_cos; tx_idx++) {
+ q_params.params.update.cid_index = tx_idx;
+
+ /* Update the Queue state */
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc) {
+ BNX2X_ERR("Failed to configure Tx switching\n");
+ return rc;
+ }
}
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index b4a8cf620a0c..c07172429c70 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -8762,6 +8762,8 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
}
if (resc_reinit || fw_reset) {
if (fw_reset) {
+ if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
+ bnxt_ulp_stop(bp);
rc = bnxt_fw_init_one(bp);
if (rc) {
set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
@@ -9224,13 +9226,16 @@ static int bnxt_open(struct net_device *dev)
if (rc) {
bnxt_hwrm_if_change(bp, false);
} else {
- if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state) &&
- BNXT_PF(bp)) {
- struct bnxt_pf_info *pf = &bp->pf;
- int n = pf->active_vfs;
+ if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
+ if (BNXT_PF(bp)) {
+ struct bnxt_pf_info *pf = &bp->pf;
+ int n = pf->active_vfs;
- if (n)
- bnxt_cfg_hw_sriov(bp, &n, true);
+ if (n)
+ bnxt_cfg_hw_sriov(bp, &n, true);
+ }
+ if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
+ bnxt_ulp_start(bp, 0);
}
bnxt_hwmon_open(bp);
}
@@ -9927,12 +9932,15 @@ static void bnxt_reset_task(struct bnxt *bp, bool silent)
if (netif_running(bp->dev)) {
int rc;
- if (!silent)
+ if (silent) {
+ bnxt_close_nic(bp, false, false);
+ bnxt_open_nic(bp, false, false);
+ } else {
bnxt_ulp_stop(bp);
- bnxt_close_nic(bp, false, false);
- rc = bnxt_open_nic(bp, false, false);
- if (!silent && !rc)
- bnxt_ulp_start(bp);
+ bnxt_close_nic(bp, true, false);
+ rc = bnxt_open_nic(bp, true, false);
+ bnxt_ulp_start(bp, rc);
+ }
}
}
@@ -10004,7 +10012,7 @@ static void bnxt_timer(struct timer_list *t)
if (bp->link_info.phy_retry) {
if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
- bp->link_info.phy_retry = 0;
+ bp->link_info.phy_retry = false;
netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
} else {
set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
@@ -10048,8 +10056,8 @@ static void bnxt_reset(struct bnxt *bp, bool silent)
static void bnxt_fw_reset_close(struct bnxt *bp)
{
+ bnxt_ulp_stop(bp);
__bnxt_close_nic(bp, true, false);
- bnxt_ulp_irq_stop(bp);
bnxt_clear_int_mode(bp);
bnxt_hwrm_func_drv_unrgtr(bp);
bnxt_free_ctx_mem(bp);
@@ -10382,7 +10390,8 @@ static void bnxt_cleanup_pci(struct bnxt *bp)
{
bnxt_unmap_bars(bp, bp->pdev);
pci_release_regions(bp->pdev);
- pci_disable_device(bp->pdev);
+ if (pci_is_enabled(bp->pdev))
+ pci_disable_device(bp->pdev);
}
static void bnxt_init_dflt_coal(struct bnxt *bp)
@@ -10581,14 +10590,23 @@ static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
static void bnxt_reset_all(struct bnxt *bp)
{
struct bnxt_fw_health *fw_health = bp->fw_health;
- int i;
+ int i, rc;
+
+ if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
+#ifdef CONFIG_TEE_BNXT_FW
+ rc = tee_bnxt_fw_load();
+ if (rc)
+ netdev_err(bp->dev, "Unable to reset FW rc=%d\n", rc);
+ bp->fw_reset_timestamp = jiffies;
+#endif
+ return;
+ }
if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
bnxt_fw_reset_writel(bp, i);
} else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
struct hwrm_fw_reset_input req = {0};
- int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);
req.resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
@@ -10669,14 +10687,11 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
}
/* fall through */
- case BNXT_FW_RESET_STATE_RESET_FW: {
- u32 wait_dsecs = bp->fw_health->post_reset_wait_dsecs;
-
+ case BNXT_FW_RESET_STATE_RESET_FW:
bnxt_reset_all(bp);
bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
- bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
+ bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
return;
- }
case BNXT_FW_RESET_STATE_ENABLE_DEV:
if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
bp->fw_health) {
@@ -10722,13 +10737,13 @@ static void bnxt_fw_reset_task(struct work_struct *work)
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
dev_close(bp->dev);
}
- bnxt_ulp_irq_restart(bp, rc);
- rtnl_unlock();
bp->fw_reset_state = 0;
/* Make sure fw_reset_state is 0 before clearing the flag */
smp_mb__before_atomic();
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ bnxt_ulp_start(bp, rc);
+ rtnl_unlock();
break;
}
return;
@@ -10936,7 +10951,7 @@ static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
}
}
-static LIST_HEAD(bnxt_block_cb_list);
+LIST_HEAD(bnxt_block_cb_list);
static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
@@ -11884,6 +11899,7 @@ static int bnxt_suspend(struct device *device)
int rc = 0;
rtnl_lock();
+ bnxt_ulp_stop(bp);
if (netif_running(dev)) {
netif_device_detach(dev);
rc = bnxt_close(dev);
@@ -11917,6 +11933,7 @@ static int bnxt_resume(struct device *device)
}
resume_exit:
+ bnxt_ulp_start(bp, rc);
rtnl_unlock();
return rc;
}
@@ -11996,10 +12013,9 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
if (!err && netif_running(netdev))
err = bnxt_open(netdev);
- if (!err) {
+ if (!err)
result = PCI_ERS_RESULT_RECOVERED;
- bnxt_ulp_start(bp);
- }
+ bnxt_ulp_start(bp, err);
}
if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
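A recurring theme in the bnxt.c hunks above is the reworked ULP handshake:
every path that takes the NIC down now quiesces the RDMA ULP first, and every
bring-up reports its result so the ULP knows whether recovery succeeded.
Condensed from the non-silent reset path above (the pattern, not new code):

	bnxt_ulp_stop(bp);			/* quiesce the ULP first */
	bnxt_close_nic(bp, true, false);
	rc = bnxt_open_nic(bp, true, false);
	bnxt_ulp_start(bp, rc);			/* hand the ULP the outcome */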
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index d333589811a5..a3545c846bfb 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -25,6 +25,11 @@
#include <net/dst_metadata.h>
#include <net/xdp.h>
#include <linux/dim.h>
+#ifdef CONFIG_TEE_BNXT_FW
+#include <linux/firmware/broadcom/tee_bnxt_fw.h>
+#endif
+
+extern struct list_head bnxt_block_cb_list;
struct page_pool;
@@ -1241,6 +1246,14 @@ struct bnxt_tc_flow_stats {
u64 bytes;
};
+#ifdef CONFIG_BNXT_FLOWER_OFFLOAD
+struct bnxt_flower_indr_block_cb_priv {
+ struct net_device *tunnel_netdev;
+ struct bnxt *bp;
+ struct list_head list;
+};
+#endif
+
struct bnxt_tc_info {
bool enabled;
@@ -1804,6 +1817,9 @@ struct bnxt {
u8 num_leds;
struct bnxt_led_info leds[BNXT_MAX_LED];
+ u16 dump_flag;
+#define BNXT_DUMP_LIVE 0
+#define BNXT_DUMP_CRASH 1
struct bpf_prog *xdp_prog;
@@ -1815,6 +1831,8 @@ struct bnxt {
u16 *cfa_code_map; /* cfa_code -> vf_idx map */
u8 switch_id[8];
struct bnxt_tc_info *tc_info;
+ struct list_head tc_indr_block_list;
+ struct notifier_block tc_netdev_nb;
struct dentry *debugfs_pdev;
struct device *hwmon_dev;
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index e664392dccc0..ae4ddf33fe5c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -16,7 +16,8 @@
#include "bnxt_devlink.h"
static int bnxt_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
- struct devlink_fmsg *fmsg)
+ struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = devlink_health_reporter_priv(reporter);
struct bnxt_fw_health *health = bp->fw_health;
@@ -29,25 +30,20 @@ static int bnxt_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
health_status = val & 0xffff;
- if (health_status == BNXT_FW_STATUS_HEALTHY) {
- rc = devlink_fmsg_string_pair_put(fmsg, "FW status",
- "Healthy;");
- if (rc)
- return rc;
- } else if (health_status < BNXT_FW_STATUS_HEALTHY) {
- rc = devlink_fmsg_string_pair_put(fmsg, "FW status",
- "Not yet completed initialization;");
+ if (health_status < BNXT_FW_STATUS_HEALTHY) {
+ rc = devlink_fmsg_string_pair_put(fmsg, "Description",
+ "Not yet completed initialization");
if (rc)
return rc;
} else if (health_status > BNXT_FW_STATUS_HEALTHY) {
- rc = devlink_fmsg_string_pair_put(fmsg, "FW status",
- "Encountered fatal error and cannot recover;");
+ rc = devlink_fmsg_string_pair_put(fmsg, "Description",
+ "Encountered fatal error and cannot recover");
if (rc)
return rc;
}
if (val >> 16) {
- rc = devlink_fmsg_u32_pair_put(fmsg, "Error", val >> 16);
+ rc = devlink_fmsg_u32_pair_put(fmsg, "Error code", val >> 16);
if (rc)
return rc;
}
@@ -66,7 +62,8 @@ static const struct devlink_health_reporter_ops bnxt_dl_fw_reporter_ops = {
};
static int bnxt_fw_reset_recover(struct devlink_health_reporter *reporter,
- void *priv_ctx)
+ void *priv_ctx,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = devlink_health_reporter_priv(reporter);
@@ -84,7 +81,8 @@ struct devlink_health_reporter_ops bnxt_dl_fw_reset_reporter_ops = {
};
static int bnxt_fw_fatal_recover(struct devlink_health_reporter *reporter,
- void *priv_ctx)
+ void *priv_ctx,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = devlink_health_reporter_priv(reporter);
struct bnxt_fw_reporter_ctx *fw_reporter_ctx = priv_ctx;
@@ -215,25 +213,68 @@ enum bnxt_dl_param_id {
static const struct bnxt_dl_nvm_param nvm_params[] = {
{DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV, NVM_OFF_ENABLE_SRIOV,
- BNXT_NVM_SHARED_CFG, 1},
+ BNXT_NVM_SHARED_CFG, 1, 1},
{DEVLINK_PARAM_GENERIC_ID_IGNORE_ARI, NVM_OFF_IGNORE_ARI,
- BNXT_NVM_SHARED_CFG, 1},
+ BNXT_NVM_SHARED_CFG, 1, 1},
{DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX,
- NVM_OFF_MSIX_VEC_PER_PF_MAX, BNXT_NVM_SHARED_CFG, 10},
+ NVM_OFF_MSIX_VEC_PER_PF_MAX, BNXT_NVM_SHARED_CFG, 10, 4},
{DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
- NVM_OFF_MSIX_VEC_PER_PF_MIN, BNXT_NVM_SHARED_CFG, 7},
+ NVM_OFF_MSIX_VEC_PER_PF_MIN, BNXT_NVM_SHARED_CFG, 7, 4},
{BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK, NVM_OFF_DIS_GRE_VER_CHECK,
- BNXT_NVM_SHARED_CFG, 1},
+ BNXT_NVM_SHARED_CFG, 1, 1},
+};
+
+union bnxt_nvm_data {
+ u8 val8;
+ __le32 val32;
};
+static void bnxt_copy_to_nvm_data(union bnxt_nvm_data *dst,
+ union devlink_param_value *src,
+ int nvm_num_bits, int dl_num_bytes)
+{
+ u32 val32 = 0;
+
+ if (nvm_num_bits == 1) {
+ dst->val8 = src->vbool;
+ return;
+ }
+ if (dl_num_bytes == 4)
+ val32 = src->vu32;
+ else if (dl_num_bytes == 2)
+ val32 = (u32)src->vu16;
+ else if (dl_num_bytes == 1)
+ val32 = (u32)src->vu8;
+ dst->val32 = cpu_to_le32(val32);
+}
+
+static void bnxt_copy_from_nvm_data(union devlink_param_value *dst,
+ union bnxt_nvm_data *src,
+ int nvm_num_bits, int dl_num_bytes)
+{
+ u32 val32;
+
+ if (nvm_num_bits == 1) {
+ dst->vbool = src->val8;
+ return;
+ }
+ val32 = le32_to_cpu(src->val32);
+ if (dl_num_bytes == 4)
+ dst->vu32 = val32;
+ else if (dl_num_bytes == 2)
+ dst->vu16 = (u16)val32;
+ else if (dl_num_bytes == 1)
+ dst->vu8 = (u8)val32;
+}
+
static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
int msg_len, union devlink_param_value *val)
{
struct hwrm_nvm_get_variable_input *req = msg;
- void *data_addr = NULL, *buf = NULL;
struct bnxt_dl_nvm_param nvm_param;
- int bytesize, idx = 0, rc, i;
+ union bnxt_nvm_data *data;
dma_addr_t data_dma_addr;
+ int idx = 0, rc, i;
/* Get/Set NVM CFG parameter is supported only on PFs */
if (BNXT_VF(bp))
@@ -254,47 +295,31 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG)
idx = bp->pf.fw_fid - BNXT_FIRST_PF_FID;
- bytesize = roundup(nvm_param.num_bits, BITS_PER_BYTE) / BITS_PER_BYTE;
- switch (bytesize) {
- case 1:
- if (nvm_param.num_bits == 1)
- buf = &val->vbool;
- else
- buf = &val->vu8;
- break;
- case 2:
- buf = &val->vu16;
- break;
- case 4:
- buf = &val->vu32;
- break;
- default:
- return -EFAULT;
- }
-
- data_addr = dma_alloc_coherent(&bp->pdev->dev, bytesize,
- &data_dma_addr, GFP_KERNEL);
- if (!data_addr)
+ data = dma_alloc_coherent(&bp->pdev->dev, sizeof(*data),
+ &data_dma_addr, GFP_KERNEL);
+ if (!data)
return -ENOMEM;
req->dest_data_addr = cpu_to_le64(data_dma_addr);
- req->data_len = cpu_to_le16(nvm_param.num_bits);
+ req->data_len = cpu_to_le16(nvm_param.nvm_num_bits);
req->option_num = cpu_to_le16(nvm_param.offset);
req->index_0 = cpu_to_le16(idx);
if (idx)
req->dimensions = cpu_to_le16(1);
if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE)) {
- memcpy(data_addr, buf, bytesize);
+ bnxt_copy_to_nvm_data(data, val, nvm_param.nvm_num_bits,
+ nvm_param.dl_num_bytes);
rc = hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT);
} else {
rc = hwrm_send_message_silent(bp, msg, msg_len,
HWRM_CMD_TIMEOUT);
+ if (!rc)
+ bnxt_copy_from_nvm_data(val, data,
+ nvm_param.nvm_num_bits,
+ nvm_param.dl_num_bytes);
}
- if (!rc && req->req_type == cpu_to_le16(HWRM_NVM_GET_VARIABLE))
- memcpy(buf, data_addr, bytesize);
-
- dma_free_coherent(&bp->pdev->dev, bytesize, data_addr, data_dma_addr);
+ dma_free_coherent(&bp->pdev->dev, sizeof(*data), data, data_dma_addr);
if (rc == -EACCES)
netdev_err(bp->dev, "PF does not have admin privileges to modify NVM config\n");
return rc;
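The fixed-size union above replaces the per-parameter allocations and makes
the NVM bit width independent of the devlink-visible integer width. A hedged
round-trip sketch (the example_* helper and the values are illustrative):

/* Pack a devlink u32 into the DMA-safe union and recover it, mirroring
 * what bnxt_hwrm_nvm_req() now does around the HWRM call.
 */
static void example_nvm_roundtrip(void)
{
	union devlink_param_value in = { .vu32 = 10 };
	union devlink_param_value out = {};
	union bnxt_nvm_data raw = {};

	bnxt_copy_to_nvm_data(&raw, &in, 10, 4);  /* 10 NVM bits, 4 dl bytes */
	bnxt_copy_from_nvm_data(&out, &raw, 10, 4);
	/* out.vu32 now equals in.vu32 */
}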
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
index b97e0baeb42d..2f4fd0a7d04b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
@@ -52,7 +52,8 @@ struct bnxt_dl_nvm_param {
u16 id;
u16 offset;
u16 dir_type;
- u16 num_bits;
+ u16 nvm_num_bits;
+ u8 dl_num_bytes;
};
void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 51c140476717..f2220b826d61 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -3311,6 +3311,24 @@ err:
return rc;
}
+static int bnxt_set_dump(struct net_device *dev, struct ethtool_dump *dump)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ if (dump->flag > BNXT_DUMP_CRASH) {
+ netdev_info(dev, "Supports only Live(0) and Crash(1) dumps.\n");
+ return -EINVAL;
+ }
+
+ if (!IS_ENABLED(CONFIG_TEE_BNXT_FW) && dump->flag == BNXT_DUMP_CRASH) {
+ netdev_info(dev, "Cannot collect crash dump as TEE_BNXT_FW config option is not enabled.\n");
+ return -EOPNOTSUPP;
+ }
+
+ bp->dump_flag = dump->flag;
+ return 0;
+}
+
static int bnxt_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump)
{
struct bnxt *bp = netdev_priv(dev);
@@ -3323,7 +3341,12 @@ static int bnxt_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump)
bp->ver_resp.hwrm_fw_bld_8b << 8 |
bp->ver_resp.hwrm_fw_rsvd_8b;
- return bnxt_get_coredump(bp, NULL, &dump->len);
+ dump->flag = bp->dump_flag;
+ if (bp->dump_flag == BNXT_DUMP_CRASH)
+ dump->len = BNXT_CRASH_DUMP_LEN;
+ else
+ bnxt_get_coredump(bp, NULL, &dump->len);
+ return 0;
}
static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump,
@@ -3336,7 +3359,16 @@ static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump,
memset(buf, 0, dump->len);
- return bnxt_get_coredump(bp, buf, &dump->len);
+ dump->flag = bp->dump_flag;
+ if (dump->flag == BNXT_DUMP_CRASH) {
+#ifdef CONFIG_TEE_BNXT_FW
+ return tee_bnxt_copy_coredump(buf, 0, dump->len);
+#endif
+ } else {
+ return bnxt_get_coredump(bp, buf, &dump->len);
+ }
+
+ return 0;
}
void bnxt_ethtool_init(struct bnxt *bp)
@@ -3446,6 +3478,7 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.set_phys_id = bnxt_set_phys_id,
.self_test = bnxt_self_test,
.reset = bnxt_reset,
+ .set_dump = bnxt_set_dump,
.get_dump_flag = bnxt_get_dump_flag,
.get_dump_data = bnxt_get_dump_data,
};
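With .set_dump wired up, the standard ethtool plumbing should select which
image .get_dump_data returns: "ethtool -W <dev> 1" arms the crash dump (only
meaningful when TEE_BNXT_FW is enabled), "ethtool -w <dev> data <file>"
retrieves it, and flag 0 restores the default live coredump.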
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index b5b65b3f8534..01de7e79d14f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -59,6 +59,8 @@ struct hwrm_dbg_cmn_output {
#define HWRM_DBG_CMN_FLAGS_MORE 1
};
+#define BNXT_CRASH_DUMP_LEN (8 << 20)
+
#define BNXT_LED_DFLT_ENA \
(PORT_LED_CFG_REQ_ENABLES_LED0_ID | \
PORT_LED_CFG_REQ_ENABLES_LED0_STATE | \
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index c8062d020d1e..174412a55e53 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -16,7 +16,9 @@
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
+#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_tunnel_key.h>
+#include <net/vxlan.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
@@ -36,6 +38,8 @@
#define is_vid_exactmatch(vlan_tci_mask) \
((ntohs(vlan_tci_mask) & VLAN_VID_MASK) == VLAN_VID_MASK)
+static bool is_wildcard(void *mask, int len);
+static bool is_exactmatch(void *mask, int len);
/* Return the dst fid of the func for flow forwarding
* For PFs: src_fid is the fid of the PF
* For VF-reps: src_fid the fid of the VF
@@ -111,10 +115,182 @@ static int bnxt_tc_parse_tunnel_set(struct bnxt *bp,
return 0;
}
+/* Key & Mask from the stack comes unaligned in multiple iterations of 4 bytes
+ * each(u32).
+ * This routine consolidates such multiple unaligned values into one
+ * field each for Key & Mask (for src and dst macs separately)
+ * For example,
+ * Mask/Key Offset Iteration
+ * ========== ====== =========
+ * dst mac 0xffffffff 0 1
+ * dst mac 0x0000ffff 4 2
+ *
+ * src mac 0xffff0000 4 1
+ * src mac 0xffffffff 8 2
+ *
+ * The above combination coming from the stack will be consolidated as
+ * Mask/Key
+ * ==============
+ * src mac: 0xffffffffffff
+ * dst mac: 0xffffffffffff
+ */
+static void bnxt_set_l2_key_mask(u32 part_key, u32 part_mask,
+ u8 *actual_key, u8 *actual_mask)
+{
+ u32 key = get_unaligned((u32 *)actual_key);
+ u32 mask = get_unaligned((u32 *)actual_mask);
+
+ part_key &= part_mask;
+ part_key |= key & ~part_mask;
+
+ put_unaligned(mask | part_mask, (u32 *)actual_mask);
+ put_unaligned(part_key, (u32 *)actual_key);
+}
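
A minimal user-space sketch of the folding done by bnxt_set_l2_key_mask(), with memcpy() standing in for the kernel's get_unaligned()/put_unaligned() helpers; the sample values and the little-endian byte order of the printout are assumptions, not part of the patch:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* fold one 4-byte pedit chunk into the accumulated key/mask bytes */
	static void fold_chunk(uint32_t part_key, uint32_t part_mask,
			       uint8_t *key_bytes, uint8_t *mask_bytes)
	{
		uint32_t key, mask;

		memcpy(&key, key_bytes, sizeof(key));	/* unaligned-safe load */
		memcpy(&mask, mask_bytes, sizeof(mask));

		part_key &= part_mask;			/* keep only masked-in bits */
		part_key |= key & ~part_mask;		/* preserve earlier chunks */

		mask |= part_mask;			/* widen the mask */
		memcpy(mask_bytes, &mask, sizeof(mask));
		memcpy(key_bytes, &part_key, sizeof(part_key));
	}

	int main(void)
	{
		uint8_t key[8] = { 0 }, mask[8] = { 0 };
		int i;

		/* dst mac arrives in two iterations: bytes 0-3, then bytes 4-5 */
		fold_chunk(0x44332211, 0xffffffff, &key[0], &mask[0]);
		fold_chunk(0x00006655, 0x0000ffff, &key[4], &mask[4]);

		for (i = 0; i < 6; i++)		/* prints 11:22:33:44:55:66 */
			printf("%02x%c", key[i], i == 5 ? '\n' : ':');
		return 0;
	}
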
+
+static int
+bnxt_fill_l2_rewrite_fields(struct bnxt_tc_actions *actions,
+ u16 *eth_addr, u16 *eth_addr_mask)
+{
+ u16 *p;
+ int j;
+
+ if (unlikely(bnxt_eth_addr_key_mask_invalid(eth_addr, eth_addr_mask)))
+ return -EINVAL;
+
+ if (!is_wildcard(&eth_addr_mask[0], ETH_ALEN)) {
+ if (!is_exactmatch(&eth_addr_mask[0], ETH_ALEN))
+ return -EINVAL;
+ /* FW expects dmac to be in u16 array format */
+ p = eth_addr;
+ for (j = 0; j < 3; j++)
+ actions->l2_rewrite_dmac[j] = cpu_to_be16(*(p + j));
+ }
+
+ if (!is_wildcard(&eth_addr_mask[ETH_ALEN], ETH_ALEN)) {
+ if (!is_exactmatch(&eth_addr_mask[ETH_ALEN], ETH_ALEN))
+ return -EINVAL;
+ /* FW expects smac to be in u16 array format */
+ p = &eth_addr[ETH_ALEN / 2];
+ for (j = 0; j < 3; j++)
+ actions->l2_rewrite_smac[j] = cpu_to_be16(*(p + j));
+ }
+
+ return 0;
+}
+
+static int
+bnxt_tc_parse_pedit(struct bnxt *bp, struct bnxt_tc_actions *actions,
+ struct flow_action_entry *act, int act_idx, u8 *eth_addr,
+ u8 *eth_addr_mask)
+{
+ size_t offset_of_ip6_daddr = offsetof(struct ipv6hdr, daddr);
+ size_t offset_of_ip6_saddr = offsetof(struct ipv6hdr, saddr);
+ u32 mask, val, offset, idx;
+ u8 htype;
+
+ offset = act->mangle.offset;
+ htype = act->mangle.htype;
+ mask = ~act->mangle.mask;
+ val = act->mangle.val;
+
+ switch (htype) {
+ case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
+ if (offset > PEDIT_OFFSET_SMAC_LAST_4_BYTES) {
+ netdev_err(bp->dev,
+ "%s: eth_hdr: Invalid pedit field\n",
+ __func__);
+ return -EINVAL;
+ }
+ actions->flags |= BNXT_TC_ACTION_FLAG_L2_REWRITE;
+
+ bnxt_set_l2_key_mask(val, mask, &eth_addr[offset],
+ &eth_addr_mask[offset]);
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
+ actions->flags |= BNXT_TC_ACTION_FLAG_NAT_XLATE;
+ actions->nat.l3_is_ipv4 = true;
+ if (offset == offsetof(struct iphdr, saddr)) {
+ actions->nat.src_xlate = true;
+ actions->nat.l3.ipv4.saddr.s_addr = htonl(val);
+ } else if (offset == offsetof(struct iphdr, daddr)) {
+ actions->nat.src_xlate = false;
+ actions->nat.l3.ipv4.daddr.s_addr = htonl(val);
+ } else {
+ netdev_err(bp->dev,
+ "%s: IPv4_hdr: Invalid pedit field\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ netdev_dbg(bp->dev, "nat.src_xlate = %d src IP: %pI4 dst ip : %pI4\n",
+ actions->nat.src_xlate, &actions->nat.l3.ipv4.saddr,
+ &actions->nat.l3.ipv4.daddr);
+ break;
+
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
+ actions->flags |= BNXT_TC_ACTION_FLAG_NAT_XLATE;
+ actions->nat.l3_is_ipv4 = false;
+ if (offset >= offsetof(struct ipv6hdr, saddr) &&
+ offset < offset_of_ip6_daddr) {
+ /* 16 byte IPv6 address comes in 4 iterations of
+ * 4byte chunks each
+ */
+ actions->nat.src_xlate = true;
+ idx = (offset - offset_of_ip6_saddr) / 4;
+ /* First 4bytes will be copied to idx 0 and so on */
+ actions->nat.l3.ipv6.saddr.s6_addr32[idx] = htonl(val);
+ } else if (offset >= offset_of_ip6_daddr &&
+ offset < offset_of_ip6_daddr + 16) {
+ actions->nat.src_xlate = false;
+ idx = (offset - offset_of_ip6_daddr) / 4;
+ actions->nat.l3.ipv6.saddr.s6_addr32[idx] = htonl(val);
+ } else {
+ netdev_err(bp->dev,
+ "%s: IPv6_hdr: Invalid pedit field\n",
+ __func__);
+ return -EINVAL;
+ }
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
+ case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
+ /* HW does not support L4 rewrite alone without L3
+ * rewrite
+ */
+ if (!(actions->flags & BNXT_TC_ACTION_FLAG_NAT_XLATE)) {
+ netdev_err(bp->dev,
+ "Need to specify L3 rewrite as well\n");
+ return -EINVAL;
+ }
+ if (actions->nat.src_xlate)
+ actions->nat.l4.ports.sport = htons(val);
+ else
+ actions->nat.l4.ports.dport = htons(val);
+ netdev_dbg(bp->dev, "actions->nat.sport = %d dport = %d\n",
+ actions->nat.l4.ports.sport,
+ actions->nat.l4.ports.dport);
+ break;
+ default:
+ netdev_err(bp->dev, "%s: Unsupported pedit hdr type\n",
+ __func__);
+ return -EINVAL;
+ }
+ return 0;
+}
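
A worked example of the offset-to-word arithmetic used in the IPv6 case above; struct ipv6hdr_like is a user-space stand-in that mirrors the relevant layout of Linux's struct ipv6hdr (saddr at byte 8, daddr at byte 24):

	#include <stddef.h>
	#include <stdio.h>

	/* stand-in with the same saddr/daddr offsets as struct ipv6hdr */
	struct ipv6hdr_like {
		unsigned char fixed[8];	/* version/class/label, len, nexthdr, hops */
		unsigned char saddr[16];
		unsigned char daddr[16];
	};

	int main(void)
	{
		size_t off_daddr = offsetof(struct ipv6hdr_like, daddr); /* 24 */
		unsigned int offset = 28;	/* pedit chunk 4 bytes into daddr */

		/* same arithmetic as the driver: offset 28 maps to s6_addr32[1] */
		printf("idx = %zu\n", (offset - off_daddr) / 4);
		return 0;
	}
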
+
static int bnxt_tc_parse_actions(struct bnxt *bp,
struct bnxt_tc_actions *actions,
struct flow_action *flow_action)
{
+ /* Used to store the L2 rewrite mask for dmac (6 bytes) followed by
+ * smac (6 bytes) if rewrite of both is specified, otherwise either
+ * dmac or smac
+ */
+ u16 eth_addr_mask[ETH_ALEN] = { 0 };
+ /* Used to store the L2 rewrite key for dmac (6 bytes) followed by
+ * smac (6 bytes) if rewrite of both is specified, otherwise either
+ * dmac or smac
+ */
+ u16 eth_addr[ETH_ALEN] = { 0 };
struct flow_action_entry *act;
int i, rc;
@@ -148,11 +324,26 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
case FLOW_ACTION_TUNNEL_DECAP:
actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_DECAP;
break;
+ /* Packet edit: L2 rewrite, NAT, NAPT */
+ case FLOW_ACTION_MANGLE:
+ rc = bnxt_tc_parse_pedit(bp, actions, act, i,
+ (u8 *)eth_addr,
+ (u8 *)eth_addr_mask);
+ if (rc)
+ return rc;
+ break;
default:
break;
}
}
+ if (actions->flags & BNXT_TC_ACTION_FLAG_L2_REWRITE) {
+ rc = bnxt_fill_l2_rewrite_fields(actions, eth_addr,
+ eth_addr_mask);
+ if (rc)
+ return rc;
+ }
+
if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
/* dst_fid is PF's fid */
@@ -401,6 +592,76 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
req.src_fid = cpu_to_le16(flow->src_fid);
req.ref_flow_handle = ref_flow_handle;
+ if (actions->flags & BNXT_TC_ACTION_FLAG_L2_REWRITE) {
+ memcpy(req.l2_rewrite_dmac, actions->l2_rewrite_dmac,
+ ETH_ALEN);
+ memcpy(req.l2_rewrite_smac, actions->l2_rewrite_smac,
+ ETH_ALEN);
+ action_flags |=
+ CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
+ }
+
+ if (actions->flags & BNXT_TC_ACTION_FLAG_NAT_XLATE) {
+ if (actions->nat.l3_is_ipv4) {
+ action_flags |=
+ CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS;
+
+ if (actions->nat.src_xlate) {
+ action_flags |=
+ CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC;
+ /* L3 source rewrite */
+ req.nat_ip_address[0] =
+ actions->nat.l3.ipv4.saddr.s_addr;
+ /* L4 source port */
+ if (actions->nat.l4.ports.sport)
+ req.nat_port =
+ actions->nat.l4.ports.sport;
+ } else {
+ action_flags |=
+ CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST;
+ /* L3 destination rewrite */
+ req.nat_ip_address[0] =
+ actions->nat.l3.ipv4.daddr.s_addr;
+ /* L4 destination port */
+ if (actions->nat.l4.ports.dport)
+ req.nat_port =
+ actions->nat.l4.ports.dport;
+ }
+ netdev_dbg(bp->dev,
+ "req.nat_ip_address: %pI4 src_xlate: %d req.nat_port: %x\n",
+ req.nat_ip_address, actions->nat.src_xlate,
+ req.nat_port);
+ } else {
+ if (actions->nat.src_xlate) {
+ action_flags |=
+ CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC;
+ /* L3 source rewrite */
+ memcpy(req.nat_ip_address,
+ actions->nat.l3.ipv6.saddr.s6_addr32,
+ sizeof(req.nat_ip_address));
+ /* L4 source port */
+ if (actions->nat.l4.ports.sport)
+ req.nat_port =
+ actions->nat.l4.ports.sport;
+ } else {
+ action_flags |=
+ CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST;
+ /* L3 destination rewrite */
+ memcpy(req.nat_ip_address,
+ actions->nat.l3.ipv6.daddr.s6_addr32,
+ sizeof(req.nat_ip_address));
+ /* L4 destination port */
+ if (actions->nat.l4.ports.dport)
+ req.nat_port =
+ actions->nat.l4.ports.dport;
+ }
+ netdev_dbg(bp->dev,
+ "req.nat_ip_address: %pI6 src_xlate: %d req.nat_port: %x\n",
+ req.nat_ip_address, actions->nat.src_xlate,
+ req.nat_port);
+ }
+ }
+
if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP ||
actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
req.tunnel_handle = tunnel_handle;
@@ -1274,7 +1535,8 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
if (!bnxt_tc_can_offload(bp, flow)) {
rc = -EOPNOTSUPP;
- goto free_node;
+ kfree_rcu(new_node, rcu);
+ return rc;
}
/* If a flow exists with the same cookie, delete it */
@@ -1580,6 +1842,147 @@ int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
}
}
+static int bnxt_tc_setup_indr_block_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ struct bnxt_flower_indr_block_cb_priv *priv = cb_priv;
+ struct flow_cls_offload *flower = type_data;
+ struct bnxt *bp = priv->bp;
+
+ if (flower->common.chain_index)
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, flower);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static struct bnxt_flower_indr_block_cb_priv *
+bnxt_tc_indr_block_cb_lookup(struct bnxt *bp, struct net_device *netdev)
+{
+ struct bnxt_flower_indr_block_cb_priv *cb_priv;
+
+ /* All callback list access should be protected by RTNL. */
+ ASSERT_RTNL();
+
+ list_for_each_entry(cb_priv, &bp->tc_indr_block_list, list)
+ if (cb_priv->tunnel_netdev == netdev)
+ return cb_priv;
+
+ return NULL;
+}
+
+static void bnxt_tc_setup_indr_rel(void *cb_priv)
+{
+ struct bnxt_flower_indr_block_cb_priv *priv = cb_priv;
+
+ list_del(&priv->list);
+ kfree(priv);
+}
+
+static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct bnxt *bp,
+ struct flow_block_offload *f)
+{
+ struct bnxt_flower_indr_block_cb_priv *cb_priv;
+ struct flow_block_cb *block_cb;
+
+ if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+ case FLOW_BLOCK_BIND:
+ cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL);
+ if (!cb_priv)
+ return -ENOMEM;
+
+ cb_priv->tunnel_netdev = netdev;
+ cb_priv->bp = bp;
+ list_add(&cb_priv->list, &bp->tc_indr_block_list);
+
+ block_cb = flow_block_cb_alloc(bnxt_tc_setup_indr_block_cb,
+ cb_priv, cb_priv,
+ bnxt_tc_setup_indr_rel);
+ if (IS_ERR(block_cb)) {
+ list_del(&cb_priv->list);
+ kfree(cb_priv);
+ return PTR_ERR(block_cb);
+ }
+
+ flow_block_cb_add(block_cb, f);
+ list_add_tail(&block_cb->driver_list, &bnxt_block_cb_list);
+ break;
+ case FLOW_BLOCK_UNBIND:
+ cb_priv = bnxt_tc_indr_block_cb_lookup(bp, netdev);
+ if (!cb_priv)
+ return -ENOENT;
+
+ block_cb = flow_block_cb_lookup(f->block,
+ bnxt_tc_setup_indr_block_cb,
+ cb_priv);
+ if (!block_cb)
+ return -ENOENT;
+
+ flow_block_cb_remove(block_cb, f);
+ list_del(&block_cb->driver_list);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static int bnxt_tc_setup_indr_cb(struct net_device *netdev, void *cb_priv,
+ enum tc_setup_type type, void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return bnxt_tc_setup_indr_block(netdev, cb_priv, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static bool bnxt_is_netdev_indr_offload(struct net_device *netdev)
+{
+ return netif_is_vxlan(netdev);
+}
+
+static int bnxt_tc_indr_block_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *netdev;
+ struct bnxt *bp;
+ int rc;
+
+ netdev = netdev_notifier_info_to_dev(ptr);
+ if (!bnxt_is_netdev_indr_offload(netdev))
+ return NOTIFY_OK;
+
+ bp = container_of(nb, struct bnxt, tc_netdev_nb);
+
+ switch (event) {
+ case NETDEV_REGISTER:
+ rc = __flow_indr_block_cb_register(netdev, bp,
+ bnxt_tc_setup_indr_cb,
+ bp);
+ if (rc)
+ netdev_info(bp->dev,
+ "Failed to register indirect blk: dev: %s",
+ netdev->name);
+ break;
+ case NETDEV_UNREGISTER:
+ __flow_indr_block_cb_unregister(netdev,
+ bnxt_tc_setup_indr_cb,
+ bp);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
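
The notifier machinery above is what lets the PF offload flower rules installed on tunnel netdevs it does not own: when a VXLAN device registers, an indirect block callback is attached so that a decap rule of the general shape "tc filter add dev vxlan0 ingress ... flower ... action tunnel_key unset ..." can still reach bnxt_tc_setup_flower(); the tc invocation is illustrative only, not taken from this patch.
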
+
static const struct rhashtable_params bnxt_tc_flow_ht_params = {
.head_offset = offsetof(struct bnxt_tc_flow_node, node),
.key_offset = offsetof(struct bnxt_tc_flow_node, cookie),
@@ -1663,7 +2066,15 @@ int bnxt_init_tc(struct bnxt *bp)
bp->dev->hw_features |= NETIF_F_HW_TC;
bp->dev->features |= NETIF_F_HW_TC;
bp->tc_info = tc_info;
- return 0;
+
+ /* init indirect block notifications */
+ INIT_LIST_HEAD(&bp->tc_indr_block_list);
+ bp->tc_netdev_nb.notifier_call = bnxt_tc_indr_block_event;
+ rc = register_netdevice_notifier(&bp->tc_netdev_nb);
+ if (!rc)
+ return 0;
+
+ rhashtable_destroy(&tc_info->encap_table);
destroy_decap_table:
rhashtable_destroy(&tc_info->decap_table);
@@ -1685,6 +2096,7 @@ void bnxt_shutdown_tc(struct bnxt *bp)
if (!bnxt_tc_flower_enabled(bp))
return;
+ unregister_netdevice_notifier(&bp->tc_netdev_nb);
rhashtable_destroy(&tc_info->flow_table);
rhashtable_destroy(&tc_info->l2_table);
rhashtable_destroy(&tc_info->decap_l2_table);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
index 4f05305052f2..286754903543 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
@@ -62,6 +62,12 @@ struct bnxt_tc_tunnel_key {
__be32 id;
};
+#define bnxt_eth_addr_key_mask_invalid(eth_addr, eth_addr_mask) \
+ ((is_wildcard(&(eth_addr)[0], ETH_ALEN) && \
+ is_wildcard(&(eth_addr)[ETH_ALEN / 2], ETH_ALEN)) || \
+ (is_wildcard(&(eth_addr_mask)[0], ETH_ALEN) && \
+ is_wildcard(&(eth_addr_mask)[ETH_ALEN / 2], ETH_ALEN)))
+
struct bnxt_tc_actions {
u32 flags;
#define BNXT_TC_ACTION_FLAG_FWD BIT(0)
@@ -71,6 +77,8 @@ struct bnxt_tc_actions {
#define BNXT_TC_ACTION_FLAG_DROP BIT(5)
#define BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP BIT(6)
#define BNXT_TC_ACTION_FLAG_TUNNEL_DECAP BIT(7)
+#define BNXT_TC_ACTION_FLAG_L2_REWRITE BIT(8)
+#define BNXT_TC_ACTION_FLAG_NAT_XLATE BIT(9)
u16 dst_fid;
struct net_device *dst_dev;
@@ -79,6 +87,18 @@ struct bnxt_tc_actions {
/* tunnel encap */
struct ip_tunnel_key tun_encap_key;
+#define PEDIT_OFFSET_SMAC_LAST_4_BYTES 0x8
+ __be16 l2_rewrite_dmac[3];
+ __be16 l2_rewrite_smac[3];
+ struct {
+ bool src_xlate; /* true => translate src,
+ * false => translate dst
+ * Mutually exclusive, i.e. cannot set both
+ */
+ bool l3_is_ipv4; /* false means L3 is ipv6 */
+ struct bnxt_tc_l3_key l3;
+ struct bnxt_tc_l4_key l4;
+ } nat;
};
struct bnxt_tc_flow {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index b2c160947fc8..077fd101be60 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -182,7 +182,7 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
edev->ulp_tbl[ulp_id].msix_requested = 0;
edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
- if (netif_running(dev)) {
+ if (netif_running(dev) && !(edev->flags & BNXT_EN_FLAG_ULP_STOPPED)) {
bnxt_close_nic(bp, true, false);
bnxt_open_nic(bp, true, false);
}
@@ -266,6 +266,7 @@ void bnxt_ulp_stop(struct bnxt *bp)
if (!edev)
return;
+ edev->flags |= BNXT_EN_FLAG_ULP_STOPPED;
for (i = 0; i < BNXT_MAX_ULP; i++) {
struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
@@ -276,7 +277,7 @@ void bnxt_ulp_stop(struct bnxt *bp)
}
}
-void bnxt_ulp_start(struct bnxt *bp)
+void bnxt_ulp_start(struct bnxt *bp, int err)
{
struct bnxt_en_dev *edev = bp->edev;
struct bnxt_ulp_ops *ops;
@@ -285,6 +286,11 @@ void bnxt_ulp_start(struct bnxt *bp)
if (!edev)
return;
+ edev->flags &= ~BNXT_EN_FLAG_ULP_STOPPED;
+
+ if (err)
+ return;
+
for (i = 0; i < BNXT_MAX_ULP; i++) {
struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index cd78453d0bf0..9895406b9830 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -64,6 +64,7 @@ struct bnxt_en_dev {
#define BNXT_EN_FLAG_ROCE_CAP (BNXT_EN_FLAG_ROCEV1_CAP | \
BNXT_EN_FLAG_ROCEV2_CAP)
#define BNXT_EN_FLAG_MSIX_REQUESTED 0x4
+ #define BNXT_EN_FLAG_ULP_STOPPED 0x8
const struct bnxt_en_ops *en_ops;
struct bnxt_ulp ulp_tbl[BNXT_MAX_ULP];
};
@@ -92,7 +93,7 @@ int bnxt_get_ulp_msix_num(struct bnxt *bp);
int bnxt_get_ulp_msix_base(struct bnxt *bp);
int bnxt_get_ulp_stat_ctxs(struct bnxt *bp);
void bnxt_ulp_stop(struct bnxt *bp);
-void bnxt_ulp_start(struct bnxt *bp);
+void bnxt_ulp_start(struct bnxt *bp, int err);
void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs);
void bnxt_ulp_shutdown(struct bnxt *bp);
void bnxt_ulp_irq_stop(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 155599dcee76..61ab7d21f6bd 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -5208,6 +5208,8 @@ static void cnic_init_rings(struct cnic_dev *dev)
cnic_init_bnx2x_tx_ring(dev, data);
cnic_init_bnx2x_rx_ring(dev, data);
+ data->general.fp_hsi_ver = ETH_FP_HSI_VERSION;
+
l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff;
l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 12cb77ef1081..4f689fb7a61c 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2018,6 +2018,8 @@ static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
*/
if (priv->internal_phy) {
int0_enable |= UMAC_IRQ_LINK_EVENT;
+ if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv))
+ int0_enable |= UMAC_IRQ_PHY_DET_R;
} else if (priv->ext_phy) {
int0_enable |= UMAC_IRQ_LINK_EVENT;
} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
@@ -2611,11 +2613,14 @@ static void bcmgenet_irq_task(struct work_struct *work)
priv->irq0_stat = 0;
spin_unlock_irq(&priv->lock);
+ if (status & UMAC_IRQ_PHY_DET_R &&
+ priv->dev->phydev->autoneg != AUTONEG_ENABLE)
+ phy_init_hw(priv->dev->phydev);
+
/* Link UP/DOWN event */
- if (status & UMAC_IRQ_LINK_EVENT) {
- priv->dev->phydev->link = !!(status & UMAC_IRQ_LINK_UP);
+ if (status & UMAC_IRQ_LINK_EVENT)
phy_mac_interrupt(priv->dev->phydev);
- }
+
}
/* bcmgenet_isr1: handle Rx and Tx priority queues */
@@ -2710,7 +2715,7 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
}
/* all other interested interrupts handled in bottom half */
- status &= UMAC_IRQ_LINK_EVENT;
+ status &= (UMAC_IRQ_LINK_EVENT | UMAC_IRQ_PHY_DET_R);
if (status) {
/* Save irq status for bottom-half processing. */
spin_lock_irqsave(&priv->lock, flags);
@@ -2874,6 +2879,12 @@ static int bcmgenet_open(struct net_device *dev)
if (priv->internal_phy)
bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
+ ret = bcmgenet_mii_connect(dev);
+ if (ret) {
+ netdev_err(dev, "failed to connect to PHY\n");
+ goto err_clk_disable;
+ }
+
/* take MAC out of reset */
bcmgenet_umac_reset(priv);
@@ -2883,6 +2894,12 @@ static int bcmgenet_open(struct net_device *dev)
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);
+ ret = bcmgenet_mii_config(dev, true);
+ if (ret) {
+ netdev_err(dev, "unsupported PHY\n");
+ goto err_disconnect_phy;
+ }
+
bcmgenet_set_hw_addr(priv, dev->dev_addr);
if (priv->internal_phy) {
@@ -2898,7 +2915,7 @@ static int bcmgenet_open(struct net_device *dev)
ret = bcmgenet_init_dma(priv);
if (ret) {
netdev_err(dev, "failed to initialize DMA\n");
- goto err_clk_disable;
+ goto err_disconnect_phy;
}
/* Always enable ring 16 - descriptor ring */
@@ -2921,25 +2938,19 @@ static int bcmgenet_open(struct net_device *dev)
goto err_irq0;
}
- ret = bcmgenet_mii_probe(dev);
- if (ret) {
- netdev_err(dev, "failed to connect to PHY\n");
- goto err_irq1;
- }
-
bcmgenet_netif_start(dev);
netif_tx_start_all_queues(dev);
return 0;
-err_irq1:
- free_irq(priv->irq1, priv);
err_irq0:
free_irq(priv->irq0, priv);
err_fini_dma:
bcmgenet_dma_teardown(priv);
bcmgenet_fini_dma(priv);
+err_disconnect_phy:
+ phy_disconnect(dev->phydev);
err_clk_disable:
if (priv->internal_phy)
bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
@@ -3461,16 +3472,10 @@ static int bcmgenet_probe(struct platform_device *pdev)
goto err;
}
- if (dn) {
+ if (dn)
macaddr = of_get_mac_address(dn);
- if (IS_ERR(macaddr)) {
- dev_err(&pdev->dev, "can't find MAC address\n");
- err = -EINVAL;
- goto err;
- }
- } else {
+ else
macaddr = pd->mac_address;
- }
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base)) {
@@ -3482,7 +3487,12 @@ static int bcmgenet_probe(struct platform_device *pdev)
SET_NETDEV_DEV(dev, &pdev->dev);
dev_set_drvdata(&pdev->dev, dev);
- ether_addr_copy(dev->dev_addr, macaddr);
+ if (IS_ERR_OR_NULL(macaddr) || !is_valid_ether_addr(macaddr)) {
+ dev_warn(&pdev->dev, "using random Ethernet MAC\n");
+ eth_hw_addr_random(dev);
+ } else {
+ ether_addr_copy(dev->dev_addr, macaddr);
+ }
dev->watchdog_timeo = 2 * HZ;
dev->ethtool_ops = &bcmgenet_ethtool_ops;
dev->netdev_ops = &bcmgenet_netdev_ops;
@@ -3597,6 +3607,11 @@ static int bcmgenet_remove(struct platform_device *pdev)
return 0;
}
+static void bcmgenet_shutdown(struct platform_device *pdev)
+{
+ bcmgenet_remove(pdev);
+}
+
#ifdef CONFIG_PM_SLEEP
static int bcmgenet_resume(struct device *d)
{
@@ -3620,6 +3635,8 @@ static int bcmgenet_resume(struct device *d)
if (priv->internal_phy)
bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
+ phy_init_hw(dev->phydev);
+
bcmgenet_umac_reset(priv);
init_umac(priv);
@@ -3628,8 +3645,6 @@ static int bcmgenet_resume(struct device *d)
if (priv->wolopts)
clk_disable_unprepare(priv->clk_wol);
- phy_init_hw(dev->phydev);
-
/* Speed settings must be restored */
bcmgenet_mii_config(priv->dev, false);
@@ -3715,6 +3730,7 @@ static SIMPLE_DEV_PM_OPS(bcmgenet_pm_ops, bcmgenet_suspend, bcmgenet_resume);
static struct platform_driver bcmgenet_driver = {
.probe = bcmgenet_probe,
.remove = bcmgenet_remove,
+ .shutdown = bcmgenet_shutdown,
.driver = {
.name = "bcmgenet",
.of_match_table = bcmgenet_match,
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 4a8fc03d82fd..7fbf573d8d52 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -366,6 +366,7 @@ struct bcmgenet_mib_counters {
#define EXT_PWR_DOWN_PHY_EN (1 << 20)
#define EXT_RGMII_OOB_CTRL 0x0C
+#define RGMII_MODE_EN_V123 (1 << 0)
#define RGMII_LINK (1 << 4)
#define OOB_DISABLE (1 << 5)
#define RGMII_MODE_EN (1 << 6)
@@ -719,8 +720,8 @@ GENET_IO_MACRO(rbuf, GENET_RBUF_OFF);
/* MDIO routines */
int bcmgenet_mii_init(struct net_device *dev);
+int bcmgenet_mii_connect(struct net_device *dev);
int bcmgenet_mii_config(struct net_device *dev, bool init);
-int bcmgenet_mii_probe(struct net_device *dev);
void bcmgenet_mii_exit(struct net_device *dev);
void bcmgenet_phy_power_set(struct net_device *dev, bool enable);
void bcmgenet_mii_setup(struct net_device *dev);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 970e478a9017..b797a7e59a53 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -173,6 +173,46 @@ static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
bcmgenet_fixed_phy_link_update);
}
+int bcmgenet_mii_connect(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct device_node *dn = priv->pdev->dev.of_node;
+ struct phy_device *phydev;
+ u32 phy_flags = 0;
+ int ret;
+
+ /* Communicate the integrated PHY revision */
+ if (priv->internal_phy)
+ phy_flags = priv->gphy_rev;
+
+ /* Initialize link state variables that bcmgenet_mii_setup() uses */
+ priv->old_link = -1;
+ priv->old_speed = -1;
+ priv->old_duplex = -1;
+ priv->old_pause = -1;
+
+ if (dn) {
+ phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup,
+ phy_flags, priv->phy_interface);
+ if (!phydev) {
+ pr_err("could not attach to PHY\n");
+ return -ENODEV;
+ }
+ } else {
+ phydev = dev->phydev;
+ phydev->dev_flags = phy_flags;
+
+ ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup,
+ priv->phy_interface);
+ if (ret) {
+ pr_err("could not attach to PHY\n");
+ return -ENODEV;
+ }
+ }
+
+ return 0;
+}
+
int bcmgenet_mii_config(struct net_device *dev, bool init)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
@@ -258,74 +298,29 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
*/
if (priv->ext_phy) {
reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
- reg |= RGMII_MODE_EN | id_mode_dis;
+ reg |= id_mode_dis;
+ if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv))
+ reg |= RGMII_MODE_EN_V123;
+ else
+ reg |= RGMII_MODE_EN;
bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
}
- if (init)
- dev_info(kdev, "configuring instance for %s\n", phy_name);
+ if (init) {
+ linkmode_copy(phydev->advertising, phydev->supported);
- return 0;
-}
-
-int bcmgenet_mii_probe(struct net_device *dev)
-{
- struct bcmgenet_priv *priv = netdev_priv(dev);
- struct device_node *dn = priv->pdev->dev.of_node;
- struct phy_device *phydev;
- u32 phy_flags;
- int ret;
-
- /* Communicate the integrated PHY revision */
- phy_flags = priv->gphy_rev;
-
- /* Initialize link state variables that bcmgenet_mii_setup() uses */
- priv->old_link = -1;
- priv->old_speed = -1;
- priv->old_duplex = -1;
- priv->old_pause = -1;
-
- if (dn) {
- phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup,
- phy_flags, priv->phy_interface);
- if (!phydev) {
- pr_err("could not attach to PHY\n");
- return -ENODEV;
- }
- } else {
- phydev = dev->phydev;
- phydev->dev_flags = phy_flags;
-
- ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup,
- priv->phy_interface);
- if (ret) {
- pr_err("could not attach to PHY\n");
- return -ENODEV;
- }
- }
+ /* The internal PHY has its link interrupts routed to the
+ * Ethernet MAC ISRs. On GENETv5 there is a hardware issue
+ * that prevents the signaling of link UP interrupts when
+ * the link operates at 10Mbps, so fall back to polling for
+ * those versions of GENET.
+ */
+ if (priv->internal_phy && !GENET_IS_V5(priv))
+ phydev->irq = PHY_IGNORE_INTERRUPT;
- /* Configure port multiplexer based on what the probed PHY device since
- * reading the 'max-speed' property determines the maximum supported
- * PHY speed which is needed for bcmgenet_mii_config() to configure
- * things appropriately.
- */
- ret = bcmgenet_mii_config(dev, true);
- if (ret) {
- phy_disconnect(dev->phydev);
- return ret;
+ dev_info(kdev, "configuring instance for %s\n", phy_name);
}
- linkmode_copy(phydev->advertising, phydev->supported);
-
- /* The internal PHY has its link interrupts routed to the
- * Ethernet MAC ISRs. On GENETv5 there is a hardware issue
- * that prevents the signaling of link UP interrupts when
- * the link operates at 10Mbps, so fallback to polling for
- * those versions of GENET.
- */
- if (priv->internal_phy && !GENET_IS_V5(priv))
- dev->phydev->irq = PHY_IGNORE_INTERRUPT;
-
return 0;
}
@@ -441,7 +436,7 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
struct device_node *dn = priv->pdev->dev.of_node;
struct device *kdev = &priv->pdev->dev;
struct phy_device *phydev;
- int phy_mode;
+ phy_interface_t phy_mode;
int ret;
/* Fetch the PHY phandle */
@@ -459,10 +454,10 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
}
/* Get the link mode */
- phy_mode = of_get_phy_mode(dn);
- if (phy_mode < 0) {
+ ret = of_get_phy_mode(dn, &phy_mode);
+ if (ret) {
dev_err(kdev, "invalid PHY mode property\n");
- return phy_mode;
+ return ret;
}
priv->phy_interface = phy_mode;
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 8e8d557901a9..b884cf7f339b 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -3405,17 +3405,17 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
return err;
}
- *tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
+ *tx_clk = devm_clk_get_optional(&pdev->dev, "tx_clk");
if (IS_ERR(*tx_clk))
- *tx_clk = NULL;
+ return PTR_ERR(*tx_clk);
- *rx_clk = devm_clk_get(&pdev->dev, "rx_clk");
+ *rx_clk = devm_clk_get_optional(&pdev->dev, "rx_clk");
if (IS_ERR(*rx_clk))
- *rx_clk = NULL;
+ return PTR_ERR(*rx_clk);
- *tsu_clk = devm_clk_get(&pdev->dev, "tsu_clk");
+ *tsu_clk = devm_clk_get_optional(&pdev->dev, "tsu_clk");
if (IS_ERR(*tsu_clk))
- *tsu_clk = NULL;
+ return PTR_ERR(*tsu_clk);
err = clk_prepare_enable(*pclk);
if (err) {
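
The conversion above leans on the devm_clk_get_optional() contract: a clock that is simply not described yields NULL rather than an error pointer, so only genuine failures propagate. A sketch of the pattern, assuming kernel context:

	*tx_clk = devm_clk_get_optional(&pdev->dev, "tx_clk");
	if (IS_ERR(*tx_clk))
		return PTR_ERR(*tx_clk);	/* real error, incl. -EPROBE_DEFER */

	/* NULL is a valid "no clock" handle: clk_prepare_enable(NULL)
	 * is a no-op that returns 0, so absent clocks need no special casing
	 */
	return clk_prepare_enable(*tx_clk);
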
@@ -4182,6 +4182,7 @@ static int macb_probe(struct platform_device *pdev)
unsigned int queue_mask, num_queues;
bool native_io;
struct phy_device *phydev;
+ phy_interface_t interface;
struct net_device *dev;
struct resource *regs;
void __iomem *mem;
@@ -4308,12 +4309,12 @@ static int macb_probe(struct platform_device *pdev)
macb_get_hwaddr(bp);
}
- err = of_get_phy_mode(np);
- if (err < 0)
+ err = of_get_phy_mode(np, &interface);
+ if (err)
/* not found in DT, MII by default */
bp->phy_interface = PHY_INTERFACE_MODE_MII;
else
- bp->phy_interface = err;
+ bp->phy_interface = interface;
/* IP specific init */
err = init(pdev);
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index f96a42af1014..af04a2c81adb 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -1914,10 +1914,10 @@ static struct platform_driver xgmac_driver = {
.driver = {
.name = "calxedaxgmac",
.of_match_table = xgmac_of_match,
+ .pm = &xgmac_pm_ops,
},
.probe = xgmac_probe,
.remove = xgmac_remove,
- .driver.pm = &xgmac_pm_ops,
};
module_platform_driver(xgmac_driver);
diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.h b/drivers/net/ethernet/cavium/common/cavium_ptp.h
index be2bafc7beeb..a04eccbc78e8 100644
--- a/drivers/net/ethernet/cavium/common/cavium_ptp.h
+++ b/drivers/net/ethernet/cavium/common/cavium_ptp.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/* cavium_ptp.h - PTP 1588 clock on Cavium hardware
* Copyright (c) 2003-2015, 2017 Cavium, Inc.
*/
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index acb016834f04..1e09fdb63c4f 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1007,14 +1007,14 @@ static void bgx_poll_for_link(struct work_struct *work)
if ((spu_link & SPU_STATUS1_RCV_LNK) &&
!(smu_link & SMU_RX_CTL_STATUS)) {
- lmac->link_up = 1;
+ lmac->link_up = true;
if (lmac->lmac_type == BGX_MODE_XLAUI)
lmac->last_speed = SPEED_40000;
else
lmac->last_speed = SPEED_10000;
lmac->last_duplex = DUPLEX_FULL;
} else {
- lmac->link_up = 0;
+ lmac->link_up = false;
lmac->last_speed = SPEED_UNKNOWN;
lmac->last_duplex = DUPLEX_UNKNOWN;
}
@@ -1023,7 +1023,7 @@ static void bgx_poll_for_link(struct work_struct *work)
if (lmac->link_up) {
if (bgx_xaui_check_link(lmac)) {
/* Errors, clear link_up state */
- lmac->link_up = 0;
+ lmac->link_up = false;
lmac->last_speed = SPEED_UNKNOWN;
lmac->last_duplex = DUPLEX_UNKNOWN;
}
@@ -1055,11 +1055,11 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
if ((lmac->lmac_type == BGX_MODE_SGMII) ||
(lmac->lmac_type == BGX_MODE_QSGMII) ||
(lmac->lmac_type == BGX_MODE_RGMII)) {
- lmac->is_sgmii = 1;
+ lmac->is_sgmii = true;
if (bgx_lmac_sgmii_init(bgx, lmac))
return -1;
} else {
- lmac->is_sgmii = 0;
+ lmac->is_sgmii = false;
if (bgx_lmac_xaui_init(bgx, lmac))
return -1;
}
@@ -1304,7 +1304,7 @@ static void lmac_set_training(struct bgx *bgx, struct lmac *lmac, int lmacid)
{
if ((lmac->lmac_type != BGX_MODE_10G_KR) &&
(lmac->lmac_type != BGX_MODE_40G_KR)) {
- lmac->use_training = 0;
+ lmac->use_training = false;
return;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 38024877751c..33a923cfd82e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -184,6 +184,8 @@ static struct dentry *cxgb4_debugfs_root;
LIST_HEAD(adapter_list);
DEFINE_MUTEX(uld_mutex);
+static int cfg_queues(struct adapter *adap);
+
static void link_report(struct net_device *dev)
{
if (!netif_carrier_ok(dev))
@@ -4286,14 +4288,14 @@ static struct fw_info *find_fw_info(int chip)
/*
* Phase 0 of initialization: contact FW, obtain config, perform basic init.
*/
-static int adap_init0(struct adapter *adap)
+static int adap_init0(struct adapter *adap, int vpd_skip)
{
- int ret;
- u32 v, port_vec;
- enum dev_state state;
- u32 params[7], val[7];
struct fw_caps_config_cmd caps_cmd;
+ u32 params[7], val[7];
+ enum dev_state state;
+ u32 v, port_vec;
int reset = 1;
+ int ret;
/* Grab Firmware Device Log parameters as early as possible so we have
* access to it for debugging, etc.
@@ -4448,9 +4450,11 @@ static int adap_init0(struct adapter *adap)
* could have FLASHed a new VPD which won't be read by the firmware
* until we do the RESET ...
*/
- ret = t4_get_vpd_params(adap, &adap->params.vpd);
- if (ret < 0)
- goto bye;
+ if (!vpd_skip) {
+ ret = t4_get_vpd_params(adap, &adap->params.vpd);
+ if (ret < 0)
+ goto bye;
+ }
/* Find out what ports are available to us. Note that we need to do
* this before calling adap_init0_no_config() since it needs nports
@@ -5050,10 +5054,93 @@ static void eeh_resume(struct pci_dev *pdev)
rtnl_unlock();
}
+static void eeh_reset_prepare(struct pci_dev *pdev)
+{
+ struct adapter *adapter = pci_get_drvdata(pdev);
+ int i;
+
+ if (adapter->pf != 4)
+ return;
+
+ adapter->flags &= ~CXGB4_FW_OK;
+
+ notify_ulds(adapter, CXGB4_STATE_DOWN);
+
+ for_each_port(adapter, i)
+ if (adapter->port[i]->reg_state == NETREG_REGISTERED)
+ cxgb_close(adapter->port[i]);
+
+ disable_interrupts(adapter);
+ cxgb4_free_mps_ref_entries(adapter);
+
+ adap_free_hma_mem(adapter);
+
+ if (adapter->flags & CXGB4_FULL_INIT_DONE)
+ cxgb_down(adapter);
+}
+
+static void eeh_reset_done(struct pci_dev *pdev)
+{
+ struct adapter *adapter = pci_get_drvdata(pdev);
+ int err, i;
+
+ if (adapter->pf != 4)
+ return;
+
+ err = t4_wait_dev_ready(adapter->regs);
+ if (err < 0) {
+ dev_err(adapter->pdev_dev,
+ "Device not ready, err %d", err);
+ return;
+ }
+
+ setup_memwin(adapter);
+
+ err = adap_init0(adapter, 1);
+ if (err) {
+ dev_err(adapter->pdev_dev,
+ "Adapter init failed, err %d", err);
+ return;
+ }
+
+ setup_memwin_rdma(adapter);
+
+ if (adapter->flags & CXGB4_FW_OK) {
+ err = t4_port_init(adapter, adapter->pf, adapter->pf, 0);
+ if (err) {
+ dev_err(adapter->pdev_dev,
+ "Port init failed, err %d", err);
+ return;
+ }
+ }
+
+ err = cfg_queues(adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev,
+ "Config queues failed, err %d", err);
+ return;
+ }
+
+ cxgb4_init_mps_ref_entries(adapter);
+
+ err = setup_fw_sge_queues(adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev,
+ "FW sge queue allocation failed, err %d", err);
+ return;
+ }
+
+ for_each_port(adapter, i)
+ if (adapter->port[i]->reg_state == NETREG_REGISTERED)
+ cxgb_open(adapter->port[i]);
+}
+
static const struct pci_error_handlers cxgb4_eeh = {
.error_detected = eeh_err_detected,
.slot_reset = eeh_slot_reset,
.resume = eeh_resume,
+ .reset_prepare = eeh_reset_prepare,
+ .reset_done = eeh_reset_done,
};
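
The reset_prepare()/reset_done() pair is invoked by the PCI core around a function-level reset; on the way back up the driver reinitializes with adap_init0(adapter, 1), i.e. with the VPD re-read skipped via the new vpd_skip argument, since the VPD contents do not change across an FLR.
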
/* Return true if the Link Configuration supports "High Speeds" (those greater
@@ -5837,7 +5924,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
setup_memwin(adapter);
- err = adap_init0(adapter);
+ err = adap_init0(adapter, 0);
#ifdef CONFIG_DEBUG_FS
bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz);
#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 5b602243d573..86b528d8364c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -137,13 +137,12 @@ static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
static int alloc_uld_rxqs(struct adapter *adap,
struct sge_uld_rxq_info *rxq_info, bool lro)
{
- struct sge *s = &adap->sge;
unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
+ int i, err, msi_idx, que_idx = 0, bmap_idx = 0;
struct sge_ofld_rxq *q = rxq_info->uldrxq;
unsigned short *ids = rxq_info->rspq_id;
- unsigned int bmap_idx = 0;
+ struct sge *s = &adap->sge;
unsigned int per_chan;
- int i, err, msi_idx, que_idx = 0;
per_chan = rxq_info->nrxq / adap->params.nports;
@@ -161,6 +160,10 @@ static int alloc_uld_rxqs(struct adapter *adap,
if (msi_idx >= 0) {
bmap_idx = get_msix_idx_from_bmap(adap);
+ if (bmap_idx < 0) {
+ err = -ENOSPC;
+ goto freeout;
+ }
msi_idx = adap->msix_info_ulds[bmap_idx].idx;
}
err = t4_sge_alloc_rxq(adap, &q->rspq, false,
@@ -692,10 +695,10 @@ static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
lld->write_cmpl_support = adap->params.write_cmpl_support;
}
-static void uld_attach(struct adapter *adap, unsigned int uld)
+static int uld_attach(struct adapter *adap, unsigned int uld)
{
- void *handle;
struct cxgb4_lld_info lli;
+ void *handle;
uld_init(adap, &lli);
uld_queue_init(adap, uld, &lli);
@@ -705,7 +708,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
dev_warn(adap->pdev_dev,
"could not attach to the %s driver, error %ld\n",
adap->uld[uld].name, PTR_ERR(handle));
- return;
+ return PTR_ERR(handle);
}
adap->uld[uld].handle = handle;
@@ -713,22 +716,22 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
if (adap->flags & CXGB4_FULL_INIT_DONE)
adap->uld[uld].state_change(handle, CXGB4_STATE_UP);
+
+ return 0;
}
-/**
- * cxgb4_register_uld - register an upper-layer driver
- * @type: the ULD type
- * @p: the ULD methods
+/* cxgb4_register_uld - register an upper-layer driver
+ * @type: the ULD type
+ * @p: the ULD methods
*
- * Registers an upper-layer driver with this driver and notifies the ULD
- * about any presently available devices that support its type. Returns
- * %-EBUSY if a ULD of the same type is already registered.
+ * Registers an upper-layer driver with this driver and notifies the ULD
+ * about any presently available devices that support its type.
*/
void cxgb4_register_uld(enum cxgb4_uld type,
const struct cxgb4_uld_info *p)
{
- int ret = 0;
struct adapter *adap;
+ int ret = 0;
if (type >= CXGB4_ULD_MAX)
return;
@@ -760,8 +763,12 @@ void cxgb4_register_uld(enum cxgb4_uld type,
if (ret)
goto free_irq;
adap->uld[type] = *p;
- uld_attach(adap, type);
+ ret = uld_attach(adap, type);
+ if (ret)
+ goto free_txq;
continue;
+free_txq:
+ release_sge_txq_uld(adap, type);
free_irq:
if (adap->flags & CXGB4_FULL_INIT_DONE)
quiesce_rx_uld(adap, type);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 1a407d3c1d67..e9e45006632d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -351,15 +351,13 @@ exists:
static void _t4_l2e_free(struct l2t_entry *e)
{
struct l2t_data *d;
- struct sk_buff *skb;
if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */
if (e->neigh) {
neigh_release(e->neigh);
e->neigh = NULL;
}
- while ((skb = __skb_dequeue(&e->arpq)) != NULL)
- kfree_skb(skb);
+ __skb_queue_purge(&e->arpq);
}
d = container_of(e, struct l2t_data, l2tab[e->idx]);
@@ -370,7 +368,6 @@ static void _t4_l2e_free(struct l2t_entry *e)
static void t4_l2e_free(struct l2t_entry *e)
{
struct l2t_data *d;
- struct sk_buff *skb;
spin_lock_bh(&e->lock);
if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */
@@ -378,8 +375,7 @@ static void t4_l2e_free(struct l2t_entry *e)
neigh_release(e->neigh);
e->neigh = NULL;
}
- while ((skb = __skb_dequeue(&e->arpq)) != NULL)
- kfree_skb(skb);
+ __skb_queue_purge(&e->arpq);
}
spin_unlock_bh(&e->lock);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index b3da81e90132..928bfea5457b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -3791,15 +3791,11 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
* write the CIDX Updates into the Status Page at the end of the
* TX Queue.
*/
- c.autoequiqe_to_viid = htonl((dbqt
- ? FW_EQ_ETH_CMD_AUTOEQUIQE_F
- : FW_EQ_ETH_CMD_AUTOEQUEQE_F) |
+ c.autoequiqe_to_viid = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
FW_EQ_ETH_CMD_VIID_V(pi->viid));
c.fetchszm_to_iqid =
- htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(dbqt
- ? HOSTFCMODE_INGRESS_QUEUE_X
- : HOSTFCMODE_STATUS_PAGE_X) |
+ htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
FW_EQ_ETH_CMD_FETCHRO_F | FW_EQ_ETH_CMD_IQID_V(iqid));
diff --git a/drivers/net/ethernet/cortina/gemini.h b/drivers/net/ethernet/cortina/gemini.h
index 0b12f89bf89a..9fdf77d5eb37 100644
--- a/drivers/net/ethernet/cortina/gemini.h
+++ b/drivers/net/ethernet/cortina/gemini.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/* Register definitions for Gemini GMAC Ethernet device driver
*
* Copyright (C) 2006 Storlink, Corp.
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 9b7af94a40bb..a6f2063f1475 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -90,6 +90,9 @@ struct ftgmac100 {
struct mii_bus *mii_bus;
struct clk *clk;
+ /* AST2500/AST2600 RMII ref clock gate */
+ struct clk *rclk;
+
/* Link management */
int cur_speed;
int cur_duplex;
@@ -727,6 +730,18 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
*/
nfrags = skb_shinfo(skb)->nr_frags;
+ /* Setup HW checksumming */
+ csum_vlan = 0;
+ if (skb->ip_summed == CHECKSUM_PARTIAL &&
+ !ftgmac100_prep_tx_csum(skb, &csum_vlan))
+ goto drop;
+
+ /* Add VLAN tag */
+ if (skb_vlan_tag_present(skb)) {
+ csum_vlan |= FTGMAC100_TXDES1_INS_VLANTAG;
+ csum_vlan |= skb_vlan_tag_get(skb) & 0xffff;
+ }
+
/* Get header len */
len = skb_headlen(skb);
@@ -753,19 +768,6 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
if (nfrags == 0)
f_ctl_stat |= FTGMAC100_TXDES0_LTS;
txdes->txdes3 = cpu_to_le32(map);
-
- /* Setup HW checksumming */
- csum_vlan = 0;
- if (skb->ip_summed == CHECKSUM_PARTIAL &&
- !ftgmac100_prep_tx_csum(skb, &csum_vlan))
- goto drop;
-
- /* Add VLAN tag */
- if (skb_vlan_tag_present(skb)) {
- csum_vlan |= FTGMAC100_TXDES1_INS_VLANTAG;
- csum_vlan |= skb_vlan_tag_get(skb) & 0xffff;
- }
-
txdes->txdes1 = cpu_to_le32(csum_vlan);
/* Next descriptor */
@@ -1610,7 +1612,7 @@ static int ftgmac100_setup_mdio(struct net_device *netdev)
{
struct ftgmac100 *priv = netdev_priv(netdev);
struct platform_device *pdev = to_platform_device(priv->dev);
- int phy_intf = PHY_INTERFACE_MODE_RGMII;
+ phy_interface_t phy_intf = PHY_INTERFACE_MODE_RGMII;
struct device_node *np = pdev->dev.of_node;
int i, err = 0;
u32 reg;
@@ -1635,8 +1637,8 @@ static int ftgmac100_setup_mdio(struct net_device *netdev)
/* Get PHY mode from device-tree */
if (np) {
/* Default to RGMII. It's a gigabit part after all */
- phy_intf = of_get_phy_mode(np);
- if (phy_intf < 0)
+ err = of_get_phy_mode(np, &phy_intf);
+ if (err)
phy_intf = PHY_INTERFACE_MODE_RGMII;
/* Aspeed only supports these. I don't know about other IP
@@ -1718,20 +1720,41 @@ static void ftgmac100_ncsi_handler(struct ncsi_dev *nd)
nd->link_up ? "up" : "down");
}
-static void ftgmac100_setup_clk(struct ftgmac100 *priv)
+static int ftgmac100_setup_clk(struct ftgmac100 *priv)
{
- priv->clk = devm_clk_get(priv->dev, NULL);
- if (IS_ERR(priv->clk))
- return;
+ struct clk *clk;
+ int rc;
- clk_prepare_enable(priv->clk);
+ clk = devm_clk_get(priv->dev, NULL /* MACCLK */);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+ priv->clk = clk;
+ rc = clk_prepare_enable(priv->clk);
+ if (rc)
+ return rc;
/* Aspeed specifies a 100MHz clock is required for up to
* 1000Mbit link speeds. As NCSI is limited to 100Mbit, 25MHz
* is sufficient
*/
- clk_set_rate(priv->clk, priv->use_ncsi ? FTGMAC_25MHZ :
- FTGMAC_100MHZ);
+ rc = clk_set_rate(priv->clk, priv->use_ncsi ? FTGMAC_25MHZ :
+ FTGMAC_100MHZ);
+ if (rc)
+ goto cleanup_clk;
+
+ /* RCLK is for RMII, typically used for NCSI. Optional because it's not
+ * necessary if it's the AST2400 MAC, or the MAC is configured for
+ * RGMII, or the controller is not an ASPEED-based controller.
+ */
+ priv->rclk = devm_clk_get_optional(priv->dev, "RCLK");
+ rc = clk_prepare_enable(priv->rclk);
+ if (!rc)
+ return 0;
+
+cleanup_clk:
+ clk_disable_unprepare(priv->clk);
+
+ return rc;
}
static int ftgmac100_probe(struct platform_device *pdev)
@@ -1853,8 +1876,11 @@ static int ftgmac100_probe(struct platform_device *pdev)
goto err_setup_mdio;
}
- if (priv->is_aspeed)
- ftgmac100_setup_clk(priv);
+ if (priv->is_aspeed) {
+ err = ftgmac100_setup_clk(priv);
+ if (err)
+ goto err_ncsi_dev;
+ }
/* Default ring sizes */
priv->rx_q_entries = priv->new_rx_q_entries = DEF_RX_QUEUE_ENTRIES;
@@ -1886,8 +1912,10 @@ static int ftgmac100_probe(struct platform_device *pdev)
return 0;
-err_ncsi_dev:
err_register_netdev:
+ clk_disable_unprepare(priv->rclk);
+ clk_disable_unprepare(priv->clk);
+err_ncsi_dev:
ftgmac100_destroy_mdio(netdev);
err_setup_mdio:
iounmap(priv->base);
@@ -1909,6 +1937,7 @@ static int ftgmac100_remove(struct platform_device *pdev)
unregister_netdev(netdev);
+ clk_disable_unprepare(priv->rclk);
clk_disable_unprepare(priv->clk);
/* There's a small chance the reset task will have been re-queued,
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index b4b82b9c5cd6..6a9d12dad5d9 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -178,31 +178,9 @@ struct fm_port_fqs {
/* All the dpa bps in use at any moment */
static struct dpaa_bp *dpaa_bp_array[BM_MAX_NUM_OF_POOLS];
-/* The raw buffer size must be cacheline aligned */
#define DPAA_BP_RAW_SIZE 4096
-/* When using more than one buffer pool, the raw sizes are as follows:
- * 1 bp: 4KB
- * 2 bp: 2KB, 4KB
- * 3 bp: 1KB, 2KB, 4KB
- * 4 bp: 1KB, 2KB, 4KB, 8KB
- */
-static inline size_t bpool_buffer_raw_size(u8 index, u8 cnt)
-{
- size_t res = DPAA_BP_RAW_SIZE / 4;
- u8 i;
-
- for (i = (cnt < 3) ? cnt : 3; i < 3 + index; i++)
- res *= 2;
- return res;
-}
-/* FMan-DMA requires 16-byte alignment for Rx buffers, but SKB_DATA_ALIGN is
- * even stronger (SMP_CACHE_BYTES-aligned), so we just get away with that,
- * via SKB_WITH_OVERHEAD(). We can't rely on netdev_alloc_frag() giving us
- * half-page-aligned buffers, so we reserve some more space for start-of-buffer
- * alignment.
- */
-#define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD((raw_size) - SMP_CACHE_BYTES)
+#define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD(raw_size)
static int dpaa_max_frm;
@@ -288,7 +266,7 @@ static int dpaa_stop(struct net_device *net_dev)
/* Allow the Fman (Tx) port to process in-flight frames before we
* try switching it off.
*/
- usleep_range(5000, 10000);
+ msleep(200);
err = mac_dev->stop(mac_dev);
if (err < 0)
@@ -305,6 +283,8 @@ static int dpaa_stop(struct net_device *net_dev)
phy_disconnect(net_dev->phydev);
net_dev->phydev = NULL;
+ msleep(200);
+
return err;
}
@@ -596,10 +576,7 @@ static void dpaa_bp_free(struct dpaa_bp *dpaa_bp)
static void dpaa_bps_free(struct dpaa_priv *priv)
{
- int i;
-
- for (i = 0; i < DPAA_BPS_NUM; i++)
- dpaa_bp_free(priv->dpaa_bps[i]);
+ dpaa_bp_free(priv->dpaa_bp);
}
/* Use multiple WQs for FQ assignment:
@@ -773,7 +750,7 @@ static void dpaa_release_channel(void)
qman_release_pool(rx_pool_channel);
}
-static void dpaa_eth_add_channel(u16 channel)
+static void dpaa_eth_add_channel(u16 channel, struct device *dev)
{
u32 pool = QM_SDQCR_CHANNELS_POOL_CONV(channel);
const cpumask_t *cpus = qman_affine_cpus();
@@ -783,6 +760,7 @@ static void dpaa_eth_add_channel(u16 channel)
for_each_cpu_and(cpu, cpus, cpu_online_mask) {
portal = qman_get_affine_portal(cpu);
qman_p_static_dequeue_add(portal, pool);
+ qman_start_using_portal(portal, dev);
}
}
@@ -901,7 +879,7 @@ static void dpaa_fq_setup(struct dpaa_priv *priv,
if (num_portals == 0)
dev_err(priv->net_dev->dev.parent,
- "No Qman software (affine) channels found");
+ "No Qman software (affine) channels found\n");
/* Initialize each FQ in the list */
list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
@@ -1197,15 +1175,15 @@ static int dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
return err;
}
-static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
- size_t count, struct dpaa_fq *errq,
+static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp *bp,
+ struct dpaa_fq *errq,
struct dpaa_fq *defq, struct dpaa_fq *pcdq,
struct dpaa_buffer_layout *buf_layout)
{
struct fman_buffer_prefix_content buf_prefix_content;
struct fman_port_rx_params *rx_p;
struct fman_port_params params;
- int i, err;
+ int err;
memset(&params, 0, sizeof(params));
memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));
@@ -1224,12 +1202,9 @@ static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
rx_p->pcd_fqs_count = DPAA_ETH_PCD_RXQ_NUM;
}
- count = min(ARRAY_SIZE(rx_p->ext_buf_pools.ext_buf_pool), count);
- rx_p->ext_buf_pools.num_of_pools_used = (u8)count;
- for (i = 0; i < count; i++) {
- rx_p->ext_buf_pools.ext_buf_pool[i].id = bps[i]->bpid;
- rx_p->ext_buf_pools.ext_buf_pool[i].size = (u16)bps[i]->size;
- }
+ rx_p->ext_buf_pools.num_of_pools_used = 1;
+ rx_p->ext_buf_pools.ext_buf_pool[0].id = bp->bpid;
+ rx_p->ext_buf_pools.ext_buf_pool[0].size = (u16)bp->size;
err = fman_port_config(port, &params);
if (err) {
@@ -1252,7 +1227,7 @@ static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
}
static int dpaa_eth_init_ports(struct mac_device *mac_dev,
- struct dpaa_bp **bps, size_t count,
+ struct dpaa_bp *bp,
struct fm_port_fqs *port_fqs,
struct dpaa_buffer_layout *buf_layout,
struct device *dev)
@@ -1266,7 +1241,7 @@ static int dpaa_eth_init_ports(struct mac_device *mac_dev,
if (err)
return err;
- err = dpaa_eth_init_rx_port(rxport, bps, count, port_fqs->rx_errq,
+ err = dpaa_eth_init_rx_port(rxport, bp, port_fqs->rx_errq,
port_fqs->rx_defq, port_fqs->rx_pcdq,
&buf_layout[RX]);
@@ -1335,15 +1310,16 @@ static void dpaa_fd_release(const struct net_device *net_dev,
vaddr = phys_to_virt(qm_fd_addr(fd));
sgt = vaddr + qm_fd_get_offset(fd);
- dma_unmap_single(dpaa_bp->dev, qm_fd_addr(fd), dpaa_bp->size,
- DMA_FROM_DEVICE);
+ dma_unmap_page(dpaa_bp->priv->rx_dma_dev, qm_fd_addr(fd),
+ DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
dpaa_release_sgt_members(sgt);
- addr = dma_map_single(dpaa_bp->dev, vaddr, dpaa_bp->size,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(dpaa_bp->dev, addr)) {
- dev_err(dpaa_bp->dev, "DMA mapping failed");
+ addr = dma_map_page(dpaa_bp->priv->rx_dma_dev,
+ virt_to_page(vaddr), 0, DPAA_BP_RAW_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dpaa_bp->priv->rx_dma_dev, addr)) {
+ netdev_err(net_dev, "DMA mapping failed\n");
return;
}
bm_buffer_set64(&bmb, addr);
@@ -1396,7 +1372,7 @@ static void count_ern(struct dpaa_percpu_priv *percpu_priv,
static int dpaa_enable_tx_csum(struct dpaa_priv *priv,
struct sk_buff *skb,
struct qm_fd *fd,
- char *parse_results)
+ void *parse_results)
{
struct fman_prs_result *parse_result;
u16 ethertype = ntohs(skb->protocol);
@@ -1488,25 +1464,24 @@ return_error:
static int dpaa_bp_add_8_bufs(const struct dpaa_bp *dpaa_bp)
{
- struct device *dev = dpaa_bp->dev;
+ struct net_device *net_dev = dpaa_bp->priv->net_dev;
struct bm_buffer bmb[8];
dma_addr_t addr;
- void *new_buf;
+ struct page *p;
u8 i;
for (i = 0; i < 8; i++) {
- new_buf = netdev_alloc_frag(dpaa_bp->raw_size);
- if (unlikely(!new_buf)) {
- dev_err(dev, "netdev_alloc_frag() failed, size %zu\n",
- dpaa_bp->raw_size);
+ p = dev_alloc_pages(0);
+ if (unlikely(!p)) {
+ netdev_err(net_dev, "dev_alloc_pages() failed\n");
goto release_previous_buffs;
}
- new_buf = PTR_ALIGN(new_buf, SMP_CACHE_BYTES);
- addr = dma_map_single(dev, new_buf,
- dpaa_bp->size, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev, addr))) {
- dev_err(dpaa_bp->dev, "DMA map failed");
+ addr = dma_map_page(dpaa_bp->priv->rx_dma_dev, p, 0,
+ DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dpaa_bp->priv->rx_dma_dev,
+ addr))) {
+ netdev_err(net_dev, "DMA map failed\n");
goto release_previous_buffs;
}
@@ -1581,17 +1556,16 @@ static int dpaa_eth_refill_bpools(struct dpaa_priv *priv)
{
struct dpaa_bp *dpaa_bp;
int *countptr;
- int res, i;
+ int res;
+
+ dpaa_bp = priv->dpaa_bp;
+ if (!dpaa_bp)
+ return -EINVAL;
+ countptr = this_cpu_ptr(dpaa_bp->percpu_count);
+ res = dpaa_eth_refill_bpool(dpaa_bp, countptr);
+ if (res)
+ return res;
- for (i = 0; i < DPAA_BPS_NUM; i++) {
- dpaa_bp = priv->dpaa_bps[i];
- if (!dpaa_bp)
- return -EINVAL;
- countptr = this_cpu_ptr(dpaa_bp->percpu_count);
- res = dpaa_eth_refill_bpool(dpaa_bp, countptr);
- if (res)
- return res;
- }
return 0;
}
@@ -1600,68 +1574,74 @@ static int dpaa_eth_refill_bpools(struct dpaa_priv *priv)
* Skb freeing is not handled here.
*
* This function may be called on error paths in the Tx function, so guard
- * against cases when not all fd relevant fields were filled in.
+ * against cases when not all fd relevant fields were filled in. To avoid
+ * reading the invalid transmission timestamp for the error paths set ts to
+ * false.
*
* Return the skb backpointer, since for S/G frames the buffer containing it
* gets freed here.
*/
static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
- const struct qm_fd *fd)
+ const struct qm_fd *fd, bool ts)
{
const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
struct device *dev = priv->net_dev->dev.parent;
struct skb_shared_hwtstamps shhwtstamps;
dma_addr_t addr = qm_fd_addr(fd);
+ void *vaddr = phys_to_virt(addr);
const struct qm_sg_entry *sgt;
- struct sk_buff **skbh, *skb;
- int nr_frags, i;
+ struct sk_buff *skb;
u64 ns;
-
- skbh = (struct sk_buff **)phys_to_virt(addr);
- skb = *skbh;
-
- if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
- memset(&shhwtstamps, 0, sizeof(shhwtstamps));
-
- if (!fman_port_get_tstamp(priv->mac_dev->port[TX], (void *)skbh,
- &ns)) {
- shhwtstamps.hwtstamp = ns_to_ktime(ns);
- skb_tstamp_tx(skb, &shhwtstamps);
- } else {
- dev_warn(dev, "fman_port_get_tstamp failed!\n");
- }
- }
+ int i;
if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
- nr_frags = skb_shinfo(skb)->nr_frags;
- dma_unmap_single(dev, addr,
- qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
- dma_dir);
+ dma_unmap_page(priv->tx_dma_dev, addr,
+ qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
+ dma_dir);
/* The sgt buffer has been allocated with netdev_alloc_frag(),
* it's from lowmem.
*/
- sgt = phys_to_virt(addr + qm_fd_get_offset(fd));
+ sgt = vaddr + qm_fd_get_offset(fd);
/* sgt[0] is from lowmem, was dma_map_single()-ed */
- dma_unmap_single(dev, qm_sg_addr(&sgt[0]),
+ dma_unmap_single(priv->tx_dma_dev, qm_sg_addr(&sgt[0]),
qm_sg_entry_get_len(&sgt[0]), dma_dir);
/* remaining pages were mapped with skb_frag_dma_map() */
- for (i = 1; i <= nr_frags; i++) {
+ for (i = 1; (i < DPAA_SGT_MAX_ENTRIES) &&
+ !qm_sg_entry_is_final(&sgt[i - 1]); i++) {
WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
- dma_unmap_page(dev, qm_sg_addr(&sgt[i]),
+ dma_unmap_page(priv->tx_dma_dev, qm_sg_addr(&sgt[i]),
qm_sg_entry_get_len(&sgt[i]), dma_dir);
}
-
- /* Free the page frag that we allocated on Tx */
- skb_free_frag(phys_to_virt(addr));
} else {
- dma_unmap_single(dev, addr,
- skb_tail_pointer(skb) - (u8 *)skbh, dma_dir);
+ dma_unmap_single(priv->tx_dma_dev, addr,
+ priv->tx_headroom + qm_fd_get_length(fd),
+ dma_dir);
+ }
+
+ skb = *(struct sk_buff **)vaddr;
+
+ /* DMA unmapping is required before accessing the HW provided info */
+ if (ts && priv->tx_tstamp &&
+ skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+
+ if (!fman_port_get_tstamp(priv->mac_dev->port[TX], vaddr,
+ &ns)) {
+ shhwtstamps.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ } else {
+ dev_warn(dev, "fman_port_get_tstamp failed!\n");
+ }
}
+ if (qm_fd_get_format(fd) == qm_fd_sg)
+ /* Free the page that we allocated on Tx for the SGT */
+ free_pages((unsigned long)vaddr, 0);
+
return skb;
}
@@ -1715,7 +1695,7 @@ static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv,
return skb;
free_buffer:
- skb_free_frag(vaddr);
+ free_pages((unsigned long)vaddr, 0);
return NULL;
}
@@ -1762,8 +1742,8 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
goto free_buffers;
count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
- dma_unmap_single(dpaa_bp->dev, sg_addr, dpaa_bp->size,
- DMA_FROM_DEVICE);
+ dma_unmap_page(priv->rx_dma_dev, sg_addr,
+ DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
if (!skb) {
sz = dpaa_bp->size +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
@@ -1815,7 +1795,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
WARN_ONCE(i == DPAA_SGT_MAX_ENTRIES, "No final bit on SGT\n");
/* free the SG table buffer */
- skb_free_frag(vaddr);
+ free_pages((unsigned long)vaddr, 0);
return skb;
@@ -1832,7 +1812,7 @@ free_buffers:
for (i = 0; i < DPAA_SGT_MAX_ENTRIES ; i++) {
sg_addr = qm_sg_addr(&sgt[i]);
sg_vaddr = phys_to_virt(sg_addr);
- skb_free_frag(sg_vaddr);
+ free_pages((unsigned long)sg_vaddr, 0);
dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
if (dpaa_bp) {
count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
@@ -1843,7 +1823,7 @@ free_buffers:
break;
}
/* free the SGT fragment */
- skb_free_frag(vaddr);
+ free_pages((unsigned long)vaddr, 0);
return NULL;
}
@@ -1853,9 +1833,8 @@ static int skb_to_contig_fd(struct dpaa_priv *priv,
int *offset)
{
struct net_device *net_dev = priv->net_dev;
- struct device *dev = net_dev->dev.parent;
enum dma_data_direction dma_dir;
- unsigned char *buffer_start;
+ unsigned char *buff_start;
struct sk_buff **skbh;
dma_addr_t addr;
int err;
@@ -1864,10 +1843,10 @@ static int skb_to_contig_fd(struct dpaa_priv *priv,
* available, so just use that for offset.
*/
fd->bpid = FSL_DPAA_BPID_INV;
- buffer_start = skb->data - priv->tx_headroom;
+ buff_start = skb->data - priv->tx_headroom;
dma_dir = DMA_TO_DEVICE;
- skbh = (struct sk_buff **)buffer_start;
+ skbh = (struct sk_buff **)buff_start;
*skbh = skb;
/* Enable L3/L4 hardware checksum computation.
@@ -1876,7 +1855,7 @@ static int skb_to_contig_fd(struct dpaa_priv *priv,
* need to write into the skb.
*/
err = dpaa_enable_tx_csum(priv, skb, fd,
- ((char *)skbh) + DPAA_TX_PRIV_DATA_SIZE);
+ buff_start + DPAA_TX_PRIV_DATA_SIZE);
if (unlikely(err < 0)) {
if (net_ratelimit())
netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
@@ -1889,9 +1868,9 @@ static int skb_to_contig_fd(struct dpaa_priv *priv,
fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);
/* Map the entire buffer size that may be seen by FMan, but no more */
- addr = dma_map_single(dev, skbh,
- skb_tail_pointer(skb) - buffer_start, dma_dir);
- if (unlikely(dma_mapping_error(dev, addr))) {
+ addr = dma_map_single(priv->tx_dma_dev, buff_start,
+ priv->tx_headroom + skb->len, dma_dir);
+ if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
if (net_ratelimit())
netif_err(priv, tx_err, net_dev, "dma_map_single() failed\n");
return -EINVAL;
@@ -1907,24 +1886,22 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
const int nr_frags = skb_shinfo(skb)->nr_frags;
struct net_device *net_dev = priv->net_dev;
- struct device *dev = net_dev->dev.parent;
struct qm_sg_entry *sgt;
struct sk_buff **skbh;
- int i, j, err, sz;
- void *buffer_start;
+ void *buff_start;
skb_frag_t *frag;
dma_addr_t addr;
size_t frag_len;
- void *sgt_buf;
-
- /* get a page frag to store the SGTable */
- sz = SKB_DATA_ALIGN(priv->tx_headroom + DPAA_SGT_SIZE);
- sgt_buf = netdev_alloc_frag(sz);
- if (unlikely(!sgt_buf)) {
- netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n",
- sz);
+ struct page *p;
+ int i, j, err;
+
+ /* get a page to store the SGTable */
+ p = dev_alloc_pages(0);
+ if (unlikely(!p)) {
+ netdev_err(net_dev, "dev_alloc_pages() failed\n");
return -ENOMEM;
}
+ buff_start = page_address(p);
/* Enable L3/L4 hardware checksum computation.
*
@@ -1932,7 +1909,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
* need to write into the skb.
*/
err = dpaa_enable_tx_csum(priv, skb, fd,
- sgt_buf + DPAA_TX_PRIV_DATA_SIZE);
+ buff_start + DPAA_TX_PRIV_DATA_SIZE);
if (unlikely(err < 0)) {
if (net_ratelimit())
netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
@@ -1941,15 +1918,15 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
}
/* SGT[0] is used by the linear part */
- sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom);
+ sgt = (struct qm_sg_entry *)(buff_start + priv->tx_headroom);
frag_len = skb_headlen(skb);
qm_sg_entry_set_len(&sgt[0], frag_len);
sgt[0].bpid = FSL_DPAA_BPID_INV;
sgt[0].offset = 0;
- addr = dma_map_single(dev, skb->data,
+ addr = dma_map_single(priv->tx_dma_dev, skb->data,
skb_headlen(skb), dma_dir);
- if (unlikely(dma_mapping_error(dev, addr))) {
- dev_err(dev, "DMA mapping failed");
+ if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
+ netdev_err(priv->net_dev, "DMA mapping failed\n");
err = -EINVAL;
goto sg0_map_failed;
}
@@ -1960,10 +1937,10 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
frag = &skb_shinfo(skb)->frags[i];
frag_len = skb_frag_size(frag);
WARN_ON(!skb_frag_page(frag));
- addr = skb_frag_dma_map(dev, frag, 0,
+ addr = skb_frag_dma_map(priv->tx_dma_dev, frag, 0,
frag_len, dma_dir);
- if (unlikely(dma_mapping_error(dev, addr))) {
- dev_err(dev, "DMA mapping failed");
+ if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
+ netdev_err(priv->net_dev, "DMA mapping failed\n");
err = -EINVAL;
goto sg_map_failed;
}
@@ -1979,17 +1956,17 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
/* Set the final bit in the last used entry of the SGT */
qm_sg_entry_set_f(&sgt[nr_frags], frag_len);
+ /* set fd offset to priv->tx_headroom */
qm_fd_set_sg(fd, priv->tx_headroom, skb->len);
/* DMA map the SGT page */
- buffer_start = (void *)sgt - priv->tx_headroom;
- skbh = (struct sk_buff **)buffer_start;
+ skbh = (struct sk_buff **)buff_start;
*skbh = skb;
- addr = dma_map_single(dev, buffer_start,
- priv->tx_headroom + DPAA_SGT_SIZE, dma_dir);
- if (unlikely(dma_mapping_error(dev, addr))) {
- dev_err(dev, "DMA mapping failed");
+ addr = dma_map_page(priv->tx_dma_dev, p, 0,
+ priv->tx_headroom + DPAA_SGT_SIZE, dma_dir);
+ if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
+ netdev_err(priv->net_dev, "DMA mapping failed\n");
err = -EINVAL;
goto sgt_map_failed;
}
@@ -2003,11 +1980,11 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
sgt_map_failed:
sg_map_failed:
for (j = 0; j < i; j++)
- dma_unmap_page(dev, qm_sg_addr(&sgt[j]),
+ dma_unmap_page(priv->tx_dma_dev, qm_sg_addr(&sgt[j]),
qm_sg_entry_get_len(&sgt[j]), dma_dir);
sg0_map_failed:
csum_failed:
- skb_free_frag(sgt_buf);
+ free_pages((unsigned long)buff_start, 0);
return err;
}
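Both Tx paths depend on the same buffer layout in front of the frame data, which is also what dpaa_cleanup_tx_fd() undoes. A hypothetical accessor spelling that layout out, under the assumption that DPAA_TX_PRIV_DATA_SIZE and priv->tx_headroom keep the meaning they have in this patch:

/* Illustrative layout of the Tx buffer built by skb_to_contig_fd() and
 * skb_to_sg_fd():
 *
 *   buff_start + 0                       struct sk_buff * (backpointer)
 *   buff_start + DPAA_TX_PRIV_DATA_SIZE  parse results for HW checksum
 *   buff_start + priv->tx_headroom       frame data, or the SGT
 */
static struct sk_buff *tx_buf_skb(void *buff_start)
{
	/* the backpointer sits at offset 0, as stored by *skbh = skb */
	return *(struct sk_buff **)buff_start;
}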
@@ -2114,7 +2091,7 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
if (likely(dpaa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0))
return NETDEV_TX_OK;
- dpaa_cleanup_tx_fd(priv, &fd);
+ dpaa_cleanup_tx_fd(priv, &fd, false);
skb_to_fd_failed:
enomem:
percpu_stats->tx_errors++;
@@ -2160,7 +2137,7 @@ static void dpaa_tx_error(struct net_device *net_dev,
percpu_priv->stats.tx_errors++;
- skb = dpaa_cleanup_tx_fd(priv, fd);
+ skb = dpaa_cleanup_tx_fd(priv, fd, false);
dev_kfree_skb(skb);
}
@@ -2200,7 +2177,7 @@ static void dpaa_tx_conf(struct net_device *net_dev,
percpu_priv->tx_confirm++;
- skb = dpaa_cleanup_tx_fd(priv, fd);
+ skb = dpaa_cleanup_tx_fd(priv, fd, true);
consume_skb(skb);
}
@@ -2304,11 +2281,8 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
return qman_cb_dqrr_consume;
}
- dpaa_bp = dpaa_bpid2pool(fd->bpid);
- if (!dpaa_bp)
- return qman_cb_dqrr_consume;
-
- dma_unmap_single(dpaa_bp->dev, addr, dpaa_bp->size, DMA_FROM_DEVICE);
+ dma_unmap_page(dpaa_bp->priv->rx_dma_dev, addr, DPAA_BP_RAW_SIZE,
+ DMA_FROM_DEVICE);
/* prefetch the first 64 bytes of the frame or the SGT start */
vaddr = phys_to_virt(addr);
@@ -2430,7 +2404,7 @@ static void egress_ern(struct qman_portal *portal,
percpu_priv->stats.tx_fifo_errors++;
count_ern(percpu_priv, msg);
- skb = dpaa_cleanup_tx_fd(priv, fd);
+ skb = dpaa_cleanup_tx_fd(priv, fd, false);
dev_kfree_skb_any(skb);
}
@@ -2663,7 +2637,8 @@ static inline void dpaa_bp_free_pf(const struct dpaa_bp *bp,
{
dma_addr_t addr = bm_buf_addr(bmb);
- dma_unmap_single(bp->dev, addr, bp->size, DMA_FROM_DEVICE);
+ dma_unmap_page(bp->priv->rx_dma_dev, addr, DPAA_BP_RAW_SIZE,
+ DMA_FROM_DEVICE);
skb_free_frag(phys_to_virt(addr));
}
@@ -2764,21 +2739,46 @@ static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl)
static int dpaa_eth_probe(struct platform_device *pdev)
{
- struct dpaa_bp *dpaa_bps[DPAA_BPS_NUM] = {NULL};
struct net_device *net_dev = NULL;
+ struct dpaa_bp *dpaa_bp = NULL;
struct dpaa_fq *dpaa_fq, *tmp;
struct dpaa_priv *priv = NULL;
struct fm_port_fqs port_fqs;
struct mac_device *mac_dev;
- int err = 0, i, channel;
+ int err = 0, channel;
struct device *dev;
- /* device used for DMA mapping */
- dev = pdev->dev.parent;
- err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40));
- if (err) {
- dev_err(dev, "dma_coerce_mask_and_coherent() failed\n");
- return err;
+ dev = &pdev->dev;
+
+ err = bman_is_probed();
+ if (!err)
+ return -EPROBE_DEFER;
+ if (err < 0) {
+ dev_err(dev, "failing probe due to bman probe error\n");
+ return -ENODEV;
+ }
+ err = qman_is_probed();
+ if (!err)
+ return -EPROBE_DEFER;
+ if (err < 0) {
+ dev_err(dev, "failing probe due to qman probe error\n");
+ return -ENODEV;
+ }
+ err = bman_portals_probed();
+ if (!err)
+ return -EPROBE_DEFER;
+ if (err < 0) {
+ dev_err(dev,
+ "failing probe due to bman portals probe error\n");
+ return -ENODEV;
+ }
+ err = qman_portals_probed();
+ if (!err)
+ return -EPROBE_DEFER;
+ if (err < 0) {
+ dev_err(dev,
+ "failing probe due to qman portals probe error\n");
+ return -ENODEV;
}
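All four checks above follow the same tri-state convention: a positive return means the dependency has probed, zero means it has not probed yet, and a negative value means it failed permanently. A hedged sketch of the pattern folded into one helper; dpaa_check_dependency() is hypothetical, while bman_is_probed() and friends are the real accessors:

static int dpaa_check_dependency(int (*is_probed)(void), struct device *dev,
				 const char *name)
{
	int err = is_probed();

	if (!err)
		return -EPROBE_DEFER;	/* dependency not probed yet */
	if (err < 0) {
		dev_err(dev, "failing probe due to %s probe error\n", name);
		return -ENODEV;		/* dependency failed for good */
	}
	return 0;			/* dependency is ready */
}

With such a helper, each block above would reduce to a single call like dpaa_check_dependency(bman_is_probed, dev, "bman").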
/* Allocate this early, so we can store relevant information in
@@ -2801,11 +2801,23 @@ static int dpaa_eth_probe(struct platform_device *pdev)
mac_dev = dpaa_mac_dev_get(pdev);
if (IS_ERR(mac_dev)) {
- dev_err(dev, "dpaa_mac_dev_get() failed\n");
+ netdev_err(net_dev, "dpaa_mac_dev_get() failed\n");
err = PTR_ERR(mac_dev);
goto free_netdev;
}
+ /* Devices used for DMA mapping */
+ priv->rx_dma_dev = fman_port_get_device(mac_dev->port[RX]);
+ priv->tx_dma_dev = fman_port_get_device(mac_dev->port[TX]);
+ err = dma_coerce_mask_and_coherent(priv->rx_dma_dev, DMA_BIT_MASK(40));
+ if (!err)
+ err = dma_coerce_mask_and_coherent(priv->tx_dma_dev,
+ DMA_BIT_MASK(40));
+ if (err) {
+ netdev_err(net_dev, "dma_coerce_mask_and_coherent() failed\n");
+ goto free_netdev;
+ }
+
/* If fsl_fm_max_frm is set to a higher value than the all-common 1500,
* we choose conservatively and let the user explicitly set a higher
* MTU via ifconfig. Otherwise, the user may end up with different MTUs
@@ -2822,23 +2834,21 @@ static int dpaa_eth_probe(struct platform_device *pdev)
priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */
/* bp init */
- for (i = 0; i < DPAA_BPS_NUM; i++) {
- dpaa_bps[i] = dpaa_bp_alloc(dev);
- if (IS_ERR(dpaa_bps[i])) {
- err = PTR_ERR(dpaa_bps[i]);
- goto free_dpaa_bps;
- }
- /* the raw size of the buffers used for reception */
- dpaa_bps[i]->raw_size = bpool_buffer_raw_size(i, DPAA_BPS_NUM);
- /* avoid runtime computations by keeping the usable size here */
- dpaa_bps[i]->size = dpaa_bp_size(dpaa_bps[i]->raw_size);
- dpaa_bps[i]->dev = dev;
-
- err = dpaa_bp_alloc_pool(dpaa_bps[i]);
- if (err < 0)
- goto free_dpaa_bps;
- priv->dpaa_bps[i] = dpaa_bps[i];
+ dpaa_bp = dpaa_bp_alloc(dev);
+ if (IS_ERR(dpaa_bp)) {
+ err = PTR_ERR(dpaa_bp);
+ goto free_dpaa_bps;
}
+ /* the raw size of the buffers used for reception */
+ dpaa_bp->raw_size = DPAA_BP_RAW_SIZE;
+ /* avoid runtime computations by keeping the usable size here */
+ dpaa_bp->size = dpaa_bp_size(dpaa_bp->raw_size);
+ dpaa_bp->priv = priv;
+
+ err = dpaa_bp_alloc_pool(dpaa_bp);
+ if (err < 0)
+ goto free_dpaa_bps;
+ priv->dpaa_bp = dpaa_bp;
INIT_LIST_HEAD(&priv->dpaa_fq_list);
@@ -2864,7 +2874,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
/* Walk the CPUs with affine portals
* and add this pool channel to each's dequeue mask.
*/
- dpaa_eth_add_channel(priv->channel);
+ dpaa_eth_add_channel(priv->channel, &pdev->dev);
dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);
@@ -2896,7 +2906,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
priv->rx_headroom = dpaa_get_headroom(&priv->buf_layout[RX]);
/* All real interfaces need their ports initialized */
- err = dpaa_eth_init_ports(mac_dev, dpaa_bps, DPAA_BPS_NUM, &port_fqs,
+ err = dpaa_eth_init_ports(mac_dev, dpaa_bp, &port_fqs,
&priv->buf_layout[0], dev);
if (err)
goto free_dpaa_fqs;
@@ -2955,7 +2965,7 @@ static int dpaa_remove(struct platform_device *pdev)
struct device *dev;
int err;
- dev = pdev->dev.parent;
+ dev = &pdev->dev;
net_dev = dev_get_drvdata(dev);
priv = netdev_priv(net_dev);
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
index f7e59e8db075..fc2cc4c48e06 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
@@ -47,8 +47,6 @@
/* Total number of Tx queues */
#define DPAA_ETH_TXQ_NUM (DPAA_TC_NUM * DPAA_TC_TXQ_NUM)
-#define DPAA_BPS_NUM 3 /* number of bpools per interface */
-
/* More detailed FQ types - used for fine-grained WQ assignments */
enum dpaa_fq_type {
FQ_TYPE_RX_DEFAULT = 1, /* Rx Default FQs */
@@ -80,9 +78,11 @@ struct dpaa_fq_cbs {
struct qman_fq egress_ern;
};
+struct dpaa_priv;
+
struct dpaa_bp {
- /* device used in the DMA mapping operations */
- struct device *dev;
+ /* used in the DMA mapping operations */
+ struct dpaa_priv *priv;
/* current number of buffers in the buffer pool allotted to each CPU */
int __percpu *percpu_count;
/* all buffers allocated for this pool have this raw size */
@@ -146,13 +146,15 @@ struct dpaa_buffer_layout {
struct dpaa_priv {
struct dpaa_percpu_priv __percpu *percpu_priv;
- struct dpaa_bp *dpaa_bps[DPAA_BPS_NUM];
+ struct dpaa_bp *dpaa_bp;
/* Store here the needed Tx headroom for convenience and speed
* (even though it can be computed based on the fields of buf_layout)
*/
u16 tx_headroom;
struct net_device *net_dev;
struct mac_device *mac_dev;
+ struct device *rx_dma_dev;
+ struct device *tx_dma_dev;
struct qman_fq *egress_fqs[DPAA_ETH_TXQ_NUM];
struct qman_fq *conf_fqs[DPAA_ETH_TXQ_NUM];
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
index 0d9b185e317f..ee62d25cac81 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
@@ -131,11 +131,9 @@ static ssize_t dpaa_eth_show_bpids(struct device *dev,
{
struct dpaa_priv *priv = netdev_priv(to_net_dev(dev));
ssize_t bytes = 0;
- int i = 0;
- for (i = 0; i < DPAA_BPS_NUM; i++)
- bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, "%u\n",
- priv->dpaa_bps[i]->bpid);
+ bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, "%u\n",
+ priv->dpaa_bp->bpid);
return bytes;
}
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index 7ce2e99b594d..66d150872d48 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -47,6 +47,8 @@ static const char dpaa_stats_percpu[][ETH_GSTRING_LEN] = {
"tx S/G",
"tx error",
"rx error",
+ "rx dropped",
+ "tx dropped",
};
static char dpaa_stats_global[][ETH_GSTRING_LEN] = {
@@ -78,10 +80,8 @@ static char dpaa_stats_global[][ETH_GSTRING_LEN] = {
static int dpaa_get_link_ksettings(struct net_device *net_dev,
struct ethtool_link_ksettings *cmd)
{
- if (!net_dev->phydev) {
- netdev_dbg(net_dev, "phy device not initialized\n");
+ if (!net_dev->phydev)
return 0;
- }
phy_ethtool_ksettings_get(net_dev->phydev, cmd);
@@ -93,10 +93,8 @@ static int dpaa_set_link_ksettings(struct net_device *net_dev,
{
int err;
- if (!net_dev->phydev) {
- netdev_err(net_dev, "phy device not initialized\n");
+ if (!net_dev->phydev)
return -ENODEV;
- }
err = phy_ethtool_ksettings_set(net_dev->phydev, cmd);
if (err < 0)
@@ -140,10 +138,8 @@ static int dpaa_nway_reset(struct net_device *net_dev)
{
int err;
- if (!net_dev->phydev) {
- netdev_err(net_dev, "phy device not initialized\n");
+ if (!net_dev->phydev)
return -ENODEV;
- }
err = 0;
if (net_dev->phydev->autoneg) {
@@ -165,10 +161,8 @@ static void dpaa_get_pauseparam(struct net_device *net_dev,
priv = netdev_priv(net_dev);
mac_dev = priv->mac_dev;
- if (!net_dev->phydev) {
- netdev_err(net_dev, "phy device not initialized\n");
+ if (!net_dev->phydev)
return;
- }
epause->autoneg = mac_dev->autoneg_pause;
epause->rx_pause = mac_dev->rx_pause_active;
@@ -223,7 +217,7 @@ static int dpaa_get_sset_count(struct net_device *net_dev, int type)
unsigned int total_stats, num_stats;
num_stats = num_online_cpus() + 1;
- total_stats = num_stats * (DPAA_STATS_PERCPU_LEN + DPAA_BPS_NUM) +
+ total_stats = num_stats * (DPAA_STATS_PERCPU_LEN + 1) +
DPAA_STATS_GLOBAL_LEN;
switch (type) {
@@ -235,10 +229,10 @@ static int dpaa_get_sset_count(struct net_device *net_dev, int type)
}
static void copy_stats(struct dpaa_percpu_priv *percpu_priv, int num_cpus,
- int crr_cpu, u64 *bp_count, u64 *data)
+ int crr_cpu, u64 bp_count, u64 *data)
{
int num_values = num_cpus + 1;
- int crr = 0, j;
+ int crr = 0;
/* update current CPU's stats and also add them to the total values */
data[crr * num_values + crr_cpu] = percpu_priv->in_interrupt;
@@ -262,23 +256,27 @@ static void copy_stats(struct dpaa_percpu_priv *percpu_priv, int num_cpus,
data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_errors;
data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_errors;
- for (j = 0; j < DPAA_BPS_NUM; j++) {
- data[crr * num_values + crr_cpu] = bp_count[j];
- data[crr++ * num_values + num_cpus] += bp_count[j];
- }
+ data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_dropped;
+ data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_dropped;
+
+ data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_dropped;
+ data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_dropped;
+
+ data[crr * num_values + crr_cpu] = bp_count;
+ data[crr++ * num_values + num_cpus] += bp_count;
}
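copy_stats() fills a flattened matrix with one row per counter and num_cpus + 1 columns: the per-CPU values first, then a running TOTAL column. With four online CPUs, for instance, counter crr for CPU 2 lands at data[crr * 5 + 2] and its total at data[crr * 5 + 4]. Hypothetical index helpers that make the layout explicit:

static inline int stat_cell(int counter, int num_cpus, int cpu)
{
	/* row-major: one row per counter, one column per CPU */
	return counter * (num_cpus + 1) + cpu;
}

static inline int stat_total_cell(int counter, int num_cpus)
{
	/* the aggregate lives in the last column of the row */
	return counter * (num_cpus + 1) + num_cpus;
}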
static void dpaa_get_ethtool_stats(struct net_device *net_dev,
struct ethtool_stats *stats, u64 *data)
{
- u64 bp_count[DPAA_BPS_NUM], cg_time, cg_num;
struct dpaa_percpu_priv *percpu_priv;
struct dpaa_rx_errors rx_errors;
unsigned int num_cpus, offset;
+ u64 bp_count, cg_time, cg_num;
struct dpaa_ern_cnt ern_cnt;
struct dpaa_bp *dpaa_bp;
struct dpaa_priv *priv;
- int total_stats, i, j;
+ int total_stats, i;
bool cg_status;
total_stats = dpaa_get_sset_count(net_dev, ETH_SS_STATS);
@@ -292,12 +290,10 @@ static void dpaa_get_ethtool_stats(struct net_device *net_dev,
for_each_online_cpu(i) {
percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
- for (j = 0; j < DPAA_BPS_NUM; j++) {
- dpaa_bp = priv->dpaa_bps[j];
- if (!dpaa_bp->percpu_count)
- continue;
- bp_count[j] = *(per_cpu_ptr(dpaa_bp->percpu_count, i));
- }
+ dpaa_bp = priv->dpaa_bp;
+ if (!dpaa_bp->percpu_count)
+ continue;
+ bp_count = *(per_cpu_ptr(dpaa_bp->percpu_count, i));
rx_errors.dme += percpu_priv->rx_errors.dme;
rx_errors.fpe += percpu_priv->rx_errors.fpe;
rx_errors.fse += percpu_priv->rx_errors.fse;
@@ -315,7 +311,7 @@ static void dpaa_get_ethtool_stats(struct net_device *net_dev,
copy_stats(percpu_priv, num_cpus, i, bp_count, data);
}
- offset = (num_cpus + 1) * (DPAA_STATS_PERCPU_LEN + DPAA_BPS_NUM);
+ offset = (num_cpus + 1) * (DPAA_STATS_PERCPU_LEN + 1);
memcpy(data + offset, &rx_errors, sizeof(struct dpaa_rx_errors));
offset += sizeof(struct dpaa_rx_errors) / sizeof(u64);
@@ -363,18 +359,16 @@ static void dpaa_get_strings(struct net_device *net_dev, u32 stringset,
memcpy(strings, string_cpu, ETH_GSTRING_LEN);
strings += ETH_GSTRING_LEN;
}
- for (i = 0; i < DPAA_BPS_NUM; i++) {
- for (j = 0; j < num_cpus; j++) {
- snprintf(string_cpu, ETH_GSTRING_LEN,
- "bpool %c [CPU %d]", 'a' + i, j);
- memcpy(strings, string_cpu, ETH_GSTRING_LEN);
- strings += ETH_GSTRING_LEN;
- }
- snprintf(string_cpu, ETH_GSTRING_LEN, "bpool %c [TOTAL]",
- 'a' + i);
+ for (j = 0; j < num_cpus; j++) {
+ snprintf(string_cpu, ETH_GSTRING_LEN,
+ "bpool [CPU %d]", j);
memcpy(strings, string_cpu, ETH_GSTRING_LEN);
strings += ETH_GSTRING_LEN;
}
+ snprintf(string_cpu, ETH_GSTRING_LEN, "bpool [TOTAL]");
+ memcpy(strings, string_cpu, ETH_GSTRING_LEN);
+ strings += ETH_GSTRING_LEN;
+
memcpy(strings, dpaa_stats_global, size);
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/Makefile b/drivers/net/ethernet/freescale/dpaa2/Makefile
index d1e78cdd512f..69184ca3b7b9 100644
--- a/drivers/net/ethernet/freescale/dpaa2/Makefile
+++ b/drivers/net/ethernet/freescale/dpaa2/Makefile
@@ -6,7 +6,7 @@
obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o
obj-$(CONFIG_FSL_DPAA2_PTP_CLOCK) += fsl-dpaa2-ptp.o
-fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o
+fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o dpaa2-mac.o dpmac.o
fsl-dpaa2-eth-${CONFIG_DEBUG_FS} += dpaa2-eth-debugfs.o
fsl-dpaa2-ptp-objs := dpaa2-ptp.o dprtc.o
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 162d7d8fb295..c26c0a7cbb6b 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2014-2016 Freescale Semiconductor Inc.
- * Copyright 2016-2017 NXP
+ * Copyright 2016-2019 NXP
*/
#include <linux/init.h>
#include <linux/module.h>
@@ -221,6 +221,7 @@ static void xdp_release_buf(struct dpaa2_eth_priv *priv,
struct dpaa2_eth_channel *ch,
dma_addr_t addr)
{
+ int retries = 0;
int err;
ch->xdp.drop_bufs[ch->xdp.drop_cnt++] = addr;
@@ -229,8 +230,11 @@ static void xdp_release_buf(struct dpaa2_eth_priv *priv,
while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid,
ch->xdp.drop_bufs,
- ch->xdp.drop_cnt)) == -EBUSY)
+ ch->xdp.drop_cnt)) == -EBUSY) {
+ if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
+ break;
cpu_relax();
+ }
if (err) {
free_bufs(priv, ch->xdp.drop_bufs, ch->xdp.drop_cnt);
@@ -458,7 +462,7 @@ static int consume_frames(struct dpaa2_eth_channel *ch,
struct dpaa2_eth_fq *fq = NULL;
struct dpaa2_dq *dq;
const struct dpaa2_fd *fd;
- int cleaned = 0;
+ int cleaned = 0, retries = 0;
int is_last;
do {
@@ -469,6 +473,11 @@ static int consume_frames(struct dpaa2_eth_channel *ch,
* the store until we get some sort of valid response
* token (either a valid frame or an "empty dequeue")
*/
+ if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES) {
+ netdev_err_once(priv->net_dev,
+ "Unable to read a valid dequeue response\n");
+ return -ETIMEDOUT;
+ }
continue;
}
@@ -477,6 +486,7 @@ static int consume_frames(struct dpaa2_eth_channel *ch,
fq->consume(priv, ch, fd, fq);
cleaned++;
+ retries = 0;
} while (!is_last);
if (!cleaned)
@@ -949,6 +959,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv,
u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
struct page *page;
dma_addr_t addr;
+ int retries = 0;
int i, err;
for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
@@ -980,8 +991,11 @@ static int add_bufs(struct dpaa2_eth_priv *priv,
release_bufs:
/* In case the portal is busy, retry until successful */
while ((err = dpaa2_io_service_release(ch->dpio, bpid,
- buf_array, i)) == -EBUSY)
+ buf_array, i)) == -EBUSY) {
+ if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
+ break;
cpu_relax();
+ }
/* If release command failed, clean up and bail out;
* not much else we can do about it
@@ -1032,16 +1046,21 @@ static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
{
u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
+ int retries = 0;
int ret;
do {
ret = dpaa2_io_service_acquire(NULL, priv->bpid,
buf_array, count);
if (ret < 0) {
+ if (ret == -EBUSY &&
+ retries++ < DPAA2_ETH_SWP_BUSY_RETRIES)
+ continue;
netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
return;
}
free_bufs(priv, buf_array, ret);
+ retries = 0;
} while (ret);
}
@@ -1094,7 +1113,7 @@ static int pull_channel(struct dpaa2_eth_channel *ch)
ch->store);
dequeues++;
cpu_relax();
- } while (err == -EBUSY);
+ } while (err == -EBUSY && dequeues < DPAA2_ETH_SWP_BUSY_RETRIES);
ch->stats.dequeue_portal_busy += dequeues;
if (unlikely(err))
@@ -1118,6 +1137,7 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
struct netdev_queue *nq;
int store_cleaned, work_done;
struct list_head rx_list;
+ int retries = 0;
int err;
ch = container_of(napi, struct dpaa2_eth_channel, napi);
@@ -1136,7 +1156,7 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
refill_pool(priv, ch, priv->bpid);
store_cleaned = consume_frames(ch, &fq);
- if (!store_cleaned)
+ if (store_cleaned <= 0)
break;
if (fq->type == DPAA2_RX_FQ) {
rx_cleaned += store_cleaned;
@@ -1163,7 +1183,7 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
do {
err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
cpu_relax();
- } while (err == -EBUSY);
+ } while (err == -EBUSY && retries++ < DPAA2_ETH_SWP_BUSY_RETRIES);
WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
ch->nctx.desired_cpu);
@@ -1256,6 +1276,12 @@ static int link_state_update(struct dpaa2_eth_priv *priv)
!!(state.options & DPNI_LINK_OPT_ASYM_PAUSE);
dpaa2_eth_set_rx_taildrop(priv, !tx_pause);
+ /* When we manage the MAC/PHY using phylink there is no need
+ * to manually update the netif_carrier.
+ */
+ if (priv->mac)
+ goto out;
+
/* Check link state; speed / duplex changes are not treated yet */
if (priv->link_state.up == state.up)
goto out;
@@ -1292,17 +1318,21 @@ static int dpaa2_eth_open(struct net_device *net_dev)
priv->dpbp_dev->obj_desc.id, priv->bpid);
}
- /* We'll only start the txqs when the link is actually ready; make sure
- * we don't race against the link up notification, which may come
- * immediately after dpni_enable();
- */
- netif_tx_stop_all_queues(net_dev);
+ if (!priv->mac) {
+ /* We'll only start the txqs when the link is actually ready;
+ * make sure we don't race against the link up notification,
+ * which may come immediately after dpni_enable();
+ */
+ netif_tx_stop_all_queues(net_dev);
+
+ /* Also, explicitly set carrier off, otherwise
+ * netif_carrier_ok() will return true and cause 'ip link show'
+ * to report the LOWER_UP flag, even though the link
+ * notification wasn't even received.
+ */
+ netif_carrier_off(net_dev);
+ }
enable_ch_napi(priv);
- /* Also, explicitly set carrier off, otherwise netif_carrier_ok() will
- * return true and cause 'ip link show' to report the LOWER_UP flag,
- * even though the link notification wasn't even received.
- */
- netif_carrier_off(net_dev);
err = dpni_enable(priv->mc_io, 0, priv->mc_token);
if (err < 0) {
@@ -1310,13 +1340,17 @@ static int dpaa2_eth_open(struct net_device *net_dev)
goto enable_err;
}
- /* If the DPMAC object has already processed the link up interrupt,
- * we have to learn the link state ourselves.
- */
- err = link_state_update(priv);
- if (err < 0) {
- netdev_err(net_dev, "Can't update link state\n");
- goto link_state_err;
+ if (!priv->mac) {
+ /* If the DPMAC object has already processed the link up
+ * interrupt, we have to learn the link state ourselves.
+ */
+ err = link_state_update(priv);
+ if (err < 0) {
+ netdev_err(net_dev, "Can't update link state\n");
+ goto link_state_err;
+ }
+ } else {
+ phylink_start(priv->mac->phylink);
}
return 0;
@@ -1391,8 +1425,12 @@ static int dpaa2_eth_stop(struct net_device *net_dev)
int dpni_enabled = 0;
int retries = 10;
- netif_tx_stop_all_queues(net_dev);
- netif_carrier_off(net_dev);
+ if (!priv->mac) {
+ netif_tx_stop_all_queues(net_dev);
+ netif_carrier_off(net_dev);
+ } else {
+ phylink_stop(priv->mac->phylink);
+ }
/* On dpni_disable(), the MC firmware will:
* - stop MAC Rx and wait for all Rx frames to be enqueued to software
@@ -2043,7 +2081,6 @@ static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv)
{
struct fsl_mc_device *dpcon;
struct device *dev = priv->net_dev->dev.parent;
- struct dpcon_attr attrs;
int err;
err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
@@ -2068,12 +2105,6 @@ static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv)
goto close;
}
- err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs);
- if (err) {
- dev_err(dev, "dpcon_get_attributes() failed\n");
- goto close;
- }
-
err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
if (err) {
dev_err(dev, "dpcon_enable() failed\n");
@@ -2533,6 +2564,47 @@ static int set_pause(struct dpaa2_eth_priv *priv)
return 0;
}
+static void update_tx_fqids(struct dpaa2_eth_priv *priv)
+{
+ struct dpni_queue_id qid = {0};
+ struct dpaa2_eth_fq *fq;
+ struct dpni_queue queue;
+ int i, j, err;
+
+ /* We only use Tx FQIDs for FQID-based enqueue, so check
+ * if DPNI version supports it before updating FQIDs
+ */
+ if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
+ DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
+ return;
+
+ for (i = 0; i < priv->num_fqs; i++) {
+ fq = &priv->fq[i];
+ if (fq->type != DPAA2_TX_CONF_FQ)
+ continue;
+ for (j = 0; j < dpaa2_eth_tc_count(priv); j++) {
+ err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_TX, j, fq->flowid,
+ &queue, &qid);
+ if (err)
+ goto out_err;
+
+ fq->tx_fqid[j] = qid.fqid;
+ if (fq->tx_fqid[j] == 0)
+ goto out_err;
+ }
+ }
+
+ priv->enqueue = dpaa2_eth_enqueue_fq;
+
+ return;
+
+out_err:
+ netdev_info(priv->net_dev,
+ "Error reading Tx FQID, fallback to QDID-based enqueue\n");
+ priv->enqueue = dpaa2_eth_enqueue_qd;
+}
+
/* Configure the DPNI object this interface is associated with */
static int setup_dpni(struct fsl_mc_device *ls_dev)
{
@@ -3288,12 +3360,56 @@ static int poll_link_state(void *arg)
return 0;
}
+static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
+{
+ struct fsl_mc_device *dpni_dev, *dpmac_dev;
+ struct dpaa2_mac *mac;
+ int err;
+
+ dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent);
+ dpmac_dev = fsl_mc_get_endpoint(dpni_dev);
+ if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
+ return 0;
+
+ if (dpaa2_mac_is_type_fixed(dpmac_dev, priv->mc_io))
+ return 0;
+
+ mac = kzalloc(sizeof(struct dpaa2_mac), GFP_KERNEL);
+ if (!mac)
+ return -ENOMEM;
+
+ mac->mc_dev = dpmac_dev;
+ mac->mc_io = priv->mc_io;
+ mac->net_dev = priv->net_dev;
+
+ err = dpaa2_mac_connect(mac);
+ if (err) {
+ netdev_err(priv->net_dev, "Error connecting to the MAC endpoint\n");
+ kfree(mac);
+ return err;
+ }
+ priv->mac = mac;
+
+ return 0;
+}
+
+static void dpaa2_eth_disconnect_mac(struct dpaa2_eth_priv *priv)
+{
+ if (!priv->mac)
+ return;
+
+ dpaa2_mac_disconnect(priv->mac);
+ kfree(priv->mac);
+ priv->mac = NULL;
+}
+
static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
{
u32 status = ~0;
struct device *dev = (struct device *)arg;
struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
struct net_device *net_dev = dev_get_drvdata(dev);
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
int err;
err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
@@ -3306,6 +3422,18 @@ static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
link_state_update(netdev_priv(net_dev));
+ if (status & DPNI_IRQ_EVENT_ENDPOINT_CHANGED) {
+ set_mac_addr(netdev_priv(net_dev));
+ update_tx_fqids(priv);
+
+ rtnl_lock();
+ if (priv->mac)
+ dpaa2_eth_disconnect_mac(priv);
+ else
+ dpaa2_eth_connect_mac(priv);
+ rtnl_unlock();
+ }
+
return IRQ_HANDLED;
}
@@ -3331,7 +3459,8 @@ static int setup_irqs(struct fsl_mc_device *ls_dev)
}
err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
- DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED);
+ DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED |
+ DPNI_IRQ_EVENT_ENDPOINT_CHANGED);
if (err < 0) {
dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err);
goto free_irq;
@@ -3479,6 +3608,10 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
priv->do_link_poll = true;
}
+ err = dpaa2_eth_connect_mac(priv);
+ if (err)
+ goto err_connect_mac;
+
err = register_netdev(net_dev);
if (err < 0) {
dev_err(dev, "register_netdev() failed\n");
@@ -3493,6 +3626,8 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
return 0;
err_netdev_reg:
+ dpaa2_eth_disconnect_mac(priv);
+err_connect_mac:
if (priv->do_link_poll)
kthread_stop(priv->poll_thread);
else
@@ -3535,6 +3670,10 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
#ifdef CONFIG_DEBUG_FS
dpaa2_dbg_remove(priv);
#endif
+ rtnl_lock();
+ dpaa2_eth_disconnect_mac(priv);
+ rtnl_unlock();
+
unregister_netdev(net_dev);
if (priv->do_link_poll)
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index 8a0e65b3267f..7635db3ef903 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -17,6 +17,7 @@
#include "dpaa2-eth-trace.h"
#include "dpaa2-eth-debugfs.h"
+#include "dpaa2-mac.h"
#define DPAA2_WRIOP_VERSION(x, y, z) ((x) << 10 | (y) << 5 | (z) << 0)
@@ -245,6 +246,14 @@ static inline struct dpaa2_faead *dpaa2_get_faead(void *buf_addr, bool swa)
*/
#define DPAA2_ETH_ENQUEUE_RETRIES 10
+/* Number of times to retry DPIO portal operations while waiting
+ * for the portal to finish executing the current command and become
+ * available. We want to avoid being stuck in a while loop in case the
+ * hardware becomes unresponsive, but also not to give up too easily if
+ * the portal really is busy for valid reasons.
+ */
+#define DPAA2_ETH_SWP_BUSY_RETRIES 1000
+
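A minimal sketch of the bounded-retry idiom this constant caps, as applied to the portal accesses in dpaa2-eth.c above; portal_op_retry() is a made-up wrapper, not part of the patch:

static int portal_op_retry(int (*op)(void *), void *arg)
{
	int retries = 0;
	int err;

	while ((err = op(arg)) == -EBUSY) {
		if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
			break;		/* hardware looks unresponsive */
		cpu_relax();		/* busy for a valid reason, spin */
	}
	return err;
}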
/* Driver statistics, other than those in struct rtnl_link_stats64.
* These are usually collected per-CPU and aggregated by ethtool.
*/
@@ -407,6 +416,8 @@ struct dpaa2_eth_priv {
#ifdef CONFIG_DEBUG_FS
struct dpaa2_debugfs dbg;
#endif
+
+ struct dpaa2_mac *mac;
};
#define DPAA2_RXH_SUPPORTED (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO \
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index 0aa1c34019bb..0883620631b8 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -85,6 +85,10 @@ dpaa2_eth_get_link_ksettings(struct net_device *net_dev,
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ if (priv->mac)
+ return phylink_ethtool_ksettings_get(priv->mac->phylink,
+ link_settings);
+
link_settings->base.autoneg = AUTONEG_DISABLE;
if (!(priv->link_state.options & DPNI_LINK_OPT_HALF_DUPLEX))
link_settings->base.duplex = DUPLEX_FULL;
@@ -93,12 +97,29 @@ dpaa2_eth_get_link_ksettings(struct net_device *net_dev,
return 0;
}
+static int
+dpaa2_eth_set_link_ksettings(struct net_device *net_dev,
+ const struct ethtool_link_ksettings *link_settings)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+ if (!priv->mac)
+ return -EOPNOTSUPP;
+
+ return phylink_ethtool_ksettings_set(priv->mac->phylink, link_settings);
+}
+
static void dpaa2_eth_get_pauseparam(struct net_device *net_dev,
struct ethtool_pauseparam *pause)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
u64 link_options = priv->link_state.options;
+ if (priv->mac) {
+ phylink_ethtool_get_pauseparam(priv->mac->phylink, pause);
+ return;
+ }
+
pause->rx_pause = !!(link_options & DPNI_LINK_OPT_PAUSE);
pause->tx_pause = pause->rx_pause ^
!!(link_options & DPNI_LINK_OPT_ASYM_PAUSE);
@@ -118,6 +139,9 @@ static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
return -EOPNOTSUPP;
}
+ if (priv->mac)
+ return phylink_ethtool_set_pauseparam(priv->mac->phylink,
+ pause);
if (pause->autoneg)
return -EOPNOTSUPP;
@@ -216,7 +240,7 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
if (err == -EINVAL)
/* Older firmware versions don't support all pages */
memset(&dpni_stats, 0, sizeof(dpni_stats));
- else
+ else if (err)
netdev_warn(net_dev, "dpni_get_stats(%d) failed\n", j);
num_cnt = dpni_stats_page_size[j] / sizeof(u64);
@@ -728,6 +752,7 @@ const struct ethtool_ops dpaa2_ethtool_ops = {
.get_drvinfo = dpaa2_eth_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_link_ksettings = dpaa2_eth_get_link_ksettings,
+ .set_link_ksettings = dpaa2_eth_set_link_ksettings,
.get_pauseparam = dpaa2_eth_get_pauseparam,
.set_pauseparam = dpaa2_eth_set_pauseparam,
.get_sset_count = dpaa2_eth_get_sset_count,
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
new file mode 100644
index 000000000000..b713739f4804
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
@@ -0,0 +1,302 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2019 NXP */
+
+#include "dpaa2-eth.h"
+#include "dpaa2-mac.h"
+
+#define phylink_to_dpaa2_mac(config) \
+ container_of((config), struct dpaa2_mac, phylink_config)
+
+static phy_interface_t phy_mode(enum dpmac_eth_if eth_if)
+{
+ switch (eth_if) {
+ case DPMAC_ETH_IF_RGMII:
+ return PHY_INTERFACE_MODE_RGMII;
+ default:
+ return -EINVAL;
+ }
+}
+
+/* Caller must call of_node_put on the returned value */
+static struct device_node *dpaa2_mac_get_node(u16 dpmac_id)
+{
+ struct device_node *dpmacs, *dpmac = NULL;
+ u32 id;
+ int err;
+
+ dpmacs = of_find_node_by_name(NULL, "dpmacs");
+ if (!dpmacs)
+ return NULL;
+
+ while ((dpmac = of_get_next_child(dpmacs, dpmac)) != NULL) {
+ err = of_property_read_u32(dpmac, "reg", &id);
+ if (err)
+ continue;
+ if (id == dpmac_id)
+ break;
+ }
+
+ of_node_put(dpmacs);
+
+ return dpmac;
+}
+
+static int dpaa2_mac_get_if_mode(struct device_node *node,
+ struct dpmac_attr attr)
+{
+ phy_interface_t if_mode;
+ int err;
+
+ err = of_get_phy_mode(node, &if_mode);
+ if (!err)
+ return if_mode;
+
+ if_mode = phy_mode(attr.eth_if);
+ if (if_mode >= 0)
+ return if_mode;
+
+ return -ENODEV;
+}
+
+static bool dpaa2_mac_phy_mode_mismatch(struct dpaa2_mac *mac,
+ phy_interface_t interface)
+{
+ switch (interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ return (interface != mac->if_mode);
+ default:
+ return true;
+ }
+}
+
+static void dpaa2_mac_validate(struct phylink_config *config,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ struct dpaa2_mac *mac = phylink_to_dpaa2_mac(config);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+ if (state->interface != PHY_INTERFACE_MODE_NA &&
+ dpaa2_mac_phy_mode_mismatch(mac, state->interface)) {
+ goto empty_set;
+ }
+
+ phylink_set_port_modes(mask);
+ phylink_set(mask, Autoneg);
+ phylink_set(mask, Pause);
+ phylink_set(mask, Asym_Pause);
+
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Full);
+ phylink_set(mask, 1000baseT_Full);
+ break;
+ default:
+ goto empty_set;
+ }
+
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
+
+ return;
+
+empty_set:
+ linkmode_zero(supported);
+}
+
+static void dpaa2_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct dpaa2_mac *mac = phylink_to_dpaa2_mac(config);
+ struct dpmac_link_state *dpmac_state = &mac->state;
+ int err;
+
+ if (state->speed != SPEED_UNKNOWN)
+ dpmac_state->rate = state->speed;
+
+ if (state->duplex != DUPLEX_UNKNOWN) {
+ if (!state->duplex)
+ dpmac_state->options |= DPMAC_LINK_OPT_HALF_DUPLEX;
+ else
+ dpmac_state->options &= ~DPMAC_LINK_OPT_HALF_DUPLEX;
+ }
+
+ if (state->an_enabled)
+ dpmac_state->options |= DPMAC_LINK_OPT_AUTONEG;
+ else
+ dpmac_state->options &= ~DPMAC_LINK_OPT_AUTONEG;
+
+ if (state->pause & MLO_PAUSE_RX)
+ dpmac_state->options |= DPMAC_LINK_OPT_PAUSE;
+ else
+ dpmac_state->options &= ~DPMAC_LINK_OPT_PAUSE;
+
+ if (!!(state->pause & MLO_PAUSE_RX) ^ !!(state->pause & MLO_PAUSE_TX))
+ dpmac_state->options |= DPMAC_LINK_OPT_ASYM_PAUSE;
+ else
+ dpmac_state->options &= ~DPMAC_LINK_OPT_ASYM_PAUSE;
+
+ err = dpmac_set_link_state(mac->mc_io, 0,
+ mac->mc_dev->mc_handle, dpmac_state);
+ if (err)
+ netdev_err(mac->net_dev, "dpmac_set_link_state() = %d\n", err);
+}
+
+static void dpaa2_mac_link_up(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface, struct phy_device *phy)
+{
+ struct dpaa2_mac *mac = phylink_to_dpaa2_mac(config);
+ struct dpmac_link_state *dpmac_state = &mac->state;
+ int err;
+
+ dpmac_state->up = 1;
+ err = dpmac_set_link_state(mac->mc_io, 0,
+ mac->mc_dev->mc_handle, dpmac_state);
+ if (err)
+ netdev_err(mac->net_dev, "dpmac_set_link_state() = %d\n", err);
+}
+
+static void dpaa2_mac_link_down(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct dpaa2_mac *mac = phylink_to_dpaa2_mac(config);
+ struct dpmac_link_state *dpmac_state = &mac->state;
+ int err;
+
+ dpmac_state->up = 0;
+ err = dpmac_set_link_state(mac->mc_io, 0,
+ mac->mc_dev->mc_handle, dpmac_state);
+ if (err)
+ netdev_err(mac->net_dev, "dpmac_set_link_state() = %d\n", err);
+}
+
+static const struct phylink_mac_ops dpaa2_mac_phylink_ops = {
+ .validate = dpaa2_mac_validate,
+ .mac_config = dpaa2_mac_config,
+ .mac_link_up = dpaa2_mac_link_up,
+ .mac_link_down = dpaa2_mac_link_down,
+};
+
+bool dpaa2_mac_is_type_fixed(struct fsl_mc_device *dpmac_dev,
+ struct fsl_mc_io *mc_io)
+{
+ struct dpmac_attr attr;
+ bool fixed = false;
+ u16 mc_handle = 0;
+ int err;
+
+ err = dpmac_open(mc_io, 0, dpmac_dev->obj_desc.id,
+ &mc_handle);
+ if (err || !mc_handle)
+ return false;
+
+ err = dpmac_get_attributes(mc_io, 0, mc_handle, &attr);
+ if (err)
+ goto out;
+
+ if (attr.link_type == DPMAC_LINK_TYPE_FIXED)
+ fixed = true;
+
+out:
+ dpmac_close(mc_io, 0, mc_handle);
+
+ return fixed;
+}
+
+int dpaa2_mac_connect(struct dpaa2_mac *mac)
+{
+ struct fsl_mc_device *dpmac_dev = mac->mc_dev;
+ struct net_device *net_dev = mac->net_dev;
+ struct device_node *dpmac_node;
+ struct phylink *phylink;
+ struct dpmac_attr attr;
+ int err;
+
+ err = dpmac_open(mac->mc_io, 0, dpmac_dev->obj_desc.id,
+ &dpmac_dev->mc_handle);
+ if (err || !dpmac_dev->mc_handle) {
+ netdev_err(net_dev, "dpmac_open() = %d\n", err);
+ return -ENODEV;
+ }
+
+ err = dpmac_get_attributes(mac->mc_io, 0, dpmac_dev->mc_handle, &attr);
+ if (err) {
+ netdev_err(net_dev, "dpmac_get_attributes() = %d\n", err);
+ goto err_close_dpmac;
+ }
+
+ dpmac_node = dpaa2_mac_get_node(attr.id);
+ if (!dpmac_node) {
+ netdev_err(net_dev, "No dpmac@%d node found.\n", attr.id);
+ err = -ENODEV;
+ goto err_close_dpmac;
+ }
+
+ err = dpaa2_mac_get_if_mode(dpmac_node, attr);
+ if (err < 0) {
+ err = -EINVAL;
+ goto err_put_node;
+ }
+ mac->if_mode = err;
+
+ /* The MAC does not have the capability to add RGMII delays, so
+ * error out if the interface mode requests them and there is no PHY
+ * to act upon them.
+ */
+ if (of_phy_is_fixed_link(dpmac_node) &&
+ (mac->if_mode == PHY_INTERFACE_MODE_RGMII_ID ||
+ mac->if_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
+ mac->if_mode == PHY_INTERFACE_MODE_RGMII_TXID)) {
+ netdev_err(net_dev, "RGMII delay not supported\n");
+ err = -EINVAL;
+ goto err_put_node;
+ }
+
+ mac->phylink_config.dev = &net_dev->dev;
+ mac->phylink_config.type = PHYLINK_NETDEV;
+
+ phylink = phylink_create(&mac->phylink_config,
+ of_fwnode_handle(dpmac_node), mac->if_mode,
+ &dpaa2_mac_phylink_ops);
+ if (IS_ERR(phylink)) {
+ err = PTR_ERR(phylink);
+ goto err_put_node;
+ }
+ mac->phylink = phylink;
+
+ err = phylink_of_phy_connect(mac->phylink, dpmac_node, 0);
+ if (err) {
+ netdev_err(net_dev, "phylink_of_phy_connect() = %d\n", err);
+ goto err_phylink_destroy;
+ }
+
+ of_node_put(dpmac_node);
+
+ return 0;
+
+err_phylink_destroy:
+ phylink_destroy(mac->phylink);
+err_put_node:
+ of_node_put(dpmac_node);
+err_close_dpmac:
+ dpmac_close(mac->mc_io, 0, dpmac_dev->mc_handle);
+ return err;
+}
+
+void dpaa2_mac_disconnect(struct dpaa2_mac *mac)
+{
+ if (!mac->phylink)
+ return;
+
+ phylink_disconnect_phy(mac->phylink);
+ phylink_destroy(mac->phylink);
+ dpmac_close(mac->mc_io, 0, mac->mc_dev->mc_handle);
+}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
new file mode 100644
index 000000000000..8634d0de7ef3
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2019 NXP */
+#ifndef DPAA2_MAC_H
+#define DPAA2_MAC_H
+
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/phylink.h>
+
+#include "dpmac.h"
+#include "dpmac-cmd.h"
+
+struct dpaa2_mac {
+ struct fsl_mc_device *mc_dev;
+ struct dpmac_link_state state;
+ struct net_device *net_dev;
+ struct fsl_mc_io *mc_io;
+
+ struct phylink_config phylink_config;
+ struct phylink *phylink;
+ phy_interface_t if_mode;
+};
+
+bool dpaa2_mac_is_type_fixed(struct fsl_mc_device *dpmac_dev,
+ struct fsl_mc_io *mc_io);
+
+int dpaa2_mac_connect(struct dpaa2_mac *mac);
+
+void dpaa2_mac_disconnect(struct dpaa2_mac *mac);
+
+#endif /* DPAA2_MAC_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h
index ff2e177395d4..df2458a5e9ef 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2018 NXP
*/
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h
new file mode 100644
index 000000000000..96a9b0d0992e
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2019 NXP
+ */
+#ifndef _FSL_DPMAC_CMD_H
+#define _FSL_DPMAC_CMD_H
+
+/* DPMAC Version */
+#define DPMAC_VER_MAJOR 4
+#define DPMAC_VER_MINOR 4
+#define DPMAC_CMD_BASE_VERSION 1
+#define DPMAC_CMD_2ND_VERSION 2
+#define DPMAC_CMD_ID_OFFSET 4
+
+#define DPMAC_CMD(id) (((id) << DPMAC_CMD_ID_OFFSET) | DPMAC_CMD_BASE_VERSION)
+#define DPMAC_CMD_V2(id) (((id) << DPMAC_CMD_ID_OFFSET) | DPMAC_CMD_2ND_VERSION)
+
+/* Command IDs */
+#define DPMAC_CMDID_CLOSE DPMAC_CMD(0x800)
+#define DPMAC_CMDID_OPEN DPMAC_CMD(0x80c)
+
+#define DPMAC_CMDID_GET_ATTR DPMAC_CMD(0x004)
+#define DPMAC_CMDID_SET_LINK_STATE DPMAC_CMD_V2(0x0c3)
+
+/* Macros for accessing command fields smaller than 1 byte */
+#define DPMAC_MASK(field) \
+ GENMASK(DPMAC_##field##_SHIFT + DPMAC_##field##_SIZE - 1, \
+ DPMAC_##field##_SHIFT)
+
+#define dpmac_set_field(var, field, val) \
+ ((var) |= (((val) << DPMAC_##field##_SHIFT) & DPMAC_MASK(field)))
+#define dpmac_get_field(var, field) \
+ (((var) & DPMAC_MASK(field)) >> DPMAC_##field##_SHIFT)
+
+struct dpmac_cmd_open {
+ __le32 dpmac_id;
+};
+
+struct dpmac_rsp_get_attributes {
+ u8 eth_if;
+ u8 link_type;
+ __le16 id;
+ __le32 max_rate;
+};
+
+#define DPMAC_STATE_SIZE 1
+#define DPMAC_STATE_SHIFT 0
+#define DPMAC_STATE_VALID_SIZE 1
+#define DPMAC_STATE_VALID_SHIFT 1
+
+struct dpmac_cmd_set_link_state {
+ __le64 options;
+ __le32 rate;
+ __le32 pad0;
+ /* from lsb: up:1, state_valid:1 */
+ u8 state;
+ u8 pad1[7];
+ __le64 supported;
+ __le64 advertising;
+};
+
+#endif /* _FSL_DPMAC_CMD_H */
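A worked example (not part of the patch) of the sub-byte accessors defined above: with DPMAC_STATE_SHIFT 0 and DPMAC_STATE_VALID_SHIFT 1, both one bit wide, up = 1 and state_valid = 1 pack to 0x03, which is exactly how dpmac_set_link_state() builds the state byte:

static u8 dpmac_pack_state(int up, int state_valid)
{
	u8 state = 0;

	dpmac_set_field(state, STATE, up);		  /* bit 0 */
	dpmac_set_field(state, STATE_VALID, state_valid); /* bit 1 */
	return state;	/* 0x03 for up = 1, state_valid = 1 */
}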
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpmac.c b/drivers/net/ethernet/freescale/dpaa2/dpmac.c
new file mode 100644
index 000000000000..b75189deffb1
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpmac.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2019 NXP
+ */
+#include <linux/fsl/mc.h>
+#include "dpmac.h"
+#include "dpmac-cmd.h"
+
+/**
+ * dpmac_open() - Open a control session for the specified object.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpmac_id: DPMAC unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpmac_create function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpmac_id,
+ u16 *token)
+{
+ struct dpmac_cmd_open *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ cmd_params = (struct dpmac_cmd_open *)cmd.params;
+ cmd_params->dpmac_id = cpu_to_le32(dpmac_id);
+
+ /* send command to MC */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return err;
+}
+
+/**
+ * dpmac_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPMAC object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CLOSE, cmd_flags,
+ token);
+
+ /* send command to MC */
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpmac_get_attributes() - Retrieve DPMAC attributes.
+ *
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPMAC object
+ * @attr: Returned object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpmac_attr *attr)
+{
+ struct dpmac_rsp_get_attributes *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+
+ /* send command to MC */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpmac_rsp_get_attributes *)cmd.params;
+ attr->eth_if = rsp_params->eth_if;
+ attr->link_type = rsp_params->link_type;
+ attr->id = le16_to_cpu(rsp_params->id);
+ attr->max_rate = le32_to_cpu(rsp_params->max_rate);
+
+ return 0;
+}
+
+/**
+ * dpmac_set_link_state() - Set the Ethernet link status
+ * @mc_io: Pointer to opaque I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPMAC object
+ * @link_state: Link state configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_set_link_state(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpmac_link_state *link_state)
+{
+ struct dpmac_cmd_set_link_state *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_LINK_STATE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpmac_cmd_set_link_state *)cmd.params;
+ cmd_params->options = cpu_to_le64(link_state->options);
+ cmd_params->rate = cpu_to_le32(link_state->rate);
+ dpmac_set_field(cmd_params->state, STATE, link_state->up);
+ dpmac_set_field(cmd_params->state, STATE_VALID,
+ link_state->state_valid);
+ cmd_params->supported = cpu_to_le64(link_state->supported);
+ cmd_params->advertising = cpu_to_le64(link_state->advertising);
+
+ /* send command to MC */
+ return mc_send_command(mc_io, &cmd);
+}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpmac.h b/drivers/net/ethernet/freescale/dpaa2/dpmac.h
new file mode 100644
index 000000000000..4efc410a479e
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpmac.h
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2019 NXP
+ */
+#ifndef __FSL_DPMAC_H
+#define __FSL_DPMAC_H
+
+/* Data Path MAC API
+ * Contains initialization APIs and runtime control APIs for DPMAC
+ */
+
+struct fsl_mc_io;
+
+int dpmac_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpmac_id,
+ u16 *token);
+
+int dpmac_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+/**
+ * enum dpmac_link_type - DPMAC link type
+ * @DPMAC_LINK_TYPE_NONE: No link
+ * @DPMAC_LINK_TYPE_FIXED: Link is fixed type
+ * @DPMAC_LINK_TYPE_PHY: Link by PHY ID
+ * @DPMAC_LINK_TYPE_BACKPLANE: Backplane link type
+ */
+enum dpmac_link_type {
+ DPMAC_LINK_TYPE_NONE,
+ DPMAC_LINK_TYPE_FIXED,
+ DPMAC_LINK_TYPE_PHY,
+ DPMAC_LINK_TYPE_BACKPLANE
+};
+
+/**
+ * enum dpmac_eth_if - DPMAC Ethernet interface
+ * @DPMAC_ETH_IF_MII: MII interface
+ * @DPMAC_ETH_IF_RMII: RMII interface
+ * @DPMAC_ETH_IF_SMII: SMII interface
+ * @DPMAC_ETH_IF_GMII: GMII interface
+ * @DPMAC_ETH_IF_RGMII: RGMII interface
+ * @DPMAC_ETH_IF_SGMII: SGMII interface
+ * @DPMAC_ETH_IF_QSGMII: QSGMII interface
+ * @DPMAC_ETH_IF_XAUI: XAUI interface
+ * @DPMAC_ETH_IF_XFI: XFI interface
+ * @DPMAC_ETH_IF_CAUI: CAUI interface
+ * @DPMAC_ETH_IF_1000BASEX: 1000BASEX interface
+ * @DPMAC_ETH_IF_USXGMII: USXGMII interface
+ */
+enum dpmac_eth_if {
+ DPMAC_ETH_IF_MII,
+ DPMAC_ETH_IF_RMII,
+ DPMAC_ETH_IF_SMII,
+ DPMAC_ETH_IF_GMII,
+ DPMAC_ETH_IF_RGMII,
+ DPMAC_ETH_IF_SGMII,
+ DPMAC_ETH_IF_QSGMII,
+ DPMAC_ETH_IF_XAUI,
+ DPMAC_ETH_IF_XFI,
+ DPMAC_ETH_IF_CAUI,
+ DPMAC_ETH_IF_1000BASEX,
+ DPMAC_ETH_IF_USXGMII,
+};
+
+/**
+ * struct dpmac_attr - Structure representing DPMAC attributes
+ * @id: DPMAC object ID
+ * @max_rate: Maximum supported rate - in Mbps
+ * @eth_if: Ethernet interface
+ * @link_type: link type
+ */
+struct dpmac_attr {
+ u16 id;
+ u32 max_rate;
+ enum dpmac_eth_if eth_if;
+ enum dpmac_link_type link_type;
+};
+
+int dpmac_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpmac_attr *attr);
+
+/**
+ * DPMAC link configuration/state options
+ */
+
+/**
+ * Enable auto-negotiation
+ */
+#define DPMAC_LINK_OPT_AUTONEG BIT_ULL(0)
+/**
+ * Enable half-duplex mode
+ */
+#define DPMAC_LINK_OPT_HALF_DUPLEX BIT_ULL(1)
+/**
+ * Enable pause frames
+ */
+#define DPMAC_LINK_OPT_PAUSE BIT_ULL(2)
+/**
+ * Enable asymmetric pause frames
+ */
+#define DPMAC_LINK_OPT_ASYM_PAUSE BIT_ULL(3)
+
+/**
+ * Advertised link speeds
+ */
+#define DPMAC_ADVERTISED_10BASET_FULL BIT_ULL(0)
+#define DPMAC_ADVERTISED_100BASET_FULL BIT_ULL(1)
+#define DPMAC_ADVERTISED_1000BASET_FULL BIT_ULL(2)
+#define DPMAC_ADVERTISED_10000BASET_FULL BIT_ULL(4)
+#define DPMAC_ADVERTISED_2500BASEX_FULL BIT_ULL(5)
+
+/**
+ * Advertise auto-negotiation enable
+ */
+#define DPMAC_ADVERTISED_AUTONEG BIT_ULL(3)
+
+/**
+ * struct dpmac_link_state - DPMAC link configuration request
+ * @rate: Rate in Mbps
+ * @options: Enable/Disable DPMAC link cfg features (bitmap)
+ * @up: Link state
+ * @state_valid: Ignore/Update the state of the link
+ * @supported: Speed capabilities of the PHY (bitmap)
+ * @advertising: Speeds that are advertised for autoneg (bitmap)
+ */
+struct dpmac_link_state {
+ u32 rate;
+ u64 options;
+ int up;
+ int state_valid;
+ u64 supported;
+ u64 advertising;
+};
+
+int dpmac_set_link_state(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpmac_link_state *link_state);
+
+#endif /* __FSL_DPMAC_H */
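A hypothetical caller-side sketch of the API above: requesting a fixed 1 Gbps full-duplex link. The field values mirror what dpaa2_mac_config() derives from phylink state, and mc_io plus token are assumed to come from a prior dpmac_open():

static int dpmac_force_1g_up(struct fsl_mc_io *mc_io, u16 token)
{
	struct dpmac_link_state state = {
		.rate = 1000,		/* Mbps */
		.options = 0,		/* full duplex, no pause, no aneg */
		.up = 1,
		.state_valid = 1,	/* do update the link state */
	};

	return dpmac_set_link_state(mc_io, 0, token, &state);
}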
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.h b/drivers/net/ethernet/freescale/dpaa2/dpni.h
index fd583911b6c0..ee0711d06b3a 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.h
@@ -133,9 +133,12 @@ int dpni_reset(struct fsl_mc_io *mc_io,
*/
#define DPNI_IRQ_INDEX 0
/**
- * IRQ event - indicates a change in link state
+ * IRQ events:
+ * indicates a change in link state
+ * indicates a change in endpoint
*/
#define DPNI_IRQ_EVENT_LINK_CHANGED 0x00000001
+#define DPNI_IRQ_EVENT_ENDPOINT_CHANGED 0x00000002
int dpni_set_irq_enable(struct fsl_mc_io *mc_io,
u32 cmd_flags,
diff --git a/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h
index 720cd50f5895..4ac05bfef338 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2013-2016 Freescale Semiconductor Inc.
* Copyright 2016-2018 NXP
diff --git a/drivers/net/ethernet/freescale/dpaa2/dprtc.h b/drivers/net/ethernet/freescale/dpaa2/dprtc.h
index be7914c1634d..311c184e1aef 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dprtc.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dprtc.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2013-2016 Freescale Semiconductor Inc.
* Copyright 2016-2018 NXP
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index b73421c3e25b..7da79b816416 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -784,8 +784,8 @@ static int enetc_of_get_phy(struct enetc_ndev_priv *priv)
}
}
- priv->if_mode = of_get_phy_mode(np);
- if ((int)priv->if_mode < 0) {
+ err = of_get_phy_mode(np, &priv->if_mode);
+ if (err) {
dev_err(priv->dev, "missing phy type\n");
of_node_put(priv->phy_node);
if (of_phy_is_fixed_link(np))
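The conversion above (repeated in several drivers below) moves of_get_phy_mode() to a 0/-errno return, with the mode delivered through an output parameter, so the mode variable is a plain phy_interface_t and never holds a negative error cast into an enum. A minimal sketch of the new contract in isolation:

#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/phy.h>

static phy_interface_t example_get_mode(struct device_node *np)
{
        phy_interface_t mode;
        int err;

        err = of_get_phy_mode(np, &mode);       /* 0 or negative errno */
        if (err)
                return PHY_INTERFACE_MODE_NA;   /* caller picks a fallback */

        return mode;
}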
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index d4d4c72adf49..d4d6c2e941f1 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -2706,7 +2706,6 @@ static void fec_enet_free_buffers(struct net_device *ndev)
for (q = 0; q < fep->num_tx_queues; q++) {
txq = fep->tx_queue[q];
- bdp = txq->bd.base;
for (i = 0; i < txq->bd.ring_size; i++) {
kfree(txq->tx_bounce[i]);
txq->tx_bounce[i] = NULL;
@@ -3394,6 +3393,7 @@ fec_probe(struct platform_device *pdev)
{
struct fec_enet_private *fep;
struct fec_platform_data *pdata;
+ phy_interface_t interface;
struct net_device *ndev;
int i, irq, ret = 0;
const struct of_device_id *of_id;
@@ -3466,15 +3466,15 @@ fec_probe(struct platform_device *pdev)
}
fep->phy_node = phy_node;
- ret = of_get_phy_mode(pdev->dev.of_node);
- if (ret < 0) {
+ ret = of_get_phy_mode(pdev->dev.of_node, &interface);
+ if (ret) {
pdata = dev_get_platdata(&pdev->dev);
if (pdata)
fep->phy_interface = pdata->phy;
else
fep->phy_interface = PHY_INTERFACE_MODE_MII;
} else {
- fep->phy_interface = ret;
+ fep->phy_interface = interface;
}
fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
@@ -3558,7 +3558,7 @@ fec_probe(struct platform_device *pdev)
for (i = 0; i < irq_cnt; i++) {
snprintf(irq_name, sizeof(irq_name), "int%d", i);
- irq = platform_get_irq_byname(pdev, irq_name);
+ irq = platform_get_irq_byname_optional(pdev, irq_name);
if (irq < 0)
irq = platform_get_irq(pdev, i);
if (irq < 0) {
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 19e2365be7d8..945643c02615 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -600,9 +600,9 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx)
INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep);
- irq = platform_get_irq_byname(pdev, "pps");
+ irq = platform_get_irq_byname_optional(pdev, "pps");
if (irq < 0)
- irq = platform_get_irq(pdev, irq_idx);
+ irq = platform_get_irq_optional(pdev, irq_idx);
/* Failure to get an irq is not fatal,
* only the PTP_CLOCK_PPS clock events should stop
*/
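platform_get_irq_byname_optional() and platform_get_irq_optional() behave like their non-optional counterparts but do not log an error when the IRQ is absent, which fits this "PPS is nice to have" lookup. A minimal sketch of the fallback pattern:

#include <linux/platform_device.h>

static int example_get_pps_irq(struct platform_device *pdev, int idx)
{
        int irq;

        irq = platform_get_irq_byname_optional(pdev, "pps");
        if (irq < 0)
                irq = platform_get_irq_optional(pdev, idx);

        return irq;     /* may be negative; caller treats that as "no PPS" */
}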
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index 210749bf1eac..934111def0be 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -634,6 +634,9 @@ static void set_port_liodn(struct fman *fman, u8 port_id,
{
u32 tmp;
+ iowrite32be(liodn_ofst, &fman->bmi_regs->fmbm_spliodn[port_id - 1]);
+ if (!IS_ENABLED(CONFIG_FSL_PAMU))
+ return;
/* set LIODN base for this port */
tmp = ioread32be(&fman->dma_regs->fmdmplr[port_id / 2]);
if (port_id % 2) {
@@ -644,7 +647,6 @@ static void set_port_liodn(struct fman *fman, u8 port_id,
tmp |= liodn_base << DMA_LIODN_SHIFT;
}
iowrite32be(tmp, &fman->dma_regs->fmdmplr[port_id / 2]);
- iowrite32be(liodn_ofst, &fman->bmi_regs->fmbm_spliodn[port_id - 1]);
}
static void enable_rams_ecc(struct fman_fpm_regs __iomem *fpm_rg)
@@ -1942,6 +1944,8 @@ static int fman_init(struct fman *fman)
fman->liodn_offset[i] =
ioread32be(&fman->bmi_regs->fmbm_spliodn[i - 1]);
+ if (!IS_ENABLED(CONFIG_FSL_PAMU))
+ continue;
liodn_base = ioread32be(&fman->dma_regs->fmdmplr[i / 2]);
if (i % 2) {
/* FMDM_PLR LSB holds LIODN base for odd ports */
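IS_ENABLED() evaluates to a compile-time 0 or 1, so the early returns added above let the compiler discard the FMDM_PLR read-modify-write entirely on kernels built without CONFIG_FSL_PAMU. An illustrative sketch, with a hypothetical CONFIG_FOO and register:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/kconfig.h>

static void example_guarded_setup(void __iomem *reg, u32 val)
{
        writel(val, reg);               /* always required */

        if (!IS_ENABLED(CONFIG_FOO))    /* compile-time 0/1; branch folds away */
                return;

        writel(val | BIT(0), reg);      /* only meaningful when CONFIG_FOO=y */
}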
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
index ee82ee1384eb..87b26f063cc8 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.c
+++ b/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -435,7 +435,6 @@ struct fman_port_cfg {
struct fman_port_rx_pools_params {
u8 num_of_pools;
- u16 second_largest_buf_size;
u16 largest_buf_size;
};
@@ -946,8 +945,6 @@ static int set_ext_buffer_pools(struct fman_port *port)
port->rx_pools_params.num_of_pools = ext_buf_pools->num_of_pools_used;
port->rx_pools_params.largest_buf_size =
sizes_array[ordered_array[ext_buf_pools->num_of_pools_used - 1]];
- port->rx_pools_params.second_largest_buf_size =
- sizes_array[ordered_array[ext_buf_pools->num_of_pools_used - 2]];
/* FMBM_RMPD reg. - pool depletion */
if (buf_pool_depletion->pools_grp_mode_enable) {
@@ -1728,6 +1725,20 @@ u32 fman_port_get_qman_channel_id(struct fman_port *port)
}
EXPORT_SYMBOL(fman_port_get_qman_channel_id);
+/**
+ * fman_port_get_device - get the 'struct device' of an FMan port
+ * @port: Pointer to the FMan port device
+ *
+ * Get the 'struct device' associated with the specified FMan port device
+ *
+ * Return: pointer to associated 'struct device'
+ */
+struct device *fman_port_get_device(struct fman_port *port)
+{
+ return port->dev;
+}
+EXPORT_SYMBOL(fman_port_get_device);
+
int fman_port_get_hash_result_offset(struct fman_port *port, u32 *offset)
{
if (port->buffer_offsets.hash_result_offset == ILLEGAL_BASE)
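The accessor exists so a consumer holding only the opaque struct fman_port can reach the underlying struct device. A hypothetical use (not from the patch), e.g. to fetch driver data:

#include <linux/device.h>
#include "fman_port.h"

static void *example_port_drvdata(struct fman_port *port)
{
        struct device *dev = fman_port_get_device(port);

        return dev_get_drvdata(dev);
}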
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.h b/drivers/net/ethernet/freescale/fman/fman_port.h
index 9dbb69f40121..82f12661a46d 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.h
+++ b/drivers/net/ethernet/freescale/fman/fman_port.h
@@ -157,4 +157,6 @@ int fman_port_get_tstamp(struct fman_port *port, const void *data, u64 *tstamp);
struct fman_port *fman_port_bind(struct device *dev);
+struct device *fman_port_get_device(struct fman_port *port);
+
#endif /* __FMAN_PORT_H */
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 7ab8095db192..f0806ace1ae2 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -608,7 +608,7 @@ static int mac_probe(struct platform_device *_of_dev)
const u8 *mac_addr;
u32 val;
u8 fman_id;
- int phy_if;
+ phy_interface_t phy_if;
dev = &_of_dev->dev;
mac_node = dev->of_node;
@@ -776,8 +776,8 @@ static int mac_probe(struct platform_device *_of_dev)
}
/* Get the PHY connection type */
- phy_if = of_get_phy_mode(mac_node);
- if (phy_if < 0) {
+ err = of_get_phy_mode(mac_node, &phy_if);
+ if (err) {
dev_warn(dev,
"of_get_phy_mode() for %pOF failed. Defaulting to SGMII\n",
mac_node);
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 51ad86417cb1..72868a28b621 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -641,6 +641,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
const char *model;
const void *mac_addr;
int err = 0, i;
+ phy_interface_t interface;
struct net_device *dev = NULL;
struct gfar_private *priv = NULL;
struct device_node *np = ofdev->dev.of_node;
@@ -805,9 +806,9 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
* rgmii-id really needs to be specified. Other types can be
* detected by hardware
*/
- err = of_get_phy_mode(np);
- if (err >= 0)
- priv->interface = err;
+ err = of_get_phy_mode(np, &interface);
+ if (!err)
+ priv->interface = interface;
else
priv->interface = gfar_get_interface(dev);
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index 59564ac99d2a..edec61dfc868 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -289,6 +289,8 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
len = be16_to_cpu(rx_desc->len) - GVE_RX_PAD;
page_info = &rx->data.page_info[idx];
+ dma_sync_single_for_cpu(&priv->pdev->dev, rx->data.qpl->page_buses[idx],
+ PAGE_SIZE, DMA_FROM_DEVICE);
/* gvnic can only receive into registered segments. If the buffer
* can't be recycled, our only choice is to copy the data out of
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index 778b87b5a06c..0a9a7ee2a866 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -390,7 +390,21 @@ static void gve_tx_fill_seg_desc(union gve_tx_desc *seg_desc,
seg_desc->seg.seg_addr = cpu_to_be64(addr);
}
-static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb)
+static void gve_dma_sync_for_device(struct device *dev, dma_addr_t *page_buses,
+ u64 iov_offset, u64 iov_len)
+{
+ dma_addr_t dma;
+ u64 addr;
+
+ for (addr = iov_offset; addr < iov_offset + iov_len;
+ addr += PAGE_SIZE) {
+ dma = page_buses[addr / PAGE_SIZE];
+ dma_sync_single_for_device(dev, dma, PAGE_SIZE, DMA_TO_DEVICE);
+ }
+}
+
+static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb,
+ struct device *dev)
{
int pad_bytes, hlen, hdr_nfrags, payload_nfrags, l4_hdr_offset;
union gve_tx_desc *pkt_desc, *seg_desc;
@@ -432,6 +446,9 @@ static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb)
skb_copy_bits(skb, 0,
tx->tx_fifo.base + info->iov[hdr_nfrags - 1].iov_offset,
hlen);
+ gve_dma_sync_for_device(dev, tx->tx_fifo.qpl->page_buses,
+ info->iov[hdr_nfrags - 1].iov_offset,
+ info->iov[hdr_nfrags - 1].iov_len);
copy_offset = hlen;
for (i = payload_iov; i < payload_nfrags + payload_iov; i++) {
@@ -445,6 +462,9 @@ static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb)
skb_copy_bits(skb, copy_offset,
tx->tx_fifo.base + info->iov[i].iov_offset,
info->iov[i].iov_len);
+ gve_dma_sync_for_device(dev, tx->tx_fifo.qpl->page_buses,
+ info->iov[i].iov_offset,
+ info->iov[i].iov_len);
copy_offset += info->iov[i].iov_len;
}
@@ -473,7 +493,7 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev)
gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
return NETDEV_TX_BUSY;
}
- nsegs = gve_tx_add_skb(tx, skb);
+ nsegs = gve_tx_add_skb(tx, skb, &priv->pdev->dev);
netdev_tx_sent_queue(tx->netdev_txq, skb->len);
skb_tx_timestamp(skb);
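Both gve hunks implement the streaming-DMA ownership rule: sync a buffer toward the device after the CPU writes TX data into it, and back to the CPU before reading RX data out of it. The rule in isolation, for a single mapped page at dma:

#include <linux/dma-mapping.h>

/* TX: CPU wrote into the bounce page; flush it toward the device */
static void example_tx_sync(struct device *dev, dma_addr_t dma)
{
        dma_sync_single_for_device(dev, dma, PAGE_SIZE, DMA_TO_DEVICE);
}

/* RX: device wrote into the page; hand ownership back to the CPU */
static void example_rx_sync(struct device *dev, dma_addr_t dma)
{
        dma_sync_single_for_cpu(dev, dma, PAGE_SIZE, DMA_FROM_DEVICE);
}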
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index c84167447abe..3e9b6d543c77 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -211,7 +211,7 @@ struct hip04_priv {
#if defined(CONFIG_HI13X1_GMAC)
void __iomem *sysctrl_base;
#endif
- int phy_mode;
+ phy_interface_t phy_mode;
int chan;
unsigned int port;
unsigned int group;
@@ -237,6 +237,7 @@ struct hip04_priv {
dma_addr_t rx_phys[RX_DESC_NUM];
unsigned int rx_head;
unsigned int rx_buf_size;
+ unsigned int rx_cnt_remaining;
struct device_node *phy_node;
struct phy_device *phy;
@@ -575,7 +576,6 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
struct hip04_priv *priv = container_of(napi, struct hip04_priv, napi);
struct net_device *ndev = priv->ndev;
struct net_device_stats *stats = &ndev->stats;
- unsigned int cnt = hip04_recv_cnt(priv);
struct rx_desc *desc;
struct sk_buff *skb;
unsigned char *buf;
@@ -588,8 +588,8 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
/* clean up tx descriptors */
tx_remaining = hip04_tx_reclaim(ndev, false);
-
- while (cnt && !last) {
+ priv->rx_cnt_remaining += hip04_recv_cnt(priv);
+ while (priv->rx_cnt_remaining && !last) {
buf = priv->rx_buf[priv->rx_head];
skb = build_skb(buf, priv->rx_buf_size);
if (unlikely(!skb)) {
@@ -635,11 +635,13 @@ refill:
hip04_set_recv_desc(priv, phys);
priv->rx_head = RX_NEXT(priv->rx_head);
- if (rx >= budget)
+ if (rx >= budget) {
+ --priv->rx_cnt_remaining;
goto done;
+ }
- if (--cnt == 0)
- cnt = hip04_recv_cnt(priv);
+ if (--priv->rx_cnt_remaining == 0)
+ priv->rx_cnt_remaining += hip04_recv_cnt(priv);
}
if (!(priv->reg_inten & RCV_INT)) {
@@ -724,6 +726,7 @@ static int hip04_mac_open(struct net_device *ndev)
int i;
priv->rx_head = 0;
+ priv->rx_cnt_remaining = 0;
priv->tx_head = 0;
priv->tx_tail = 0;
hip04_reset_ppe(priv);
@@ -958,10 +961,9 @@ static int hip04_mac_probe(struct platform_device *pdev)
goto init_fail;
}
- priv->phy_mode = of_get_phy_mode(node);
- if (priv->phy_mode < 0) {
+ ret = of_get_phy_mode(node, &priv->phy_mode);
+ if (ret) {
dev_warn(d, "not find phy-mode\n");
- ret = -EINVAL;
goto init_fail;
}
@@ -1038,7 +1040,6 @@ static int hip04_remove(struct platform_device *pdev)
hip04_free_ring(ndev, d);
unregister_netdev(ndev);
- free_irq(ndev->irq, ndev);
of_node_put(priv->phy_node);
cancel_work_sync(&priv->tx_timeout_task);
free_netdev(ndev);
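The hip04 change fixes a lost-work bug: hip04_recv_cnt() reads a clear-on-read hardware counter, so any count fetched but not drained within the NAPI budget must be carried over in software rather than re-read. A generic sketch with hypothetical helpers (read_hw_rx_cnt(), process_one_rx_desc()):

struct example_priv {
        unsigned int rx_cnt_remaining;
};

unsigned int read_hw_rx_cnt(struct example_priv *priv);  /* hypothetical, clear-on-read */
void process_one_rx_desc(struct example_priv *priv);     /* hypothetical */

static int example_poll(struct example_priv *priv, int budget)
{
        int done = 0;

        /* counter clears on read: bank whatever we fetch */
        priv->rx_cnt_remaining += read_hw_rx_cnt(priv);

        while (priv->rx_cnt_remaining && done < budget) {
                process_one_rx_desc(priv);
                priv->rx_cnt_remaining--;
                done++;
        }

        return done;    /* leftover count survives to the next poll */
}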
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index c41b19c760f8..247de9105d10 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -1193,10 +1193,9 @@ static int hix5hd2_dev_probe(struct platform_device *pdev)
if (ret)
goto err_free_mdio;
- priv->phy_mode = of_get_phy_mode(node);
- if ((int)priv->phy_mode < 0) {
+ ret = of_get_phy_mode(node, &priv->phy_mode);
+ if (ret) {
netdev_err(ndev, "not find phy-mode\n");
- ret = -EINVAL;
goto err_mdiobus;
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 3a14bbc26ea2..1c5243cc1dc6 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -3049,7 +3049,7 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset)
u32 sl;
u32 credit;
int i;
- const u32 port_map[DSAF_ROCE_CREDIT_CHN][DSAF_ROCE_CHAN_MODE_NUM] = {
+ static const u32 port_map[DSAF_ROCE_CREDIT_CHN][DSAF_ROCE_CHAN_MODE_NUM] = {
{DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0},
{DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0},
{DSAF_ROCE_PORT_2, DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0},
@@ -3059,7 +3059,7 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset)
{DSAF_ROCE_PORT_5, DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1},
{DSAF_ROCE_PORT_5, DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1},
};
- const u32 sl_map[DSAF_ROCE_CREDIT_CHN][DSAF_ROCE_CHAN_MODE_NUM] = {
+ static const u32 sl_map[DSAF_ROCE_CREDIT_CHN][DSAF_ROCE_CHAN_MODE_NUM] = {
{DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_0},
{DSAF_ROCE_SL_0, DSAF_ROCE_SL_1, DSAF_ROCE_SL_1},
{DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_2},
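Adding static const moves these lookup tables into .rodata; without static, the compiler must rebuild the whole array on the stack on every call. Illustrative sketch:

#include <linux/types.h>

static const u32 example_map[2][3] = {
        { 0, 0, 0 },
        { 1, 0, 0 },
};

static u32 example_lookup(int port, int mode)
{
        return example_map[port][mode]; /* table lives in .rodata, no per-call copy */
}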
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index f8a87f8ca983..1b0313900f98 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -45,8 +45,9 @@ enum HCLGE_MBX_OPCODE {
HCLGE_MBX_GET_LINK_MODE, /* (VF -> PF) get the link mode of pf */
HCLGE_MBX_PUSH_VLAN_INFO, /* (PF -> VF) push port base vlan */
HCLGE_MBX_GET_MEDIA_TYPE, /* (VF -> PF) get media type */
+ HCLGE_MBX_PUSH_PROMISC_INFO, /* (PF -> VF) push vf promisc info */
- HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf reset status */
+ HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf flr status */
HCLGE_MBX_PUSH_LINK_STATUS, /* (M7 -> PF) get port link status */
HCLGE_MBX_NCSI_ERROR, /* (M7 -> PF) receive a NCSI error */
};
@@ -71,7 +72,7 @@ enum hclge_mbx_vlan_cfg_subcode {
};
#define HCLGE_MBX_MAX_MSG_SIZE 16
-#define HCLGE_MBX_MAX_RESP_DATA_SIZE 8
+#define HCLGE_MBX_MAX_RESP_DATA_SIZE 8U
#define HCLGE_MBX_RING_MAP_BASIC_MSG_NUM 3
#define HCLGE_MBX_RING_NODE_VARIABLE_NUM 3
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
index 03ca7d925e8e..eef1b2764d34 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
@@ -146,7 +146,7 @@ void hnae3_unregister_client(struct hnae3_client *client)
return;
mutex_lock(&hnae3_common_lock);
-
+	/* one system should have only one client for each type */
list_for_each_entry(client_tmp, &hnae3_client_list, node) {
if (client_tmp->type == client->type) {
existed = true;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index c4b7bf851a28..45f59165bcd2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -32,6 +32,8 @@
#define HNAE3_MOD_VERSION "1.0"
+#define HNAE3_MIN_VECTOR_NUM 2 /* first one for misc, another for IO */
+
/* Device IDs */
#define HNAE3_DEV_ID_GE 0xA220
#define HNAE3_DEV_ID_25GE 0xA221
@@ -128,7 +130,6 @@ enum hnae3_module_type {
HNAE3_MODULE_TYPE_CR = 0x04,
HNAE3_MODULE_TYPE_KR = 0x05,
HNAE3_MODULE_TYPE_TP = 0x06,
-
};
enum hnae3_fec_mode {
@@ -364,6 +365,19 @@ struct hnae3_ae_dev {
* Enable/disable HW GRO
* add_arfs_entry
* Check the 5-tuples of flow, and create flow director rule
+ * get_vf_config
+ * Get the VF configuration setting by the host
+ * set_vf_link_state
+ * Set VF link status
+ * set_vf_spoofchk
+ * Enable/disable spoof check for specified vf
+ * set_vf_trust
+ * Enable/disable trust for specified vf; a trusted vf is allowed to
+ * enable promisc mode
+ * set_vf_rate
+ * Set the max tx rate of specified vf.
+ * set_vf_mac
+ * Configure the default MAC for specified VF
*/
struct hnae3_ae_ops {
int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
@@ -529,6 +543,16 @@ struct hnae3_ae_ops {
int (*mac_connect_phy)(struct hnae3_handle *handle);
void (*mac_disconnect_phy)(struct hnae3_handle *handle);
void (*restore_vlan_table)(struct hnae3_handle *handle);
+ int (*get_vf_config)(struct hnae3_handle *handle, int vf,
+ struct ifla_vf_info *ivf);
+ int (*set_vf_link_state)(struct hnae3_handle *handle, int vf,
+ int link_state);
+ int (*set_vf_spoofchk)(struct hnae3_handle *handle, int vf,
+ bool enable);
+ int (*set_vf_trust)(struct hnae3_handle *handle, int vf, bool enable);
+ int (*set_vf_rate)(struct hnae3_handle *handle, int vf,
+ int min_tx_rate, int max_tx_rate, bool force);
+ int (*set_vf_mac)(struct hnae3_handle *handle, int vf, u8 *p);
};
struct hnae3_dcb_ops {
@@ -551,7 +575,8 @@ struct hnae3_ae_algo {
const struct pci_device_id *pdev_id_table;
};
-#define HNAE3_INT_NAME_LEN (IFNAMSIZ + 16)
+#define HNAE3_INT_NAME_EXT_LEN 32 /* Max extra information length */
+#define HNAE3_INT_NAME_LEN (IFNAMSIZ + HNAE3_INT_NAME_EXT_LEN)
#define HNAE3_ITR_COUNTDOWN_START 100
struct hnae3_tc_info {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 28961a68e333..6b328a259efc 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -16,15 +16,14 @@ static int hns3_dbg_queue_info(struct hnae3_handle *h,
const char *cmd_buf)
{
struct hns3_nic_priv *priv = h->priv;
- struct hns3_nic_ring_data *ring_data;
struct hns3_enet_ring *ring;
u32 base_add_l, base_add_h;
u32 queue_num, queue_max;
u32 value, i = 0;
int cnt;
- if (!priv->ring_data) {
- dev_err(&h->pdev->dev, "ring_data is NULL\n");
+ if (!priv->ring) {
+ dev_err(&h->pdev->dev, "priv->ring is NULL\n");
return -EFAULT;
}
@@ -44,7 +43,6 @@ static int hns3_dbg_queue_info(struct hnae3_handle *h,
return -EINVAL;
}
- ring_data = priv->ring_data;
for (i = queue_num; i < queue_max; i++) {
/* Each cycle needs to determine whether the instance is reset,
* to prevent reference to invalid memory. And need to ensure
@@ -54,73 +52,73 @@ static int hns3_dbg_queue_info(struct hnae3_handle *h,
test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
return -EPERM;
- ring = ring_data[(u32)(i + h->kinfo.num_tqps)].ring;
+ ring = &priv->ring[(u32)(i + h->kinfo.num_tqps)];
base_add_h = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_BASEADDR_H_REG);
base_add_l = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_BASEADDR_L_REG);
- dev_info(&h->pdev->dev, "RX(%d) BASE ADD: 0x%08x%08x\n", i,
+ dev_info(&h->pdev->dev, "RX(%u) BASE ADD: 0x%08x%08x\n", i,
base_add_h, base_add_l);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_BD_NUM_REG);
- dev_info(&h->pdev->dev, "RX(%d) RING BD NUM: %u\n", i, value);
+ dev_info(&h->pdev->dev, "RX(%u) RING BD NUM: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_BD_LEN_REG);
- dev_info(&h->pdev->dev, "RX(%d) RING BD LEN: %u\n", i, value);
+ dev_info(&h->pdev->dev, "RX(%u) RING BD LEN: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_TAIL_REG);
- dev_info(&h->pdev->dev, "RX(%d) RING TAIL: %u\n", i, value);
+ dev_info(&h->pdev->dev, "RX(%u) RING TAIL: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_HEAD_REG);
- dev_info(&h->pdev->dev, "RX(%d) RING HEAD: %u\n", i, value);
+ dev_info(&h->pdev->dev, "RX(%u) RING HEAD: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_FBDNUM_REG);
- dev_info(&h->pdev->dev, "RX(%d) RING FBDNUM: %u\n", i, value);
+ dev_info(&h->pdev->dev, "RX(%u) RING FBDNUM: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_PKTNUM_RECORD_REG);
- dev_info(&h->pdev->dev, "RX(%d) RING PKTNUM: %u\n", i, value);
+ dev_info(&h->pdev->dev, "RX(%u) RING PKTNUM: %u\n", i, value);
- ring = ring_data[i].ring;
+ ring = &priv->ring[i];
base_add_h = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_BASEADDR_H_REG);
base_add_l = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_BASEADDR_L_REG);
- dev_info(&h->pdev->dev, "TX(%d) BASE ADD: 0x%08x%08x\n", i,
+ dev_info(&h->pdev->dev, "TX(%u) BASE ADD: 0x%08x%08x\n", i,
base_add_h, base_add_l);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_BD_NUM_REG);
- dev_info(&h->pdev->dev, "TX(%d) RING BD NUM: %u\n", i, value);
+ dev_info(&h->pdev->dev, "TX(%u) RING BD NUM: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_TC_REG);
- dev_info(&h->pdev->dev, "TX(%d) RING TC: %u\n", i, value);
+ dev_info(&h->pdev->dev, "TX(%u) RING TC: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_TAIL_REG);
- dev_info(&h->pdev->dev, "TX(%d) RING TAIL: %u\n", i, value);
+ dev_info(&h->pdev->dev, "TX(%u) RING TAIL: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_HEAD_REG);
- dev_info(&h->pdev->dev, "TX(%d) RING HEAD: %u\n", i, value);
+ dev_info(&h->pdev->dev, "TX(%u) RING HEAD: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_FBDNUM_REG);
- dev_info(&h->pdev->dev, "TX(%d) RING FBDNUM: %u\n", i, value);
+ dev_info(&h->pdev->dev, "TX(%u) RING FBDNUM: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_OFFSET_REG);
- dev_info(&h->pdev->dev, "TX(%d) RING OFFSET: %u\n", i, value);
+ dev_info(&h->pdev->dev, "TX(%u) RING OFFSET: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_PKTNUM_RECORD_REG);
- dev_info(&h->pdev->dev, "TX(%d) RING PKTNUM: %u\n\n", i,
+ dev_info(&h->pdev->dev, "TX(%u) RING PKTNUM: %u\n\n", i,
value);
}
@@ -130,7 +128,6 @@ static int hns3_dbg_queue_info(struct hnae3_handle *h,
static int hns3_dbg_queue_map(struct hnae3_handle *h)
{
struct hns3_nic_priv *priv = h->priv;
- struct hns3_nic_ring_data *ring_data;
int i;
if (!h->ae_algo->ops->get_global_queue_id)
@@ -143,15 +140,12 @@ static int hns3_dbg_queue_map(struct hnae3_handle *h)
u16 global_qid;
global_qid = h->ae_algo->ops->get_global_queue_id(h, i);
- ring_data = &priv->ring_data[i];
- if (!ring_data || !ring_data->ring ||
- !ring_data->ring->tqp_vector)
+ if (!priv->ring || !priv->ring[i].tqp_vector)
continue;
dev_info(&h->pdev->dev,
" %4d %4d %4d\n",
- i, global_qid,
- ring_data->ring->tqp_vector->vector_irq);
+ i, global_qid, priv->ring[i].tqp_vector->vector_irq);
}
return 0;
@@ -160,7 +154,6 @@ static int hns3_dbg_queue_map(struct hnae3_handle *h)
static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
{
struct hns3_nic_priv *priv = h->priv;
- struct hns3_nic_ring_data *ring_data;
struct hns3_desc *rx_desc, *tx_desc;
struct device *dev = &h->pdev->dev;
struct hns3_enet_ring *ring;
@@ -183,8 +176,7 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
return -EINVAL;
}
- ring_data = priv->ring_data;
- ring = ring_data[q_num].ring;
+ ring = &priv->ring[q_num];
value = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG);
tx_index = (cnt == 1) ? value : tx_index;
@@ -198,23 +190,26 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
addr = le64_to_cpu(tx_desc->addr);
dev_info(dev, "TX Queue Num: %u, BD Index: %u\n", q_num, tx_index);
dev_info(dev, "(TX)addr: %pad\n", &addr);
- dev_info(dev, "(TX)vlan_tag: %u\n", tx_desc->tx.vlan_tag);
- dev_info(dev, "(TX)send_size: %u\n", tx_desc->tx.send_size);
+ dev_info(dev, "(TX)vlan_tag: %u\n", le16_to_cpu(tx_desc->tx.vlan_tag));
+ dev_info(dev, "(TX)send_size: %u\n",
+ le16_to_cpu(tx_desc->tx.send_size));
dev_info(dev, "(TX)vlan_tso: %u\n", tx_desc->tx.type_cs_vlan_tso);
dev_info(dev, "(TX)l2_len: %u\n", tx_desc->tx.l2_len);
dev_info(dev, "(TX)l3_len: %u\n", tx_desc->tx.l3_len);
dev_info(dev, "(TX)l4_len: %u\n", tx_desc->tx.l4_len);
- dev_info(dev, "(TX)vlan_tag: %u\n", tx_desc->tx.outer_vlan_tag);
- dev_info(dev, "(TX)tv: %u\n", tx_desc->tx.tv);
+ dev_info(dev, "(TX)vlan_tag: %u\n",
+ le16_to_cpu(tx_desc->tx.outer_vlan_tag));
+ dev_info(dev, "(TX)tv: %u\n", le16_to_cpu(tx_desc->tx.tv));
dev_info(dev, "(TX)vlan_msec: %u\n", tx_desc->tx.ol_type_vlan_msec);
dev_info(dev, "(TX)ol2_len: %u\n", tx_desc->tx.ol2_len);
dev_info(dev, "(TX)ol3_len: %u\n", tx_desc->tx.ol3_len);
dev_info(dev, "(TX)ol4_len: %u\n", tx_desc->tx.ol4_len);
- dev_info(dev, "(TX)paylen: %u\n", tx_desc->tx.paylen);
- dev_info(dev, "(TX)vld_ra_ri: %u\n", tx_desc->tx.bdtp_fe_sc_vld_ra_ri);
- dev_info(dev, "(TX)mss: %u\n", tx_desc->tx.mss);
+ dev_info(dev, "(TX)paylen: %u\n", le32_to_cpu(tx_desc->tx.paylen));
+ dev_info(dev, "(TX)vld_ra_ri: %u\n",
+ le16_to_cpu(tx_desc->tx.bdtp_fe_sc_vld_ra_ri));
+ dev_info(dev, "(TX)mss: %u\n", le16_to_cpu(tx_desc->tx.mss));
- ring = ring_data[q_num + h->kinfo.num_tqps].ring;
+ ring = &priv->ring[q_num + h->kinfo.num_tqps];
value = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_TAIL_REG);
rx_index = (cnt == 1) ? value : tx_index;
rx_desc = &ring->desc[rx_index];
@@ -222,15 +217,19 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
addr = le64_to_cpu(rx_desc->addr);
dev_info(dev, "RX Queue Num: %u, BD Index: %u\n", q_num, rx_index);
dev_info(dev, "(RX)addr: %pad\n", &addr);
- dev_info(dev, "(RX)l234_info: %u\n", rx_desc->rx.l234_info);
- dev_info(dev, "(RX)pkt_len: %u\n", rx_desc->rx.pkt_len);
- dev_info(dev, "(RX)size: %u\n", rx_desc->rx.size);
- dev_info(dev, "(RX)rss_hash: %u\n", rx_desc->rx.rss_hash);
- dev_info(dev, "(RX)fd_id: %u\n", rx_desc->rx.fd_id);
- dev_info(dev, "(RX)vlan_tag: %u\n", rx_desc->rx.vlan_tag);
- dev_info(dev, "(RX)o_dm_vlan_id_fb: %u\n", rx_desc->rx.o_dm_vlan_id_fb);
- dev_info(dev, "(RX)ot_vlan_tag: %u\n", rx_desc->rx.ot_vlan_tag);
- dev_info(dev, "(RX)bd_base_info: %u\n", rx_desc->rx.bd_base_info);
+ dev_info(dev, "(RX)l234_info: %u\n",
+ le32_to_cpu(rx_desc->rx.l234_info));
+ dev_info(dev, "(RX)pkt_len: %u\n", le16_to_cpu(rx_desc->rx.pkt_len));
+ dev_info(dev, "(RX)size: %u\n", le16_to_cpu(rx_desc->rx.size));
+ dev_info(dev, "(RX)rss_hash: %u\n", le32_to_cpu(rx_desc->rx.rss_hash));
+ dev_info(dev, "(RX)fd_id: %u\n", le16_to_cpu(rx_desc->rx.fd_id));
+ dev_info(dev, "(RX)vlan_tag: %u\n", le16_to_cpu(rx_desc->rx.vlan_tag));
+ dev_info(dev, "(RX)o_dm_vlan_id_fb: %u\n",
+ le16_to_cpu(rx_desc->rx.o_dm_vlan_id_fb));
+ dev_info(dev, "(RX)ot_vlan_tag: %u\n",
+ le16_to_cpu(rx_desc->rx.ot_vlan_tag));
+ dev_info(dev, "(RX)bd_base_info: %u\n",
+ le32_to_cpu(rx_desc->rx.bd_base_info));
return 0;
}
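The debugfs fixes above matter on big-endian hosts: the descriptor fields are little-endian in memory, so each value must pass through le16_to_cpu()/le32_to_cpu() before printing. A minimal illustration with a hypothetical descriptor:

#include <linux/device.h>
#include <linux/types.h>

struct example_desc {
        __le16 vlan_tag;
        __le32 paylen;
};

static void example_print(struct device *dev, const struct example_desc *d)
{
        dev_info(dev, "vlan_tag: %u\n", le16_to_cpu(d->vlan_tag));
        dev_info(dev, "paylen: %u\n", le32_to_cpu(d->paylen));
}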
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 616cad0faa21..ba0536802b13 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -483,7 +483,7 @@ static void hns3_reset_tx_queue(struct hnae3_handle *h)
for (i = 0; i < h->kinfo.num_tqps; i++) {
dev_queue = netdev_get_tx_queue(ndev,
- priv->ring_data[i].queue_index);
+ priv->ring[i].queue_index);
netdev_tx_reset_queue(dev_queue);
}
}
@@ -681,7 +681,7 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
return 0;
ret = skb_cow_head(skb, 0);
- if (unlikely(ret))
+ if (unlikely(ret < 0))
return ret;
l3.hdr = skb_network_header(skb);
@@ -962,14 +962,6 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
return 0;
}
-static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
-{
- /* Config bd buffer end */
- if (!!frag_end)
- hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, 1U);
- hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1U);
-}
-
static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring,
struct sk_buff *skb)
{
@@ -1062,7 +1054,7 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
skb_reset_mac_len(skb);
ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
- if (unlikely(ret)) {
+ if (unlikely(ret < 0)) {
u64_stats_update_begin(&ring->syncp);
ring->stats.tx_l4_proto_err++;
u64_stats_update_end(&ring->syncp);
@@ -1072,7 +1064,7 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto,
&type_cs_vlan_tso,
&ol_type_vlan_len_msec);
- if (unlikely(ret)) {
+ if (unlikely(ret < 0)) {
u64_stats_update_begin(&ring->syncp);
ring->stats.tx_l2l3l4_err++;
u64_stats_update_end(&ring->syncp);
@@ -1081,7 +1073,7 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
ret = hns3_set_tso(skb, &paylen, &mss,
&type_cs_vlan_tso);
- if (unlikely(ret)) {
+ if (unlikely(ret < 0)) {
u64_stats_update_begin(&ring->syncp);
ring->stats.tx_tso_err++;
u64_stats_update_end(&ring->syncp);
@@ -1102,9 +1094,10 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
}
static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
- unsigned int size, int frag_end,
- enum hns_desc_type type)
+ unsigned int size, enum hns_desc_type type)
{
+#define HNS3_LIKELY_BD_NUM 1
+
struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
struct hns3_desc *desc = &ring->desc[ring->next_to_use];
struct device *dev = ring_to_dev(ring);
@@ -1118,7 +1111,7 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
int ret;
ret = hns3_fill_skb_desc(ring, skb, desc);
- if (unlikely(ret))
+ if (unlikely(ret < 0))
return ret;
dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
@@ -1137,19 +1130,16 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
desc_cb->length = size;
if (likely(size <= HNS3_MAX_BD_SIZE)) {
- u16 bdtp_fe_sc_vld_ra_ri = 0;
-
desc_cb->priv = priv;
desc_cb->dma = dma;
desc_cb->type = type;
desc->addr = cpu_to_le64(dma);
desc->tx.send_size = cpu_to_le16(size);
- hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
desc->tx.bdtp_fe_sc_vld_ra_ri =
- cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
+ cpu_to_le16(BIT(HNS3_TXD_VLD_B));
ring_ptr_move_fw(ring, next_to_use);
- return 0;
+ return HNS3_LIKELY_BD_NUM;
}
frag_buf_num = hns3_tx_bd_count(size);
@@ -1158,8 +1148,6 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
/* When frag size is bigger than hardware limit, split this frag */
for (k = 0; k < frag_buf_num; k++) {
- u16 bdtp_fe_sc_vld_ra_ri = 0;
-
/* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
desc_cb->priv = priv;
desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k;
@@ -1170,11 +1158,8 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k);
desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ?
(u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE);
- hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri,
- frag_end && (k == frag_buf_num - 1) ?
- 1 : 0);
desc->tx.bdtp_fe_sc_vld_ra_ri =
- cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
+ cpu_to_le16(BIT(HNS3_TXD_VLD_B));
/* move ring pointer to next */
ring_ptr_move_fw(ring, next_to_use);
@@ -1183,23 +1168,78 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
desc = &ring->desc[ring->next_to_use];
}
- return 0;
+ return frag_buf_num;
}
-static unsigned int hns3_nic_bd_num(struct sk_buff *skb)
+static unsigned int hns3_skb_bd_num(struct sk_buff *skb, unsigned int *bd_size,
+ unsigned int bd_num)
{
- unsigned int bd_num;
+ unsigned int size;
int i;
- /* if the total len is within the max bd limit */
- if (likely(skb->len <= HNS3_MAX_BD_SIZE))
- return skb_shinfo(skb)->nr_frags + 1;
+ size = skb_headlen(skb);
+ while (size > HNS3_MAX_BD_SIZE) {
+ bd_size[bd_num++] = HNS3_MAX_BD_SIZE;
+ size -= HNS3_MAX_BD_SIZE;
- bd_num = hns3_tx_bd_count(skb_headlen(skb));
+ if (bd_num > HNS3_MAX_TSO_BD_NUM)
+ return bd_num;
+ }
+
+ if (size) {
+ bd_size[bd_num++] = size;
+ if (bd_num > HNS3_MAX_TSO_BD_NUM)
+ return bd_num;
+ }
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- bd_num += hns3_tx_bd_count(skb_frag_size(frag));
+ size = skb_frag_size(frag);
+ if (!size)
+ continue;
+
+ while (size > HNS3_MAX_BD_SIZE) {
+ bd_size[bd_num++] = HNS3_MAX_BD_SIZE;
+ size -= HNS3_MAX_BD_SIZE;
+
+ if (bd_num > HNS3_MAX_TSO_BD_NUM)
+ return bd_num;
+ }
+
+ bd_size[bd_num++] = size;
+ if (bd_num > HNS3_MAX_TSO_BD_NUM)
+ return bd_num;
+ }
+
+ return bd_num;
+}
+
+static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size)
+{
+ struct sk_buff *frag_skb;
+ unsigned int bd_num = 0;
+
+ /* If the total len is within the max bd limit */
+ if (likely(skb->len <= HNS3_MAX_BD_SIZE && !skb_has_frag_list(skb) &&
+ skb_shinfo(skb)->nr_frags < HNS3_MAX_NON_TSO_BD_NUM))
+ return skb_shinfo(skb)->nr_frags + 1U;
+
+ /* The cases below will always be linearized; return
+ * HNS3_MAX_TSO_BD_NUM + 1U to make sure they are linearized.
+ */
+ if (unlikely(skb->len > HNS3_MAX_TSO_SIZE ||
+ (!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE)))
+ return HNS3_MAX_TSO_BD_NUM + 1U;
+
+ bd_num = hns3_skb_bd_num(skb, bd_size, bd_num);
+
+ if (!skb_has_frag_list(skb) || bd_num > HNS3_MAX_TSO_BD_NUM)
+ return bd_num;
+
+ skb_walk_frags(skb, frag_skb) {
+ bd_num = hns3_skb_bd_num(frag_skb, bd_size, bd_num);
+ if (bd_num > HNS3_MAX_TSO_BD_NUM)
+ return bd_num;
}
return bd_num;
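What the new hns3_skb_bd_num() accounts for: every buffer longer than HNS3_MAX_BD_SIZE is split into ceil(size / HNS3_MAX_BD_SIZE) descriptors, and each per-BD size is recorded so hns3_skb_need_linearized() can check the hardware's windowed limits. The per-buffer count in isolation, a sketch of the same math as the driver's hns3_tx_bd_count():

static unsigned int example_bd_count(unsigned int size,
                                     unsigned int max_bd_size)
{
        /* ceiling division, i.e. DIV_ROUND_UP(size, max_bd_size) */
        return (size + max_bd_size - 1) / max_bd_size;
}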
@@ -1218,26 +1258,26 @@ static unsigned int hns3_gso_hdr_len(struct sk_buff *skb)
* 7 frags to be larger than gso header len + mss, and the remaining
* continuous 7 frags to be larger than MSS except the last 7 frags.
*/
-static bool hns3_skb_need_linearized(struct sk_buff *skb)
+static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size,
+ unsigned int bd_num)
{
- int bd_limit = HNS3_MAX_BD_NUM_NORMAL - 1;
unsigned int tot_len = 0;
int i;
- for (i = 0; i < bd_limit; i++)
- tot_len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
+ for (i = 0; i < HNS3_MAX_NON_TSO_BD_NUM - 1U; i++)
+ tot_len += bd_size[i];
- /* ensure headlen + the first 7 frags is greater than mss + header
- * and the first 7 frags is greater than mss.
- */
- if (((tot_len + skb_headlen(skb)) < (skb_shinfo(skb)->gso_size +
- hns3_gso_hdr_len(skb))) || (tot_len < skb_shinfo(skb)->gso_size))
+ /* ensure the total size of the first 8 frags is greater than mss + header */
+ if (tot_len + bd_size[HNS3_MAX_NON_TSO_BD_NUM - 1U] <
+ skb_shinfo(skb)->gso_size + hns3_gso_hdr_len(skb))
return true;
- /* ensure the remaining continuous 7 buffer is greater than mss */
- for (i = 0; i < (skb_shinfo(skb)->nr_frags - bd_limit - 1); i++) {
- tot_len -= skb_frag_size(&skb_shinfo(skb)->frags[i]);
- tot_len += skb_frag_size(&skb_shinfo(skb)->frags[i + bd_limit]);
+ /* ensure every 7 continuous buffers are greater than mss,
+ * except the last one.
+ */
+ for (i = 0; i < bd_num - HNS3_MAX_NON_TSO_BD_NUM; i++) {
+ tot_len -= bd_size[i];
+ tot_len += bd_size[i + HNS3_MAX_NON_TSO_BD_NUM - 1U];
if (tot_len < skb_shinfo(skb)->gso_size)
return true;
@@ -1249,15 +1289,16 @@ static bool hns3_skb_need_linearized(struct sk_buff *skb)
static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
struct sk_buff **out_skb)
{
+ unsigned int bd_size[HNS3_MAX_TSO_BD_NUM + 1U];
struct sk_buff *skb = *out_skb;
unsigned int bd_num;
- bd_num = hns3_nic_bd_num(skb);
- if (unlikely(bd_num > HNS3_MAX_BD_NUM_NORMAL)) {
+ bd_num = hns3_tx_bd_num(skb, bd_size);
+ if (unlikely(bd_num > HNS3_MAX_NON_TSO_BD_NUM)) {
struct sk_buff *new_skb;
- if (skb_is_gso(skb) && bd_num <= HNS3_MAX_BD_NUM_TSO &&
- !hns3_skb_need_linearized(skb))
+ if (bd_num <= HNS3_MAX_TSO_BD_NUM && skb_is_gso(skb) &&
+ !hns3_skb_need_linearized(skb, bd_size, bd_num))
goto out;
/* manual split the send packet */
@@ -1267,9 +1308,10 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
dev_kfree_skb_any(skb);
*out_skb = new_skb;
- bd_num = hns3_nic_bd_num(new_skb);
- if ((skb_is_gso(new_skb) && bd_num > HNS3_MAX_BD_NUM_TSO) ||
- (!skb_is_gso(new_skb) && bd_num > HNS3_MAX_BD_NUM_NORMAL))
+ bd_num = hns3_tx_bd_count(new_skb->len);
+ if ((skb_is_gso(new_skb) && bd_num > HNS3_MAX_TSO_BD_NUM) ||
+ (!skb_is_gso(new_skb) &&
+ bd_num > HNS3_MAX_NON_TSO_BD_NUM))
return -ENOMEM;
u64_stats_update_begin(&ring->syncp);
@@ -1314,73 +1356,98 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
}
}
+static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring,
+ struct sk_buff *skb, enum hns_desc_type type)
+{
+ unsigned int size = skb_headlen(skb);
+ int i, ret, bd_num = 0;
+
+ if (size) {
+ ret = hns3_fill_desc(ring, skb, size, type);
+ if (unlikely(ret < 0))
+ return ret;
+
+ bd_num += ret;
+ }
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ size = skb_frag_size(frag);
+ if (!size)
+ continue;
+
+ ret = hns3_fill_desc(ring, frag, size, DESC_TYPE_PAGE);
+ if (unlikely(ret < 0))
+ return ret;
+
+ bd_num += ret;
+ }
+
+ return bd_num;
+}
+
netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hns3_nic_ring_data *ring_data =
- &tx_ring_data(priv, skb->queue_mapping);
- struct hns3_enet_ring *ring = ring_data->ring;
+ struct hns3_enet_ring *ring = &priv->ring[skb->queue_mapping];
struct netdev_queue *dev_queue;
- skb_frag_t *frag;
- int next_to_use_head;
- int buf_num;
- int seg_num;
- int size;
+ int pre_ntu, next_to_use_head;
+ struct sk_buff *frag_skb;
+ int bd_num = 0;
int ret;
- int i;
/* Prefetch the data used later */
prefetch(skb->data);
- buf_num = hns3_nic_maybe_stop_tx(ring, &skb);
- if (unlikely(buf_num <= 0)) {
- if (buf_num == -EBUSY) {
+ ret = hns3_nic_maybe_stop_tx(ring, &skb);
+ if (unlikely(ret <= 0)) {
+ if (ret == -EBUSY) {
u64_stats_update_begin(&ring->syncp);
ring->stats.tx_busy++;
u64_stats_update_end(&ring->syncp);
goto out_net_tx_busy;
- } else if (buf_num == -ENOMEM) {
+ } else if (ret == -ENOMEM) {
u64_stats_update_begin(&ring->syncp);
ring->stats.sw_err_cnt++;
u64_stats_update_end(&ring->syncp);
}
- hns3_rl_err(netdev, "xmit error: %d!\n", buf_num);
+ hns3_rl_err(netdev, "xmit error: %d!\n", ret);
goto out_err_tx_ok;
}
- /* No. of segments (plus a header) */
- seg_num = skb_shinfo(skb)->nr_frags + 1;
- /* Fill the first part */
- size = skb_headlen(skb);
-
next_to_use_head = ring->next_to_use;
- ret = hns3_fill_desc(ring, skb, size, seg_num == 1 ? 1 : 0,
- DESC_TYPE_SKB);
- if (unlikely(ret))
+ ret = hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB);
+ if (unlikely(ret < 0))
goto fill_err;
- /* Fill the fragments */
- for (i = 1; i < seg_num; i++) {
- frag = &skb_shinfo(skb)->frags[i - 1];
- size = skb_frag_size(frag);
+ bd_num += ret;
- ret = hns3_fill_desc(ring, frag, size,
- seg_num - 1 == i ? 1 : 0,
- DESC_TYPE_PAGE);
+ if (!skb_has_frag_list(skb))
+ goto out;
- if (unlikely(ret))
+ skb_walk_frags(skb, frag_skb) {
+ ret = hns3_fill_skb_to_desc(ring, frag_skb, DESC_TYPE_PAGE);
+ if (unlikely(ret < 0))
goto fill_err;
+
+ bd_num += ret;
}
+out:
+ pre_ntu = ring->next_to_use ? (ring->next_to_use - 1) :
+ (ring->desc_num - 1);
+ ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |=
+ cpu_to_le16(BIT(HNS3_TXD_FE_B));
/* Complete translate all packets */
- dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index);
+ dev_queue = netdev_get_tx_queue(netdev, ring->queue_index);
netdev_tx_sent_queue(dev_queue, skb->len);
wmb(); /* Commit all data before submit */
- hnae3_queue_xmit(ring->tqp, buf_num);
+ hnae3_queue_xmit(ring->tqp, bd_num);
return NETDEV_TX_OK;
@@ -1392,7 +1459,7 @@ out_err_tx_ok:
return NETDEV_TX_OK;
out_net_tx_busy:
- netif_stop_subqueue(netdev, ring_data->queue_index);
+ netif_stop_subqueue(netdev, ring->queue_index);
smp_mb(); /* Commit all data before submit */
return NETDEV_TX_BUSY;
@@ -1413,6 +1480,16 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
return 0;
}
+ /* For VF device, if there is a perm_addr, then the user will not
+ * be allowed to change the address.
+ */
+ if (!hns3_is_phys_func(h->pdev) &&
+ !is_zero_ether_addr(netdev->perm_addr)) {
+ netdev_err(netdev, "has permanent MAC %pM, user MAC %pM not allow\n",
+ netdev->perm_addr, mac_addr->sa_data);
+ return -EPERM;
+ }
+
ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false);
if (ret) {
netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
@@ -1505,7 +1582,7 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
for (idx = 0; idx < queue_num; idx++) {
/* fetch the tx stats */
- ring = priv->ring_data[idx].ring;
+ ring = &priv->ring[idx];
do {
start = u64_stats_fetch_begin_irq(&ring->syncp);
tx_bytes += ring->stats.tx_bytes;
@@ -1523,7 +1600,7 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
/* fetch the rx stats */
- ring = priv->ring_data[idx + queue_num].ring;
+ ring = &priv->ring[idx + queue_num];
do {
start = u64_stats_fetch_begin_irq(&ring->syncp);
rx_bytes += ring->stats.rx_bytes;
@@ -1633,8 +1710,8 @@ static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
int ret = -EIO;
netif_dbg(h, drv, netdev,
- "set vf vlan: vf=%d, vlan=%u, qos=%u, vlan_proto=%u\n",
- vf, vlan, qos, vlan_proto);
+ "set vf vlan: vf=%d, vlan=%u, qos=%u, vlan_proto=0x%x\n",
+ vf, vlan, qos, ntohs(vlan_proto));
if (h->ae_algo->ops->set_vf_vlan_filter)
ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
@@ -1643,6 +1720,29 @@ static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
return ret;
}
+static int hns3_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+
+ if (hns3_nic_resetting(netdev))
+ return -EBUSY;
+
+ if (!handle->ae_algo->ops->set_vf_spoofchk)
+ return -EOPNOTSUPP;
+
+ return handle->ae_algo->ops->set_vf_spoofchk(handle, vf, enable);
+}
+
+static int hns3_set_vf_trust(struct net_device *netdev, int vf, bool enable)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+
+ if (!handle->ae_algo->ops->set_vf_trust)
+ return -EOPNOTSUPP;
+
+ return handle->ae_algo->ops->set_vf_trust(handle, vf, enable);
+}
+
static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
@@ -1671,7 +1771,7 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
{
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = hns3_get_handle(ndev);
- struct hns3_enet_ring *tx_ring = NULL;
+ struct hns3_enet_ring *tx_ring;
struct napi_struct *napi;
int timeout_queue = 0;
int hw_head, hw_tail;
@@ -1692,6 +1792,9 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
time_after(jiffies,
(trans_start + ndev->watchdog_timeo))) {
timeout_queue = i;
+ netdev_info(ndev, "queue state: 0x%lx, delta msecs: %u\n",
+ q->state,
+ jiffies_to_msecs(jiffies - trans_start));
break;
}
}
@@ -1705,7 +1808,7 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
priv->tx_timeout_count++;
- tx_ring = priv->ring_data[timeout_queue].ring;
+ tx_ring = &priv->ring[timeout_queue];
napi = &tx_ring->tqp_vector->napi;
netdev_info(ndev,
@@ -1805,6 +1908,57 @@ static int hns3_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
}
#endif
+static int hns3_nic_get_vf_config(struct net_device *ndev, int vf,
+ struct ifla_vf_info *ivf)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+
+ if (!h->ae_algo->ops->get_vf_config)
+ return -EOPNOTSUPP;
+
+ return h->ae_algo->ops->get_vf_config(h, vf, ivf);
+}
+
+static int hns3_nic_set_vf_link_state(struct net_device *ndev, int vf,
+ int link_state)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+
+ if (!h->ae_algo->ops->set_vf_link_state)
+ return -EOPNOTSUPP;
+
+ return h->ae_algo->ops->set_vf_link_state(h, vf, link_state);
+}
+
+static int hns3_nic_set_vf_rate(struct net_device *ndev, int vf,
+ int min_tx_rate, int max_tx_rate)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+
+ if (!h->ae_algo->ops->set_vf_rate)
+ return -EOPNOTSUPP;
+
+ return h->ae_algo->ops->set_vf_rate(h, vf, min_tx_rate, max_tx_rate,
+ false);
+}
+
+static int hns3_nic_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (!h->ae_algo->ops->set_vf_mac)
+ return -EOPNOTSUPP;
+
+ if (is_multicast_ether_addr(mac)) {
+ netdev_err(netdev,
+ "Invalid MAC:%pM specified. Could not set MAC\n",
+ mac);
+ return -EINVAL;
+ }
+
+ return h->ae_algo->ops->set_vf_mac(h, vf_id, mac);
+}
+
static const struct net_device_ops hns3_nic_netdev_ops = {
.ndo_open = hns3_nic_net_open,
.ndo_stop = hns3_nic_net_stop,
@@ -1820,10 +1974,15 @@ static const struct net_device_ops hns3_nic_netdev_ops = {
.ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid,
.ndo_set_vf_vlan = hns3_ndo_set_vf_vlan,
+ .ndo_set_vf_spoofchk = hns3_set_vf_spoofchk,
+ .ndo_set_vf_trust = hns3_set_vf_trust,
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = hns3_rx_flow_steer,
#endif
-
+ .ndo_get_vf_config = hns3_nic_get_vf_config,
+ .ndo_set_vf_link_state = hns3_nic_set_vf_link_state,
+ .ndo_set_vf_rate = hns3_nic_set_vf_rate,
+ .ndo_set_vf_mac = hns3_nic_set_vf_mac,
};
bool hns3_is_phys_func(struct pci_dev *pdev)
@@ -1843,7 +2002,7 @@ bool hns3_is_phys_func(struct pci_dev *pdev)
case HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF:
return false;
default:
- dev_warn(&pdev->dev, "un-recognized pci device-id %d",
+ dev_warn(&pdev->dev, "un-recognized pci device-id %u",
dev_id);
}
@@ -2069,9 +2228,8 @@ static void hns3_set_default_feature(struct net_device *netdev)
NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
-
- netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
+ NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC |
+ NETIF_F_TSO_MANGLEID | NETIF_F_FRAGLIST;
netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
@@ -2081,21 +2239,24 @@ static void hns3_set_default_feature(struct net_device *netdev)
NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
+ NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC |
+ NETIF_F_FRAGLIST;
netdev->vlan_features |=
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
+ NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC |
+ NETIF_F_FRAGLIST;
netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
+ NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC |
+ NETIF_F_FRAGLIST;
if (pdev->revision >= 0x21) {
netdev->hw_features |= NETIF_F_GRO_HW;
@@ -2320,18 +2481,19 @@ static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
void hns3_clean_tx_ring(struct hns3_enet_ring *ring)
{
- struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
+ struct net_device *netdev = ring_to_netdev(ring);
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct netdev_queue *dev_queue;
int bytes, pkts;
int head;
head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG);
- rmb(); /* Make sure head is ready before touch any data */
if (is_ring_empty(ring) || head == ring->next_to_clean)
return; /* no data to poll */
+	rmb(); /* Make sure head is ready before touching any data */
+
if (unlikely(!is_valid_clean_head(ring, head))) {
netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
ring->next_to_use, ring->next_to_clean);
@@ -2358,7 +2520,7 @@ void hns3_clean_tx_ring(struct hns3_enet_ring *ring)
netdev_tx_completed_queue(dev_queue, pkts, bytes);
if (unlikely(pkts && netif_carrier_ok(netdev) &&
- (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) {
+ ring_space(ring) > HNS3_MAX_TSO_BD_NUM)) {
/* Make sure that anybody stopping the queue after this
* sees the new next_to_clean.
*/
@@ -2401,7 +2563,7 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
ring->stats.sw_err_cnt++;
u64_stats_update_end(&ring->syncp);
- hns3_rl_err(ring->tqp_vector->napi.dev,
+ hns3_rl_err(ring_to_netdev(ring),
"alloc rx buffer failed: %d\n",
ret);
break;
@@ -2510,7 +2672,7 @@ static int hns3_gro_complete(struct sk_buff *skb, u32 l234info)
static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
u32 l234info, u32 bd_base_info, u32 ol_info)
{
- struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
+ struct net_device *netdev = ring_to_netdev(ring);
int l3_type, l4_type;
int ol4_type;
@@ -2626,7 +2788,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
{
#define HNS3_NEED_ADD_FRAG 1
struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
- struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
+ struct net_device *netdev = ring_to_netdev(ring);
struct sk_buff *skb;
ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE);
@@ -2672,10 +2834,10 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
}
static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
- struct sk_buff **out_skb, bool pending)
+ bool pending)
{
- struct sk_buff *skb = *out_skb;
- struct sk_buff *head_skb = *out_skb;
+ struct sk_buff *skb = ring->skb;
+ struct sk_buff *head_skb = skb;
struct sk_buff *new_skb;
struct hns3_desc_cb *desc_cb;
struct hns3_desc *pre_desc;
@@ -2704,10 +2866,9 @@ static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
return -ENXIO;
if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) {
- new_skb = napi_alloc_skb(&ring->tqp_vector->napi,
- HNS3_RX_HEAD_SIZE);
+ new_skb = napi_alloc_skb(&ring->tqp_vector->napi, 0);
if (unlikely(!new_skb)) {
- hns3_rl_err(ring->tqp_vector->napi.dev,
+ hns3_rl_err(ring_to_netdev(ring),
"alloc rx fraglist skb fail\n");
return -ENXIO;
}
@@ -2783,7 +2944,7 @@ static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
{
- struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
+ struct net_device *netdev = ring_to_netdev(ring);
enum hns3_pkt_l2t_type l2_frame_type;
u32 bd_base_info, l234info, ol_info;
struct hns3_desc *desc;
@@ -2858,8 +3019,7 @@ static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
return 0;
}
-static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
- struct sk_buff **out_skb)
+static int hns3_handle_rx_bd(struct hns3_enet_ring *ring)
{
struct sk_buff *skb = ring->skb;
struct hns3_desc_cb *desc_cb;
@@ -2897,12 +3057,12 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
if (!skb) {
ret = hns3_alloc_skb(ring, length, ring->va);
- *out_skb = skb = ring->skb;
+ skb = ring->skb;
if (ret < 0) /* alloc buffer fail */
return ret;
if (ret > 0) { /* need add frag */
- ret = hns3_add_frag(ring, desc, &skb, false);
+ ret = hns3_add_frag(ring, desc, false);
if (ret)
return ret;
@@ -2913,7 +3073,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
ALIGN(ring->pull_len, sizeof(long)));
}
} else {
- ret = hns3_add_frag(ring, desc, &skb, true);
+ ret = hns3_add_frag(ring, desc, true);
if (ret)
return ret;
@@ -2931,8 +3091,6 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
}
skb_record_rx_queue(skb, ring->tqp->tqp_index);
- *out_skb = skb;
-
return 0;
}
@@ -2941,17 +3099,19 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
{
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
int unused_count = hns3_desc_unused(ring);
- struct sk_buff *skb = ring->skb;
int recv_pkts = 0;
int recv_bds = 0;
int err, num;
num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
- rmb(); /* Make sure num taken effect before the other data is touched */
-
num -= unused_count;
unused_count -= ring->pending_buf;
+ if (num <= 0)
+ goto out;
+
+ rmb(); /* Make sure num taken effect before the other data is touched */
+
while (recv_pkts < budget && recv_bds < num) {
/* Reuse or realloc buffers */
if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
@@ -2961,27 +3121,19 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
}
/* Poll one pkt */
- err = hns3_handle_rx_bd(ring, &skb);
- if (unlikely(!skb)) /* This fault cannot be repaired */
- goto out;
-
- if (err == -ENXIO) { /* Do not get FE for the packet */
+ err = hns3_handle_rx_bd(ring);
+ /* Do not get FE for the packet or failed to alloc skb */
+ if (unlikely(!ring->skb || err == -ENXIO)) {
goto out;
- } else if (unlikely(err)) { /* Do jump the err */
- recv_bds += ring->pending_buf;
- unused_count += ring->pending_buf;
- ring->skb = NULL;
- ring->pending_buf = 0;
- continue;
+ } else if (likely(!err)) {
+ rx_fn(ring, ring->skb);
+ recv_pkts++;
}
- rx_fn(ring, skb);
recv_bds += ring->pending_buf;
unused_count += ring->pending_buf;
ring->skb = NULL;
ring->pending_buf = 0;
-
- recv_pkts++;
}
out:
@@ -3324,13 +3476,13 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
tqp_vector = &priv->tqp_vector[vector_i];
hns3_add_ring_to_group(&tqp_vector->tx_group,
- priv->ring_data[i].ring);
+ &priv->ring[i]);
hns3_add_ring_to_group(&tqp_vector->rx_group,
- priv->ring_data[i + tqp_num].ring);
+ &priv->ring[i + tqp_num]);
- priv->ring_data[i].ring->tqp_vector = tqp_vector;
- priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector;
+ priv->ring[i].tqp_vector = tqp_vector;
+ priv->ring[i + tqp_num].tqp_vector = tqp_vector;
tqp_vector->num_tqps++;
}
@@ -3474,28 +3626,22 @@ static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
return 0;
}
-static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
- unsigned int ring_type)
+static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
+ unsigned int ring_type)
{
- struct hns3_nic_ring_data *ring_data = priv->ring_data;
int queue_num = priv->ae_handle->kinfo.num_tqps;
- struct pci_dev *pdev = priv->ae_handle->pdev;
struct hns3_enet_ring *ring;
int desc_num;
- ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL);
- if (!ring)
- return -ENOMEM;
-
if (ring_type == HNAE3_RING_TYPE_TX) {
+ ring = &priv->ring[q->tqp_index];
desc_num = priv->ae_handle->kinfo.num_tx_desc;
- ring_data[q->tqp_index].ring = ring;
- ring_data[q->tqp_index].queue_index = q->tqp_index;
+ ring->queue_index = q->tqp_index;
ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
} else {
+ ring = &priv->ring[q->tqp_index + queue_num];
desc_num = priv->ae_handle->kinfo.num_rx_desc;
- ring_data[q->tqp_index + queue_num].ring = ring;
- ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index;
+ ring->queue_index = q->tqp_index;
ring->io_base = q->io_base;
}
@@ -3510,76 +3656,41 @@ static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
ring->desc_num = desc_num;
ring->next_to_use = 0;
ring->next_to_clean = 0;
-
- return 0;
}
-static int hns3_queue_to_ring(struct hnae3_queue *tqp,
- struct hns3_nic_priv *priv)
+static void hns3_queue_to_ring(struct hnae3_queue *tqp,
+ struct hns3_nic_priv *priv)
{
- int ret;
-
- ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
- if (ret)
- return ret;
-
- ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
- if (ret) {
- devm_kfree(priv->dev, priv->ring_data[tqp->tqp_index].ring);
- return ret;
- }
-
- return 0;
+ hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
+ hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
}
static int hns3_get_ring_config(struct hns3_nic_priv *priv)
{
struct hnae3_handle *h = priv->ae_handle;
struct pci_dev *pdev = h->pdev;
- int i, ret;
+ int i;
- priv->ring_data = devm_kzalloc(&pdev->dev,
- array3_size(h->kinfo.num_tqps,
- sizeof(*priv->ring_data),
- 2),
- GFP_KERNEL);
- if (!priv->ring_data)
+ priv->ring = devm_kzalloc(&pdev->dev,
+ array3_size(h->kinfo.num_tqps,
+ sizeof(*priv->ring), 2),
+ GFP_KERNEL);
+ if (!priv->ring)
return -ENOMEM;
- for (i = 0; i < h->kinfo.num_tqps; i++) {
- ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv);
- if (ret)
- goto err;
- }
+ for (i = 0; i < h->kinfo.num_tqps; i++)
+ hns3_queue_to_ring(h->kinfo.tqp[i], priv);
return 0;
-err:
- while (i--) {
- devm_kfree(priv->dev, priv->ring_data[i].ring);
- devm_kfree(priv->dev,
- priv->ring_data[i + h->kinfo.num_tqps].ring);
- }
-
- devm_kfree(&pdev->dev, priv->ring_data);
- priv->ring_data = NULL;
- return ret;
}
static void hns3_put_ring_config(struct hns3_nic_priv *priv)
{
- struct hnae3_handle *h = priv->ae_handle;
- int i;
-
- if (!priv->ring_data)
+ if (!priv->ring)
return;
- for (i = 0; i < h->kinfo.num_tqps; i++) {
- devm_kfree(priv->dev, priv->ring_data[i].ring);
- devm_kfree(priv->dev,
- priv->ring_data[i + h->kinfo.num_tqps].ring);
- }
- devm_kfree(priv->dev, priv->ring_data);
- priv->ring_data = NULL;
+ devm_kfree(priv->dev, priv->ring);
+ priv->ring = NULL;
}
static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
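With the hns3_nic_ring_data indirection removed, priv->ring is a single devm-managed array of 2 * num_tqps hns3_enet_ring entries, TX rings in the first half and RX rings in the second; that is why the per-ring devm_kzalloc() calls and the unwind loop in the old error path can be deleted outright (array3_size() is the overflow-checked n * size * 2 multiply). The indexing convention, restated as hypothetical helpers that do not exist in the tree:

	/* Hypothetical helpers restating the flat ring layout used above. */
	static struct hns3_enet_ring *tx_ring(struct hns3_nic_priv *priv, int q)
	{
		return &priv->ring[q];			/* first half: TX */
	}

	static struct hns3_enet_ring *rx_ring(struct hns3_nic_priv *priv, int q)
	{
		int num_tqps = priv->ae_handle->kinfo.num_tqps;

		return &priv->ring[q + num_tqps];	/* second half: RX */
	}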
@@ -3696,7 +3807,7 @@ static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv)
for (j = 0; j < tc_info->tqp_count; j++) {
struct hnae3_queue *q;
- q = priv->ring_data[tc_info->tqp_offset + j].ring->tqp;
+ q = priv->ring[tc_info->tqp_offset + j].tqp;
hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG,
tc_info->tc);
}
@@ -3711,21 +3822,21 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv)
int ret;
for (i = 0; i < ring_num; i++) {
- ret = hns3_alloc_ring_memory(priv->ring_data[i].ring);
+ ret = hns3_alloc_ring_memory(&priv->ring[i]);
if (ret) {
dev_err(priv->dev,
"Alloc ring memory fail! ret=%d\n", ret);
goto out_when_alloc_ring_memory;
}
- u64_stats_init(&priv->ring_data[i].ring->syncp);
+ u64_stats_init(&priv->ring[i].syncp);
}
return 0;
out_when_alloc_ring_memory:
for (j = i - 1; j >= 0; j--)
- hns3_fini_ring(priv->ring_data[j].ring);
+ hns3_fini_ring(&priv->ring[j]);
return -ENOMEM;
}
@@ -3736,30 +3847,31 @@ int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
int i;
for (i = 0; i < h->kinfo.num_tqps; i++) {
- hns3_fini_ring(priv->ring_data[i].ring);
- hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
+ hns3_fini_ring(&priv->ring[i]);
+ hns3_fini_ring(&priv->ring[i + h->kinfo.num_tqps]);
}
return 0;
}
/* Set mac addr if it is configured, or leave it to the AE driver */
-static int hns3_init_mac_addr(struct net_device *netdev, bool init)
+static int hns3_init_mac_addr(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = priv->ae_handle;
u8 mac_addr_temp[ETH_ALEN];
int ret = 0;
- if (h->ae_algo->ops->get_mac_addr && init) {
+ if (h->ae_algo->ops->get_mac_addr)
h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
- ether_addr_copy(netdev->dev_addr, mac_addr_temp);
- }
/* Check if the MAC address is valid, if not get a random one */
- if (!is_valid_ether_addr(netdev->dev_addr)) {
+ if (!is_valid_ether_addr(mac_addr_temp)) {
eth_hw_addr_random(netdev);
dev_warn(priv->dev, "using random MAC address %pM\n",
netdev->dev_addr);
+ } else {
+ ether_addr_copy(netdev->dev_addr, mac_addr_temp);
+ ether_addr_copy(netdev->perm_addr, mac_addr_temp);
}
if (h->ae_algo->ops->set_mac_addr)
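Dropping the init flag works because validation now applies to the address queried from the AE layer rather than to netdev->dev_addr: a random address is generated only when the queried one is invalid, and a valid one is mirrored into both dev_addr and perm_addr. The flow, as a reduced sketch rather than the literal function body:

	u8 mac[ETH_ALEN] = {};	/* all-zero is an invalid ether addr */

	if (ops->get_mac_addr)
		ops->get_mac_addr(h, mac);	/* ask the AE layer */

	if (!is_valid_ether_addr(mac)) {
		eth_hw_addr_random(netdev);	/* zero/multicast: randomize */
	} else {
		ether_addr_copy(netdev->dev_addr, mac);
		ether_addr_copy(netdev->perm_addr, mac); /* report as permanent */
	}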
@@ -3827,14 +3939,14 @@ static void hns3_info_show(struct hns3_nic_priv *priv)
struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
dev_info(priv->dev, "MAC address: %pM\n", priv->netdev->dev_addr);
- dev_info(priv->dev, "Task queue pairs numbers: %d\n", kinfo->num_tqps);
- dev_info(priv->dev, "RSS size: %d\n", kinfo->rss_size);
- dev_info(priv->dev, "Allocated RSS size: %d\n", kinfo->req_rss_size);
- dev_info(priv->dev, "RX buffer length: %d\n", kinfo->rx_buf_len);
- dev_info(priv->dev, "Desc num per TX queue: %d\n", kinfo->num_tx_desc);
- dev_info(priv->dev, "Desc num per RX queue: %d\n", kinfo->num_rx_desc);
- dev_info(priv->dev, "Total number of enabled TCs: %d\n", kinfo->num_tc);
- dev_info(priv->dev, "Max mtu size: %d\n", priv->netdev->max_mtu);
+ dev_info(priv->dev, "Task queue pairs numbers: %u\n", kinfo->num_tqps);
+ dev_info(priv->dev, "RSS size: %u\n", kinfo->rss_size);
+ dev_info(priv->dev, "Allocated RSS size: %u\n", kinfo->req_rss_size);
+ dev_info(priv->dev, "RX buffer length: %u\n", kinfo->rx_buf_len);
+ dev_info(priv->dev, "Desc num per TX queue: %u\n", kinfo->num_tx_desc);
+ dev_info(priv->dev, "Desc num per RX queue: %u\n", kinfo->num_rx_desc);
+ dev_info(priv->dev, "Total number of enabled TCs: %u\n", kinfo->num_tc);
+ dev_info(priv->dev, "Max mtu size: %u\n", priv->netdev->max_mtu);
}
static int hns3_client_init(struct hnae3_handle *handle)
@@ -3863,7 +3975,7 @@ static int hns3_client_init(struct hnae3_handle *handle)
handle->kinfo.netdev = netdev;
handle->priv = (void *)priv;
- hns3_init_mac_addr(netdev, true);
+ hns3_init_mac_addr(netdev);
hns3_set_default_feature(netdev);
@@ -3897,7 +4009,7 @@ static int hns3_client_init(struct hnae3_handle *handle)
ret = hns3_init_all_ring(priv);
if (ret) {
ret = -ENOMEM;
- goto out_init_ring_data;
+ goto out_init_ring;
}
ret = hns3_init_phy(netdev);
@@ -3936,12 +4048,12 @@ out_reg_netdev_fail:
hns3_uninit_phy(netdev);
out_init_phy:
hns3_uninit_all_ring(priv);
-out_init_ring_data:
+out_init_ring:
hns3_nic_uninit_vector_data(priv);
out_init_vector_data:
hns3_nic_dealloc_vector_data(priv);
out_alloc_vector_data:
- priv->ring_data = NULL;
+ priv->ring = NULL;
out_get_ring_cfg:
priv->ae_handle = NULL;
free_netdev(netdev);
@@ -4102,7 +4214,7 @@ static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
/* if alloc new buffer fail, exit directly
* and reclear in up flow.
*/
- netdev_warn(ring->tqp->handle->kinfo.netdev,
+ netdev_warn(ring_to_netdev(ring),
"reserve buffer map failed, ret = %d\n",
ret);
return ret;
@@ -4148,10 +4260,10 @@ static void hns3_clear_all_ring(struct hnae3_handle *h, bool force)
for (i = 0; i < h->kinfo.num_tqps; i++) {
struct hns3_enet_ring *ring;
- ring = priv->ring_data[i].ring;
+ ring = &priv->ring[i];
hns3_clear_tx_ring(ring);
- ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
+ ring = &priv->ring[i + h->kinfo.num_tqps];
/* Continue to clear other rings even if clearing some
* rings failed.
*/
@@ -4175,16 +4287,16 @@ int hns3_nic_reset_all_ring(struct hnae3_handle *h)
if (ret)
return ret;
- hns3_init_ring_hw(priv->ring_data[i].ring);
+ hns3_init_ring_hw(&priv->ring[i]);
/* We need to clear tx ring here because self test will
* use the ring and will not run down before up
*/
- hns3_clear_tx_ring(priv->ring_data[i].ring);
- priv->ring_data[i].ring->next_to_clean = 0;
- priv->ring_data[i].ring->next_to_use = 0;
+ hns3_clear_tx_ring(&priv->ring[i]);
+ priv->ring[i].next_to_clean = 0;
+ priv->ring[i].next_to_use = 0;
- rx_ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
+ rx_ring = &priv->ring[i + h->kinfo.num_tqps];
hns3_init_ring_hw(rx_ring);
ret = hns3_clear_rx_ring(rx_ring);
if (ret)
@@ -4331,7 +4443,7 @@ static int hns3_reset_notify_restore_enet(struct hnae3_handle *handle)
bool vlan_filter_enable;
int ret;
- ret = hns3_init_mac_addr(netdev, false);
+ ret = hns3_init_mac_addr(netdev);
if (ret)
return ret;
@@ -4454,7 +4566,7 @@ int hns3_set_channels(struct net_device *netdev,
if (new_tqp_num > hns3_get_max_available_channels(h) ||
new_tqp_num < 1) {
dev_err(&netdev->dev,
- "Change tqps fail, the tqp range is from 1 to %d",
+ "Change tqps fail, the tqp range is from 1 to %u",
hns3_get_max_available_channels(h));
return -EINVAL;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 2110fa3b4479..345633f9dbf6 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -76,7 +76,7 @@ enum hns3_nic_state {
#define HNS3_RING_NAME_LEN 16
#define HNS3_BUFFER_SIZE_2048 2048
#define HNS3_RING_MAX_PENDING 32760
-#define HNS3_RING_MIN_PENDING 24
+#define HNS3_RING_MIN_PENDING 72
#define HNS3_RING_BD_MULTIPLE 8
/* max frame size of mac */
#define HNS3_MAC_MAX_FRAME 9728
@@ -186,7 +186,7 @@ enum hns3_nic_state {
#define HNS3_TXD_MSS_S 0
#define HNS3_TXD_MSS_M (0x3fff << HNS3_TXD_MSS_S)
-#define HNS3_TX_LAST_SIZE_M 0xffff
+#define HNS3_TX_LAST_SIZE_M 0xffff
#define HNS3_VECTOR_TX_IRQ BIT_ULL(0)
#define HNS3_VECTOR_RX_IRQ BIT_ULL(1)
@@ -195,9 +195,13 @@ enum hns3_nic_state {
#define HNS3_VECTOR_INITED 1
#define HNS3_MAX_BD_SIZE 65535
-#define HNS3_MAX_BD_NUM_NORMAL 8
-#define HNS3_MAX_BD_NUM_TSO 63
-#define HNS3_MAX_BD_PER_PKT MAX_SKB_FRAGS
+#define HNS3_MAX_NON_TSO_BD_NUM 8U
+#define HNS3_MAX_TSO_BD_NUM 63U
+#define HNS3_MAX_TSO_SIZE \
+ (HNS3_MAX_BD_SIZE * HNS3_MAX_TSO_BD_NUM)
+
+#define HNS3_MAX_NON_TSO_SIZE \
+ (HNS3_MAX_BD_SIZE * HNS3_MAX_NON_TSO_BD_NUM)
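With HNS3_MAX_BD_SIZE at 65535, the two derived limits work out to 65535 * 63 = 4128705 bytes per TSO skb and 65535 * 8 = 524280 bytes per non-TSO skb. Hypothetical compile-time checks (not part of the patch) restating that arithmetic:

	#include <assert.h>

	#define HNS3_MAX_BD_SIZE	65535U
	#define HNS3_MAX_TSO_BD_NUM	63U
	#define HNS3_MAX_NON_TSO_BD_NUM	8U

	static_assert(HNS3_MAX_BD_SIZE * HNS3_MAX_TSO_BD_NUM == 4128705U,
		      "max TSO payload per skb");
	static_assert(HNS3_MAX_BD_SIZE * HNS3_MAX_NON_TSO_BD_NUM == 524280U,
		      "max non-TSO payload per skb");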
#define HNS3_VECTOR_GL0_OFFSET 0x100
#define HNS3_VECTOR_GL1_OFFSET 0x200
@@ -309,7 +313,7 @@ struct hns3_desc_cb {
u16 reuse_flag;
- /* desc type, used by the ring user to mark the type of the priv data */
+ /* desc type, used by the ring user to mark the type of the priv data */
u16 type;
};
@@ -405,6 +409,7 @@ struct hns3_enet_ring {
struct hns3_enet_ring *next;
struct hns3_enet_tqp_vector *tqp_vector;
struct hnae3_queue *tqp;
+ int queue_index;
struct device *dev; /* will be used for DMA mapping of descriptors */
/* statistic */
@@ -430,18 +435,7 @@ struct hns3_enet_ring {
int pending_buf;
struct sk_buff *skb;
struct sk_buff *tail_skb;
-};
-
-struct hns_queue;
-
-struct hns3_nic_ring_data {
- struct hns3_enet_ring *ring;
- struct napi_struct napi;
- int queue_index;
- int (*poll_one)(struct hns3_nic_ring_data *, int, void *);
- void (*ex_process)(struct hns3_nic_ring_data *, struct sk_buff *);
- void (*fini_process)(struct hns3_nic_ring_data *);
-};
+} ____cacheline_internodealigned_in_smp;
enum hns3_flow_level_range {
HNS3_FLOW_LOW = 0,
@@ -518,7 +512,7 @@ struct hns3_nic_priv {
* the cb for nic to manage the ring buffer, the first half of the
* array is for tx_ring and vice versa for the second half
*/
- struct hns3_nic_ring_data *ring_data;
+ struct hns3_enet_ring *ring;
struct hns3_enet_tqp_vector *tqp_vector;
u16 vector_num;
@@ -613,11 +607,11 @@ static inline bool hns3_nic_resetting(struct net_device *netdev)
#define ring_to_dev(ring) ((ring)->dev)
+#define ring_to_netdev(ring) ((ring)->tqp_vector->napi.dev)
+
#define ring_to_dma_dir(ring) (HNAE3_IS_TX_RING(ring) ? \
DMA_TO_DEVICE : DMA_FROM_DEVICE)
-#define tx_ring_data(priv, idx) ((priv)->ring_data[idx])
-
#define hns3_buf_size(_ring) ((_ring)->buf_size)
static inline unsigned int hns3_page_order(struct hns3_enet_ring *ring)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 680c3508876d..b104d3c3b757 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -203,7 +203,7 @@ static u32 hns3_lb_check_rx_ring(struct hns3_nic_priv *priv, u32 budget)
kinfo = &h->kinfo;
for (i = kinfo->num_tqps; i < kinfo->num_tqps * 2; i++) {
- struct hns3_enet_ring *ring = priv->ring_data[i].ring;
+ struct hns3_enet_ring *ring = &priv->ring[i];
struct hns3_enet_ring_group *rx_group;
u64 pre_rx_pkt;
@@ -226,7 +226,7 @@ static void hns3_lb_clear_tx_ring(struct hns3_nic_priv *priv, u32 start_ringid,
u32 i;
for (i = start_ringid; i <= end_ringid; i++) {
- struct hns3_enet_ring *ring = priv->ring_data[i].ring;
+ struct hns3_enet_ring *ring = &priv->ring[i];
hns3_clean_tx_ring(ring);
}
@@ -491,7 +491,7 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
/* get stats for Tx */
for (i = 0; i < kinfo->num_tqps; i++) {
- ring = nic_priv->ring_data[i].ring;
+ ring = &nic_priv->ring[i];
for (j = 0; j < HNS3_TXQ_STATS_COUNT; j++) {
stat = (u8 *)ring + hns3_txq_stats[j].stats_offset;
*data++ = *(u64 *)stat;
@@ -500,7 +500,7 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
/* get stats for Rx */
for (i = 0; i < kinfo->num_tqps; i++) {
- ring = nic_priv->ring_data[i + kinfo->num_tqps].ring;
+ ring = &nic_priv->ring[i + kinfo->num_tqps];
for (j = 0; j < HNS3_RXQ_STATS_COUNT; j++) {
stat = (u8 *)ring + hns3_rxq_stats[j].stats_offset;
*data++ = *(u64 *)stat;
@@ -603,8 +603,8 @@ static void hns3_get_ringparam(struct net_device *netdev,
param->tx_max_pending = HNS3_RING_MAX_PENDING;
param->rx_max_pending = HNS3_RING_MAX_PENDING;
- param->tx_pending = priv->ring_data[0].ring->desc_num;
- param->rx_pending = priv->ring_data[queue_num].ring->desc_num;
+ param->tx_pending = priv->ring[0].desc_num;
+ param->rx_pending = priv->ring[queue_num].desc_num;
}
static void hns3_get_pauseparam(struct net_device *netdev,
@@ -906,9 +906,8 @@ static void hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv,
h->kinfo.num_rx_desc = rx_desc_num;
for (i = 0; i < h->kinfo.num_tqps; i++) {
- priv->ring_data[i].ring->desc_num = tx_desc_num;
- priv->ring_data[i + h->kinfo.num_tqps].ring->desc_num =
- rx_desc_num;
+ priv->ring[i].desc_num = tx_desc_num;
+ priv->ring[i + h->kinfo.num_tqps].desc_num = rx_desc_num;
}
}
@@ -924,7 +923,7 @@ static struct hns3_enet_ring *hns3_backup_ringparam(struct hns3_nic_priv *priv)
return NULL;
for (i = 0; i < handle->kinfo.num_tqps * 2; i++) {
- memcpy(&tmp_rings[i], priv->ring_data[i].ring,
+ memcpy(&tmp_rings[i], &priv->ring[i],
sizeof(struct hns3_enet_ring));
tmp_rings[i].skb = NULL;
}
@@ -972,8 +971,8 @@ static int hns3_set_ringparam(struct net_device *ndev,
/* Hardware requires that its descriptors must be multiple of eight */
new_tx_desc_num = ALIGN(param->tx_pending, HNS3_RING_BD_MULTIPLE);
new_rx_desc_num = ALIGN(param->rx_pending, HNS3_RING_BD_MULTIPLE);
- old_tx_desc_num = priv->ring_data[0].ring->desc_num;
- old_rx_desc_num = priv->ring_data[queue_num].ring->desc_num;
+ old_tx_desc_num = priv->ring[0].desc_num;
+ old_rx_desc_num = priv->ring[queue_num].desc_num;
if (old_tx_desc_num == new_tx_desc_num &&
old_rx_desc_num == new_rx_desc_num)
return 0;
@@ -986,7 +985,7 @@ static int hns3_set_ringparam(struct net_device *ndev,
}
netdev_info(ndev,
- "Changing Tx/Rx ring depth from %d/%d to %d/%d\n",
+ "Changing Tx/Rx ring depth from %u/%u to %u/%u\n",
old_tx_desc_num, old_rx_desc_num,
new_tx_desc_num, new_rx_desc_num);
@@ -1002,7 +1001,7 @@ static int hns3_set_ringparam(struct net_device *ndev,
hns3_change_all_ring_bd_num(priv, old_tx_desc_num,
old_rx_desc_num);
for (i = 0; i < h->kinfo.num_tqps * 2; i++)
- memcpy(priv->ring_data[i].ring, &tmp_rings[i],
+ memcpy(&priv->ring[i], &tmp_rings[i],
sizeof(struct hns3_enet_ring));
} else {
for (i = 0; i < h->kinfo.num_tqps * 2; i++)
@@ -1098,13 +1097,13 @@ static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue,
if (queue >= queue_num) {
netdev_err(netdev,
- "Invalid queue value %d! Queue max id=%d\n",
+ "Invalid queue value %u! Queue max id=%u\n",
queue, queue_num - 1);
return -EINVAL;
}
- tx_vector = priv->ring_data[queue].ring->tqp_vector;
- rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector;
+ tx_vector = priv->ring[queue].tqp_vector;
+ rx_vector = priv->ring[queue_num + queue].tqp_vector;
cmd->use_adaptive_tx_coalesce =
tx_vector->tx_group.coal.gl_adapt_enable;
@@ -1148,14 +1147,14 @@ static int hns3_check_gl_coalesce_para(struct net_device *netdev,
rx_gl = hns3_gl_round_down(cmd->rx_coalesce_usecs);
if (rx_gl != cmd->rx_coalesce_usecs) {
netdev_info(netdev,
- "rx_usecs(%d) rounded down to %d, because it must be multiple of 2.\n",
+ "rx_usecs(%u) rounded down to %u, because it must be multiple of 2.\n",
cmd->rx_coalesce_usecs, rx_gl);
}
tx_gl = hns3_gl_round_down(cmd->tx_coalesce_usecs);
if (tx_gl != cmd->tx_coalesce_usecs) {
netdev_info(netdev,
- "tx_usecs(%d) rounded down to %d, because it must be multiple of 2.\n",
+ "tx_usecs(%u) rounded down to %u, because it must be multiple of 2.\n",
cmd->tx_coalesce_usecs, tx_gl);
}
@@ -1183,7 +1182,7 @@ static int hns3_check_rl_coalesce_para(struct net_device *netdev,
rl = hns3_rl_round_down(cmd->rx_coalesce_usecs_high);
if (rl != cmd->rx_coalesce_usecs_high) {
netdev_info(netdev,
- "usecs_high(%d) rounded down to %d, because it must be multiple of 4.\n",
+ "usecs_high(%u) rounded down to %u, because it must be multiple of 4.\n",
cmd->rx_coalesce_usecs_high, rl);
}
@@ -1212,7 +1211,7 @@ static int hns3_check_coalesce_para(struct net_device *netdev,
if (cmd->use_adaptive_tx_coalesce == 1 ||
cmd->use_adaptive_rx_coalesce == 1) {
netdev_info(netdev,
- "adaptive-tx=%d and adaptive-rx=%d, tx_usecs or rx_usecs will changed dynamically.\n",
+ "adaptive-tx=%u and adaptive-rx=%u, tx_usecs or rx_usecs will changed dynamically.\n",
cmd->use_adaptive_tx_coalesce,
cmd->use_adaptive_rx_coalesce);
}
@@ -1229,8 +1228,8 @@ static void hns3_set_coalesce_per_queue(struct net_device *netdev,
struct hnae3_handle *h = priv->ae_handle;
int queue_num = h->kinfo.num_tqps;
- tx_vector = priv->ring_data[queue].ring->tqp_vector;
- rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector;
+ tx_vector = priv->ring[queue].tqp_vector;
+ rx_vector = priv->ring[queue_num + queue].tqp_vector;
tx_vector->tx_group.coal.gl_adapt_enable =
cmd->use_adaptive_tx_coalesce;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index ecf58cfd253d..940ead3970d1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -145,7 +145,7 @@ static int hclge_cmd_csq_clean(struct hclge_hw *hw)
rmb(); /* Make sure head is ready before touch any data */
if (!is_valid_csq_clean_head(csq, head)) {
- dev_warn(&hdev->pdev->dev, "wrong cmd head (%d, %d-%d)\n", head,
+ dev_warn(&hdev->pdev->dev, "wrong cmd head (%u, %d-%d)\n", head,
csq->next_to_use, csq->next_to_clean);
dev_warn(&hdev->pdev->dev,
"Disabling any further commands to IMP firmware\n");
@@ -314,11 +314,10 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
} while (timeout < hw->cmq.tx_timeout);
}
- if (!complete) {
+ if (!complete)
retval = -EBADE;
- } else {
+ else
retval = hclge_cmd_check_retval(hw, desc, num, ntc);
- }
/* Clean the command send queue */
handle = hclge_cmd_csq_clean(hw);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 4821fe08b5e4..af96e7904925 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -5,8 +5,10 @@
#define __HCLGE_CMD_H
#include <linux/types.h>
#include <linux/io.h>
+#include <linux/etherdevice.h>
#define HCLGE_CMDQ_TX_TIMEOUT 30000
+#define HCLGE_DESC_DATA_LEN 6
struct hclge_dev;
struct hclge_desc {
@@ -18,7 +20,7 @@ struct hclge_desc {
__le16 flag;
__le16 retval;
__le16 rsv;
- __le32 data[6];
+ __le32 data[HCLGE_DESC_DATA_LEN];
};
struct hclge_cmq_ring {
@@ -244,7 +246,7 @@ enum hclge_opcode_type {
/* QCN commands */
HCLGE_OPC_QCN_MOD_CFG = 0x1A01,
HCLGE_OPC_QCN_GRP_TMPLT_CFG = 0x1A02,
- HCLGE_OPC_QCN_SHAPPING_IR_CFG = 0x1A03,
+ HCLGE_OPC_QCN_SHAPPING_CFG = 0x1A03,
HCLGE_OPC_QCN_SHAPPING_BS_CFG = 0x1A04,
HCLGE_OPC_QCN_QSET_LINK_CFG = 0x1A05,
HCLGE_OPC_QCN_RP_STATUS_GET = 0x1A06,
@@ -259,6 +261,7 @@ enum hclge_opcode_type {
/* NCL config command */
HCLGE_OPC_QUERY_NCL_CONFIG = 0x7011,
+
/* M7 stats command */
HCLGE_OPC_M7_STATS_BD = 0x7012,
HCLGE_OPC_M7_STATS_INFO = 0x7013,
@@ -428,8 +431,10 @@ struct hclge_rx_pkt_buf_cmd {
#define HCLGE_PF_MAC_NUM_MASK 0x3
#define HCLGE_PF_STATE_MAIN BIT(HCLGE_PF_STATE_MAIN_B)
#define HCLGE_PF_STATE_DONE BIT(HCLGE_PF_STATE_DONE_B)
+#define HCLGE_VF_RST_STATUS_CMD 4
+
struct hclge_func_status_cmd {
- __le32 vf_rst_state[4];
+ __le32 vf_rst_state[HCLGE_VF_RST_STATUS_CMD];
u8 pf_state;
u8 mac_id;
u8 rsv1;
@@ -485,10 +490,12 @@ struct hclge_pf_res_cmd {
#define HCLGE_CFG_UMV_TBL_SPACE_S 16
#define HCLGE_CFG_UMV_TBL_SPACE_M GENMASK(31, 16)
+#define HCLGE_CFG_CMD_CNT 4
+
struct hclge_cfg_param_cmd {
__le32 offset;
__le32 rsv;
- __le32 param[4];
+ __le32 param[HCLGE_CFG_CMD_CNT];
};
#define HCLGE_MAC_MODE 0x0
@@ -712,8 +719,7 @@ struct hclge_mac_mgr_tbl_entry_cmd {
u8 flags;
u8 resp_code;
__le16 vlan_tag;
- __le32 mac_addr_hi32;
- __le16 mac_addr_lo16;
+ u8 mac_addr[ETH_ALEN];
__le16 rsv1;
__le16 ethter_type;
__le16 egress_port;
@@ -758,20 +764,27 @@ struct hclge_vlan_filter_ctrl_cmd {
u8 rsv2[19];
};
+#define HCLGE_VLAN_ID_OFFSET_STEP 160
+#define HCLGE_VLAN_BYTE_SIZE 8
+#define HCLGE_VLAN_OFFSET_BITMAP \
+ (HCLGE_VLAN_ID_OFFSET_STEP / HCLGE_VLAN_BYTE_SIZE)
+
struct hclge_vlan_filter_pf_cfg_cmd {
u8 vlan_offset;
u8 vlan_cfg;
u8 rsv[2];
- u8 vlan_offset_bitmap[20];
+ u8 vlan_offset_bitmap[HCLGE_VLAN_OFFSET_BITMAP];
};
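These constants replace the magic 160/8/20 in hclge_set_port_vlan_filter(): each PF VLAN-filter command covers a window of 160 VLAN ids tracked in a 20-byte bitmap, so a vlan_id decomposes into (window, byte, bit). A standalone worked example:

	#include <stdio.h>

	#define STEP 160	/* HCLGE_VLAN_ID_OFFSET_STEP */
	#define BYTE 8		/* HCLGE_VLAN_BYTE_SIZE */

	int main(void)
	{
		unsigned int vlan_id = 1000;	/* example VLAN id */

		/* window 6 (ids 960-1119), byte 5 of 20, bit mask 0x01 */
		printf("offset=%u byte=%u mask=0x%02x\n",
		       vlan_id / STEP,
		       (vlan_id % STEP) / BYTE,
		       1u << (vlan_id % BYTE));
		return 0;
	}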
+#define HCLGE_MAX_VF_BYTES 16
+
struct hclge_vlan_filter_vf_cfg_cmd {
__le16 vlan_id;
u8 resp_code;
u8 rsv;
u8 vlan_cfg;
u8 rsv1[3];
- u8 vf_bitmap[16];
+ u8 vf_bitmap[HCLGE_MAX_VF_BYTES];
};
#define HCLGE_SWITCH_ANTI_SPOOF_B 0U
@@ -806,6 +819,7 @@ enum hclge_mac_vlan_cfg_sel {
#define HCLGE_CFG_NIC_ROCE_SEL_B 4
#define HCLGE_ACCEPT_TAG2_B 5
#define HCLGE_ACCEPT_UNTAG2_B 6
+#define HCLGE_VF_NUM_PER_BYTE 8
struct hclge_vport_vtag_tx_cfg_cmd {
u8 vport_vlan_cfg;
@@ -813,7 +827,7 @@ struct hclge_vport_vtag_tx_cfg_cmd {
u8 rsv1[2];
__le16 def_vlan_tag1;
__le16 def_vlan_tag2;
- u8 vf_bitmap[8];
+ u8 vf_bitmap[HCLGE_VF_NUM_PER_BYTE];
u8 rsv2[8];
};
@@ -825,7 +839,7 @@ struct hclge_vport_vtag_rx_cfg_cmd {
u8 vport_vlan_cfg;
u8 vf_offset;
u8 rsv1[6];
- u8 vf_bitmap[8];
+ u8 vf_bitmap[HCLGE_VF_NUM_PER_BYTE];
u8 rsv2[8];
};
@@ -864,7 +878,7 @@ struct hclge_mac_ethertype_idx_rd_cmd {
u8 flags;
u8 resp_code;
__le16 vlan_tag;
- u8 mac_addr[6];
+ u8 mac_addr[ETH_ALEN];
__le16 index;
__le16 ethter_type;
__le16 egress_port;
@@ -1090,9 +1104,6 @@ void hclge_cmd_setup_basic_desc(struct hclge_desc *desc,
enum hclge_opcode_type opcode, bool is_read);
void hclge_cmd_reuse_desc(struct hclge_desc *desc, bool is_read);
-int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
- struct hclge_promisc_param *param);
-
enum hclge_cmd_status hclge_cmd_mdio_write(struct hclge_hw *hw,
struct hclge_desc *desc);
enum hclge_cmd_status hclge_cmd_mdio_read(struct hclge_hw *hw,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index c063301d6060..49ad8483723d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -87,7 +87,7 @@ static int hclge_dcb_common_validate(struct hclge_dev *hdev, u8 num_tc,
for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
if (prio_tc[i] >= num_tc) {
dev_err(&hdev->pdev->dev,
- "prio_tc[%u] checking failed, %u >= num_tc(%u)\n",
+ "prio_tc[%d] checking failed, %u >= num_tc(%u)\n",
i, prio_tc[i], num_tc);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index d0128d792717..112df34b3869 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -145,7 +145,7 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
return;
}
- buf_len = sizeof(struct hclge_desc) * bd_num;
+ buf_len = sizeof(struct hclge_desc) * bd_num;
desc_src = kzalloc(buf_len, GFP_KERNEL);
if (!desc_src) {
dev_err(&hdev->pdev->dev, "call kzalloc failed\n");
@@ -153,7 +153,7 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
}
desc = desc_src;
- ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num, reg_msg->cmd);
+ ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num, reg_msg->cmd);
if (ret) {
kfree(desc_src);
return;
@@ -169,7 +169,7 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
if (dfx_message->flag)
dev_info(&hdev->pdev->dev, "%s: 0x%x\n",
dfx_message->message,
- desc->data[i % entries_per_desc]);
+ le32_to_cpu(desc->data[i % entries_per_desc]));
dfx_message++;
}
@@ -237,44 +237,48 @@ static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, const char *cmd_buf)
if (ret)
return;
- dev_info(dev, "sch_nq_cnt: 0x%x\n", desc[0].data[1]);
+ dev_info(dev, "sch_nq_cnt: 0x%x\n", le32_to_cpu(desc[0].data[1]));
ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, HCLGE_OPC_SCH_RQ_CNT);
if (ret)
return;
- dev_info(dev, "sch_rq_cnt: 0x%x\n", desc[0].data[1]);
+ dev_info(dev, "sch_rq_cnt: 0x%x\n", le32_to_cpu(desc[0].data[1]));
ret = hclge_dbg_cmd_send(hdev, desc, 0, 2, HCLGE_OPC_TM_INTERNAL_STS);
if (ret)
return;
- dev_info(dev, "pri_bp: 0x%x\n", desc[0].data[1]);
- dev_info(dev, "fifo_dfx_info: 0x%x\n", desc[0].data[2]);
- dev_info(dev, "sch_roce_fifo_afull_gap: 0x%x\n", desc[0].data[3]);
- dev_info(dev, "tx_private_waterline: 0x%x\n", desc[0].data[4]);
- dev_info(dev, "tm_bypass_en: 0x%x\n", desc[0].data[5]);
- dev_info(dev, "SSU_TM_BYPASS_EN: 0x%x\n", desc[1].data[0]);
- dev_info(dev, "SSU_RESERVE_CFG: 0x%x\n", desc[1].data[1]);
+ dev_info(dev, "pri_bp: 0x%x\n", le32_to_cpu(desc[0].data[1]));
+ dev_info(dev, "fifo_dfx_info: 0x%x\n", le32_to_cpu(desc[0].data[2]));
+ dev_info(dev, "sch_roce_fifo_afull_gap: 0x%x\n",
+ le32_to_cpu(desc[0].data[3]));
+ dev_info(dev, "tx_private_waterline: 0x%x\n",
+ le32_to_cpu(desc[0].data[4]));
+ dev_info(dev, "tm_bypass_en: 0x%x\n", le32_to_cpu(desc[0].data[5]));
+ dev_info(dev, "SSU_TM_BYPASS_EN: 0x%x\n", le32_to_cpu(desc[1].data[0]));
+ dev_info(dev, "SSU_RESERVE_CFG: 0x%x\n", le32_to_cpu(desc[1].data[1]));
ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
HCLGE_OPC_TM_INTERNAL_CNT);
if (ret)
return;
- dev_info(dev, "SCH_NIC_NUM: 0x%x\n", desc[0].data[1]);
- dev_info(dev, "SCH_ROCE_NUM: 0x%x\n", desc[0].data[2]);
+ dev_info(dev, "SCH_NIC_NUM: 0x%x\n", le32_to_cpu(desc[0].data[1]));
+ dev_info(dev, "SCH_ROCE_NUM: 0x%x\n", le32_to_cpu(desc[0].data[2]));
ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
HCLGE_OPC_TM_INTERNAL_STS_1);
if (ret)
return;
- dev_info(dev, "TC_MAP_SEL: 0x%x\n", desc[0].data[1]);
- dev_info(dev, "IGU_PFC_PRI_EN: 0x%x\n", desc[0].data[2]);
- dev_info(dev, "MAC_PFC_PRI_EN: 0x%x\n", desc[0].data[3]);
- dev_info(dev, "IGU_PRI_MAP_TC_CFG: 0x%x\n", desc[0].data[4]);
- dev_info(dev, "IGU_TX_PRI_MAP_TC_CFG: 0x%x\n", desc[0].data[5]);
+ dev_info(dev, "TC_MAP_SEL: 0x%x\n", le32_to_cpu(desc[0].data[1]));
+ dev_info(dev, "IGU_PFC_PRI_EN: 0x%x\n", le32_to_cpu(desc[0].data[2]));
+ dev_info(dev, "MAC_PFC_PRI_EN: 0x%x\n", le32_to_cpu(desc[0].data[3]));
+ dev_info(dev, "IGU_PRI_MAP_TC_CFG: 0x%x\n",
+ le32_to_cpu(desc[0].data[4]));
+ dev_info(dev, "IGU_TX_PRI_MAP_TC_CFG: 0x%x\n",
+ le32_to_cpu(desc[0].data[5]));
}
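Every hunk in this debugfs file fixes the same class of bug: desc.data[] holds __le32 values straight off the command-queue DMA, so printing them raw is only correct on little-endian hosts. Wrapping each read in le32_to_cpu() (or le16_to_cpu() for the 16-bit fields) makes the output byte-order independent and satisfies sparse's __le32 annotations. The pattern in isolation:

	/* Device-endian data must be converted before use. */
	__le32 raw = desc->data[1];	/* as DMA'd by the firmware */
	u32 val = le32_to_cpu(raw);	/* host order; a no-op on LE CPUs */

	dev_info(dev, "sch_nq_cnt: 0x%x\n", val);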
static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, const char *cmd_buf)
@@ -364,7 +368,7 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "PG_C pg_id: %u\n", pg_shap_cfg_cmd->pg_id);
dev_info(&hdev->pdev->dev, "PG_C pg_shapping: 0x%x\n",
- pg_shap_cfg_cmd->pg_shapping_para);
+ le32_to_cpu(pg_shap_cfg_cmd->pg_shapping_para));
cmd = HCLGE_OPC_TM_PG_P_SHAPPING;
hclge_cmd_setup_basic_desc(&desc, cmd, true);
@@ -375,7 +379,7 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "PG_P pg_id: %u\n", pg_shap_cfg_cmd->pg_id);
dev_info(&hdev->pdev->dev, "PG_P pg_shapping: 0x%x\n",
- pg_shap_cfg_cmd->pg_shapping_para);
+ le32_to_cpu(pg_shap_cfg_cmd->pg_shapping_para));
cmd = HCLGE_OPC_TM_PORT_SHAPPING;
hclge_cmd_setup_basic_desc(&desc, cmd, true);
@@ -385,7 +389,7 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
port_shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "PORT port_shapping: 0x%x\n",
- port_shap_cfg_cmd->port_shapping_para);
+ le32_to_cpu(port_shap_cfg_cmd->port_shapping_para));
cmd = HCLGE_OPC_TM_PG_SCH_MODE_CFG;
hclge_cmd_setup_basic_desc(&desc, cmd, true);
@@ -393,7 +397,8 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
if (ret)
goto err_tm_pg_cmd_send;
- dev_info(&hdev->pdev->dev, "PG_SCH pg_id: %u\n", desc.data[0]);
+ dev_info(&hdev->pdev->dev, "PG_SCH pg_id: %u\n",
+ le32_to_cpu(desc.data[0]));
cmd = HCLGE_OPC_TM_PRI_SCH_MODE_CFG;
hclge_cmd_setup_basic_desc(&desc, cmd, true);
@@ -401,7 +406,8 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
if (ret)
goto err_tm_pg_cmd_send;
- dev_info(&hdev->pdev->dev, "PRI_SCH pri_id: %u\n", desc.data[0]);
+ dev_info(&hdev->pdev->dev, "PRI_SCH pri_id: %u\n",
+ le32_to_cpu(desc.data[0]));
cmd = HCLGE_OPC_TM_QS_SCH_MODE_CFG;
hclge_cmd_setup_basic_desc(&desc, cmd, true);
@@ -409,7 +415,8 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
if (ret)
goto err_tm_pg_cmd_send;
- dev_info(&hdev->pdev->dev, "QS_SCH qs_id: %u\n", desc.data[0]);
+ dev_info(&hdev->pdev->dev, "QS_SCH qs_id: %u\n",
+ le32_to_cpu(desc.data[0]));
if (!hnae3_dev_dcb_supported(hdev)) {
dev_info(&hdev->pdev->dev,
@@ -429,7 +436,7 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_group_id: 0x%x\n",
bp_to_qs_map_cmd->qs_group_id);
dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_bit_map: 0x%x\n",
- bp_to_qs_map_cmd->qs_bit_map);
+ le32_to_cpu(bp_to_qs_map_cmd->qs_bit_map));
return;
err_tm_pg_cmd_send:
@@ -471,7 +478,7 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
qs_to_pri_map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "QS_TO_PRI qs_id: %u\n",
- qs_to_pri_map->qs_id);
+ le16_to_cpu(qs_to_pri_map->qs_id));
dev_info(&hdev->pdev->dev, "QS_TO_PRI priority: %u\n",
qs_to_pri_map->priority);
dev_info(&hdev->pdev->dev, "QS_TO_PRI link_vld: %u\n",
@@ -484,9 +491,10 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
goto err_tm_cmd_send;
nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
- dev_info(&hdev->pdev->dev, "NQ_TO_QS nq_id: %u\n", nq_to_qs_map->nq_id);
+ dev_info(&hdev->pdev->dev, "NQ_TO_QS nq_id: %u\n",
+ le16_to_cpu(nq_to_qs_map->nq_id));
dev_info(&hdev->pdev->dev, "NQ_TO_QS qset_id: 0x%x\n",
- nq_to_qs_map->qset_id);
+ le16_to_cpu(nq_to_qs_map->qset_id));
cmd = HCLGE_OPC_TM_PG_WEIGHT;
hclge_cmd_setup_basic_desc(&desc, cmd, true);
@@ -505,7 +513,8 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
goto err_tm_cmd_send;
qs_weight = (struct hclge_qs_weight_cmd *)desc.data;
- dev_info(&hdev->pdev->dev, "QS qs_id: %u\n", qs_weight->qs_id);
+ dev_info(&hdev->pdev->dev, "QS qs_id: %u\n",
+ le16_to_cpu(qs_weight->qs_id));
dev_info(&hdev->pdev->dev, "QS dwrr: %u\n", qs_weight->dwrr);
cmd = HCLGE_OPC_TM_PRI_WEIGHT;
@@ -527,7 +536,7 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "PRI_C pri_id: %u\n", shap_cfg_cmd->pri_id);
dev_info(&hdev->pdev->dev, "PRI_C pri_shapping: 0x%x\n",
- shap_cfg_cmd->pri_shapping_para);
+ le32_to_cpu(shap_cfg_cmd->pri_shapping_para));
cmd = HCLGE_OPC_TM_PRI_P_SHAPPING;
hclge_cmd_setup_basic_desc(&desc, cmd, true);
@@ -538,7 +547,7 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "PRI_P pri_id: %u\n", shap_cfg_cmd->pri_id);
dev_info(&hdev->pdev->dev, "PRI_P pri_shapping: 0x%x\n",
- shap_cfg_cmd->pri_shapping_para);
+ le32_to_cpu(shap_cfg_cmd->pri_shapping_para));
hclge_dbg_dump_tm_pg(hdev);
@@ -658,7 +667,7 @@ static void hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev)
dev_info(&hdev->pdev->dev, "pause_trans_gap: 0x%x\n",
pause_param->pause_trans_gap);
dev_info(&hdev->pdev->dev, "pause_trans_time: 0x%x\n",
- pause_param->pause_trans_time);
+ le16_to_cpu(pause_param->pause_trans_time));
}
static void hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev)
@@ -712,7 +721,7 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc[0].data;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
dev_info(&hdev->pdev->dev, "tx_packet_buf_tc_%d: 0x%x\n", i,
- tx_buf_cmd->tx_pkt_buff[i]);
+ le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i]));
cmd = HCLGE_OPC_RX_PRIV_BUFF_ALLOC;
hclge_cmd_setup_basic_desc(desc, cmd, true);
@@ -724,10 +733,10 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc[0].data;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
dev_info(&hdev->pdev->dev, "rx_packet_buf_tc_%d: 0x%x\n", i,
- rx_buf_cmd->buf_num[i]);
+ le16_to_cpu(rx_buf_cmd->buf_num[i]));
dev_info(&hdev->pdev->dev, "rx_share_buf: 0x%x\n",
- rx_buf_cmd->shared_buf);
+ le16_to_cpu(rx_buf_cmd->shared_buf));
cmd = HCLGE_OPC_RX_COM_WL_ALLOC;
hclge_cmd_setup_basic_desc(desc, cmd, true);
@@ -738,7 +747,8 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
rx_com_wl = (struct hclge_rx_com_wl *)desc[0].data;
dev_info(&hdev->pdev->dev, "\n");
dev_info(&hdev->pdev->dev, "rx_com_wl: high: 0x%x, low: 0x%x\n",
- rx_com_wl->com_wl.high, rx_com_wl->com_wl.low);
+ le16_to_cpu(rx_com_wl->com_wl.high),
+ le16_to_cpu(rx_com_wl->com_wl.low));
cmd = HCLGE_OPC_RX_GBL_PKT_CNT;
hclge_cmd_setup_basic_desc(desc, cmd, true);
@@ -749,7 +759,8 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
rx_packet_cnt = (struct hclge_rx_com_wl *)desc[0].data;
dev_info(&hdev->pdev->dev,
"rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
- rx_packet_cnt->com_wl.high, rx_packet_cnt->com_wl.low);
+ le16_to_cpu(rx_packet_cnt->com_wl.high),
+ le16_to_cpu(rx_packet_cnt->com_wl.low));
dev_info(&hdev->pdev->dev, "\n");
if (!hnae3_dev_dcb_supported(hdev)) {
@@ -769,14 +780,16 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
dev_info(&hdev->pdev->dev,
"rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i,
- rx_priv_wl->tc_wl[i].high, rx_priv_wl->tc_wl[i].low);
+ le16_to_cpu(rx_priv_wl->tc_wl[i].high),
+ le16_to_cpu(rx_priv_wl->tc_wl[i].low));
rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
dev_info(&hdev->pdev->dev,
"rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n",
i + HCLGE_TC_NUM_ONE_DESC,
- rx_priv_wl->tc_wl[i].high, rx_priv_wl->tc_wl[i].low);
+ le16_to_cpu(rx_priv_wl->tc_wl[i].high),
+ le16_to_cpu(rx_priv_wl->tc_wl[i].low));
cmd = HCLGE_OPC_RX_COM_THRD_ALLOC;
hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
@@ -791,16 +804,16 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
dev_info(&hdev->pdev->dev,
"rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i,
- rx_com_thrd->com_thrd[i].high,
- rx_com_thrd->com_thrd[i].low);
+ le16_to_cpu(rx_com_thrd->com_thrd[i].high),
+ le16_to_cpu(rx_com_thrd->com_thrd[i].low));
rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
dev_info(&hdev->pdev->dev,
"rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n",
i + HCLGE_TC_NUM_ONE_DESC,
- rx_com_thrd->com_thrd[i].high,
- rx_com_thrd->com_thrd[i].low);
+ le16_to_cpu(rx_com_thrd->com_thrd[i].high),
+ le16_to_cpu(rx_com_thrd->com_thrd[i].low));
return;
err_qos_cmd_send:
@@ -845,7 +858,8 @@ static void hclge_dbg_dump_mng_table(struct hclge_dev *hdev)
memset(printf_buf, 0, HCLGE_DBG_BUF_LEN);
snprintf(printf_buf, HCLGE_DBG_BUF_LEN,
"%02u |%02x:%02x:%02x:%02x:%02x:%02x|",
- req0->index, req0->mac_addr[0], req0->mac_addr[1],
+ le16_to_cpu(req0->index),
+ req0->mac_addr[0], req0->mac_addr[1],
req0->mac_addr[2], req0->mac_addr[3],
req0->mac_addr[4], req0->mac_addr[5]);
@@ -929,7 +943,7 @@ static void hclge_dbg_fd_tcam(struct hclge_dev *hdev)
}
}
-static void hclge_dbg_dump_rst_info(struct hclge_dev *hdev)
+void hclge_dbg_dump_rst_info(struct hclge_dev *hdev)
{
dev_info(&hdev->pdev->dev, "PF reset count: %u\n",
hdev->rst_stats.pf_rst_cnt);
@@ -945,8 +959,6 @@ static void hclge_dbg_dump_rst_info(struct hclge_dev *hdev)
hdev->rst_stats.hw_reset_done_cnt);
dev_info(&hdev->pdev->dev, "reset count: %u\n",
hdev->rst_stats.reset_cnt);
- dev_info(&hdev->pdev->dev, "reset count: %u\n",
- hdev->rst_stats.reset_cnt);
dev_info(&hdev->pdev->dev, "reset fail count: %u\n",
hdev->rst_stats.reset_fail_cnt);
dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n",
@@ -961,6 +973,7 @@ static void hclge_dbg_dump_rst_info(struct hclge_dev *hdev)
hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG));
dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n",
hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING));
+ dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state);
}
static void hclge_dbg_get_m7_stats_info(struct hclge_dev *hdev)
@@ -1110,6 +1123,82 @@ static void hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev)
}
}
+static void hclge_dbg_dump_qs_shaper_single(struct hclge_dev *hdev, u16 qsid)
+{
+ struct hclge_qs_shapping_cmd *shap_cfg_cmd;
+ u8 ir_u, ir_b, ir_s, bs_b, bs_s;
+ struct hclge_desc desc;
+ u32 shapping_para;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG, true);
+
+ shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
+ shap_cfg_cmd->qs_id = cpu_to_le16(qsid);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "qs%u failed to get tx_rate, ret=%d\n",
+ qsid, ret);
+ return;
+ }
+
+ shapping_para = le32_to_cpu(shap_cfg_cmd->qs_shapping_para);
+ ir_b = hclge_tm_get_field(shapping_para, IR_B);
+ ir_u = hclge_tm_get_field(shapping_para, IR_U);
+ ir_s = hclge_tm_get_field(shapping_para, IR_S);
+ bs_b = hclge_tm_get_field(shapping_para, BS_B);
+ bs_s = hclge_tm_get_field(shapping_para, BS_S);
+
+ dev_info(&hdev->pdev->dev,
+ "qs%u ir_b:%u, ir_u:%u, ir_s:%u, bs_b:%u, bs_s:%u\n",
+ qsid, ir_b, ir_u, ir_s, bs_b, bs_s);
+}
+
+static void hclge_dbg_dump_qs_shaper_all(struct hclge_dev *hdev)
+{
+ struct hnae3_knic_private_info *kinfo;
+ struct hclge_vport *vport;
+ int vport_id, i;
+
+ for (vport_id = 0; vport_id <= pci_num_vf(hdev->pdev); vport_id++) {
+ vport = &hdev->vport[vport_id];
+ kinfo = &vport->nic.kinfo;
+
+ dev_info(&hdev->pdev->dev, "qs cfg of vport%d:\n", vport_id);
+
+ for (i = 0; i < kinfo->num_tc; i++) {
+ u16 qsid = vport->qs_offset + i;
+
+ hclge_dbg_dump_qs_shaper_single(hdev, qsid);
+ }
+ }
+}
+
+static void hclge_dbg_dump_qs_shaper(struct hclge_dev *hdev,
+ const char *cmd_buf)
+{
+#define HCLGE_MAX_QSET_NUM 1024
+
+ u16 qsid;
+ int ret;
+
+ ret = kstrtou16(cmd_buf, 0, &qsid);
+ if (ret) {
+ hclge_dbg_dump_qs_shaper_all(hdev);
+ return;
+ }
+
+ if (qsid >= HCLGE_MAX_QSET_NUM) {
+ dev_err(&hdev->pdev->dev, "qsid(%u) out of range[0-1023]\n",
+ qsid);
+ return;
+ }
+
+ hclge_dbg_dump_qs_shaper_single(hdev, qsid);
+}
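The new hook accepts an optional qset id: a numeric argument dumps one qset, and anything kstrtou16() rejects (including an empty argument) falls back to dumping every vport's qsets. It is driven through the existing debugfs cmd file, e.g. echo "dump qs shaper 5" into it; the exact debugfs path is not spelled out by this patch. The shaper word packs five fields; the bit layout in the sketch below is an assumption taken from hclge_tm.h and worth re-checking against the tree:

	#include <stdint.h>

	/* Assumed layout of qs_shapping_para: IR_B bits 0-7, IR_U bits 8-11,
	 * IR_S bits 12-15, BS_B bits 16-20, BS_S bits 21-25. */
	struct qs_shaper {
		uint8_t ir_b, ir_u, ir_s, bs_b, bs_s;
	};

	static struct qs_shaper decode_shaper(uint32_t para)
	{
		return (struct qs_shaper){
			.ir_b = para & 0xff,
			.ir_u = (para >> 8) & 0xf,
			.ir_s = (para >> 12) & 0xf,
			.bs_b = (para >> 16) & 0x1f,
			.bs_s = (para >> 21) & 0x1f,
		};
	}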
+
int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
{
#define DUMP_REG "dump reg"
@@ -1145,6 +1234,9 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
&cmd_buf[sizeof("dump ncl_config")]);
} else if (strncmp(cmd_buf, "dump mac tnl status", 19) == 0) {
hclge_dbg_dump_mac_tnl_status(hdev);
+ } else if (strncmp(cmd_buf, "dump qs shaper", 14) == 0) {
+ hclge_dbg_dump_qs_shaper(hdev,
+ &cmd_buf[sizeof("dump qs shaper")]);
} else {
dev_info(&hdev->pdev->dev, "unknown command\n");
return -EINVAL;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index 87dece0e745d..dc66b4e13377 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -1747,7 +1747,7 @@ static void hclge_handle_over_8bd_err(struct hclge_dev *hdev,
if (vf_id) {
if (vf_id >= hdev->num_alloc_vport) {
- dev_err(dev, "invalid vf id(%d)\n", vf_id);
+ dev_err(dev, "invalid vf id(%u)\n", vf_id);
return;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index fd7f94372ff0..4f8f0684beb2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -55,6 +55,8 @@
#define HCLGE_LINK_STATUS_MS 10
+#define HCLGE_VF_VPORT_START_NUM 1
+
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
@@ -323,8 +325,7 @@ static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
{
.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
.ethter_type = cpu_to_le16(ETH_P_LLDP),
- .mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
- .mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
+ .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
.i_port_bitmap = 0x1,
},
};
@@ -906,6 +907,9 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
+ /* the nic's msix vector count always equals the roce's. */
+ hdev->num_nic_msi = hdev->num_roce_msi;
+
/* PF should have NIC vectors and Roce vectors,
* NIC vectors are queued before Roce vectors.
*/
@@ -915,6 +919,15 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
hdev->num_msi =
hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
+
+ hdev->num_nic_msi = hdev->num_msi;
+ }
+
+ if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
+ dev_err(&hdev->pdev->dev,
+ "Just %u msi resources, not enough for pf(min:2).\n",
+ hdev->num_nic_msi);
+ return -EINVAL;
}
return 0;
@@ -1182,6 +1195,35 @@ static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
hclge_parse_backplane_link_mode(hdev, speed_ability);
}
+static u32 hclge_get_max_speed(u8 speed_ability)
+{
+ if (speed_ability & HCLGE_SUPPORT_100G_BIT)
+ return HCLGE_MAC_SPEED_100G;
+
+ if (speed_ability & HCLGE_SUPPORT_50G_BIT)
+ return HCLGE_MAC_SPEED_50G;
+
+ if (speed_ability & HCLGE_SUPPORT_40G_BIT)
+ return HCLGE_MAC_SPEED_40G;
+
+ if (speed_ability & HCLGE_SUPPORT_25G_BIT)
+ return HCLGE_MAC_SPEED_25G;
+
+ if (speed_ability & HCLGE_SUPPORT_10G_BIT)
+ return HCLGE_MAC_SPEED_10G;
+
+ if (speed_ability & HCLGE_SUPPORT_1G_BIT)
+ return HCLGE_MAC_SPEED_1G;
+
+ if (speed_ability & HCLGE_SUPPORT_100M_BIT)
+ return HCLGE_MAC_SPEED_100M;
+
+ if (speed_ability & HCLGE_SUPPORT_10M_BIT)
+ return HCLGE_MAC_SPEED_10M;
+
+ return HCLGE_MAC_SPEED_1G;
+}
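hclge_get_max_speed() is a priority encoder over the firmware's speed-ability bits: the highest supported speed wins, and 1G is the conservative default when nothing matches. A self-contained model with hypothetical bit values (the real HCLGE_SUPPORT_*_BIT definitions are not shown in this patch):

	#include <stddef.h>
	#include <stdint.h>

	enum {	/* hypothetical stand-ins for HCLGE_SUPPORT_*_BIT */
		SUP_10M = 1 << 0, SUP_100M = 1 << 1, SUP_1G = 1 << 2,
		SUP_10G = 1 << 3, SUP_25G = 1 << 4, SUP_40G = 1 << 5,
		SUP_50G = 1 << 6, SUP_100G = 1 << 7,
	};

	static uint32_t max_speed_mbps(uint8_t ability)
	{
		static const struct { uint8_t bit; uint32_t mbps; } tbl[] = {
			{ SUP_100G, 100000 }, { SUP_50G, 50000 },
			{ SUP_40G, 40000 }, { SUP_25G, 25000 },
			{ SUP_10G, 10000 }, { SUP_1G, 1000 },
			{ SUP_100M, 100 }, { SUP_10M, 10 },
		};

		for (size_t i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
			if (ability & tbl[i].bit)
				return tbl[i].mbps;
		return 1000;	/* driver falls back to 1G */
	}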
+
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
struct hclge_cfg_param_cmd *req;
@@ -1352,9 +1394,11 @@ static int hclge_configure(struct hclge_dev *hdev)
hclge_parse_link_mode(hdev, cfg.speed_ability);
+ hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
+
if ((hdev->tc_max > HNAE3_MAX_TC) ||
(hdev->tc_max < 1)) {
- dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
+ dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
hdev->tc_max);
hdev->tc_max = 1;
}
@@ -1507,6 +1551,10 @@ static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
kinfo->rss_size = min_t(u16, hdev->rss_size_max,
vport->alloc_tqps / hdev->tm_info.num_tc);
+ /* ensure one to one mapping between irq and queue at default */
+ kinfo->rss_size = min_t(u16, kinfo->rss_size,
+ (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
+
return 0;
}
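The added clamp preserves a one-to-one queue-to-vector mapping: one of the num_nic_msi vectors stays reserved for miscellaneous interrupts, and the remainder is split across TCs. Worked through: with num_nic_msi = 17 and 4 TCs, rss_size is capped at (17 - 1) / 4 = 4 queues per TC even if rss_size_max and the allocated TQPs would allow more:

	#include <stdio.h>

	int main(void)
	{
		unsigned int num_nic_msi = 17, num_tc = 4;
		unsigned int rss_size = 16;	/* value before the new clamp */
		unsigned int cap = (num_nic_msi - 1) / num_tc;

		if (rss_size > cap)
			rss_size = cap;
		printf("rss_size = %u\n", rss_size);	/* prints 4 */
		return 0;
	}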
@@ -1610,7 +1658,7 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
if (hdev->num_tqps < num_vport) {
- dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
+ dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
hdev->num_tqps, num_vport);
return -EINVAL;
}
@@ -1633,6 +1681,7 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
for (i = 0; i < num_vport; i++) {
vport->back = hdev;
vport->vport_id = i;
+ vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
vport->mps = HCLGE_MAC_DEFAULT_FRAME;
vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
vport->rxvlan_cfg.rx_vlan_offload_en = true;
@@ -2285,7 +2334,8 @@ static int hclge_init_msi(struct hclge_dev *hdev)
int vectors;
int i;
- vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
+ vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
+ hdev->num_msi,
PCI_IRQ_MSI | PCI_IRQ_MSIX);
if (vectors < 0) {
dev_err(&pdev->dev,
@@ -2295,11 +2345,12 @@ static int hclge_init_msi(struct hclge_dev *hdev)
}
if (vectors < hdev->num_msi)
dev_warn(&hdev->pdev->dev,
- "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
+ "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
hdev->num_msi, vectors);
hdev->num_msi = vectors;
hdev->num_msi_left = vectors;
+
hdev->base_msi_vector = pdev->irq;
hdev->roce_base_vector = hdev->base_msi_vector +
hdev->roce_base_msix_offset;
@@ -2726,7 +2777,7 @@ static void hclge_update_port_capability(struct hclge_mac *mac)
else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
mac->module_type = HNAE3_MODULE_TYPE_TP;
- if (mac->support_autoneg == true) {
+ if (mac->support_autoneg) {
linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
linkmode_copy(mac->advertising, mac->supported);
} else {
@@ -2853,6 +2904,62 @@ static int hclge_get_status(struct hnae3_handle *handle)
return hdev->hw.mac.link;
}
+static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
+{
+ if (pci_num_vf(hdev->pdev) == 0) {
+ dev_err(&hdev->pdev->dev,
+ "SRIOV is disabled, can not get vport(%d) info.\n", vf);
+ return NULL;
+ }
+
+ if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
+ dev_err(&hdev->pdev->dev,
+ "vf id(%d) is out of range(0 <= vfid < %d)\n",
+ vf, pci_num_vf(hdev->pdev));
+ return NULL;
+ }
+
+ /* VF start from 1 in vport */
+ vf += HCLGE_VF_VPORT_START_NUM;
+ return &hdev->vport[vf];
+}
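The helper encodes the vport numbering convention: vport[0] is the PF itself, so the 0-based VF id passed by the ndo callbacks maps to vport[vf + 1], and both the SR-IOV-disabled case and out-of-range ids return NULL instead of indexing past the array. Reduced to one check (the in-tree version splits it to print distinct error messages):

	/* Reduced model of the vf -> vport mapping above. */
	static struct hclge_vport *vf_to_vport(struct hclge_dev *hdev, int vf)
	{
		int num_vfs = pci_num_vf(hdev->pdev);

		if (num_vfs == 0 || vf < 0 || vf >= num_vfs)
			return NULL;	/* SR-IOV off, or bad VF id */

		return &hdev->vport[vf + HCLGE_VF_VPORT_START_NUM];
	}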
+
+static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
+ struct ifla_vf_info *ivf)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ vport = hclge_get_vf_vport(hdev, vf);
+ if (!vport)
+ return -EINVAL;
+
+ ivf->vf = vf;
+ ivf->linkstate = vport->vf_info.link_state;
+ ivf->spoofchk = vport->vf_info.spoofchk;
+ ivf->trusted = vport->vf_info.trusted;
+ ivf->min_tx_rate = 0;
+ ivf->max_tx_rate = vport->vf_info.max_tx_rate;
+ ether_addr_copy(ivf->mac, vport->vf_info.mac);
+
+ return 0;
+}
+
+static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
+ int link_state)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ vport = hclge_get_vf_vport(hdev, vf);
+ if (!vport)
+ return -EINVAL;
+
+ vport->vf_info.link_state = link_state;
+
+ return 0;
+}
+
static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
{
u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
@@ -3173,7 +3280,7 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
dev_err(&hdev->pdev->dev,
- "flr wait timeout: %d\n", cnt);
+ "flr wait timeout: %u\n", cnt);
return -EBUSY;
}
@@ -3223,7 +3330,7 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
if (ret) {
dev_err(&hdev->pdev->dev,
- "set vf(%d) rst failed %d!\n",
+ "set vf(%u) rst failed %d!\n",
vport->vport_id, ret);
return ret;
}
@@ -3238,7 +3345,7 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
ret = hclge_inform_reset_assert_to_vf(vport);
if (ret)
dev_warn(&hdev->pdev->dev,
- "inform reset to vf(%d) failed %d!\n",
+ "inform reset to vf(%u) failed %d!\n",
vport->vport_id, ret);
}
@@ -3551,7 +3658,7 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev)
hdev->rst_stats.reset_fail_cnt++;
set_bit(hdev->reset_type, &hdev->reset_pending);
dev_info(&hdev->pdev->dev,
- "re-schedule reset task(%d)\n",
+ "re-schedule reset task(%u)\n",
hdev->rst_stats.reset_fail_cnt);
return true;
}
@@ -3562,6 +3669,9 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev)
hclge_reset_handshake(hdev, true);
dev_err(&hdev->pdev->dev, "Reset fail!\n");
+
+ hclge_dbg_dump_rst_info(hdev);
+
return false;
}
@@ -3745,12 +3855,13 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
HCLGE_RESET_INTERVAL))) {
mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
return;
- } else if (hdev->default_reset_request)
+ } else if (hdev->default_reset_request) {
hdev->reset_level =
hclge_get_reset_level(ae_dev,
&hdev->default_reset_request);
- else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
+ } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
hdev->reset_level = HNAE3_FUNC_RESET;
+ }
dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
hdev->reset_level);
@@ -3875,6 +3986,7 @@ static void hclge_service_task(struct work_struct *work)
hclge_update_link_status(hdev);
hclge_update_vport_alive(hdev);
hclge_sync_vlan_filter(hdev);
+
if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
hclge_rfs_filter_expire(hdev);
hdev->fd_arfs_expire_timer = 0;
@@ -3903,6 +4015,7 @@ static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
int alloc = 0;
int i, j;
+ vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
vector_num = min(hdev->num_msi_left, vector_num);
for (j = 0; j < vector_num; j++) {
@@ -4380,7 +4493,7 @@ int hclge_rss_init_hw(struct hclge_dev *hdev)
*/
if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
dev_err(&hdev->pdev->dev,
- "Configure rss tc size failed, invalid TC_SIZE = %d\n",
+ "Configure rss tc size failed, invalid TC_SIZE = %u\n",
rss_size);
return -EINVAL;
}
@@ -4558,8 +4671,8 @@ static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
return ret;
}
-int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
- struct hclge_promisc_param *param)
+static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
+ struct hclge_promisc_param *param)
{
struct hclge_promisc_cfg_cmd *req;
struct hclge_desc desc;
@@ -4586,8 +4699,9 @@ int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
return ret;
}
-void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
- bool en_mc, bool en_bc, int vport_id)
+static void hclge_promisc_param_init(struct hclge_promisc_param *param,
+ bool en_uc, bool en_mc, bool en_bc,
+ int vport_id)
{
if (!param)
return;
@@ -4602,12 +4716,21 @@ void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
param->vf_id = vport_id;
}
+int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
+ bool en_mc_pmc, bool en_bc_pmc)
+{
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_promisc_param param;
+
+ hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
+ vport->vport_id);
+ return hclge_cmd_set_promisc_mode(hdev, &param);
+}
+
static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
bool en_mc_pmc)
{
struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_dev *hdev = vport->back;
- struct hclge_promisc_param param;
bool en_bc_pmc = true;
/* For revision 0x20, if broadcast promisc enabled, vlan filter is
@@ -4617,9 +4740,8 @@ static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
if (handle->pdev->revision == 0x20)
en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
- hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
- vport->vport_id);
- return hclge_cmd_set_promisc_mode(hdev, &param);
+ return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
+ en_bc_pmc);
}
static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
@@ -4721,7 +4843,7 @@ static int hclge_init_fd_config(struct hclge_dev *hdev)
break;
default:
dev_err(&hdev->pdev->dev,
- "Unsupported flow director mode %d\n",
+ "Unsupported flow director mode %u\n",
hdev->fd_cfg.fd_mode);
return -EOPNOTSUPP;
}
@@ -5051,7 +5173,7 @@ static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
true);
if (ret) {
dev_err(&hdev->pdev->dev,
- "fd key_y config fail, loc=%d, ret=%d\n",
+ "fd key_y config fail, loc=%u, ret=%d\n",
rule->queue_id, ret);
return ret;
}
@@ -5060,7 +5182,7 @@ static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
true);
if (ret)
dev_err(&hdev->pdev->dev,
- "fd key_x config fail, loc=%d, ret=%d\n",
+ "fd key_x config fail, loc=%u, ret=%d\n",
rule->queue_id, ret);
return ret;
}
@@ -5309,7 +5431,7 @@ static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
}
} else if (!is_add) {
dev_err(&hdev->pdev->dev,
- "delete fail, rule %d is inexistent\n",
+ "delete fail, rule %u is inexistent\n",
location);
return -EINVAL;
}
@@ -5549,7 +5671,7 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
if (vf > hdev->num_req_vfs) {
dev_err(&hdev->pdev->dev,
- "Error: vf id (%d) > max vf num (%d)\n",
+ "Error: vf id (%u) > max vf num (%u)\n",
vf, hdev->num_req_vfs);
return -EINVAL;
}
@@ -5559,7 +5681,7 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
if (ring >= tqps) {
dev_err(&hdev->pdev->dev,
- "Error: queue id (%d) > max tqp num (%d)\n",
+ "Error: queue id (%u) > max tqp num (%u)\n",
ring, tqps - 1);
return -EINVAL;
}
@@ -5618,7 +5740,7 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle,
if (!hclge_fd_rule_exist(hdev, fs->location)) {
dev_err(&hdev->pdev->dev,
- "Delete fail, rule %d is inexistent\n", fs->location);
+ "Delete fail, rule %u is inexistent\n", fs->location);
return -ENOENT;
}
@@ -5695,7 +5817,7 @@ static int hclge_restore_fd_entries(struct hnae3_handle *handle)
if (ret) {
dev_warn(&hdev->pdev->dev,
- "Restore rule %d failed, remove it\n",
+ "Restore rule %u failed, remove it\n",
rule->location);
clear_bit(rule->location, hdev->fd_bmap);
hlist_del(&rule->rule_node);
@@ -6688,7 +6810,7 @@ static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
if (cmdq_resp) {
dev_err(&hdev->pdev->dev,
- "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
+ "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
cmdq_resp);
return -EIO;
}
@@ -6940,7 +7062,7 @@ static int hclge_init_umv_space(struct hclge_dev *hdev)
if (allocated_size < hdev->wanted_umv_size)
dev_warn(&hdev->pdev->dev,
- "Alloc umv space failed, want %d, get %d\n",
+ "Alloc umv space failed, want %u, get %u\n",
hdev->wanted_umv_size, allocated_size);
mutex_init(&hdev->umv_mutex);
@@ -7108,7 +7230,7 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
/* check if we just hit the duplicate */
if (!ret) {
- dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
+ dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
vport->vport_id, addr);
return 0;
}
@@ -7289,7 +7411,7 @@ void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
list_for_each_entry_safe(mac_cfg, tmp, list, node) {
- if (strncmp(mac_cfg->mac_addr, mac_addr, ETH_ALEN) == 0) {
+ if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
if (uc_flag && mac_cfg->hd_tbl_status)
hclge_rm_uc_addr_common(vport, mac_addr);
@@ -7361,7 +7483,7 @@ static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
if (cmdq_resp) {
dev_err(&hdev->pdev->dev,
- "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
+ "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
cmdq_resp);
return -EIO;
}
@@ -7383,7 +7505,7 @@ static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
break;
default:
dev_err(&hdev->pdev->dev,
- "add mac ethertype failed for undefined, code=%d.\n",
+ "add mac ethertype failed for undefined, code=%u.\n",
resp_code);
return_status = -EIO;
}
@@ -7391,6 +7513,67 @@ static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
return return_status;
}
+static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
+ u8 *mac_addr)
+{
+ struct hclge_mac_vlan_tbl_entry_cmd req;
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_desc desc;
+ u16 egress_port = 0;
+ int i;
+
+ if (is_zero_ether_addr(mac_addr))
+ return false;
+
+ memset(&req, 0, sizeof(req));
+ hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
+ HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
+ req.egress_port = cpu_to_le16(egress_port);
+ hclge_prepare_mac_addr(&req, mac_addr, false);
+
+ if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
+ return true;
+
+ vf_idx += HCLGE_VF_VPORT_START_NUM;
+ for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
+ if (i != vf_idx &&
+ ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
+ return true;
+
+ return false;
+}
+
+static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
+ u8 *mac_addr)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ vport = hclge_get_vf_vport(hdev, vf);
+ if (!vport)
+ return -EINVAL;
+
+ if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
+ dev_info(&hdev->pdev->dev,
+ "Specified MAC(=%pM) is same as before, no change committed!\n",
+ mac_addr);
+ return 0;
+ }
+
+ if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
+ dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
+ mac_addr);
+ return -EEXIST;
+ }
+
+ ether_addr_copy(vport->vf_info.mac, mac_addr);
+ dev_info(&hdev->pdev->dev,
+ "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
+ vf, mac_addr);
+
+ return hclge_inform_reset_assert_to_vf(vport);
+}
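Setting a VF MAC from the PF is therefore a three-step flow: no-op if the address is unchanged, reject it if it already exists (in the MAC/VLAN table for this vport, or configured on a sibling VF), otherwise record it and assert a VF reset so the VF re-reads its MAC from the PF. This is the path behind the usual "ip link set <pf> vf N mac ..." administration; the command shape is shown for orientation only. The sibling-VF duplicate scan, as a reduced model of the check above:

	/* Reduced model of the duplicate scan across sibling VFs. */
	static bool mac_used_by_other_vf(struct hclge_dev *hdev, int vf_idx,
					 const u8 *mac)
	{
		int i;

		vf_idx += HCLGE_VF_VPORT_START_NUM;	/* VF 0 is vport[1] */
		for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
			if (i != vf_idx &&
			    ether_addr_equal(mac, hdev->vport[i].vf_info.mac))
				return true;
		return false;
	}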
+
static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
const struct hclge_mac_mgr_tbl_entry_cmd *req)
{
@@ -7563,7 +7746,7 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
bool is_kill, u16 vlan,
__be16 proto)
{
-#define HCLGE_MAX_VF_BYTES 16
+ struct hclge_vport *vport = &hdev->vport[vfid];
struct hclge_vlan_filter_vf_cfg_cmd *req0;
struct hclge_vlan_filter_vf_cfg_cmd *req1;
struct hclge_desc desc[2];
@@ -7572,10 +7755,18 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
int ret;
/* if vf vlan table is full, firmware will close vf vlan filter, it
- * is unable and unnecessary to add new vlan id to vf vlan filter
+ * is impossible and unnecessary to add new vlan ids to the vf vlan filter.
+ * If spoof check is enabled and the vf vlan table is full, new vlans
+ * must not be added, because tx packets with these vlan ids would be dropped.
*/
- if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill)
+ if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
+ if (vport->vf_info.spoofchk && vlan) {
+ dev_err(&hdev->pdev->dev,
+ "Can't add vlan due to spoof check is on and vf vlan table is full\n");
+ return -EPERM;
+ }
return 0;
+ }
hclge_cmd_setup_basic_desc(&desc[0],
HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
@@ -7619,7 +7810,7 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
}
dev_err(&hdev->pdev->dev,
- "Add vf vlan filter fail, ret =%d.\n",
+ "Add vf vlan filter fail, ret =%u.\n",
req0->resp_code);
} else {
#define HCLGE_VF_VLAN_DEL_NO_FOUND 1
@@ -7635,7 +7826,7 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
return 0;
dev_err(&hdev->pdev->dev,
- "Kill vf vlan filter fail, ret =%d.\n",
+ "Kill vf vlan filter fail, ret =%u.\n",
req0->resp_code);
}
@@ -7654,9 +7845,10 @@ static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
- vlan_offset_160 = vlan_id / 160;
- vlan_offset_byte = (vlan_id % 160) / 8;
- vlan_offset_byte_val = 1 << (vlan_id % 8);
+ vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
+ vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
+ HCLGE_VLAN_BYTE_SIZE;
+ vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
req->vlan_offset = vlan_offset_160;
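The offset arithmetic above addresses the VLAN bitmap (one bit per VLAN id, up to 4096 ids) in 160-entry chunks, then picks a byte and a bit within the chunk; the new macros simply name the former literals 160 and 8. A stand-alone check of the math (macro names shortened here, values taken from the hunk):

#include <stdio.h>

#define VLAN_ID_OFFSET_STEP	160	/* ids covered per command */
#define VLAN_BYTE_SIZE		8	/* bits per bitmap byte */

int main(void)
{
	unsigned int vlan_id = 1000;	/* arbitrary example */
	unsigned int chunk = vlan_id / VLAN_ID_OFFSET_STEP;
	unsigned int byte = (vlan_id % VLAN_ID_OFFSET_STEP) / VLAN_BYTE_SIZE;
	unsigned int mask = 1u << (vlan_id % VLAN_BYTE_SIZE);

	/* vlan 1000 -> chunk 6, byte 5, bit mask 0x01 */
	printf("chunk=%u byte=%u mask=0x%02x\n", chunk, byte, mask);
	return 0;
}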
@@ -7684,7 +7876,7 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
proto);
if (ret) {
dev_err(&hdev->pdev->dev,
- "Set %d vport vlan filter config fail, ret =%d.\n",
+ "Set %u vport vlan filter config fail, ret =%d.\n",
vport_id, ret);
return ret;
}
@@ -7696,7 +7888,7 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
dev_err(&hdev->pdev->dev,
- "Add port vlan failed, vport %d is already in vlan %d\n",
+ "Add port vlan failed, vport %u is already in vlan %u\n",
vport_id, vlan_id);
return -EINVAL;
}
@@ -7704,7 +7896,7 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
if (is_kill &&
!test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
dev_err(&hdev->pdev->dev,
- "Delete port vlan failed, vport %d is not in vlan %d\n",
+ "Delete port vlan failed, vport %u is not in vlan %u\n",
vport_id, vlan_id);
return -EINVAL;
}
@@ -8072,12 +8264,15 @@ static void hclge_restore_vlan_table(struct hnae3_handle *handle)
}
list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
- if (vlan->hd_tbl_status)
- hclge_set_vlan_filter_hw(hdev,
- htons(ETH_P_8021Q),
- vport->vport_id,
- vlan->vlan_id,
- false);
+ int ret;
+
+ if (!vlan->hd_tbl_status)
+ continue;
+ ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
+ vport->vport_id,
+ vlan->vlan_id, false);
+ if (ret)
+ break;
}
}
@@ -8357,6 +8552,7 @@ int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
struct hclge_dev *hdev = vport->back;
int i, max_frm_size, ret;
+ /* HW supports 2-layer vlan */
max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
max_frm_size > HCLGE_MAC_MAX_FRAME)
@@ -8772,16 +8968,16 @@ static void hclge_info_show(struct hclge_dev *hdev)
dev_info(dev, "PF info begin:\n");
- dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
- dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
- dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
- dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
- dev_info(dev, "Numbers of vmdp vports: %d\n", hdev->num_vmdq_vport);
- dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs);
- dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
- dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
- dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
- dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
+ dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
+ dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
+ dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
+ dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
+ dev_info(dev, "Numbers of vmdp vports: %u\n", hdev->num_vmdq_vport);
+ dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
+ dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
+ dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
+ dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
+ dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
dev_info(dev, "This is %s PF\n",
hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
dev_info(dev, "DCB %s\n",
@@ -8797,10 +8993,9 @@ static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
{
struct hnae3_client *client = vport->nic.client;
struct hclge_dev *hdev = ae_dev->priv;
- int rst_cnt;
+ int rst_cnt = hdev->rst_stats.reset_cnt;
int ret;
- rst_cnt = hdev->rst_stats.reset_cnt;
ret = client->ops->init_instance(&vport->nic);
if (ret)
return ret;
@@ -8900,7 +9095,6 @@ static int hclge_init_client_instance(struct hnae3_client *client,
switch (client->type) {
case HNAE3_CLIENT_KNIC:
-
hdev->nic_client = client;
vport->nic.client = client;
ret = hclge_init_nic_client_instance(ae_dev, vport);
@@ -9099,7 +9293,7 @@ static void hclge_clear_resetting_state(struct hclge_dev *hdev)
ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
if (ret)
dev_warn(&hdev->pdev->dev,
- "clear vf(%d) rst failed %d!\n",
+ "clear vf(%u) rst failed %d!\n",
vport->vport_id, ret);
}
}
@@ -9121,6 +9315,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hdev->reset_type = HNAE3_NONE_RESET;
hdev->reset_level = HNAE3_FUNC_RESET;
ae_dev->priv = hdev;
+
+ /* HW supports 2-layer vlan */
hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
mutex_init(&hdev->vport_lock);
@@ -9319,6 +9515,219 @@ static void hclge_stats_clear(struct hclge_dev *hdev)
memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
}
+static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
+{
+ return hclge_config_switch_param(hdev, vf, enable,
+ HCLGE_SWITCH_ANTI_SPOOF_MASK);
+}
+
+static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
+{
+ return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
+ HCLGE_FILTER_FE_NIC_INGRESS_B,
+ enable, vf);
+}
+
+static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
+{
+ int ret;
+
+ ret = hclge_set_mac_spoofchk(hdev, vf, enable);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Set vf %d mac spoof check %s failed, ret=%d\n",
+ vf, enable ? "on" : "off", ret);
+ return ret;
+ }
+
+ ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "Set vf %d vlan spoof check %s failed, ret=%d\n",
+ vf, enable ? "on" : "off", ret);
+
+ return ret;
+}
+
+static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
+ bool enable)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u32 new_spoofchk = enable ? 1 : 0;
+ int ret;
+
+ if (hdev->pdev->revision == 0x20)
+ return -EOPNOTSUPP;
+
+ vport = hclge_get_vf_vport(hdev, vf);
+ if (!vport)
+ return -EINVAL;
+
+ if (vport->vf_info.spoofchk == new_spoofchk)
+ return 0;
+
+ if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
+ dev_warn(&hdev->pdev->dev,
+ "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
+ vf);
+ else if (enable && hclge_is_umv_space_full(vport))
+ dev_warn(&hdev->pdev->dev,
+ "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
+ vf);
+
+ ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
+ if (ret)
+ return ret;
+
+ vport->vf_info.spoofchk = new_spoofchk;
+ return 0;
+}
+
+static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = hdev->vport;
+ int ret;
+ int i;
+
+ if (hdev->pdev->revision == 0x20)
+ return 0;
+
+ /* resume the vf spoof check state after reset */
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
+ vport->vf_info.spoofchk);
+ if (ret)
+ return ret;
+
+ vport++;
+ }
+
+ return 0;
+}
+
+static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u32 new_trusted = enable ? 1 : 0;
+ bool en_bc_pmc;
+ int ret;
+
+ vport = hclge_get_vf_vport(hdev, vf);
+ if (!vport)
+ return -EINVAL;
+
+ if (vport->vf_info.trusted == new_trusted)
+ return 0;
+
+ /* Disable promisc mode for VF if it is not trusted any more. */
+ if (!enable && vport->vf_info.promisc_enable) {
+ en_bc_pmc = hdev->pdev->revision != 0x20;
+ ret = hclge_set_vport_promisc_mode(vport, false, false,
+ en_bc_pmc);
+ if (ret)
+ return ret;
+ vport->vf_info.promisc_enable = 0;
+ hclge_inform_vf_promisc_info(vport);
+ }
+
+ vport->vf_info.trusted = new_trusted;
+
+ return 0;
+}
+
+static void hclge_reset_vf_rate(struct hclge_dev *hdev)
+{
+ int ret;
+ int vf;
+
+ /* reset vf rate to default value */
+ for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
+ struct hclge_vport *vport = &hdev->vport[vf];
+
+ vport->vf_info.max_tx_rate = 0;
+ ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "vf%d failed to reset to default, ret=%d\n",
+ vf - HCLGE_VF_VPORT_START_NUM, ret);
+ }
+}
+
+static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
+ int min_tx_rate, int max_tx_rate)
+{
+ if (min_tx_rate != 0 ||
+ max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
+ dev_err(&hdev->pdev->dev,
+ "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
+ min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
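The check above encodes the hardware contract: the qset shaper only enforces a transmit ceiling, so min_tx_rate must be 0, and max_tx_rate must be non-negative and within the link's maximum speed (0 meaning uncapped). The same predicate as a stand-alone sketch (the 25000 Mbit/s figure is illustrative):

#include <stdbool.h>
#include <stdio.h>

/* valid iff min is 0 and max lies in [0, link max]; 0 max = uncapped */
static bool vf_rate_ok(int min_tx, int max_tx, int max_speed)
{
	return min_tx == 0 && max_tx >= 0 && max_tx <= max_speed;
}

int main(void)
{
	int max_speed = 25000;	/* Mbit/s, illustrative */

	printf("%d\n", vf_rate_ok(0, 1000, max_speed));   /* 1: ok */
	printf("%d\n", vf_rate_ok(100, 1000, max_speed)); /* 0: min unsupported */
	printf("%d\n", vf_rate_ok(0, 40000, max_speed));  /* 0: above link max */
	return 0;
}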
+static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
+ int min_tx_rate, int max_tx_rate, bool force)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
+ if (ret)
+ return ret;
+
+ vport = hclge_get_vf_vport(hdev, vf);
+ if (!vport)
+ return -EINVAL;
+
+ if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
+ return 0;
+
+ ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
+ if (ret)
+ return ret;
+
+ vport->vf_info.max_tx_rate = max_tx_rate;
+
+ return 0;
+}
+
+static int hclge_resume_vf_rate(struct hclge_dev *hdev)
+{
+ struct hnae3_handle *handle = &hdev->vport->nic;
+ struct hclge_vport *vport;
+ int ret;
+ int vf;
+
+ /* resume the vf max_tx_rate after reset */
+ for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
+ vport = hclge_get_vf_vport(hdev, vf);
+ if (!vport)
+ return -EINVAL;
+
+ /* zero means max rate; after reset, the firmware has already set
+ * it to max rate, so just continue.
+ */
+ if (!vport->vf_info.max_tx_rate)
+ continue;
+
+ ret = hclge_set_vf_rate(handle, vf, 0,
+ vport->vf_info.max_tx_rate, true);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "vf%d failed to resume tx_rate:%u, ret=%d\n",
+ vf, vport->vf_info.max_tx_rate, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static void hclge_reset_vport_state(struct hclge_dev *hdev)
{
struct hclge_vport *vport = hdev->vport;
@@ -9396,6 +9805,9 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
+ /* Log and clear the hw errors that have already occurred */
+ hclge_handle_all_hns_hw_errors(ae_dev);
+
/* Re-enable the hw error interrupts because
* the interrupts get disabled on global reset.
*/
@@ -9418,6 +9830,13 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
}
hclge_reset_vport_state(hdev);
+ ret = hclge_reset_vport_spoofchk(hdev);
+ if (ret)
+ return ret;
+
+ ret = hclge_resume_vf_rate(hdev);
+ if (ret)
+ return ret;
dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
HCLGE_DRIVER_NAME);
@@ -9430,6 +9849,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
struct hclge_dev *hdev = ae_dev->priv;
struct hclge_mac *mac = &hdev->hw.mac;
+ hclge_reset_vf_rate(hdev);
hclge_misc_affinity_teardown(hdev);
hclge_state_uninit(hdev);
@@ -9494,8 +9914,8 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
struct hclge_dev *hdev = vport->back;
u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
- int cur_rss_size = kinfo->rss_size;
- int cur_tqps = kinfo->num_tqps;
+ u16 cur_rss_size = kinfo->rss_size;
+ u16 cur_tqps = kinfo->num_tqps;
u16 tc_valid[HCLGE_MAX_TC_NUM];
u16 roundup_size;
u32 *rss_indir;
@@ -9549,7 +9969,7 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
out:
if (!ret)
dev_info(&hdev->pdev->dev,
- "Channels changed, rss_size from %d to %d, tqps from %d to %d",
+ "Channels changed, rss_size from %u to %u, tqps from %u to %u",
cur_rss_size, kinfo->rss_size,
cur_tqps, kinfo->rss_size * kinfo->num_tc);
@@ -10152,6 +10572,12 @@ static const struct hnae3_ae_ops hclge_ops = {
.mac_connect_phy = hclge_mac_connect_phy,
.mac_disconnect_phy = hclge_mac_disconnect_phy,
.restore_vlan_table = hclge_restore_vlan_table,
+ .get_vf_config = hclge_get_vf_config,
+ .set_vf_link_state = hclge_set_vf_link_state,
+ .set_vf_spoofchk = hclge_set_vf_spoofchk,
+ .set_vf_trust = hclge_set_vf_trust,
+ .set_vf_rate = hclge_set_vf_rate,
+ .set_vf_mac = hclge_set_vf_mac,
};
static struct hnae3_ae_algo ae_algo = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 3e9574a9e22d..1c0f6dfb95fc 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -141,7 +141,6 @@
/* Factor used to calculate offset and bitmap of VF num */
#define HCLGE_VF_NUM_PER_CMD 64
-#define HCLGE_VF_NUM_PER_BYTE 8
enum HLCGE_PORT_TYPE {
HOST_PORT,
@@ -226,8 +225,6 @@ enum hclge_evt_cause {
HCLGE_VECTOR0_EVENT_OTHER,
};
-#define HCLGE_MPF_ENBALE 1
-
enum HCLGE_MAC_SPEED {
HCLGE_MAC_SPEED_UNKNOWN = 0, /* unknown */
HCLGE_MAC_SPEED_10M = 10, /* 10 Mbps */
@@ -258,6 +255,7 @@ struct hclge_mac {
u8 support_autoneg;
u8 speed_type; /* 0: sfp speed, 1: active speed */
u32 speed;
+ u32 max_speed;
u32 speed_ability; /* speed ability supported by current media */
u32 module_type; /* sub media type, e.g. kr/cr/sr/lr */
u32 fec_mode; /* active fec mode */
@@ -655,7 +653,6 @@ struct hclge_rst_stats {
u32 hw_reset_done_cnt; /* the number of HW reset has completed */
u32 pf_rst_cnt; /* the number of PF reset */
u32 flr_rst_cnt; /* the number of FLR */
- u32 core_rst_cnt; /* the number of CORE reset */
u32 global_rst_cnt; /* the number of GLOBAL */
u32 imp_rst_cnt; /* the number of IMP reset */
u32 reset_cnt; /* the number of reset */
@@ -763,6 +760,7 @@ struct hclge_dev {
u32 base_msi_vector;
u16 *vector_status;
int *vector_irq;
+ u16 num_nic_msi; /* Num of nic vectors for this PF */
u16 num_roce_msi; /* Num of roce vectors for this PF */
int roce_base_vector;
@@ -885,6 +883,15 @@ struct hclge_port_base_vlan_config {
struct hclge_vlan_info vlan_info;
};
+struct hclge_vf_info {
+ int link_state;
+ u8 mac[ETH_ALEN];
+ u32 spoofchk;
+ u32 max_tx_rate;
+ u32 trusted;
+ u16 promisc_enable;
+};
+
struct hclge_vport {
u16 alloc_tqps; /* Allocated Tx/Rx queues */
@@ -916,15 +923,15 @@ struct hclge_vport {
unsigned long state;
unsigned long last_active_jiffies;
u32 mps; /* Max packet size */
+ struct hclge_vf_info vf_info;
struct list_head uc_mac_list; /* Store VF unicast table */
struct list_head mc_mac_list; /* Store VF multicast table */
struct list_head vlan_list; /* Store VF vlan table */
};
-void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
- bool en_mc, bool en_bc, int vport_id);
-
+int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
+ bool en_mc_pmc, bool en_bc_pmc);
int hclge_add_uc_addr_common(struct hclge_vport *vport,
const unsigned char *addr);
int hclge_rm_uc_addr_common(struct hclge_vport *vport,
@@ -993,4 +1000,6 @@ int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev,
struct hclge_desc *desc);
void hclge_report_hw_error(struct hclge_dev *hdev,
enum hnae3_hw_error_type type);
+void hclge_inform_vf_promisc_info(struct hclge_vport *vport);
+void hclge_dbg_dump_rst_info(struct hclge_dev *hdev);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index f5da28a60d00..0b433ebe6a2d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -26,7 +26,7 @@ static int hclge_gen_resp_to_vf(struct hclge_vport *vport,
if (resp_data_len > HCLGE_MBX_MAX_RESP_DATA_SIZE) {
dev_err(&hdev->pdev->dev,
- "PF fail to gen resp to VF len %d exceeds max len %d\n",
+ "PF fail to gen resp to VF len %u exceeds max len %u\n",
resp_data_len,
HCLGE_MBX_MAX_RESP_DATA_SIZE);
/* If resp_data_len is too long, set the value to max length
@@ -205,12 +205,38 @@ static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en,
static int hclge_set_vf_promisc_mode(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *req)
{
- bool en_bc = req->msg[1] ? true : false;
- struct hclge_promisc_param param;
+#define HCLGE_MBX_BC_INDEX 1
+#define HCLGE_MBX_UC_INDEX 2
+#define HCLGE_MBX_MC_INDEX 3
- /* vf is not allowed to enable unicast/multicast broadcast */
- hclge_promisc_param_init(&param, false, false, en_bc, vport->vport_id);
- return hclge_cmd_set_promisc_mode(vport->back, &param);
+ bool en_bc = req->msg[HCLGE_MBX_BC_INDEX] ? true : false;
+ bool en_uc = req->msg[HCLGE_MBX_UC_INDEX] ? true : false;
+ bool en_mc = req->msg[HCLGE_MBX_MC_INDEX] ? true : false;
+ int ret;
+
+ if (!vport->vf_info.trusted) {
+ en_uc = false;
+ en_mc = false;
+ }
+
+ ret = hclge_set_vport_promisc_mode(vport, en_uc, en_mc, en_bc);
+ if (req->mbx_need_resp)
+ hclge_gen_resp_to_vf(vport, req, ret, NULL, 0);
+
+ vport->vf_info.promisc_enable = (en_uc || en_mc) ? 1 : 0;
+
+ return ret;
+}
+
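The byte layout of this request has to agree with the VF side (hclgevf_cmd_set_promisc_mode() later in this patch fills msg[1..3] the same way). A compilable sketch of the shared layout; the opcode value below is a placeholder, only the indices mirror the defines above:

#include <stdbool.h>
#include <stdio.h>

enum { SET_PROMISC_OPCODE = 0xff /* placeholder */,
       BC_INDEX = 1, UC_INDEX = 2, MC_INDEX = 3 };

/* build the four bytes the PF parses in hclge_set_vf_promisc_mode() */
static void build_promisc_msg(unsigned char *msg, bool bc, bool uc, bool mc)
{
	msg[0] = SET_PROMISC_OPCODE;
	msg[BC_INDEX] = bc;	/* broadcast: always honored */
	msg[UC_INDEX] = uc;	/* unicast: forced off for untrusted VFs */
	msg[MC_INDEX] = mc;	/* multicast: forced off for untrusted VFs */
}

int main(void)
{
	unsigned char msg[4];

	build_promisc_msg(msg, true, true, false);
	printf("bc=%u uc=%u mc=%u\n", msg[1], msg[2], msg[3]);
	return 0;
}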
+void hclge_inform_vf_promisc_info(struct hclge_vport *vport)
+{
+ u8 dest_vfid = (u8)vport->vport_id;
+ u8 msg_data[2];
+
+ memcpy(&msg_data[0], &vport->vf_info.promisc_enable, sizeof(u16));
+
+ hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
+ HCLGE_MBX_PUSH_PROMISC_INFO, dest_vfid);
}
static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
@@ -223,6 +249,20 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_MODIFY) {
const u8 *old_addr = (const u8 *)(&mbx_req->msg[8]);
+ /* If VF MAC has been configured by the host then it
+ * cannot be overridden by the MAC specified by the VM.
+ */
+ if (!is_zero_ether_addr(vport->vf_info.mac) &&
+ !ether_addr_equal(mac_addr, vport->vf_info.mac)) {
+ status = -EPERM;
+ goto out;
+ }
+
+ if (!is_valid_ether_addr(mac_addr)) {
+ status = -EINVAL;
+ goto out;
+ }
+
hclge_rm_uc_addr_common(vport, old_addr);
status = hclge_add_uc_addr_common(vport, mac_addr);
if (status) {
@@ -245,11 +285,12 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
false, HCLGE_MAC_ADDR_UC);
} else {
dev_err(&hdev->pdev->dev,
- "failed to set unicast mac addr, unknown subcode %d\n",
+ "failed to set unicast mac addr, unknown subcode %u\n",
mbx_req->msg[1]);
return -EIO;
}
+out:
if (mbx_req->mbx_need_resp & HCLGE_MBX_NEED_RESP_BIT)
hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0);
@@ -278,7 +319,7 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
false, HCLGE_MAC_ADDR_MC);
} else {
dev_err(&hdev->pdev->dev,
- "failed to set mcast mac addr, unknown subcode %d\n",
+ "failed to set mcast mac addr, unknown subcode %u\n",
mbx_req->msg[1]);
return -EIO;
}
@@ -324,6 +365,9 @@ static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
proto = msg_cmd->proto;
status = hclge_set_vlan_filter(handle, cpu_to_be16(proto),
vlan, is_kill);
+ if (mbx_req->mbx_need_resp)
+ return hclge_gen_resp_to_vf(vport, mbx_req, status,
+ NULL, 0);
} else if (msg_cmd->subcode == HCLGE_MBX_VLAN_RX_OFF_CFG) {
struct hnae3_handle *handle = &vport->nic;
bool en = msg_cmd->is_kill ? true : false;
@@ -398,6 +442,13 @@ static int hclge_get_vf_queue_info(struct hclge_vport *vport,
HCLGE_TQPS_RSS_INFO_LEN);
}
+static int hclge_get_vf_mac_addr(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+ return hclge_gen_resp_to_vf(vport, mbx_req, 0, vport->vf_info.mac,
+ ETH_ALEN);
+}
+
static int hclge_get_vf_queue_depth(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req,
bool gen_resp)
@@ -428,6 +479,9 @@ static int hclge_get_vf_media_type(struct hclge_vport *vport,
static int hclge_get_link_info(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
+#define HCLGE_VF_LINK_STATE_UP 1U
+#define HCLGE_VF_LINK_STATE_DOWN 0U
+
struct hclge_dev *hdev = vport->back;
u16 link_status;
u8 msg_data[8];
@@ -435,7 +489,19 @@ static int hclge_get_link_info(struct hclge_vport *vport,
u16 duplex;
/* mac.link can only be 0 or 1 */
- link_status = (u16)hdev->hw.mac.link;
+ switch (vport->vf_info.link_state) {
+ case IFLA_VF_LINK_STATE_ENABLE:
+ link_status = HCLGE_VF_LINK_STATE_UP;
+ break;
+ case IFLA_VF_LINK_STATE_DISABLE:
+ link_status = HCLGE_VF_LINK_STATE_DOWN;
+ break;
+ case IFLA_VF_LINK_STATE_AUTO:
+ default:
+ link_status = (u16)hdev->hw.mac.link;
+ break;
+ }
+
duplex = hdev->hw.mac.duplex;
memcpy(&msg_data[0], &link_status, sizeof(u16));
memcpy(&msg_data[2], &hdev->hw.mac.speed, sizeof(u32));
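The new switch lets the administrator pin a VF's reported link state regardless of the physical link. A stand-alone rendering of the same mapping (the IFLA_VF_LINK_STATE_* constants come from the uapi <linux/if_link.h>; the enum below only mirrors their ordering):

#include <stdio.h>

/* mirrors the uapi ordering: auto, then enable, then disable */
enum { VF_LINK_AUTO = 0, VF_LINK_ENABLE = 1, VF_LINK_DISABLE = 2 };

/* report the link the admin pinned, else the real PHY link */
static unsigned int vf_link_status(int link_state, unsigned int phy_link)
{
	switch (link_state) {
	case VF_LINK_ENABLE:
		return 1;
	case VF_LINK_DISABLE:
		return 0;
	case VF_LINK_AUTO:
	default:
		return phy_link;
	}
}

int main(void)
{
	printf("pinned up, phy down -> %u\n", vf_link_status(VF_LINK_ENABLE, 0));
	printf("auto, phy down      -> %u\n", vf_link_status(VF_LINK_AUTO, 0));
	return 0;
}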
@@ -489,7 +555,7 @@ static void hclge_reset_vf(struct hclge_vport *vport,
struct hclge_dev *hdev = vport->back;
int ret;
- dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %d!",
+ dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %u!",
vport->vport_id);
ret = hclge_func_reset_cmd(hdev, vport->vport_id);
@@ -524,7 +590,8 @@ static int hclge_get_queue_id_in_pf(struct hclge_vport *vport,
qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id);
memcpy(resp_data, &qid_in_pf, sizeof(qid_in_pf));
- return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data, 2);
+ return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data,
+ sizeof(resp_data));
}
static int hclge_get_rss_key(struct hclge_vport *vport,
@@ -614,7 +681,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) {
dev_warn(&hdev->pdev->dev,
- "dropped invalid mailbox message, code = %d\n",
+ "dropped invalid mailbox message, code = %u\n",
req->msg[0]);
/* dropping/not processing this invalid message */
@@ -749,12 +816,19 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
case HCLGE_MBX_PUSH_LINK_STATUS:
hclge_handle_link_change_event(hdev, req);
break;
+ case HCLGE_MBX_GET_MAC_ADDR:
+ ret = hclge_get_vf_mac_addr(vport, req);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "PF failed(%d) to get MAC for VF\n",
+ ret);
+ break;
case HCLGE_MBX_NCSI_ERROR:
hclge_handle_ncsi_error(hdev);
break;
default:
dev_err(&hdev->pdev->dev,
- "un-supported mailbox message, code = %d\n",
+ "un-supported mailbox message, code = %u\n",
req->msg[0]);
break;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index dc4dfd4602ab..696c5ae922e3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -134,7 +134,7 @@ int hclge_mac_mdio_config(struct hclge_dev *hdev)
"no phy device is connected to mdio bus\n");
return 0;
} else if (hdev->hw.mac.phy_addr >= PHY_MAX_ADDR) {
- dev_err(&hdev->pdev->dev, "phy_addr(%d) is too large.\n",
+ dev_err(&hdev->pdev->dev, "phy_addr(%u) is too large.\n",
hdev->hw.mac.phy_addr);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 9f0e35f27789..fbc39a2480d0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -46,7 +46,7 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
#define DIVISOR_CLK (1000 * 8)
#define DIVISOR_IR_B_126 (126 * DIVISOR_CLK)
- const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
+ static const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
6 * 256, /* Priority level */
6 * 32, /* Priority group level */
6 * 8, /* Port level */
@@ -511,6 +511,49 @@ static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc, u8 grp_id,
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
+int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate)
+{
+ struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+ struct hclge_qs_shapping_cmd *shap_cfg_cmd;
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_desc desc;
+ u8 ir_b, ir_u, ir_s;
+ u32 shaper_para;
+ int ret, i;
+
+ if (!max_tx_rate)
+ max_tx_rate = HCLGE_ETHER_MAX_RATE;
+
+ ret = hclge_shaper_para_calc(max_tx_rate, HCLGE_SHAPER_LVL_QSET,
+ &ir_b, &ir_u, &ir_s);
+ if (ret)
+ return ret;
+
+ shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
+ HCLGE_SHAPER_BS_U_DEF,
+ HCLGE_SHAPER_BS_S_DEF);
+
+ for (i = 0; i < kinfo->num_tc; i++) {
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG,
+ false);
+
+ shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
+ shap_cfg_cmd->qs_id = cpu_to_le16(vport->qs_offset + i);
+ shap_cfg_cmd->qs_shapping_para = cpu_to_le32(shaper_para);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "vf%u, qs%u failed to set tx_rate:%d, ret=%d\n",
+ vport->vport_id, shap_cfg_cmd->qs_id,
+ max_tx_rate, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
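One convention to note in hclge_tm_qs_shaper_cfg(): a max_tx_rate of 0 is treated as "uncapped" and the port maximum is programmed instead, which is what hclge_reset_vf_rate() and hclge_resume_vf_rate() earlier in this patch rely on. A toy version of that substitution (the 100000 Mbit/s value is illustrative, not taken from hclge_tm.h):

#include <stdio.h>

#define ETHER_MAX_RATE	100000	/* Mbit/s; illustrative stand-in */

/* 0 = unlimited: reprogram the shaper with the port maximum */
static int effective_rate(int max_tx_rate)
{
	return max_tx_rate ? max_tx_rate : ETHER_MAX_RATE;
}

int main(void)
{
	printf("capped at 1000 -> shaper %d\n", effective_rate(1000));
	printf("uncapped (0)   -> shaper %d\n", effective_rate(0));
	return 0;
}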
static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
{
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
@@ -532,14 +575,21 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
/* Set to user value, no larger than max_rss_size. */
if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
kinfo->req_rss_size <= max_rss_size) {
- dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
+ dev_info(&hdev->pdev->dev, "rss changes from %u to %u\n",
kinfo->rss_size, kinfo->req_rss_size);
kinfo->rss_size = kinfo->req_rss_size;
} else if (kinfo->rss_size > max_rss_size ||
(!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
+ /* If the user has not set the rss size, cap rss_size by the
+ * number of valid msi vectors to keep the default one-to-one
+ * mapping between tqp and irq.
+ */
+ if (!kinfo->req_rss_size)
+ max_rss_size = min_t(u16, max_rss_size,
+ (hdev->num_nic_msi - 1) /
+ kinfo->num_tc);
+
/* Set to the maximum specification value (max_rss_size). */
- dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
- kinfo->rss_size, max_rss_size);
kinfo->rss_size = max_rss_size;
}
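The intent of the clamp above: one vector is reserved for misc interrupts, so at most (num_nic_msi - 1) queue pairs can each own an interrupt, divided across num_tc traffic classes. A worked stand-alone example (all figures illustrative):

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int num_nic_msi = 17;	/* 1 misc vector + 16 I/O vectors */
	unsigned int num_tc = 4;
	unsigned int max_rss_size = 8;	/* per-TC hardware limit */

	/* default: one vector per tqp, so cap rss_size by vectors per TC */
	max_rss_size = min_u(max_rss_size, (num_nic_msi - 1) / num_tc);
	printf("rss_size per TC = %u\n", max_rss_size);	/* prints 4 */
	return 0;
}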
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index 818610988d34..95ef6e1204cf 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -96,6 +96,12 @@ struct hclge_pg_shapping_cmd {
__le32 pg_shapping_para;
};
+struct hclge_qs_shapping_cmd {
+ __le16 qs_id;
+ u8 rsvd[2];
+ __le32 qs_shapping_para;
+};
+
#define HCLGE_BP_GRP_NUM 32
#define HCLGE_BP_SUB_GRP_ID_S 0
#define HCLGE_BP_SUB_GRP_ID_M GENMASK(4, 0)
@@ -154,4 +160,6 @@ int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx);
int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr);
int hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats);
int hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats);
+int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate);
+
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
index d5d1cc5d1b6e..af2245e3bb95 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -50,7 +50,7 @@ static int hclgevf_cmd_csq_clean(struct hclgevf_hw *hw)
rmb(); /* Make sure head is ready before touch any data */
if (!hclgevf_is_valid_csq_clean_head(csq, head)) {
- dev_warn(&hdev->pdev->dev, "wrong cmd head (%d, %d-%d)\n", head,
+ dev_warn(&hdev->pdev->dev, "wrong cmd head (%u, %d-%d)\n", head,
csq->next_to_use, csq->next_to_clean);
dev_warn(&hdev->pdev->dev,
"Disabling any further commands to IMP firmware\n");
@@ -92,9 +92,9 @@ static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring)
u32 reg_val;
if (ring->flag == HCLGEVF_TYPE_CSQ) {
- reg_val = (u32)ring->desc_dma_addr;
+ reg_val = lower_32_bits(ring->desc_dma_addr);
hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val);
- reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
+ reg_val = upper_32_bits(ring->desc_dma_addr);
hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val);
reg_val = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG);
@@ -105,9 +105,9 @@ static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring)
hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
} else {
- reg_val = (u32)ring->desc_dma_addr;
+ reg_val = lower_32_bits(ring->desc_dma_addr);
hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val);
- reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
+ reg_val = upper_32_bits(ring->desc_dma_addr);
hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val);
reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
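The replaced expression (u32)((addr >> 31) >> 1) split the shift in two because shifting a 32-bit dma_addr_t by a full 32 bits is undefined; lower_32_bits()/upper_32_bits() express the same split more readably. A quick stand-alone equivalence check (the macros are user-space stand-ins mirroring the kernel definitions):

#include <stdint.h>
#include <stdio.h>

/* user-space stand-ins for the helpers in <linux/kernel.h> */
#define lower_32_bits(n)	((uint32_t)(n))
#define upper_32_bits(n)	((uint32_t)(((n) >> 16) >> 16))

int main(void)
{
	uint64_t dma = 0x0000001234abcd00ULL;	/* example bus address */

	printf("old high word: 0x%08x\n", (unsigned)((dma >> 31) >> 1));
	printf("new high word: 0x%08x\n", (unsigned)upper_32_bits(dma));
	printf("low word:      0x%08x\n", (unsigned)lower_32_bits(dma));
	return 0;
}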
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index e3090b3dab1d..25d78a5aaa34 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -411,6 +411,13 @@ static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
kinfo->tqp[i] = &hdev->htqp[i].q;
}
+ /* After initializing the max rss_size and tqps, adjust the default
+ * tqp number and rss size to match the actual vector number.
+ */
+ kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps);
+ kinfo->rss_size = min_t(u16, kinfo->num_tqps / kinfo->num_tc,
+ kinfo->rss_size);
+
return 0;
}
@@ -502,6 +509,7 @@ static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
int alloc = 0;
int i, j;
+ vector_num = min_t(u16, hdev->num_nic_msix - 1, vector_num);
vector_num = min(hdev->num_msi_left, vector_num);
for (j = 0; j < vector_num; j++) {
@@ -1105,6 +1113,7 @@ static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
}
static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
+ bool en_uc_pmc, bool en_mc_pmc,
bool en_bc_pmc)
{
struct hclge_mbx_vf_to_pf_cmd *req;
@@ -1112,10 +1121,11 @@ static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
int ret;
req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
-
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
req->msg[1] = en_bc_pmc ? 1 : 0;
+ req->msg[2] = en_uc_pmc ? 1 : 0;
+ req->msg[3] = en_mc_pmc ? 1 : 0;
ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (ret)
@@ -1125,9 +1135,17 @@ static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
return ret;
}
-static int hclgevf_set_promisc_mode(struct hclgevf_dev *hdev, bool en_bc_pmc)
+static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
+ bool en_mc_pmc)
{
- return hclgevf_cmd_set_promisc_mode(hdev, en_bc_pmc);
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct pci_dev *pdev = hdev->pdev;
+ bool en_bc_pmc;
+
+ en_bc_pmc = pdev->revision != 0x20;
+
+ return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc,
+ en_bc_pmc);
}
static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, unsigned int tqp_id,
@@ -1166,11 +1184,37 @@ static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
}
}
+static int hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p)
+{
+ u8 host_mac[ETH_ALEN];
+ int status;
+
+ status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_MAC_ADDR, 0, NULL, 0,
+ true, host_mac, ETH_ALEN);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "fail to get VF MAC from host %d", status);
+ return status;
+ }
+
+ ether_addr_copy(p, host_mac);
+
+ return 0;
+}
+
static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ u8 host_mac_addr[ETH_ALEN];
- ether_addr_copy(p, hdev->hw.mac.mac_addr);
+ if (hclgevf_get_host_mac_addr(hdev, host_mac_addr))
+ return;
+
+ hdev->has_pf_mac = !is_zero_ether_addr(host_mac_addr);
+ if (hdev->has_pf_mac)
+ ether_addr_copy(p, host_mac_addr);
+ else
+ ether_addr_copy(p, hdev->hw.mac.mac_addr);
}
static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
@@ -1267,7 +1311,7 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
memcpy(&msg_data[3], &proto, sizeof(proto));
ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
HCLGE_MBX_VLAN_FILTER, msg_data,
- HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
+ HCLGEVF_VLAN_MBX_MSG_LEN, true, NULL, 0);
/* when remove hw vlan filter failed, record the vlan id,
* and try to remove it from hw later, to be consistent
@@ -1505,12 +1549,39 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
return ret;
}
+static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev)
+{
+ dev_info(&hdev->pdev->dev, "VF function reset count: %u\n",
+ hdev->rst_stats.vf_func_rst_cnt);
+ dev_info(&hdev->pdev->dev, "FLR reset count: %u\n",
+ hdev->rst_stats.flr_rst_cnt);
+ dev_info(&hdev->pdev->dev, "VF reset count: %u\n",
+ hdev->rst_stats.vf_rst_cnt);
+ dev_info(&hdev->pdev->dev, "reset done count: %u\n",
+ hdev->rst_stats.rst_done_cnt);
+ dev_info(&hdev->pdev->dev, "HW reset done count: %u\n",
+ hdev->rst_stats.hw_rst_done_cnt);
+ dev_info(&hdev->pdev->dev, "reset count: %u\n",
+ hdev->rst_stats.rst_cnt);
+ dev_info(&hdev->pdev->dev, "reset fail count: %u\n",
+ hdev->rst_stats.rst_fail_cnt);
+ dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n",
+ hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE));
+ dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n",
+ hclgevf_read_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_STAT_REG));
+ dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n",
+ hclgevf_read_dev(&hdev->hw, HCLGEVF_CMDQ_TX_DEPTH_REG));
+ dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n",
+ hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING));
+ dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state);
+}
+
static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
{
/* recover handshake status with IMP when reset fail */
hclgevf_reset_handshake(hdev, true);
hdev->rst_stats.rst_fail_cnt++;
- dev_err(&hdev->pdev->dev, "failed to reset VF(%d)\n",
+ dev_err(&hdev->pdev->dev, "failed to reset VF(%u)\n",
hdev->rst_stats.rst_fail_cnt);
if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT)
@@ -1519,6 +1590,8 @@ static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
if (hclgevf_is_reset_pending(hdev)) {
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
hclgevf_reset_task_schedule(hdev);
+ } else {
+ hclgevf_dump_rst_info(hdev);
}
}
@@ -1740,6 +1813,8 @@ static void hclgevf_service_timer(struct timer_list *t)
static void hclgevf_reset_service_task(struct work_struct *work)
{
+#define HCLGEVF_MAX_RESET_ATTEMPTS_CNT 3
+
struct hclgevf_dev *hdev =
container_of(work, struct hclgevf_dev, rst_service_task);
int ret;
@@ -1792,7 +1867,7 @@ static void hclgevf_reset_service_task(struct work_struct *work)
* We cannot do much for 2. but to check first we can try reset
* our PCIe + stack and see if it alleviates the problem.
*/
- if (hdev->reset_attempts > 3) {
+ if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) {
/* prepare for full reset of stack + pcie interface */
set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);
@@ -2095,7 +2170,6 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg);
if (ret)
return ret;
-
}
/* Initialize RSS indirect table */
@@ -2246,13 +2320,14 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev)
int vectors;
int i;
- if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B))
+ if (hnae3_dev_roce_supported(hdev))
vectors = pci_alloc_irq_vectors(pdev,
hdev->roce_base_msix_offset + 1,
hdev->num_msi,
PCI_IRQ_MSIX);
else
- vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
+ vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
+ hdev->num_msi,
PCI_IRQ_MSI | PCI_IRQ_MSIX);
if (vectors < 0) {
@@ -2263,11 +2338,12 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev)
}
if (vectors < hdev->num_msi)
dev_warn(&hdev->pdev->dev,
- "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
+ "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
hdev->num_msi, vectors);
hdev->num_msi = vectors;
hdev->num_msi_left = vectors;
+
hdev->base_msi_vector = pdev->irq;
hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset;
@@ -2338,12 +2414,12 @@ static void hclgevf_info_show(struct hclgevf_dev *hdev)
dev_info(dev, "VF info begin:\n");
- dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
- dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
- dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
- dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
- dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
- dev_info(dev, "PF media type of this VF: %d\n",
+ dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
+ dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
+ dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
+ dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
+ dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
+ dev_info(dev, "PF media type of this VF: %u\n",
hdev->hw.mac.media_type);
dev_info(dev, "VF info end.\n");
@@ -2533,7 +2609,7 @@ static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
req = (struct hclgevf_query_res_cmd *)desc.data;
- if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) {
+ if (hnae3_dev_roce_supported(hdev)) {
hdev->roce_base_msix_offset =
hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
HCLGEVF_MSIX_OFT_ROCEE_M,
@@ -2542,6 +2618,9 @@ static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
+ /* The nic's msix vector number always equals the roce's. */
+ hdev->num_nic_msix = hdev->num_roce_msix;
+
/* VF should have NIC vectors and Roce vectors, NIC vectors
* are queued before Roce vectors. The offset is fixed to 64.
*/
@@ -2551,6 +2630,15 @@ static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
hdev->num_msi =
hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
+
+ hdev->num_nic_msix = hdev->num_msi;
+ }
+
+ if (hdev->num_nic_msix < HNAE3_MIN_VECTOR_NUM) {
+ dev_err(&hdev->pdev->dev,
+ "Just %u msi resources, not enough for vf(min:2).\n",
+ hdev->num_nic_msix);
+ return -EINVAL;
}
return 0;
@@ -2626,12 +2714,6 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
return ret;
}
- if (pdev->revision >= 0x21) {
- ret = hclgevf_set_promisc_mode(hdev, true);
- if (ret)
- return ret;
- }
-
dev_info(&hdev->pdev->dev, "Reset done\n");
return 0;
@@ -2706,17 +2788,6 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
if (ret)
goto err_config;
- /* vf is not allowed to enable unicast/multicast promisc mode.
- * For revision 0x20, default to disable broadcast promisc mode,
- * firmware makes sure broadcast packets can be accepted.
- * For revision 0x21, default to enable broadcast promisc mode.
- */
- if (pdev->revision >= 0x21) {
- ret = hclgevf_set_promisc_mode(hdev, true);
- if (ret)
- goto err_config;
- }
-
/* Initialize RSS for this VF */
ret = hclgevf_rss_init_hw(hdev);
if (ret) {
@@ -3130,6 +3201,7 @@ static const struct hnae3_ae_ops hclgevf_ops = {
.get_global_queue_id = hclgevf_get_qid_global,
.set_timer_task = hclgevf_set_timer_task,
.get_link_mode = hclgevf_get_link_mode,
+ .set_promisc_mode = hclgevf_set_promisc_mode,
};
static struct hnae3_ae_algo ae_algovf = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index bdde3afc286b..2f4c81bf4169 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -150,8 +150,6 @@ enum hclgevf_states {
HCLGEVF_STATE_CMD_DISABLE,
};
-#define HCLGEVF_MPF_ENBALE 1
-
struct hclgevf_mac {
u8 media_type;
u8 module_type;
@@ -266,10 +264,12 @@ struct hclgevf_dev {
u16 num_tx_desc; /* desc num of per tx queue */
u16 num_rx_desc; /* desc num of per rx queue */
u8 hw_tc_map;
+ u8 has_pf_mac;
u16 num_msi;
u16 num_msi_left;
u16 num_msi_used;
+ u16 num_nic_msix; /* Num of nic vectors for this VF */
u16 num_roce_msix; /* Num of roce vectors for this VF */
u16 roce_base_msix_offset;
int roce_base_vector;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index a108191c9e50..7cbd715d5e7a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -33,7 +33,7 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
if (resp_len > HCLGE_MBX_MAX_RESP_DATA_SIZE) {
dev_err(&hdev->pdev->dev,
- "VF mbx response len(=%d) exceeds maximum(=%d)\n",
+ "VF mbx response len(=%u) exceeds maximum(=%u)\n",
resp_len,
HCLGE_MBX_MAX_RESP_DATA_SIZE);
return -EINVAL;
@@ -49,7 +49,7 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
if (i >= HCLGEVF_MAX_TRY_TIMES) {
dev_err(&hdev->pdev->dev,
- "VF could not get mbx(%d,%d) resp(=%d) from PF in %d tries\n",
+ "VF could not get mbx(%u,%u) resp(=%d) from PF in %d tries\n",
code0, code1, hdev->mbx_resp.received_resp, i);
return -EIO;
}
@@ -68,10 +68,10 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
if (!(r_code0 == code0 && r_code1 == code1 && !mbx_resp->resp_status)) {
dev_err(&hdev->pdev->dev,
- "VF could not match resp code(code0=%d,code1=%d), %d\n",
+ "VF could not match resp code(code0=%u,code1=%u), %d\n",
code0, code1, mbx_resp->resp_status);
dev_err(&hdev->pdev->dev,
- "VF could not match resp r_code(r_code0=%d,r_code1=%d)\n",
+ "VF could not match resp r_code(r_code0=%u,r_code1=%u)\n",
r_code0, r_code1);
return -EIO;
}
@@ -168,7 +168,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
if (unlikely(!hnae3_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B))) {
dev_warn(&hdev->pdev->dev,
- "dropped invalid mailbox message, code = %d\n",
+ "dropped invalid mailbox message, code = %u\n",
req->msg[0]);
/* dropping/not processing this invalid message */
@@ -187,7 +187,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
case HCLGE_MBX_PF_VF_RESP:
if (resp->received_resp)
dev_warn(&hdev->pdev->dev,
- "VF mbx resp flag not clear(%d)\n",
+ "VF mbx resp flag not clear(%u)\n",
req->msg[1]);
resp->received_resp = true;
@@ -205,6 +205,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
case HCLGE_MBX_ASSERTING_RESET:
case HCLGE_MBX_LINK_STAT_MODE:
case HCLGE_MBX_PUSH_VLAN_INFO:
+ case HCLGE_MBX_PUSH_PROMISC_INFO:
/* set this mbx event as pending. This is required as we
* might lose an interrupt event when the mbx task is busy
* handling. This shall be cleared when mbx task just
@@ -218,7 +219,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
if (atomic_read(&hdev->arq.count) >=
HCLGE_MBX_MAX_ARQ_MSG_NUM) {
dev_warn(&hdev->pdev->dev,
- "Async Q full, dropping msg(%d)\n",
+ "Async Q full, dropping msg(%u)\n",
req->msg[1]);
break;
}
@@ -235,7 +236,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
break;
default:
dev_err(&hdev->pdev->dev,
- "VF received unsupported(%d) mbx msg from PF\n",
+ "VF received unsupported(%u) mbx msg from PF\n",
req->msg[0]);
break;
}
@@ -248,6 +249,14 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
crq->next_to_use);
}
+static void hclgevf_parse_promisc_info(struct hclgevf_dev *hdev,
+ u16 promisc_info)
+{
+ if (!promisc_info)
+ dev_info(&hdev->pdev->dev,
+ "Promisc mode is closed by host for being untrusted.\n");
+}
+
void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
{
enum hnae3_reset_type reset_type;
@@ -313,9 +322,12 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
hclgevf_update_port_base_vlan_info(hdev, state,
(u8 *)vlan_info, 8);
break;
+ case HCLGE_MBX_PUSH_PROMISC_INFO:
+ hclgevf_parse_promisc_info(hdev, msg_q[1]);
+ break;
default:
dev_err(&hdev->pdev->dev,
- "fetched unsupported(%d) message from arq\n",
+ "fetched unsupported(%u) message from arq\n",
msg_q[0]);
break;
}
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index 3e863a71c513..7df5d7d211d4 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -148,11 +148,15 @@ static int mdio_sc_cfg_reg_write(struct hns_mdio_device *mdio_dev,
{
u32 time_cnt;
u32 reg_value;
+ int ret;
regmap_write(mdio_dev->subctrl_vbase, cfg_reg, set_val);
for (time_cnt = MDIO_TIMEOUT; time_cnt; time_cnt--) {
- regmap_read(mdio_dev->subctrl_vbase, st_reg, &reg_value);
+ ret = regmap_read(mdio_dev->subctrl_vbase, st_reg, &reg_value);
+ if (ret)
+ return ret;
+
reg_value &= st_msk;
if ((!!check_st) == (!!reg_value))
break;
diff --git a/drivers/net/ethernet/i825xx/lasi_82596.c b/drivers/net/ethernet/i825xx/lasi_82596.c
index 211c5f74b4c8..aec7e98bcc85 100644
--- a/drivers/net/ethernet/i825xx/lasi_82596.c
+++ b/drivers/net/ethernet/i825xx/lasi_82596.c
@@ -96,6 +96,8 @@
#define OPT_SWAP_PORT 0x0001 /* Need to wordswp on the MPU port */
+#define LIB82596_DMA_ATTR DMA_ATTR_NON_CONSISTENT
+
#define DMA_WBACK(ndev, addr, len) \
do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_TO_DEVICE); } while (0)
@@ -200,7 +202,7 @@ static int __exit lan_remove_chip(struct parisc_device *pdev)
unregister_netdev (dev);
dma_free_attrs(&pdev->dev, sizeof(struct i596_private), lp->dma,
- lp->dma_addr, DMA_ATTR_NON_CONSISTENT);
+ lp->dma_addr, LIB82596_DMA_ATTR);
free_netdev (dev);
return 0;
}
diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c
index 1274ad24d6af..f9742af7f142 100644
--- a/drivers/net/ethernet/i825xx/lib82596.c
+++ b/drivers/net/ethernet/i825xx/lib82596.c
@@ -1065,7 +1065,7 @@ static int i82596_probe(struct net_device *dev)
dma = dma_alloc_attrs(dev->dev.parent, sizeof(struct i596_dma),
&lp->dma_addr, GFP_KERNEL,
- DMA_ATTR_NON_CONSISTENT);
+ LIB82596_DMA_ATTR);
if (!dma) {
printk(KERN_ERR "%s: Couldn't get shared memory\n", __FILE__);
return -ENOMEM;
@@ -1087,7 +1087,7 @@ static int i82596_probe(struct net_device *dev)
i = register_netdev(dev);
if (i) {
dma_free_attrs(dev->dev.parent, sizeof(struct i596_dma),
- dma, lp->dma_addr, DMA_ATTR_NON_CONSISTENT);
+ dma, lp->dma_addr, LIB82596_DMA_ATTR);
return i;
}
diff --git a/drivers/net/ethernet/i825xx/sni_82596.c b/drivers/net/ethernet/i825xx/sni_82596.c
index 6eb6c2ff7f09..6436a98c5953 100644
--- a/drivers/net/ethernet/i825xx/sni_82596.c
+++ b/drivers/net/ethernet/i825xx/sni_82596.c
@@ -24,6 +24,8 @@
static const char sni_82596_string[] = "snirm_82596";
+#define LIB82596_DMA_ATTR 0
+
#define DMA_WBACK(priv, addr, len) do { } while (0)
#define DMA_INV(priv, addr, len) do { } while (0)
#define DMA_WBACK_INV(priv, addr, len) do { } while (0)
@@ -152,7 +154,7 @@ static int sni_82596_driver_remove(struct platform_device *pdev)
unregister_netdev(dev);
dma_free_attrs(dev->dev.parent, sizeof(struct i596_private), lp->dma,
- lp->dma_addr, DMA_ATTR_NON_CONSISTENT);
+ lp->dma_addr, LIB82596_DMA_ATTR);
iounmap(lp->ca);
iounmap(lp->mpu_port);
free_netdev (dev);
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 9e43c9ace9c2..2e40425d8a34 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2849,6 +2849,7 @@ static int emac_init_config(struct emac_instance *dev)
{
struct device_node *np = dev->ofdev->dev.of_node;
const void *p;
+ int err;
/* Read config from device-tree */
if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
@@ -2897,8 +2898,8 @@ static int emac_init_config(struct emac_instance *dev)
dev->mal_burst_size = 256;
/* PHY mode needs some decoding */
- dev->phy_mode = of_get_phy_mode(np);
- if (dev->phy_mode < 0)
+ err = of_get_phy_mode(np, &dev->phy_mode);
+ if (err)
dev->phy_mode = PHY_INTERFACE_MODE_NA;
/* Check EMAC version */
diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h
index e9cda024cbf6..89a1b0fea158 100644
--- a/drivers/net/ethernet/ibm/emac/core.h
+++ b/drivers/net/ethernet/ibm/emac/core.h
@@ -171,7 +171,7 @@ struct emac_instance {
struct mal_commac commac;
/* PHY infos */
- int phy_mode;
+ phy_interface_t phy_mode;
u32 phy_map;
u32 phy_address;
u32 phy_feat_exc;
diff --git a/drivers/net/ethernet/ibm/emac/zmii.c b/drivers/net/ethernet/ibm/emac/zmii.c
index b9e821de2ac6..57a25c7a9e70 100644
--- a/drivers/net/ethernet/ibm/emac/zmii.c
+++ b/drivers/net/ethernet/ibm/emac/zmii.c
@@ -78,7 +78,8 @@ static inline u32 zmii_mode_mask(int mode, int input)
}
}
-int zmii_attach(struct platform_device *ofdev, int input, int *mode)
+int zmii_attach(struct platform_device *ofdev, int input,
+ phy_interface_t *mode)
{
struct zmii_instance *dev = platform_get_drvdata(ofdev);
struct zmii_regs __iomem *p = dev->base;
diff --git a/drivers/net/ethernet/ibm/emac/zmii.h b/drivers/net/ethernet/ibm/emac/zmii.h
index 41d46e9b87ba..65daedc78594 100644
--- a/drivers/net/ethernet/ibm/emac/zmii.h
+++ b/drivers/net/ethernet/ibm/emac/zmii.h
@@ -50,7 +50,8 @@ struct zmii_instance {
int zmii_init(void);
void zmii_exit(void);
-int zmii_attach(struct platform_device *ofdev, int input, int *mode);
+int zmii_attach(struct platform_device *ofdev, int input,
+ phy_interface_t *mode);
void zmii_detach(struct platform_device *ofdev, int input);
void zmii_get_mdio(struct platform_device *ofdev, int input);
void zmii_put_mdio(struct platform_device *ofdev, int input);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 2b073a3c0b84..f59d9a8e35e2 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -2878,12 +2878,10 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
if (test_bit(0, &adapter->resetting) &&
adapter->reset_reason == VNIC_RESET_MOBILITY) {
- u64 val = (0xff000000) | scrq->hw_irq;
+ struct irq_desc *desc = irq_to_desc(scrq->irq);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
- rc = plpar_hcall_norets(H_EOI, val);
- if (rc)
- dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
- val, rc);
+ chip->irq_eoi(&desc->irq_data);
}
rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 71d3d8854d8f..be56e631d693 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -607,6 +607,7 @@ static int e1000_set_ringparam(struct net_device *netdev,
for (i = 0; i < adapter->num_rx_queues; i++)
rxdr[i].count = rxdr->count;
+ err = 0;
if (netif_running(adapter->netdev)) {
/* Try to get new resources before deleting old */
err = e1000_setup_all_rx_resources(adapter);
@@ -627,14 +628,13 @@ static int e1000_set_ringparam(struct net_device *netdev,
adapter->rx_ring = rxdr;
adapter->tx_ring = txdr;
err = e1000_up(adapter);
- if (err)
- goto err_setup;
}
kfree(tx_old);
kfree(rx_old);
clear_bit(__E1000_RESETTING, &adapter->flags);
- return 0;
+ return err;
+
err_setup_tx:
e1000_free_all_rx_resources(adapter);
err_setup_rx:
@@ -646,7 +646,6 @@ err_alloc_rx:
err_alloc_tx:
if (netif_running(adapter->netdev))
e1000_up(adapter);
-err_setup:
clear_bit(__E1000_RESETTING, &adapter->flags);
return err;
}
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index de8c5818a305..adce7e319b9e 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -894,8 +894,9 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
case e1000_pch2lan:
case e1000_pch_lpt:
case e1000_pch_spt:
- /* fall through */
case e1000_pch_cnp:
+ /* fall through */
+ case e1000_pch_tgp:
mask |= BIT(18);
break;
default:
@@ -1559,6 +1560,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
switch (hw->mac.type) {
case e1000_pch_spt:
case e1000_pch_cnp:
+ case e1000_pch_tgp:
fext_nvm11 = er32(FEXTNVM11);
fext_nvm11 &= ~E1000_FEXTNVM11_DISABLE_MULR_FIX;
ew32(FEXTNVM11, fext_nvm11);
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index eff75bd8a8f0..f556163481cb 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -86,6 +86,17 @@ struct e1000_hw;
#define E1000_DEV_ID_PCH_ICP_I219_V8 0x15E0
#define E1000_DEV_ID_PCH_ICP_I219_LM9 0x15E1
#define E1000_DEV_ID_PCH_ICP_I219_V9 0x15E2
+#define E1000_DEV_ID_PCH_CMP_I219_LM10 0x0D4E
+#define E1000_DEV_ID_PCH_CMP_I219_V10 0x0D4F
+#define E1000_DEV_ID_PCH_CMP_I219_LM11 0x0D4C
+#define E1000_DEV_ID_PCH_CMP_I219_V11 0x0D4D
+#define E1000_DEV_ID_PCH_CMP_I219_LM12 0x0D53
+#define E1000_DEV_ID_PCH_CMP_I219_V12 0x0D55
+#define E1000_DEV_ID_PCH_TGP_I219_LM13 0x15FB
+#define E1000_DEV_ID_PCH_TGP_I219_V13 0x15FC
+#define E1000_DEV_ID_PCH_TGP_I219_LM14 0x15F9
+#define E1000_DEV_ID_PCH_TGP_I219_V14 0x15FA
+#define E1000_DEV_ID_PCH_TGP_I219_LM15 0x15F4
#define E1000_REVISION_4 4
@@ -109,6 +120,7 @@ enum e1000_mac_type {
e1000_pch_lpt,
e1000_pch_spt,
e1000_pch_cnp,
+ e1000_pch_tgp,
};
enum e1000_media_type {
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index a1fab77b2096..b4135c50e905 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -316,6 +316,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
case e1000_pch_lpt:
case e1000_pch_spt:
case e1000_pch_cnp:
+ case e1000_pch_tgp:
if (e1000_phy_is_accessible_pchlan(hw))
break;
@@ -458,6 +459,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
case e1000_pch_lpt:
case e1000_pch_spt:
case e1000_pch_cnp:
+ case e1000_pch_tgp:
/* In case the PHY needs to be in mdio slow mode,
* set slow mode and try to get the PHY id again.
*/
@@ -700,6 +702,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
case e1000_pch_lpt:
case e1000_pch_spt:
case e1000_pch_cnp:
+ case e1000_pch_tgp:
case e1000_pchlan:
/* check management mode */
mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
@@ -1638,6 +1641,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
case e1000_pch_lpt:
case e1000_pch_spt:
case e1000_pch_cnp:
+ case e1000_pch_tgp:
rc = e1000_init_phy_params_pchlan(hw);
break;
default:
@@ -2090,6 +2094,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
case e1000_pch_lpt:
case e1000_pch_spt:
case e1000_pch_cnp:
+ case e1000_pch_tgp:
sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
break;
default:
@@ -3127,6 +3132,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
switch (hw->mac.type) {
case e1000_pch_spt:
case e1000_pch_cnp:
+ case e1000_pch_tgp:
bank1_offset = nvm->flash_bank_size;
act_offset = E1000_ICH_NVM_SIG_WORD;
@@ -4070,6 +4076,7 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
case e1000_pch_lpt:
case e1000_pch_spt:
case e1000_pch_cnp:
+ case e1000_pch_tgp:
word = NVM_COMPAT;
valid_csum_mask = NVM_COMPAT_VALID_CSUM;
break;
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index d7d56e42a6aa..032b88619054 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -3538,6 +3538,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
adapter->cc.shift = shift;
break;
case e1000_pch_cnp:
+ case e1000_pch_tgp:
if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
/* Stable 24MHz frequency */
incperiod = INCPERIOD_24MHZ;
@@ -4049,6 +4050,8 @@ void e1000e_reset(struct e1000_adapter *adapter)
case e1000_pch_lpt:
case e1000_pch_spt:
case e1000_pch_cnp:
+		/* fall through */
+ case e1000_pch_tgp:
fc->refresh_time = 0xFFFF;
fc->pause_time = 0xFFFF;
@@ -4715,12 +4718,12 @@ int e1000e_close(struct net_device *netdev)
pm_runtime_get_sync(&pdev->dev);
- if (!test_bit(__E1000_DOWN, &adapter->state)) {
+ if (netif_device_present(netdev)) {
e1000e_down(adapter, true);
e1000_free_irq(adapter);
/* Link status message must follow this format */
- pr_info("%s NIC Link is Down\n", adapter->netdev->name);
+ pr_info("%s NIC Link is Down\n", netdev->name);
}
napi_disable(&adapter->napi);
@@ -6294,14 +6297,188 @@ fl_out:
pm_runtime_put_sync(netdev->dev.parent);
}
+#ifdef CONFIG_PM_SLEEP
+/* S0ix implementation */
+static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 mac_data;
+ u16 phy_data;
+
+ /* Disable the periodic inband message,
+ * don't request PCIe clock in K1 page770_17[10:9] = 10b
+ */
+ e1e_rphy(hw, HV_PM_CTRL, &phy_data);
+ phy_data &= ~HV_PM_CTRL_K1_CLK_REQ;
+ phy_data |= BIT(10);
+ e1e_wphy(hw, HV_PM_CTRL, phy_data);
+
+ /* Make sure we don't exit K1 every time a new packet arrives
+ * 772_29[5] = 1 CS_Mode_Stay_In_K1
+ */
+ e1e_rphy(hw, I217_CGFREG, &phy_data);
+ phy_data |= BIT(5);
+ e1e_wphy(hw, I217_CGFREG, phy_data);
+
+ /* Change the MAC/PHY interface to SMBus
+ * Force the SMBus in PHY page769_23[0] = 1
+ * Force the SMBus in MAC CTRL_EXT[11] = 1
+ */
+ e1e_rphy(hw, CV_SMB_CTRL, &phy_data);
+ phy_data |= CV_SMB_CTRL_FORCE_SMBUS;
+ e1e_wphy(hw, CV_SMB_CTRL, phy_data);
+ mac_data = er32(CTRL_EXT);
+ mac_data |= E1000_CTRL_EXT_FORCE_SMBUS;
+ ew32(CTRL_EXT, mac_data);
+
+ /* DFT control: PHY bit: page769_20[0] = 1
+ * Gate PPW via EXTCNF_CTRL - set 0x0F00[7] = 1
+ */
+ e1e_rphy(hw, I82579_DFT_CTRL, &phy_data);
+ phy_data |= BIT(0);
+ e1e_wphy(hw, I82579_DFT_CTRL, phy_data);
+
+ mac_data = er32(EXTCNF_CTRL);
+ mac_data |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
+ ew32(EXTCNF_CTRL, mac_data);
+
+ /* Check MAC Tx/Rx packet buffer pointers.
+ * Reset MAC Tx/Rx packet buffer pointers to suppress any
+ * pending traffic indication that would prevent power gating.
+ */
+ mac_data = er32(TDFH);
+ if (mac_data)
+ ew32(TDFH, 0);
+ mac_data = er32(TDFT);
+ if (mac_data)
+ ew32(TDFT, 0);
+ mac_data = er32(TDFHS);
+ if (mac_data)
+ ew32(TDFHS, 0);
+ mac_data = er32(TDFTS);
+ if (mac_data)
+ ew32(TDFTS, 0);
+ mac_data = er32(TDFPC);
+ if (mac_data)
+ ew32(TDFPC, 0);
+ mac_data = er32(RDFH);
+ if (mac_data)
+ ew32(RDFH, 0);
+ mac_data = er32(RDFT);
+ if (mac_data)
+ ew32(RDFT, 0);
+ mac_data = er32(RDFHS);
+ if (mac_data)
+ ew32(RDFHS, 0);
+ mac_data = er32(RDFTS);
+ if (mac_data)
+ ew32(RDFTS, 0);
+ mac_data = er32(RDFPC);
+ if (mac_data)
+ ew32(RDFPC, 0);
+
+ /* Enable the Dynamic Power Gating in the MAC */
+ mac_data = er32(FEXTNVM7);
+ mac_data |= BIT(22);
+ ew32(FEXTNVM7, mac_data);
+
+ /* Disable the time synchronization clock */
+ mac_data = er32(FEXTNVM7);
+ mac_data |= BIT(31);
+ mac_data &= ~BIT(0);
+ ew32(FEXTNVM7, mac_data);
+
+ /* Dynamic Power Gating Enable */
+ mac_data = er32(CTRL_EXT);
+ mac_data |= BIT(3);
+ ew32(CTRL_EXT, mac_data);
+
+ /* Enable the Dynamic Clock Gating in the DMA and MAC */
+ mac_data = er32(CTRL_EXT);
+ mac_data |= E1000_CTRL_EXT_DMA_DYN_CLK_EN;
+ ew32(CTRL_EXT, mac_data);
+
+ /* No MAC DPG gating SLP_S0 in modern standby
+ * Switch the logic of the lanphypc to use PMC counter
+ */
+ mac_data = er32(FEXTNVM5);
+ mac_data |= BIT(7);
+ ew32(FEXTNVM5, mac_data);
+}
+
+static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 mac_data;
+ u16 phy_data;
+
+ /* Disable the Dynamic Power Gating in the MAC */
+ mac_data = er32(FEXTNVM7);
+ mac_data &= 0xFFBFFFFF;
+ ew32(FEXTNVM7, mac_data);
+
+ /* Enable the time synchronization clock */
+ mac_data = er32(FEXTNVM7);
+ mac_data |= BIT(0);
+ ew32(FEXTNVM7, mac_data);
+
+ /* Disable Dynamic Power Gating */
+ mac_data = er32(CTRL_EXT);
+ mac_data &= 0xFFFFFFF7;
+ ew32(CTRL_EXT, mac_data);
+
+ /* Disable the Dynamic Clock Gating in the DMA and MAC */
+ mac_data = er32(CTRL_EXT);
+ mac_data &= 0xFFF7FFFF;
+ ew32(CTRL_EXT, mac_data);
+
+ /* Revert the lanphypc logic to use the internal Gbe counter
+ * and not the PMC counter
+ */
+ mac_data = er32(FEXTNVM5);
+ mac_data &= 0xFFFFFF7F;
+ ew32(FEXTNVM5, mac_data);
+
+ /* Enable the periodic inband message,
+	 * Request PCIe clock in K1 page770_17[10:9] = 01b
+ */
+ e1e_rphy(hw, HV_PM_CTRL, &phy_data);
+ phy_data &= 0xFBFF;
+ phy_data |= HV_PM_CTRL_K1_CLK_REQ;
+ e1e_wphy(hw, HV_PM_CTRL, phy_data);
+
+	/* Restore the configuration
+ * 772_29[5] = 0 CS_Mode_Stay_In_K1
+ */
+ e1e_rphy(hw, I217_CGFREG, &phy_data);
+ phy_data &= 0xFFDF;
+ e1e_wphy(hw, I217_CGFREG, phy_data);
+
+ /* Change the MAC/PHY interface to Kumeran
+ * Unforce the SMBus in PHY page769_23[0] = 0
+ * Unforce the SMBus in MAC CTRL_EXT[11] = 0
+ */
+ e1e_rphy(hw, CV_SMB_CTRL, &phy_data);
+ phy_data &= ~CV_SMB_CTRL_FORCE_SMBUS;
+ e1e_wphy(hw, CV_SMB_CTRL, phy_data);
+ mac_data = er32(CTRL_EXT);
+ mac_data &= ~E1000_CTRL_EXT_FORCE_SMBUS;
+ ew32(CTRL_EXT, mac_data);
+}
+#endif /* CONFIG_PM_SLEEP */
+
static int e1000e_pm_freeze(struct device *dev)
{
struct net_device *netdev = dev_get_drvdata(dev);
struct e1000_adapter *adapter = netdev_priv(netdev);
+ bool present;
+
+ rtnl_lock();
+ present = netif_device_present(netdev);
netif_device_detach(netdev);
- if (netif_running(netdev)) {
+ if (present && netif_running(netdev)) {
int count = E1000_CHECK_RESET_COUNT;
while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
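
The S0ix exit flow above undoes the entry flow's writes with raw hex masks: 0xFFBFFFFF clears bit 22 (the DPG enable set earlier via BIT(22)), 0xFFFFFFF7 clears bit 3, 0xFFF7FFFF clears bit 19, and 0xFFFFFF7F clears bit 7. A readability sketch of the same writes using ~BIT(n); the constant names here are hypothetical, only the bit positions are taken from the masks in the patch:

/* Hypothetical names; bit positions derived from the masks above. */
#define E1000_FEXTNVM7_DPG_EN		BIT(22)	/* 0xFFBFFFFF clears this */
#define E1000_CTRL_EXT_DPG_EN		BIT(3)	/* 0xFFFFFFF7 clears this */
#define E1000_CTRL_EXT_DMA_DYN_CLK	BIT(19)	/* 0xFFF7FFFF clears this */
#define E1000_FEXTNVM5_LANPHYPC_PMC	BIT(7)	/* 0xFFFFFF7F clears this */

mac_data = er32(FEXTNVM7);
mac_data &= ~E1000_FEXTNVM7_DPG_EN;
ew32(FEXTNVM7, mac_data);
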
@@ -6313,6 +6490,8 @@ static int e1000e_pm_freeze(struct device *dev)
e1000e_down(adapter, false);
e1000_free_irq(adapter);
}
+ rtnl_unlock();
+
e1000e_reset_interrupt_capability(adapter);
/* Allow time for pending master requests to run */
@@ -6560,6 +6739,30 @@ static void e1000e_disable_aspm_locked(struct pci_dev *pdev, u16 state)
__e1000e_disable_aspm(pdev, state, 1);
}
+static int e1000e_pm_thaw(struct device *dev)
+{
+ struct net_device *netdev = dev_get_drvdata(dev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ int rc = 0;
+
+ e1000e_set_interrupt_capability(adapter);
+
+ rtnl_lock();
+ if (netif_running(netdev)) {
+ rc = e1000_request_irq(adapter);
+ if (rc)
+ goto err_irq;
+
+ e1000e_up(adapter);
+ }
+
+ netif_device_attach(netdev);
+err_irq:
+ rtnl_unlock();
+
+ return rc;
+}
+
#ifdef CONFIG_PM
static int __e1000_resume(struct pci_dev *pdev)
{
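
The freeze path now detaches under rtnl_lock() and samples netif_device_present() first, so a device that was already detached (for instance by the error-handling path, reworked below to call e1000e_pm_freeze() directly) is not downed a second time; the relocated e1000e_pm_thaw() re-attaches under the same lock. A condensed sketch of the pairing, with the driver-specific teardown elided:

/* Condensed freeze side; quiesce steps elided. */
rtnl_lock();
present = netif_device_present(netdev);
netif_device_detach(netdev);
if (present && netif_running(netdev)) {
	/* wait out __E1000_RESETTING, then down the adapter, free IRQ */
}
rtnl_unlock();
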
@@ -6627,29 +6830,12 @@ static int __e1000_resume(struct pci_dev *pdev)
}
#ifdef CONFIG_PM_SLEEP
-static int e1000e_pm_thaw(struct device *dev)
-{
- struct net_device *netdev = dev_get_drvdata(dev);
- struct e1000_adapter *adapter = netdev_priv(netdev);
-
- e1000e_set_interrupt_capability(adapter);
- if (netif_running(netdev)) {
- u32 err = e1000_request_irq(adapter);
-
- if (err)
- return err;
-
- e1000e_up(adapter);
- }
-
- netif_device_attach(netdev);
-
- return 0;
-}
-
static int e1000e_pm_suspend(struct device *dev)
{
+ struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
+ struct e1000_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = to_pci_dev(dev);
+ struct e1000_hw *hw = &adapter->hw;
int rc;
e1000e_flush_lpic(pdev);
@@ -6660,14 +6846,25 @@ static int e1000e_pm_suspend(struct device *dev)
if (rc)
e1000e_pm_thaw(dev);
+	/* Enter the S0ix flow on CNP and newer MACs */
+ if (hw->mac.type >= e1000_pch_cnp)
+ e1000e_s0ix_entry_flow(adapter);
+
return rc;
}
static int e1000e_pm_resume(struct device *dev)
{
+ struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
+ struct e1000_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = to_pci_dev(dev);
+ struct e1000_hw *hw = &adapter->hw;
int rc;
+	/* Exit the S0ix flow on CNP and newer MACs */
+ if (hw->mac.type >= e1000_pch_cnp)
+ e1000e_s0ix_exit_flow(adapter);
+
rc = __e1000_resume(pdev);
if (rc)
return rc;
@@ -6818,16 +7015,11 @@ static void e1000_netpoll(struct net_device *netdev)
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct e1000_adapter *adapter = netdev_priv(netdev);
-
- netif_device_detach(netdev);
+ e1000e_pm_freeze(&pdev->dev);
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
- if (netif_running(netdev))
- e1000e_down(adapter, true);
pci_disable_device(pdev);
/* Request a slot reset. */
@@ -6893,10 +7085,7 @@ static void e1000_io_resume(struct pci_dev *pdev)
e1000_init_manageability_pt(adapter);
- if (netif_running(netdev))
- e1000e_up(adapter);
-
- netif_device_attach(netdev);
+ e1000e_pm_thaw(&pdev->dev);
/* If the controller has AMT, do not set DRV_LOAD until the interface
* is up. For all other cases, let the f/w know that the h/w is now
@@ -7407,15 +7596,13 @@ static void e1000_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
- bool down = test_bit(__E1000_DOWN, &adapter->state);
e1000e_ptp_remove(adapter);
/* The timers may be rescheduled, so explicitly disable them
* from being rescheduled.
*/
- if (!down)
- set_bit(__E1000_DOWN, &adapter->state);
+ set_bit(__E1000_DOWN, &adapter->state);
del_timer_sync(&adapter->phy_info_timer);
cancel_work_sync(&adapter->reset_task);
@@ -7435,9 +7622,6 @@ static void e1000_remove(struct pci_dev *pdev)
}
}
- /* Don't lie to e1000_close() down the road. */
- if (!down)
- clear_bit(__E1000_DOWN, &adapter->state);
unregister_netdev(netdev);
if (pci_dev_run_wake(pdev))
@@ -7567,6 +7751,17 @@ static const struct pci_device_id e1000_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_V8), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_LM9), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_V9), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_LM10), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V10), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_LM11), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V11), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_LM12), board_pch_spt },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V12), board_pch_spt },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM13), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V13), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM14), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_cnp },
{ 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
};
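
The new Comet Lake (CMP) and Tiger Lake (TGP) IDs reuse existing board types: the LM12/V12 parts bind as board_pch_spt, the rest as board_pch_cnp. At probe time the table's driver_data index selects the per-board info; a sketch of that lookup, assuming e1000e's usual e1000_info_tbl array (referenced from context outside this diff):

/* ent->driver_data carries the board_* index from e1000_pci_tbl. */
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];

	/* ei->mac, ei->flags, etc. then drive the rest of device init */
	return 0;
}
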
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index 1a4c65d9feb4..eaa5a0fb99f0 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -295,6 +295,8 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
case e1000_pch_lpt:
case e1000_pch_spt:
case e1000_pch_cnp:
+	/* fall through */
+ case e1000_pch_tgp:
if ((hw->mac.type < e1000_pch_lpt) ||
(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
adapter->ptp_clock_info.max_adj = 24000000 - 1;
diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h
index 47f5ca793970..df59fd1d660c 100644
--- a/drivers/net/ethernet/intel/e1000e/regs.h
+++ b/drivers/net/ethernet/intel/e1000e/regs.h
@@ -18,6 +18,7 @@
#define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */
#define E1000_FEXTNVM3 0x0003C /* Future Extended NVM 3 - RW */
#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */
+#define E1000_FEXTNVM5 0x00014 /* Future Extended NVM 5 - RW */
#define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */
#define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */
#define E1000_FEXTNVM9 0x5BB4 /* Future Extended NVM 9 - RW */
@@ -234,4 +235,7 @@
#define E1000_RXMTRL 0x0B634 /* Time sync Rx EtherType and Msg Type - RW */
#define E1000_RXUDP 0x0B638 /* Time Sync Rx UDP Port - RW */
+/* PHY registers */
+#define I82579_DFT_CTRL PHY_REG(769, 20)
+
#endif
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index b14441944b4b..f306084ca12c 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -534,6 +534,7 @@ void fm10k_iov_suspend(struct pci_dev *pdev);
int fm10k_iov_resume(struct pci_dev *pdev);
void fm10k_iov_disable(struct pci_dev *pdev);
int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs);
+void fm10k_iov_update_stats(struct fm10k_intfc *interface);
s32 fm10k_iov_update_pvid(struct fm10k_intfc *interface, u16 glort, u16 pvid);
int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac);
int fm10k_ndo_set_vf_vlan(struct net_device *netdev,
@@ -542,6 +543,8 @@ int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx,
int __always_unused min_rate, int max_rate);
int fm10k_ndo_get_vf_config(struct net_device *netdev,
int vf_idx, struct ifla_vf_info *ivi);
+int fm10k_ndo_get_vf_stats(struct net_device *netdev,
+ int vf_idx, struct ifla_vf_stats *stats);
/* DebugFS */
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
index afe1fafd2447..8c50a128df29 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
@@ -520,6 +520,27 @@ int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs)
return num_vfs;
}
+/**
+ * fm10k_iov_update_stats - Update stats for all VFs
+ * @interface: device private structure
+ *
+ * Updates the VF statistics for all enabled VFs. Expects to be called by
+ * fm10k_update_stats and assumes that locking via the __FM10K_UPDATING_STATS
+ * bit is already handled.
+ */
+void fm10k_iov_update_stats(struct fm10k_intfc *interface)
+{
+ struct fm10k_iov_data *iov_data = interface->iov_data;
+ struct fm10k_hw *hw = &interface->hw;
+ int i;
+
+ if (!iov_data)
+ return;
+
+ for (i = 0; i < iov_data->num_vfs; i++)
+ hw->iov.ops.update_stats(hw, iov_data->vf_info[i].stats, i);
+}
+
static inline void fm10k_reset_vf_info(struct fm10k_intfc *interface,
struct fm10k_vf_info *vf_info)
{
@@ -650,3 +671,30 @@ int fm10k_ndo_get_vf_config(struct net_device *netdev,
return 0;
}
+
+int fm10k_ndo_get_vf_stats(struct net_device *netdev,
+ int vf_idx, struct ifla_vf_stats *stats)
+{
+ struct fm10k_intfc *interface = netdev_priv(netdev);
+ struct fm10k_iov_data *iov_data = interface->iov_data;
+ struct fm10k_hw *hw = &interface->hw;
+ struct fm10k_hw_stats_q *hw_stats;
+ u32 idx, qpp;
+
+ /* verify SR-IOV is active and that vf idx is valid */
+ if (!iov_data || vf_idx >= iov_data->num_vfs)
+ return -EINVAL;
+
+ qpp = fm10k_queues_per_pool(hw);
+ hw_stats = iov_data->vf_info[vf_idx].stats;
+
+ for (idx = 0; idx < qpp; idx++) {
+ stats->rx_packets += hw_stats[idx].rx_packets.count;
+ stats->tx_packets += hw_stats[idx].tx_packets.count;
+ stats->rx_bytes += hw_stats[idx].rx_bytes.count;
+ stats->tx_bytes += hw_stats[idx].tx_bytes.count;
+ stats->rx_dropped += hw_stats[idx].rx_drops.count;
+ }
+
+ return 0;
+}
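
Note that fm10k_ndo_get_vf_stats() accumulates with += into the caller's structure, relying on the rtnetlink core zeroing struct ifla_vf_stats before invoking .ndo_get_vf_stats (which it does today). A purely illustrative defensive variant that removes that dependency:

/* Illustrative: accumulate into a zeroed local, then copy out. */
struct ifla_vf_stats tmp = {};

for (idx = 0; idx < qpp; idx++) {
	tmp.rx_packets += hw_stats[idx].rx_packets.count;
	tmp.tx_packets += hw_stats[idx].tx_packets.count;
	tmp.rx_bytes   += hw_stats[idx].rx_bytes.count;
	tmp.tx_bytes   += hw_stats[idx].tx_bytes.count;
}
*stats = tmp;
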
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 2be9222510e7..17738b0a9873 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -11,7 +11,7 @@
#include "fm10k.h"
-#define DRV_VERSION "0.26.1-k"
+#define DRV_VERSION "0.27.1-k"
#define DRV_SUMMARY "Intel(R) Ethernet Switch Host Interface Driver"
const char fm10k_driver_version[] = DRV_VERSION;
char fm10k_driver_name[] = "fm10k";
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 09f7a246e134..68baee04dc58 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -1643,6 +1643,7 @@ static const struct net_device_ops fm10k_netdev_ops = {
.ndo_set_vf_vlan = fm10k_ndo_set_vf_vlan,
.ndo_set_vf_rate = fm10k_ndo_set_vf_bw,
.ndo_get_vf_config = fm10k_ndo_get_vf_config,
+ .ndo_get_vf_stats = fm10k_ndo_get_vf_stats,
.ndo_udp_tunnel_add = fm10k_udp_tunnel_add,
.ndo_udp_tunnel_del = fm10k_udp_tunnel_del,
.ndo_dfwd_add_station = fm10k_dfwd_add_station,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index bb236fa44048..d122d0087191 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -630,6 +630,9 @@ void fm10k_update_stats(struct fm10k_intfc *interface)
net_stats->rx_errors = rx_errors;
net_stats->rx_dropped = interface->stats.nodesc_drop.count;
+ /* Update VF statistics */
+ fm10k_iov_update_stats(interface);
+
clear_bit(__FM10K_UPDATING_STATS, interface->state);
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h
index 160bc5b78f99..ceb9b791f799 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
#ifndef _FM10K_TLV_H_
#define _FM10K_TLV_H_
@@ -76,8 +76,8 @@ struct fm10k_tlv_attr {
#define FM10K_TLV_ATTR_S32(id) { id, FM10K_TLV_SIGNED, 4 }
#define FM10K_TLV_ATTR_S64(id) { id, FM10K_TLV_SIGNED, 8 }
#define FM10K_TLV_ATTR_LE_STRUCT(id, len) { id, FM10K_TLV_LE_STRUCT, len }
-#define FM10K_TLV_ATTR_NESTED(id) { id, FM10K_TLV_NESTED }
-#define FM10K_TLV_ATTR_LAST { FM10K_TLV_ERROR }
+#define FM10K_TLV_ATTR_NESTED(id) { id, FM10K_TLV_NESTED, 0 }
+#define FM10K_TLV_ATTR_LAST { FM10K_TLV_ERROR, 0, 0 }
struct fm10k_msg_data {
unsigned int id;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
index 15ac1c7885bc..63968c5d7c5d 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
@@ -581,6 +581,7 @@ struct fm10k_vf_info {
* at the same offset as the mailbox
*/
struct fm10k_mbx_info mbx; /* PF side of VF mailbox */
+ struct fm10k_hw_stats_q stats[FM10K_MAX_QUEUES_POOL];
int rate; /* Tx BW cap as defined by OS */
u16 glort; /* resource tag for this VF */
u16 sw_vid; /* Switch API assigned VLAN */
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 2af9f6308f84..cb6367334ca7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -1118,6 +1118,7 @@ struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
const u8 *macaddr);
int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr);
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
+int i40e_count_filters(struct i40e_vsi *vsi);
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr);
void i40e_vlan_stripping_enable(struct i40e_vsi *vsi);
#ifdef CONFIG_I40E_DCB
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 72c04881d290..9f0a4e92a231 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -508,6 +508,59 @@ shutdown_arq_out:
}
/**
+ * i40e_set_hw_flags - set HW flags
+ * @hw: pointer to the hardware structure
+ **/
+static void i40e_set_hw_flags(struct i40e_hw *hw)
+{
+ struct i40e_adminq_info *aq = &hw->aq;
+
+ hw->flags = 0;
+
+ switch (hw->mac.type) {
+ case I40E_MAC_XL710:
+ if (aq->api_maj_ver > 1 ||
+ (aq->api_maj_ver == 1 &&
+ aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710)) {
+ hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
+ hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
+ /* The ability to RX (not drop) 802.1ad frames */
+ hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
+ }
+ break;
+ case I40E_MAC_X722:
+ hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
+ I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
+
+ if (aq->api_maj_ver > 1 ||
+ (aq->api_maj_ver == 1 &&
+ aq->api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722))
+ hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
+ /* fall through */
+ default:
+ break;
+ }
+
+ /* Newer versions of firmware require lock when reading the NVM */
+ if (aq->api_maj_ver > 1 ||
+ (aq->api_maj_ver == 1 &&
+ aq->api_min_ver >= 5))
+ hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
+
+ if (aq->api_maj_ver > 1 ||
+ (aq->api_maj_ver == 1 &&
+ aq->api_min_ver >= 8)) {
+ hw->flags |= I40E_HW_FLAG_FW_LLDP_PERSISTENT;
+ hw->flags |= I40E_HW_FLAG_DROP_MODE;
+ }
+
+ if (aq->api_maj_ver > 1 ||
+ (aq->api_maj_ver == 1 &&
+ aq->api_min_ver >= 9))
+ hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED;
+}
+
+/**
* i40e_init_adminq - main initialization routine for Admin Queue
* @hw: pointer to the hardware structure
*
@@ -571,6 +624,11 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
if (ret_code != I40E_SUCCESS)
goto init_adminq_free_arq;
+ /* Some features were introduced in different FW API version
+ * for different MAC type.
+ */
+ i40e_set_hw_flags(hw);
+
/* get the NVM version info */
i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
&hw->nvm.version);
@@ -596,25 +654,12 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
}
- /* Newer versions of firmware require lock when reading the NVM */
- if (hw->aq.api_maj_ver > 1 ||
- (hw->aq.api_maj_ver == 1 &&
- hw->aq.api_min_ver >= 5))
- hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
-
/* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */
if (hw->aq.api_maj_ver > 1 ||
(hw->aq.api_maj_ver == 1 &&
hw->aq.api_min_ver >= 7))
hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
- if (hw->aq.api_maj_ver > 1 ||
- (hw->aq.api_maj_ver == 1 &&
- hw->aq.api_min_ver >= 8)) {
- hw->flags |= I40E_HW_FLAG_FW_LLDP_PERSISTENT;
- hw->flags |= I40E_HW_FLAG_DROP_MODE;
- }
-
if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
ret_code = I40E_ERR_FIRMWARE_API_VERSION;
goto init_adminq_free_arq;
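
i40e_set_hw_flags() repeats the same "API version at least maj.min" comparison five times. A hypothetical helper (not an existing i40e function) would reduce each gate to one line:

/* Hypothetical helper; name and placement are assumptions. */
static bool i40e_aq_api_ver_ge(const struct i40e_adminq_info *aq,
			       u16 maj, u16 min)
{
	return aq->api_maj_ver > maj ||
	       (aq->api_maj_ver == maj && aq->api_min_ver >= min);
}

/* The NVM-lock gate above would then read: */
if (i40e_aq_api_ver_ge(aq, 1, 5))
	hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
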
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 530613f31527..a23f89fb33ee 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -2249,7 +2249,13 @@ struct i40e_aqc_phy_register_access {
#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL 1
#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE 2
u8 dev_address;
- u8 reserved1[2];
+ u8 cmd_flags;
+#define I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE 0x01
+#define I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER 0x02
+#define I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT 2
+#define I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK (0x3 << \
+ I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT)
+ u8 reserved1;
__le32 reg_address;
__le32 reg_value;
u8 reserved2[4];
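
One formerly reserved byte becomes cmd_flags: bit 0 suppresses the QSFP page change and bit 1 requests that the MDIO I/F number carried in bits 3:2 be used. Composing the flags for, say, MDIO interface 2 (values chosen purely for illustration):

u8 cmd_flags;

/* Illustrative: select MDIO I/F 2 without touching the QSFP page. */
cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE |
	    I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER |
	    ((2 << I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT) &
	     I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK);
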
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index d37c6e0e5f08..a0791447849a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -29,6 +29,7 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_QSFP_C:
case I40E_DEV_ID_10G_BASE_T:
case I40E_DEV_ID_10G_BASE_T4:
+ case I40E_DEV_ID_10G_BASE_T_BC:
case I40E_DEV_ID_10G_B:
case I40E_DEV_ID_10G_SFP:
case I40E_DEV_ID_20G_KR2:
@@ -933,10 +934,6 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw)
else
hw->pf_id = (u8)(func_rid & 0x7);
- if (hw->mac.type == I40E_MAC_X722)
- hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
- I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
-
status = i40e_init_nvm(hw);
return status;
}
@@ -1441,9 +1438,9 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
u32 gpio_val = 0;
u32 port;
- if (!hw->func_caps.led[idx])
+ if (!I40E_IS_X710TL_DEVICE(hw->device_id) &&
+ !hw->func_caps.led[idx])
return 0;
-
gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));
port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >>
I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
@@ -1462,8 +1459,15 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
#define I40E_FILTER_ACTIVITY 0xE
#define I40E_LINK_ACTIVITY 0xC
#define I40E_MAC_ACTIVITY 0xD
+#define I40E_FW_LED BIT(4)
+#define I40E_LED_MODE_VALID (I40E_GLGEN_GPIO_CTL_LED_MODE_MASK >> \
+ I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
+
#define I40E_LED0 22
+#define I40E_PIN_FUNC_SDP 0x0
+#define I40E_PIN_FUNC_LED 0x1
+
/**
* i40e_led_get - return current on/off mode
* @hw: pointer to the hw struct
@@ -1508,8 +1512,10 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
{
int i;
- if (mode & 0xfffffff0)
+ if (mode & ~I40E_LED_MODE_VALID) {
hw_dbg(hw, "invalid mode passed in %X\n", mode);
+ return;
+ }
/* as per the documentation GPIO 22-29 are the LED
* GPIO pins named LED0..LED7
@@ -1519,6 +1525,20 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
if (!gpio_val)
continue;
+
+ if (I40E_IS_X710TL_DEVICE(hw->device_id)) {
+ u32 pin_func = 0;
+
+ if (mode & I40E_FW_LED)
+ pin_func = I40E_PIN_FUNC_SDP;
+ else
+ pin_func = I40E_PIN_FUNC_LED;
+
+ gpio_val &= ~I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK;
+ gpio_val |= ((pin_func <<
+ I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT) &
+ I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK);
+ }
gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
/* this & is a bit of paranoia, but serves as a range check */
gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
@@ -2570,9 +2590,16 @@ noinline_for_stack i40e_status i40e_update_link_info(struct i40e_hw *hw)
if (status)
return status;
- hw->phy.link_info.req_fec_info =
- abilities.fec_cfg_curr_mod_ext_info &
- (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS);
+ if (abilities.fec_cfg_curr_mod_ext_info &
+ I40E_AQ_ENABLE_FEC_AUTO)
+ hw->phy.link_info.req_fec_info =
+ (I40E_AQ_REQUEST_FEC_KR |
+ I40E_AQ_REQUEST_FEC_RS);
+ else
+ hw->phy.link_info.req_fec_info =
+ abilities.fec_cfg_curr_mod_ext_info &
+ (I40E_AQ_REQUEST_FEC_KR |
+ I40E_AQ_REQUEST_FEC_RS);
memcpy(hw->phy.link_info.module_type, &abilities.module_type,
sizeof(hw->phy.link_info.module_type));
@@ -4884,6 +4911,7 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw,
break;
case I40E_DEV_ID_10G_BASE_T:
case I40E_DEV_ID_10G_BASE_T4:
+ case I40E_DEV_ID_10G_BASE_T_BC:
case I40E_DEV_ID_10G_BASE_T_X722:
case I40E_DEV_ID_25G_B:
case I40E_DEV_ID_25G_SFP28:
@@ -5043,7 +5071,7 @@ static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
status =
i40e_aq_get_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
- I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_COM_REG_PAGE, true,
I40E_PHY_LED_PROV_REG_1,
reg_val, NULL);
} else {
@@ -5076,7 +5104,7 @@ static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
status =
i40e_aq_set_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
- I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_COM_REG_PAGE, true,
I40E_PHY_LED_PROV_REG_1,
reg_val, NULL);
} else {
@@ -5115,7 +5143,7 @@ i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
status =
i40e_aq_get_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
- I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_COM_REG_PAGE, true,
I40E_PHY_LED_PROV_REG_1,
&reg_val_aq, NULL);
if (status == I40E_SUCCESS)
@@ -5320,20 +5348,49 @@ do_retry:
}
/**
- * i40e_aq_set_phy_register
+ * i40e_mdio_if_number_selection - MDIO I/F number selection
+ * @hw: pointer to the hw struct
+ * @set_mdio: use MDIO I/F number specified by mdio_num
+ * @mdio_num: MDIO I/F number
+ * @cmd: pointer to PHY Register command structure
+ **/
+static void i40e_mdio_if_number_selection(struct i40e_hw *hw, bool set_mdio,
+ u8 mdio_num,
+ struct i40e_aqc_phy_register_access *cmd)
+{
+ if (set_mdio && cmd->phy_interface == I40E_AQ_PHY_REG_ACCESS_EXTERNAL) {
+ if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED)
+ cmd->cmd_flags |=
+ I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER |
+ ((mdio_num <<
+ I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT) &
+ I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK);
+ else
+ i40e_debug(hw, I40E_DEBUG_PHY,
+ "MDIO I/F number selection not supported by current FW version.\n");
+ }
+}
+
+/**
+ * i40e_aq_set_phy_register_ext
* @hw: pointer to the hw struct
* @phy_select: select which phy should be accessed
* @dev_addr: PHY device address
+ * @set_mdio: use MDIO I/F number specified by mdio_num
+ * @mdio_num: MDIO I/F number
* @reg_addr: PHY register address
* @reg_val: new register value
* @cmd_details: pointer to command details structure or NULL
*
* Write the external PHY register.
+ * NOTE: In common cases the MDIO I/F number should not be changed; that's why
+ * you may use the simple wrapper i40e_aq_set_phy_register.
**/
-i40e_status i40e_aq_set_phy_register(struct i40e_hw *hw,
- u8 phy_select, u8 dev_addr,
- u32 reg_addr, u32 reg_val,
- struct i40e_asq_cmd_details *cmd_details)
+enum i40e_status_code i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr, bool page_change,
+ bool set_mdio, u8 mdio_num,
+ u32 reg_addr, u32 reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_phy_register_access *cmd =
@@ -5348,26 +5405,36 @@ i40e_status i40e_aq_set_phy_register(struct i40e_hw *hw,
cmd->reg_address = cpu_to_le32(reg_addr);
cmd->reg_value = cpu_to_le32(reg_val);
+ i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);
+
+ if (!page_change)
+ cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;
+
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
return status;
}
/**
- * i40e_aq_get_phy_register
+ * i40e_aq_get_phy_register_ext
* @hw: pointer to the hw struct
* @phy_select: select which phy should be accessed
* @dev_addr: PHY device address
+ * @set_mdio: use MDIO I/F number specified by mdio_num
+ * @mdio_num: MDIO I/F number
* @reg_addr: PHY register address
* @reg_val: read register value
* @cmd_details: pointer to command details structure or NULL
*
* Read the external PHY register.
+ * NOTE: In common cases the MDIO I/F number should not be changed; that's why
+ * you may use the simple wrapper i40e_aq_get_phy_register.
**/
-i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw,
- u8 phy_select, u8 dev_addr,
- u32 reg_addr, u32 *reg_val,
- struct i40e_asq_cmd_details *cmd_details)
+enum i40e_status_code i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr, bool page_change,
+ bool set_mdio, u8 mdio_num,
+ u32 reg_addr, u32 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_phy_register_access *cmd =
@@ -5381,6 +5448,11 @@ i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw,
cmd->dev_address = dev_addr;
cmd->reg_address = cpu_to_le32(reg_addr);
+ i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);
+
+ if (!page_change)
+ cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;
+
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
if (!status)
*reg_val = le32_to_cpu(cmd->reg_value);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 200a1cb3b536..9de503c5f99b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -889,7 +889,9 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
ret = i40e_read_nvm_module_data(hw,
I40E_SR_EMP_SR_SETTINGS_PTR,
- offset, 1,
+ offset,
+ I40E_LLDP_CURRENT_STATUS_OFFSET,
+ I40E_LLDP_CURRENT_STATUS_SIZE,
&lldp_cfg.adminstatus);
} else {
ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
index 2a80c5daa376..ba86ad833bee 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
@@ -32,6 +32,9 @@
#define I40E_CEE_MAX_FEAT_TYPE 3
#define I40E_LLDP_CURRENT_STATUS_XL710_OFFSET 0x2B
#define I40E_LLDP_CURRENT_STATUS_X722_OFFSET 0x31
+#define I40E_LLDP_CURRENT_STATUS_OFFSET 1
+#define I40E_LLDP_CURRENT_STATUS_SIZE 1
+
/* Defines for LLDP TLV header */
#define I40E_LLDP_TLV_LEN_SHIFT 0
#define I40E_LLDP_TLV_LEN_MASK (0x01FF << I40E_LLDP_TLV_LEN_SHIFT)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h
index bac4da031f9b..bf15a868292f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_devids.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h
@@ -23,6 +23,8 @@
#define I40E_DEV_ID_10G_BASE_T_BC 0x15FF
#define I40E_DEV_ID_10G_B 0x104F
#define I40E_DEV_ID_10G_SFP 0x104E
+#define I40E_IS_X710TL_DEVICE(d) \
+ ((d) == I40E_DEV_ID_10G_BASE_T_BC)
#define I40E_DEV_ID_KX_X722 0x37CE
#define I40E_DEV_ID_QSFP_X722 0x37CF
#define I40E_DEV_ID_SFP_X722 0x37D0
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 41e1240acaea..d24d8731bef0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -722,7 +722,14 @@ static void i40e_get_settings_link_up_fec(u8 req_fec_info,
ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
- if (I40E_AQ_SET_FEC_REQUEST_RS & req_fec_info) {
+ if ((I40E_AQ_SET_FEC_REQUEST_RS & req_fec_info) &&
+ (I40E_AQ_SET_FEC_REQUEST_KR & req_fec_info)) {
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_NONE);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_BASER);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
+ } else if (I40E_AQ_SET_FEC_REQUEST_RS & req_fec_info) {
ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
} else if (I40E_AQ_SET_FEC_REQUEST_KR & req_fec_info) {
ethtool_link_ksettings_add_link_mode(ks, advertising,
@@ -730,12 +737,6 @@ static void i40e_get_settings_link_up_fec(u8 req_fec_info,
} else {
ethtool_link_ksettings_add_link_mode(ks, advertising,
FEC_NONE);
- if (I40E_AQ_SET_FEC_AUTO & req_fec_info) {
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- FEC_RS);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- FEC_BASER);
- }
}
}
@@ -1437,6 +1438,7 @@ static int i40e_get_fec_param(struct net_device *netdev,
struct i40e_hw *hw = &pf->hw;
i40e_status status = 0;
int err = 0;
+ u8 fec_cfg;
/* Get the current phy config */
memset(&abilities, 0, sizeof(abilities));
@@ -1448,18 +1450,16 @@ static int i40e_get_fec_param(struct net_device *netdev,
}
fecparam->fec = 0;
- if (abilities.fec_cfg_curr_mod_ext_info & I40E_AQ_SET_FEC_AUTO)
+ fec_cfg = abilities.fec_cfg_curr_mod_ext_info;
+ if (fec_cfg & I40E_AQ_SET_FEC_AUTO)
fecparam->fec |= ETHTOOL_FEC_AUTO;
- if ((abilities.fec_cfg_curr_mod_ext_info &
- I40E_AQ_SET_FEC_REQUEST_RS) ||
- (abilities.fec_cfg_curr_mod_ext_info &
- I40E_AQ_SET_FEC_ABILITY_RS))
+ else if (fec_cfg & (I40E_AQ_SET_FEC_REQUEST_RS |
+ I40E_AQ_SET_FEC_ABILITY_RS))
fecparam->fec |= ETHTOOL_FEC_RS;
- if ((abilities.fec_cfg_curr_mod_ext_info &
- I40E_AQ_SET_FEC_REQUEST_KR) ||
- (abilities.fec_cfg_curr_mod_ext_info & I40E_AQ_SET_FEC_ABILITY_KR))
+ else if (fec_cfg & (I40E_AQ_SET_FEC_REQUEST_KR |
+ I40E_AQ_SET_FEC_ABILITY_KR))
fecparam->fec |= ETHTOOL_FEC_BASER;
- if (abilities.fec_cfg_curr_mod_ext_info == 0)
+ if (fec_cfg == 0)
fecparam->fec |= ETHTOOL_FEC_OFF;
if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
@@ -5112,7 +5112,7 @@ static int i40e_get_module_info(struct net_device *netdev,
case I40E_MODULE_TYPE_SFP:
status = i40e_aq_get_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
- I40E_I2C_EEPROM_DEV_ADDR,
+ I40E_I2C_EEPROM_DEV_ADDR, true,
I40E_MODULE_SFF_8472_COMP,
&sff8472_comp, NULL);
if (status)
@@ -5120,7 +5120,7 @@ static int i40e_get_module_info(struct net_device *netdev,
status = i40e_aq_get_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
- I40E_I2C_EEPROM_DEV_ADDR,
+ I40E_I2C_EEPROM_DEV_ADDR, true,
I40E_MODULE_SFF_8472_SWAP,
&sff8472_swap, NULL);
if (status)
@@ -5152,7 +5152,7 @@ static int i40e_get_module_info(struct net_device *netdev,
/* Read from memory page 0. */
status = i40e_aq_get_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
- 0,
+ 0, true,
I40E_MODULE_REVISION_ADDR,
&sff8636_rev, NULL);
if (status)
@@ -5223,7 +5223,7 @@ static int i40e_get_module_eeprom(struct net_device *netdev,
status = i40e_aq_get_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
- addr, offset, &value, NULL);
+ true, addr, offset, &value, NULL);
if (status)
return -EIO;
data[i] = value;
@@ -5242,6 +5242,7 @@ static int i40e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
}
static const struct ethtool_ops i40e_ethtool_recovery_mode_ops = {
+ .get_drvinfo = i40e_get_drvinfo,
.set_eeprom = i40e_set_eeprom,
.get_eeprom_len = i40e_get_eeprom_len,
.get_eeprom = i40e_get_eeprom,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 6031223eafab..9fac1cea6fa5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1110,6 +1110,25 @@ void i40e_update_stats(struct i40e_vsi *vsi)
}
/**
+ * i40e_count_filters - counts VSI mac filters
+ * @vsi: the VSI to be searched
+ *
+ * Returns count of mac filters
+ **/
+int i40e_count_filters(struct i40e_vsi *vsi)
+{
+ struct i40e_mac_filter *f;
+ struct hlist_node *h;
+ int bkt;
+ int cnt = 0;
+
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
+ ++cnt;
+
+ return cnt;
+}
+
+/**
* i40e_find_filter - Search VSI filter list for specific mac/vlan filter
* @vsi: the VSI to be searched
* @macaddr: the MAC address
@@ -3534,14 +3553,14 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
q_vector->rx.target_itr =
ITR_TO_REG(vsi->rx_rings[i]->itr_setting);
wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
- q_vector->rx.target_itr);
+ q_vector->rx.target_itr >> 1);
q_vector->rx.current_itr = q_vector->rx.target_itr;
q_vector->tx.next_update = jiffies + 1;
q_vector->tx.target_itr =
ITR_TO_REG(vsi->tx_rings[i]->itr_setting);
wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
- q_vector->tx.target_itr);
+ q_vector->tx.target_itr >> 1);
q_vector->tx.current_itr = q_vector->tx.target_itr;
wr32(hw, I40E_PFINT_RATEN(vector - 1),
@@ -3646,11 +3665,11 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
/* set the ITR configuration */
q_vector->rx.next_update = jiffies + 1;
q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[0]->itr_setting);
- wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr);
+ wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr >> 1);
q_vector->rx.current_itr = q_vector->rx.target_itr;
q_vector->tx.next_update = jiffies + 1;
q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[0]->itr_setting);
- wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr);
+ wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr >> 1);
q_vector->tx.current_itr = q_vector->tx.target_itr;
i40e_enable_misc_int_causes(pf);
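
The new ">> 1" in these wr32() calls converts the driver's microsecond-based target_itr into the register's 2-microsecond granularity; writing the raw value, as before, programmed double the intended interval. As a one-line helper (name hypothetical):

/* Hypothetical: ITR registers tick in 2-usec units. */
static inline u32 itr_usec_to_reg(u32 itr_usec)
{
	return itr_usec >> 1;
}
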
@@ -7168,6 +7187,7 @@ static int i40e_setup_macvlans(struct i40e_vsi *vsi, u16 macvlan_cnt, u16 qcnt,
ch->num_queue_pairs = qcnt;
if (!i40e_setup_channel(pf, vsi, ch)) {
ret = -EINVAL;
+ kfree(ch);
goto err_free;
}
ch->parent_vsi = vsi;
@@ -11396,7 +11416,7 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
/* associate no queues to the misc vector */
wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
- wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);
+ wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K >> 1);
i40e_flush(hw);
@@ -12850,6 +12870,7 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_set_features = i40e_set_features,
.ndo_set_vf_mac = i40e_ndo_set_vf_mac,
.ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
+ .ndo_get_vf_stats = i40e_get_vf_stats,
.ndo_set_vf_rate = i40e_ndo_set_vf_bw,
.ndo_get_vf_config = i40e_ndo_get_vf_config,
.ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
@@ -12911,6 +12932,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
NETIF_F_GSO_IPXIP6 |
NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_UDP_L4 |
NETIF_F_SCTP_CRC |
NETIF_F_RXHASH |
NETIF_F_RXCSUM |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index e4d8d20baf3b..7164f4ad8120 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -323,20 +323,24 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
/**
* i40e_read_nvm_module_data - Reads NVM Buffer to specified memory location
- * @hw: pointer to the HW structure
+ * @hw: Pointer to the HW structure
* @module_ptr: Pointer to module in words with respect to NVM beginning
- * @offset: offset in words from module start
+ * @module_offset: Offset in words from module start
+ * @data_offset: Offset in words from reading data area start
* @words_data_size: Words to read from NVM
* @data_ptr: Pointer to memory location where resulting buffer will be stored
**/
-i40e_status i40e_read_nvm_module_data(struct i40e_hw *hw,
- u8 module_ptr, u16 offset,
- u16 words_data_size,
- u16 *data_ptr)
+enum i40e_status_code i40e_read_nvm_module_data(struct i40e_hw *hw,
+ u8 module_ptr,
+ u16 module_offset,
+ u16 data_offset,
+ u16 words_data_size,
+ u16 *data_ptr)
{
i40e_status status;
+ u16 specific_ptr = 0;
u16 ptr_value = 0;
- u32 flat_offset;
+ u32 offset = 0;
if (module_ptr != 0) {
status = i40e_read_nvm_word(hw, module_ptr, &ptr_value);
@@ -352,36 +356,35 @@ i40e_status i40e_read_nvm_module_data(struct i40e_hw *hw,
/* Pointer not initialized */
if (ptr_value == I40E_NVM_INVALID_PTR_VAL ||
- ptr_value == I40E_NVM_INVALID_VAL)
+ ptr_value == I40E_NVM_INVALID_VAL) {
+ i40e_debug(hw, I40E_DEBUG_ALL, "Pointer not initialized.\n");
return I40E_ERR_BAD_PTR;
+ }
/* Check whether the module is in SR mapped area or outside */
if (ptr_value & I40E_PTR_TYPE) {
/* Pointer points outside of the Shared RAM mapped area */
- ptr_value &= ~I40E_PTR_TYPE;
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "Reading nvm data failed. Pointer points outside of the Shared RAM mapped area.\n");
- /* PtrValue in 4kB units, need to convert to words */
- ptr_value /= 2;
- flat_offset = ((u32)ptr_value * 0x1000) + (u32)offset;
- status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
- if (!status) {
- status = i40e_aq_read_nvm(hw, 0, 2 * flat_offset,
- 2 * words_data_size,
- data_ptr, true, NULL);
- i40e_release_nvm(hw);
- if (status) {
- i40e_debug(hw, I40E_DEBUG_ALL,
- "Reading nvm aq failed.Error code: %d.\n",
- status);
- return I40E_ERR_NVM;
- }
- } else {
- return I40E_ERR_NVM;
- }
+ return I40E_ERR_PARAM;
} else {
/* Read from the Shadow RAM */
- status = i40e_read_nvm_buffer(hw, ptr_value + offset,
- &words_data_size, data_ptr);
+
+ status = i40e_read_nvm_word(hw, ptr_value + module_offset,
+ &specific_ptr);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_ALL,
+				   "Reading nvm word failed. Error code: %d.\n",
+ status);
+ return I40E_ERR_NVM;
+ }
+
+ offset = ptr_value + module_offset + specific_ptr +
+ data_offset;
+
+ status = i40e_read_nvm_buffer(hw, offset, &words_data_size,
+ data_ptr);
if (status) {
i40e_debug(hw, I40E_DEBUG_ALL,
"Reading nvm buffer failed.Error code: %d.\n",
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 5250441bf75b..bbb478f09093 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -315,10 +315,12 @@ i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
void i40e_release_nvm(struct i40e_hw *hw);
i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
u16 *data);
-i40e_status i40e_read_nvm_module_data(struct i40e_hw *hw,
- u8 module_ptr, u16 offset,
- u16 words_data_size,
- u16 *data_ptr);
+enum i40e_status_code i40e_read_nvm_module_data(struct i40e_hw *hw,
+ u8 module_ptr,
+ u16 module_offset,
+ u16 data_offset,
+ u16 words_data_size,
+ u16 *data_ptr);
i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
u16 *words, u16 *data);
i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw);
@@ -409,14 +411,24 @@ i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
u32 reg_addr, u32 reg_val,
struct i40e_asq_cmd_details *cmd_details);
void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val);
-i40e_status i40e_aq_set_phy_register(struct i40e_hw *hw,
- u8 phy_select, u8 dev_addr,
- u32 reg_addr, u32 reg_val,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw,
- u8 phy_select, u8 dev_addr,
- u32 reg_addr, u32 *reg_val,
- struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code
+i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr, bool page_change,
+ bool set_mdio, u8 mdio_num,
+ u32 reg_addr, u32 reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code
+i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr, bool page_change,
+ bool set_mdio, u8 mdio_num,
+ u32 reg_addr, u32 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
+
+/* Convenience wrappers for the most common use case */
+#define i40e_aq_set_phy_register(hw, ps, da, pc, ra, rv, cd) \
+ i40e_aq_set_phy_register_ext(hw, ps, da, pc, false, 0, ra, rv, cd)
+#define i40e_aq_get_phy_register(hw, ps, da, pc, ra, rv, cd) \
+ i40e_aq_get_phy_register_ext(hw, ps, da, pc, false, 0, ra, rv, cd)
i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw,
u16 reg, u8 phy_addr, u16 *value);
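
The _ext variants grow set_mdio/mdio_num, while the two wrapper macros keep existing callers on the old argument list plus the new page_change flag, always passing set_mdio=false and mdio_num=0. The call sites updated elsewhere in this patch follow the pattern:

/* e.g. the LED register read in i40e_common.c after this patch: */
status = i40e_aq_get_phy_register(hw,
				  I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
				  I40E_PHY_COM_REG_PAGE, true,
				  I40E_PHY_LED_PROV_REG_1,
				  &reg_val, NULL);
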
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index e3f29dc8b290..b8496037ef7f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2960,10 +2960,16 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
/* remove payload length from inner checksum */
paylen = skb->len - l4_offset;
- csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
- /* compute length of segmentation header */
- *hdr_len = (l4.tcp->doff * 4) + l4_offset;
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+ csum_replace_by_diff(&l4.udp->check, (__force __wsum)htonl(paylen));
+ /* compute length of segmentation header */
+ *hdr_len = sizeof(*l4.udp) + l4_offset;
+ } else {
+ csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
+ /* compute length of segmentation header */
+ *hdr_len = (l4.tcp->doff * 4) + l4_offset;
+ }
/* pull values out of skb_shinfo */
gso_size = skb_shinfo(skb)->gso_size;
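
For UDP segmentation (SKB_GSO_UDP_L4) the L4 header is a fixed 8-byte udphdr, whereas TCP's length is doff * 4; both branches fold the payload length out of the checksum identically. An equivalent restatement of the header-length math, for illustration only:

bool is_udp = skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4;

*hdr_len = l4_offset +
	   (is_udp ? sizeof(struct udphdr) : l4.tcp->doff * 4);
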
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index b43ec94a0f29..6ea2867ff60f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -624,6 +624,7 @@ struct i40e_hw {
#define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3)
#define I40E_HW_FLAG_FW_LLDP_STOPPABLE BIT_ULL(4)
#define I40E_HW_FLAG_FW_LLDP_PERSISTENT BIT_ULL(5)
+#define I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED BIT_ULL(6)
#define I40E_HW_FLAG_DROP_MODE BIT_ULL(7)
u64 flags;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 3d2440838822..6a3f0fc56c3b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -955,7 +955,6 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
vf->lan_vsi_idx = 0;
vf->lan_vsi_id = 0;
- vf->num_mac = 0;
}
/* do the accounting and remove additional ADq VSI's */
@@ -2548,20 +2547,12 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
struct virtchnl_ether_addr_list *al)
{
struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
+ int mac2add_cnt = 0;
int i;
- /* If this VF is not privileged, then we can't add more than a limited
- * number of addresses. Check to make sure that the additions do not
- * push us over the limit.
- */
- if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
- (vf->num_mac + al->num_elements) > I40E_VC_MAX_MAC_ADDR_PER_VF) {
- dev_err(&pf->pdev->dev,
- "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
- return -EPERM;
- }
-
for (i = 0; i < al->num_elements; i++) {
+ struct i40e_mac_filter *f;
u8 *addr = al->list[i].addr;
if (is_broadcast_ether_addr(addr) ||
@@ -2585,8 +2576,24 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
"VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
return -EPERM;
}
+
+		/* count filters that really will be added */
+ f = i40e_find_mac(vsi, addr);
+ if (!f)
+ ++mac2add_cnt;
}
+ /* If this VF is not privileged, then we can't add more than a limited
+ * number of addresses. Check to make sure that the additions do not
+ * push us over the limit.
+ */
+ if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
+ (i40e_count_filters(vsi) + mac2add_cnt) >
+ I40E_VC_MAX_MAC_ADDR_PER_VF) {
+ dev_err(&pf->pdev->dev,
+ "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
+ return -EPERM;
+ }
return 0;
}
@@ -2640,8 +2647,6 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
ret = I40E_ERR_PARAM;
spin_unlock_bh(&vsi->mac_filter_hash_lock);
goto error_param;
- } else {
- vf->num_mac++;
}
}
}
@@ -2689,16 +2694,6 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
ret = I40E_ERR_INVALID_MAC_ADDR;
goto error_param;
}
-
- if (vf->pf_set_mac &&
- ether_addr_equal(al->list[i].addr,
- vf->default_lan_addr.addr)) {
- dev_err(&pf->pdev->dev,
- "MAC addr %pM has been set by PF, cannot delete it for VF %d, reset VF to change MAC addr\n",
- vf->default_lan_addr.addr, vf->vf_id);
- ret = I40E_ERR_PARAM;
- goto error_param;
- }
}
vsi = pf->vsi[vf->lan_vsi_idx];
@@ -2709,8 +2704,6 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
ret = I40E_ERR_INVALID_MAC_ADDR;
spin_unlock_bh(&vsi->mac_filter_hash_lock);
goto error_param;
- } else {
- vf->num_mac--;
}
spin_unlock_bh(&vsi->mac_filter_hash_lock);
@@ -4531,3 +4524,51 @@ out:
clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
return ret;
}
+
+/**
+ * i40e_get_vf_stats - populate some stats for the VF
+ * @netdev: the netdev of the PF
+ * @vf_id: the host OS identifier (0-127)
+ * @vf_stats: pointer to the OS memory to be initialized
+ */
+int i40e_get_vf_stats(struct net_device *netdev, int vf_id,
+ struct ifla_vf_stats *vf_stats)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_eth_stats *stats;
+ struct i40e_vsi *vsi;
+ struct i40e_vf *vf;
+
+ /* validate the request */
+ if (i40e_validate_vf(pf, vf_id))
+ return -EINVAL;
+
+ vf = &pf->vf[vf_id];
+ if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
+ dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
+ return -EBUSY;
+ }
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!vsi)
+ return -EINVAL;
+
+ i40e_update_eth_stats(vsi);
+ stats = &vsi->eth_stats;
+
+ memset(vf_stats, 0, sizeof(*vf_stats));
+
+ vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
+ stats->rx_multicast;
+ vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
+ stats->tx_multicast;
+ vf_stats->rx_bytes = stats->rx_bytes;
+ vf_stats->tx_bytes = stats->tx_bytes;
+ vf_stats->broadcast = stats->rx_broadcast;
+ vf_stats->multicast = stats->rx_multicast;
+ vf_stats->rx_dropped = stats->rx_discards;
+ vf_stats->tx_dropped = stats->tx_discards;
+
+ return 0;
+}
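
With vf->num_mac removed, the trust limit is now enforced by counting live filters in the VSI hash at request time (addresses already present are skipped via i40e_find_mac()), so the count can no longer drift if an add or delete path forgets to adjust a counter. The walk in i40e_count_filters() takes no lock itself, so callers are expected to hold the filter hash lock; a sketch of that inferred pattern (lock placement is not shown in these hunks):

/* Inferred caller pattern: count under the filter-hash lock. */
spin_lock_bh(&vsi->mac_filter_hash_lock);
cnt = i40e_count_filters(vsi);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
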
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 7164b9bb294f..631248c0981a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -101,7 +101,6 @@ struct i40e_vf {
bool link_up; /* only valid if VF link is forced */
bool queues_enabled; /* true if the VF queues are enabled */
bool spoofchk;
- u16 num_mac;
u16 num_vlan;
/* ADq related variables */
@@ -139,5 +138,7 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable);
void i40e_vc_notify_link_state(struct i40e_pf *pf);
void i40e_vc_notify_reset(struct i40e_pf *pf);
+int i40e_get_vf_stats(struct net_device *netdev, int vf_id,
+ struct ifla_vf_stats *vf_stats);
#endif /* _I40E_VIRTCHNL_PF_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index b1c3227ae4ab..a05dfecdd9b4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -157,11 +157,6 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
err = i40e_queue_pair_enable(vsi, qid);
if (err)
return err;
-
- /* Kick start the NAPI context so that receiving will start */
- err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX);
- if (err)
- return err;
}
return 0;
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 9edde960b4f2..df5a9699276a 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -13,9 +13,12 @@ ice-y := ice_main.o \
ice_nvm.o \
ice_switch.o \
ice_sched.o \
+ ice_base.o \
ice_lib.o \
+ ice_txrx_lib.o \
ice_txrx.o \
ice_flex_pipe.o \
ice_ethtool.o
ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o
ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_lib.o
+ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 45e100666049..f552a67467aa 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -29,10 +29,13 @@
#include <linux/ip.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
+#include <linux/pkt_sched.h>
#include <linux/if_bridge.h>
#include <linux/ctype.h>
+#include <linux/bpf.h>
#include <linux/avf/virtchnl.h>
#include <net/ipv6.h>
+#include <net/xdp_sock.h>
#include "ice_devids.h"
#include "ice_type.h"
#include "ice_txrx.h"
@@ -42,6 +45,7 @@
#include "ice_sched.h"
#include "ice_virtchnl_pf.h"
#include "ice_sriov.h"
+#include "ice_xsk.h"
extern const char ice_drv_ver[];
#define ICE_BAR0 0
@@ -78,8 +82,7 @@ extern const char ice_drv_ver[];
#define ICE_DFLT_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
-#define ICE_MAX_MTU (ICE_AQ_SET_MAC_FRAME_SIZE_MAX - \
- (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2)))
+#define ICE_MAX_MTU (ICE_AQ_SET_MAC_FRAME_SIZE_MAX - ICE_ETH_PKT_HDR_PAD)
#define ICE_UP_TABLE_TRANSLATE(val, i) \
(((val) << ICE_AQ_VSI_UP_TABLE_UP##i##_S) & \
@@ -127,6 +130,14 @@ extern const char ice_drv_ver[];
ICE_PROMISC_VLAN_TX | \
ICE_PROMISC_VLAN_RX)
+struct ice_txq_meta {
+ u32 q_teid; /* Tx-scheduler element identifier */
+ u16 q_id; /* Entry in VSI's txq_map bitmap */
+ u16 q_handle; /* Relative index of Tx queue within TC */
+ u16 vsi_idx; /* VSI index that Tx queue belongs to */
+ u8 tc; /* TC number that Tx queue belongs to */
+};
+
struct ice_tc_info {
u16 qoffset;
u16 qcount_tx;
@@ -274,6 +285,13 @@ struct ice_vsi {
u16 num_rx_desc;
u16 num_tx_desc;
struct ice_tc_cfg tc_cfg;
+ struct bpf_prog *xdp_prog;
+ struct ice_ring **xdp_rings; /* XDP ring array */
+ u16 num_xdp_txq; /* Used XDP queues */
+ u8 xdp_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
+ struct xdp_umem **xsk_umems;
+ u16 num_xsk_umems_used;
+ u16 num_xsk_umems;
} ____cacheline_internodealigned_in_smp;
/* struct that defines an interrupt vector */
@@ -313,6 +331,7 @@ enum ice_pf_flags {
ICE_FLAG_NO_MEDIA,
ICE_FLAG_FW_LLDP_AGENT,
ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */
+ ICE_FLAG_LEGACY_RX,
ICE_PF_FLAGS_NBITS /* must be last */
};
@@ -417,6 +436,37 @@ static inline struct ice_pf *ice_netdev_to_pf(struct net_device *netdev)
return np->vsi->back;
}
+static inline bool ice_is_xdp_ena_vsi(struct ice_vsi *vsi)
+{
+ return !!vsi->xdp_prog;
+}
+
+static inline void ice_set_ring_xdp(struct ice_ring *ring)
+{
+ ring->flags |= ICE_TX_FLAGS_RING_XDP;
+}
+
+/**
+ * ice_xsk_umem - get XDP UMEM bound to a ring
+ * @ring: ring to use
+ *
+ * Returns a pointer to the xdp_umem structure if a UMEM is present,
+ * NULL otherwise.
+ */
+static inline struct xdp_umem *ice_xsk_umem(struct ice_ring *ring)
+{
+ struct xdp_umem **umems = ring->vsi->xsk_umems;
+ int qid = ring->q_index;
+
+ if (ice_ring_is_xdp(ring))
+ qid -= ring->vsi->num_xdp_txq;
+
+ if (!umems || !umems[qid] || !ice_is_xdp_ena_vsi(ring->vsi))
+ return NULL;
+
+ return umems[qid];
+}
+
/**
* ice_get_main_vsi - Get the PF VSI
* @pf: PF instance
@@ -443,6 +493,11 @@ int ice_up(struct ice_vsi *vsi);
int ice_down(struct ice_vsi *vsi);
int ice_vsi_cfg(struct ice_vsi *vsi);
struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi);
+int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog);
+int ice_destroy_xdp_rings(struct ice_vsi *vsi);
+int
+ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags);
int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
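The ice_xsk_umem() helper added above serves both a regular Rx ring and the XDP Tx ring stacked after it, so an XDP ring's q_index is first folded back into UMEM-array space. A minimal user-space sketch of that mapping, with all names hypothetical rather than driver API:

    #include <stddef.h>

    /* XDP Tx rings occupy VSI indices [num_queues, 2 * num_queues) while
     * the UMEM array has one slot per queue pair, hence the subtraction. */
    static void *umem_for_ring(void **umems, int q_index, int num_xdp_txq,
                               int is_xdp_ring)
    {
            int qid = is_xdp_ring ? q_index - num_xdp_txq : q_index;

            return umems ? umems[qid] : NULL;
    }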
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
new file mode 100644
index 000000000000..69d2da14fe5c
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -0,0 +1,857 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019, Intel Corporation. */
+
+#include "ice_base.h"
+#include "ice_dcb_lib.h"
+
+/**
+ * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
+ * @qs_cfg: gathered variables needed for PF->VSI queues assignment
+ *
+ * Return 0 on success and -ENOMEM in case of no space left in the PF queue bitmap
+ */
+static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
+{
+ int offset, i;
+
+ mutex_lock(qs_cfg->qs_mutex);
+ offset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size,
+ 0, qs_cfg->q_count, 0);
+ if (offset >= qs_cfg->pf_map_size) {
+ mutex_unlock(qs_cfg->qs_mutex);
+ return -ENOMEM;
+ }
+
+ bitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count);
+ for (i = 0; i < qs_cfg->q_count; i++)
+ qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = i + offset;
+ mutex_unlock(qs_cfg->qs_mutex);
+
+ return 0;
+}
+
+/**
+ * __ice_vsi_get_qs_sc - Assign scattered queues from PF to VSI
+ * @qs_cfg: gathered variables needed for PF->VSI queues assignment
+ *
+ * Return 0 on success and -ENOMEM in case of no space left in the PF queue bitmap
+ */
+static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg)
+{
+ int i, index = 0;
+
+ mutex_lock(qs_cfg->qs_mutex);
+ for (i = 0; i < qs_cfg->q_count; i++) {
+ index = find_next_zero_bit(qs_cfg->pf_map,
+ qs_cfg->pf_map_size, index);
+ if (index >= qs_cfg->pf_map_size)
+ goto err_scatter;
+ set_bit(index, qs_cfg->pf_map);
+ qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = index;
+ }
+ mutex_unlock(qs_cfg->qs_mutex);
+
+ return 0;
+err_scatter:
+ for (index = 0; index < i; index++) {
+ clear_bit(qs_cfg->vsi_map[index], qs_cfg->pf_map);
+ qs_cfg->vsi_map[index + qs_cfg->vsi_map_offset] = 0;
+ }
+ mutex_unlock(qs_cfg->qs_mutex);
+
+ return -ENOMEM;
+}
+
+/**
+ * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
+ * @pf: the PF being configured
+ * @pf_q: the PF queue
+ * @ena: enable or disable state of the queue
+ *
+ * This routine will wait for the given Rx queue of the PF to reach the
+ * enabled or disabled state.
+ * Returns -ETIMEDOUT if the queue fails to reach the requested state after
+ * multiple retries; otherwise returns 0 on success.
+ */
+static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
+{
+ int i;
+
+ for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) {
+ if (ena == !!(rd32(&pf->hw, QRX_CTRL(pf_q)) &
+ QRX_CTRL_QENA_STAT_M))
+ return 0;
+
+ usleep_range(20, 40);
+ }
+
+ return -ETIMEDOUT;
+}
+
+/**
+ * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
+ * @vsi: the VSI being configured
+ * @v_idx: index of the vector in the VSI struct
+ *
+ * We allocate one q_vector. If allocation fails we return -ENOMEM.
+ */
+static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_q_vector *q_vector;
+
+ /* allocate q_vector */
+ q_vector = devm_kzalloc(&pf->pdev->dev, sizeof(*q_vector), GFP_KERNEL);
+ if (!q_vector)
+ return -ENOMEM;
+
+ q_vector->vsi = vsi;
+ q_vector->v_idx = v_idx;
+ if (vsi->type == ICE_VSI_VF)
+ goto out;
+ /* only set affinity_mask if the CPU is online */
+ if (cpu_online(v_idx))
+ cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
+
+ /* This will not be called in the driver load path because the netdev
+ * will not be created yet. All other cases will register the NAPI
+ * handler here (e.g. resume, reset/rebuild, etc.)
+ */
+ if (vsi->netdev)
+ netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll,
+ NAPI_POLL_WEIGHT);
+
+out:
+ /* tie q_vector and VSI together */
+ vsi->q_vectors[v_idx] = q_vector;
+
+ return 0;
+}
+
+/**
+ * ice_free_q_vector - Free memory allocated for a specific interrupt vector
+ * @vsi: VSI having the memory freed
+ * @v_idx: index of the vector to be freed
+ */
+static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
+{
+ struct ice_q_vector *q_vector;
+ struct ice_pf *pf = vsi->back;
+ struct ice_ring *ring;
+
+ if (!vsi->q_vectors[v_idx]) {
+ dev_dbg(&pf->pdev->dev, "Queue vector at index %d not found\n",
+ v_idx);
+ return;
+ }
+ q_vector = vsi->q_vectors[v_idx];
+
+ ice_for_each_ring(ring, q_vector->tx)
+ ring->q_vector = NULL;
+ ice_for_each_ring(ring, q_vector->rx)
+ ring->q_vector = NULL;
+
+ /* only a VSI with an associated netdev is set up with NAPI */
+ if (vsi->netdev)
+ netif_napi_del(&q_vector->napi);
+
+ devm_kfree(&pf->pdev->dev, q_vector);
+ vsi->q_vectors[v_idx] = NULL;
+}
+
+/**
+ * ice_cfg_itr_gran - set the ITR granularity to 2 usecs if not already set
+ * @hw: board specific structure
+ */
+static void ice_cfg_itr_gran(struct ice_hw *hw)
+{
+ u32 regval = rd32(hw, GLINT_CTL);
+
+ /* no need to update global register if ITR gran is already set */
+ if (!(regval & GLINT_CTL_DIS_AUTOMASK_M) &&
+ (((regval & GLINT_CTL_ITR_GRAN_200_M) >>
+ GLINT_CTL_ITR_GRAN_200_S) == ICE_ITR_GRAN_US) &&
+ (((regval & GLINT_CTL_ITR_GRAN_100_M) >>
+ GLINT_CTL_ITR_GRAN_100_S) == ICE_ITR_GRAN_US) &&
+ (((regval & GLINT_CTL_ITR_GRAN_50_M) >>
+ GLINT_CTL_ITR_GRAN_50_S) == ICE_ITR_GRAN_US) &&
+ (((regval & GLINT_CTL_ITR_GRAN_25_M) >>
+ GLINT_CTL_ITR_GRAN_25_S) == ICE_ITR_GRAN_US))
+ return;
+
+ regval = ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_200_S) &
+ GLINT_CTL_ITR_GRAN_200_M) |
+ ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_100_S) &
+ GLINT_CTL_ITR_GRAN_100_M) |
+ ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_50_S) &
+ GLINT_CTL_ITR_GRAN_50_M) |
+ ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_25_S) &
+ GLINT_CTL_ITR_GRAN_25_M);
+ wr32(hw, GLINT_CTL, regval);
+}
+
+/**
+ * ice_calc_q_handle - calculate the queue handle
+ * @vsi: VSI that ring belongs to
+ * @ring: ring to get the absolute queue index for
+ * @tc: traffic class number
+ */
+static u16 ice_calc_q_handle(struct ice_vsi *vsi, struct ice_ring *ring, u8 tc)
+{
+ WARN_ONCE(ice_ring_is_xdp(ring) && tc,
+ "XDP ring can't belong to TC other than 0");
+
+ /* The idea behind the calculation is that we subtract the queue
+ * offset of the TC that the ring belongs to from the ring's absolute
+ * queue index, and as a result we get the queue's index within that TC.
+ */
+ return ring->q_index - vsi->tc_cfg.tc_info[tc].qoffset;
+}
+
+/**
+ * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
+ * @ring: The Tx ring to configure
+ * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized
+ * @pf_q: queue index in the PF space
+ *
+ * Configure the Tx descriptor ring in TLAN context.
+ */
+static void
+ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
+{
+ struct ice_vsi *vsi = ring->vsi;
+ struct ice_hw *hw = &vsi->back->hw;
+
+ tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;
+
+ tlan_ctx->port_num = vsi->port_info->lport;
+
+ /* Transmit Queue Length */
+ tlan_ctx->qlen = ring->count;
+
+ ice_set_cgd_num(tlan_ctx, ring);
+
+ /* PF number */
+ tlan_ctx->pf_num = hw->pf_id;
+
+ /* queue belongs to a specific VSI type
+ * VF / VM index should be programmed per vmvf_type setting:
+ * for vmvf_type = VF, it is VF number between 0-256
+ * for vmvf_type = VM, it is VM number between 0-767
+ * for PF or EMP this field should be set to zero
+ */
+ switch (vsi->type) {
+ case ICE_VSI_LB:
+ /* fall through */
+ case ICE_VSI_PF:
+ tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
+ break;
+ case ICE_VSI_VF:
+ /* Firmware expects vmvf_num to be absolute VF ID */
+ tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id;
+ tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
+ break;
+ default:
+ return;
+ }
+
+ /* make sure the context is associated with the right VSI */
+ tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);
+
+ tlan_ctx->tso_ena = ICE_TX_LEGACY;
+ tlan_ctx->tso_qnum = pf_q;
+
+ /* Legacy or Advanced Host Interface:
+ * 0: Advanced Host Interface
+ * 1: Legacy Host Interface
+ */
+ tlan_ctx->legacy_int = ICE_TX_LEGACY;
+}
+
+/**
+ * ice_setup_rx_ctx - Configure a receive ring context
+ * @ring: The Rx ring to configure
+ *
+ * Configure the Rx descriptor ring in RLAN context.
+ */
+int ice_setup_rx_ctx(struct ice_ring *ring)
+{
+ int chain_len = ICE_MAX_CHAINED_RX_BUFS;
+ struct ice_vsi *vsi = ring->vsi;
+ u32 rxdid = ICE_RXDID_FLEX_NIC;
+ struct ice_rlan_ctx rlan_ctx;
+ struct ice_hw *hw;
+ u32 regval;
+ u16 pf_q;
+ int err;
+
+ hw = &vsi->back->hw;
+
+ /* what is the Rx queue number in the global space of 2K Rx queues */
+ pf_q = vsi->rxq_map[ring->q_index];
+
+ /* clear the context structure first */
+ memset(&rlan_ctx, 0, sizeof(rlan_ctx));
+
+ ring->rx_buf_len = vsi->rx_buf_len;
+
+ if (ring->vsi->type == ICE_VSI_PF) {
+ if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
+ xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+ ring->q_index);
+
+ ring->xsk_umem = ice_xsk_umem(ring);
+ if (ring->xsk_umem) {
+ xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
+
+ ring->rx_buf_len = ring->xsk_umem->chunk_size_nohr -
+ XDP_PACKET_HEADROOM;
+ /* For AF_XDP ZC, we disallow packets from spanning
+ * multiple buffers, which lets us skip that
+ * handling in the fast path.
+ */
+ chain_len = 1;
+ ring->zca.free = ice_zca_free;
+ err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_ZERO_COPY,
+ &ring->zca);
+ if (err)
+ return err;
+
+ dev_info(&vsi->back->pdev->dev, "Registered XDP mem model MEM_TYPE_ZERO_COPY on Rx ring %d\n",
+ ring->q_index);
+ } else {
+ if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
+ xdp_rxq_info_reg(&ring->xdp_rxq,
+ ring->netdev,
+ ring->q_index);
+
+ err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED,
+ NULL);
+ if (err)
+ return err;
+ }
+ }
+ /* Receive Queue Base Address.
+ * Indicates the starting address of the descriptor queue defined in
+ * 128 Byte units.
+ */
+ rlan_ctx.base = ring->dma >> 7;
+
+ rlan_ctx.qlen = ring->count;
+
+ /* Receive Packet Data Buffer Size.
+ * The Packet Data Buffer Size is defined in 128 byte units.
+ */
+ rlan_ctx.dbuf = ring->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
+
+ /* use 32 byte descriptors */
+ rlan_ctx.dsize = 1;
+
+ /* Strip the Ethernet CRC bytes before the packet is posted to host
+ * memory.
+ */
+ rlan_ctx.crcstrip = 1;
+
+ /* L2TSEL flag defines the reported L2 Tags in the receive descriptor */
+ rlan_ctx.l2tsel = 1;
+
+ rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
+ rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
+ rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;
+
+ /* This controls whether VLAN is stripped from inner headers.
+ * The VLAN in the inner L2 header is stripped to the receive
+ * descriptor if enabled by this flag.
+ */
+ rlan_ctx.showiv = 0;
+
+ /* Max packet size for this queue - must not be set to a larger value
+ * than 5 x DBUF
+ */
+ rlan_ctx.rxmax = min_t(u16, vsi->max_frame,
+ chain_len * ring->rx_buf_len);
+
+ /* Rx queue threshold in units of 64 */
+ rlan_ctx.lrxqthresh = 1;
+
+ /* Enable Flexible Descriptors in the queue context which
+ * allows this driver to select a specific receive descriptor format
+ */
+ if (vsi->type != ICE_VSI_VF) {
+ regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
+ regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
+ QRXFLXP_CNTXT_RXDID_IDX_M;
+
+ /* increasing context priority to pick up profile ID;
+ * default is 0x01; setting to 0x03 to ensure profile
+ * is programmed if prev context is of same priority
+ */
+ regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
+ QRXFLXP_CNTXT_RXDID_PRIO_M;
+
+ wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
+ }
+
+ /* Absolute queue number out of 2K needs to be passed */
+ err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
+ if (err) {
+ dev_err(&vsi->back->pdev->dev,
+ "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
+ pf_q, err);
+ return -EIO;
+ }
+
+ if (vsi->type == ICE_VSI_VF)
+ return 0;
+
+ /* configure Rx buffer alignment */
+ if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
+ ice_clear_ring_build_skb_ena(ring);
+ else
+ ice_set_ring_build_skb_ena(ring);
+
+ /* init queue specific tail register */
+ ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
+ writel(0, ring->tail);
+
+ err = ring->xsk_umem ?
+ ice_alloc_rx_bufs_slow_zc(ring, ICE_DESC_UNUSED(ring)) :
+ ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring));
+ if (err)
+ dev_info(&vsi->back->pdev->dev,
+ "Failed allocate some buffers on %sRx ring %d (pf_q %d)\n",
+ ring->xsk_umem ? "UMEM enabled " : "",
+ ring->q_index, pf_q);
+
+ return 0;
+}
+
+/**
+ * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
+ * @qs_cfg: gathered variables needed for PF->VSI queues assignment
+ *
+ * This function first tries to find contiguous space. If it is not successful,
+ * it falls back to the scatter approach.
+ *
+ * Return 0 on success and -ENOMEM in case of no space left in the PF queue bitmap
+ */
+int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg)
+{
+ int ret = 0;
+
+ ret = __ice_vsi_get_qs_contig(qs_cfg);
+ if (ret) {
+ /* contig failed, so try with scatter approach */
+ qs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER;
+ qs_cfg->q_count = min_t(u16, qs_cfg->q_count,
+ qs_cfg->scatter_count);
+ ret = __ice_vsi_get_qs_sc(qs_cfg);
+ }
+ return ret;
+}
+
+/**
+ * ice_vsi_ctrl_rx_ring - Start or stop a VSI's Rx ring
+ * @vsi: the VSI being configured
+ * @ena: start or stop the Rx rings
+ * @rxq_idx: Rx queue index
+ */
+int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
+{
+ int pf_q = vsi->rxq_map[rxq_idx];
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ int ret = 0;
+ u32 rx_reg;
+
+ rx_reg = rd32(hw, QRX_CTRL(pf_q));
+
+ /* Skip if the queue is already in the requested state */
+ if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
+ return 0;
+
+ /* turn on/off the queue */
+ if (ena)
+ rx_reg |= QRX_CTRL_QENA_REQ_M;
+ else
+ rx_reg &= ~QRX_CTRL_QENA_REQ_M;
+ wr32(hw, QRX_CTRL(pf_q), rx_reg);
+
+ /* wait for the change to finish */
+ ret = ice_pf_rxq_wait(pf, pf_q, ena);
+ if (ret)
+ dev_err(&pf->pdev->dev,
+ "VSI idx %d Rx ring %d %sable timeout\n",
+ vsi->idx, pf_q, (ena ? "en" : "dis"));
+
+ return ret;
+}
+
+/**
+ * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @vsi: the VSI being configured
+ *
+ * We allocate one q_vector per queue interrupt. If allocation fails we
+ * return -ENOMEM.
+ */
+int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ int v_idx = 0, num_q_vectors;
+ int err;
+
+ if (vsi->q_vectors[0]) {
+ dev_dbg(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
+ vsi->vsi_num);
+ return -EEXIST;
+ }
+
+ num_q_vectors = vsi->num_q_vectors;
+
+ for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
+ err = ice_vsi_alloc_q_vector(vsi, v_idx);
+ if (err)
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ while (v_idx--)
+ ice_free_q_vector(vsi, v_idx);
+
+ dev_err(&pf->pdev->dev,
+ "Failed to allocate %d q_vector for VSI %d, ret=%d\n",
+ vsi->num_q_vectors, vsi->vsi_num, err);
+ vsi->num_q_vectors = 0;
+ return err;
+}
+
+/**
+ * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors
+ * @vsi: the VSI being configured
+ *
+ * This function maps descriptor rings to the queue-specific vectors allotted
+ * through the MSI-X enabling code. On a constrained vector budget, we map Tx
+ * and Rx rings to the vector as "efficiently" as possible.
+ */
+void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
+{
+ int q_vectors = vsi->num_q_vectors;
+ int tx_rings_rem, rx_rings_rem;
+ int v_id;
+
+ /* initialize the remaining ring counts from the VSI's queue counts */
+ tx_rings_rem = vsi->num_txq;
+ rx_rings_rem = vsi->num_rxq;
+
+ for (v_id = 0; v_id < q_vectors; v_id++) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[v_id];
+ int tx_rings_per_v, rx_rings_per_v, q_id, q_base;
+
+ /* Tx rings mapping to vector */
+ tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id);
+ q_vector->num_ring_tx = tx_rings_per_v;
+ q_vector->tx.ring = NULL;
+ q_vector->tx.itr_idx = ICE_TX_ITR;
+ q_base = vsi->num_txq - tx_rings_rem;
+
+ for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
+ struct ice_ring *tx_ring = vsi->tx_rings[q_id];
+
+ tx_ring->q_vector = q_vector;
+ tx_ring->next = q_vector->tx.ring;
+ q_vector->tx.ring = tx_ring;
+ }
+ tx_rings_rem -= tx_rings_per_v;
+
+ /* Rx rings mapping to vector */
+ rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id);
+ q_vector->num_ring_rx = rx_rings_per_v;
+ q_vector->rx.ring = NULL;
+ q_vector->rx.itr_idx = ICE_RX_ITR;
+ q_base = vsi->num_rxq - rx_rings_rem;
+
+ for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
+ struct ice_ring *rx_ring = vsi->rx_rings[q_id];
+
+ rx_ring->q_vector = q_vector;
+ rx_ring->next = q_vector->rx.ring;
+ q_vector->rx.ring = rx_ring;
+ }
+ rx_rings_rem -= rx_rings_per_v;
+ }
+}
+
+/**
+ * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors
+ * @vsi: the VSI having memory freed
+ */
+void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
+{
+ int v_idx;
+
+ ice_for_each_q_vector(vsi, v_idx)
+ ice_free_q_vector(vsi, v_idx);
+}
+
+/**
+ * ice_vsi_cfg_txq - Configure single Tx queue
+ * @vsi: the VSI that queue belongs to
+ * @ring: Tx ring to be configured
+ * @qg_buf: queue group buffer
+ */
+int
+ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
+ struct ice_aqc_add_tx_qgrp *qg_buf)
+{
+ struct ice_tlan_ctx tlan_ctx = { 0 };
+ struct ice_aqc_add_txqs_perq *txq;
+ struct ice_pf *pf = vsi->back;
+ u8 buf_len = sizeof(*qg_buf);
+ enum ice_status status;
+ u16 pf_q;
+ u8 tc;
+
+ pf_q = ring->reg_idx;
+ ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
+ /* copy context contents into the qg_buf */
+ qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
+ ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
+ ice_tlan_ctx_info);
+
+ /* init queue specific tail reg. It is referred to as the
+ * transmit comm scheduler queue doorbell.
+ */
+ ring->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
+
+ if (IS_ENABLED(CONFIG_DCB))
+ tc = ring->dcb_tc;
+ else
+ tc = 0;
+
+ /* Store the Tx queue's unique, per-TC software queue handle
+ * in the VSI Tx ring
+ */
+ ring->q_handle = ice_calc_q_handle(vsi, ring, tc);
+
+ status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle,
+ 1, qg_buf, buf_len, NULL);
+ if (status) {
+ dev_err(&pf->pdev->dev,
+ "Failed to set LAN Tx queue context, error: %d\n",
+ status);
+ return -ENODEV;
+ }
+
+ /* Add Tx Queue TEID into the VSI Tx ring from the
+ * response. This will complete configuring and
+ * enabling the queue.
+ */
+ txq = &qg_buf->txqs[0];
+ if (pf_q == le16_to_cpu(txq->txq_id))
+ ring->txq_teid = le32_to_cpu(txq->q_teid);
+
+ return 0;
+}
+
+/**
+ * ice_cfg_itr - configure the initial interrupt throttle values
+ * @hw: pointer to the HW structure
+ * @q_vector: interrupt vector that's being configured
+ *
+ * Configure interrupt throttling values for the ring containers that are
+ * associated with the interrupt vector passed in.
+ */
+void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector)
+{
+ ice_cfg_itr_gran(hw);
+
+ if (q_vector->num_ring_rx) {
+ struct ice_ring_container *rc = &q_vector->rx;
+
+ /* if this value is set then don't overwrite with default */
+ if (!rc->itr_setting)
+ rc->itr_setting = ICE_DFLT_RX_ITR;
+
+ rc->target_itr = ITR_TO_REG(rc->itr_setting);
+ rc->next_update = jiffies + 1;
+ rc->current_itr = rc->target_itr;
+ wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
+ ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
+ }
+
+ if (q_vector->num_ring_tx) {
+ struct ice_ring_container *rc = &q_vector->tx;
+
+ /* if this value is set then don't overwrite with default */
+ if (!rc->itr_setting)
+ rc->itr_setting = ICE_DFLT_TX_ITR;
+
+ rc->target_itr = ITR_TO_REG(rc->itr_setting);
+ rc->next_update = jiffies + 1;
+ rc->current_itr = rc->target_itr;
+ wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
+ ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
+ }
+}
+
+/**
+ * ice_cfg_txq_interrupt - configure interrupt on Tx queue
+ * @vsi: the VSI being configured
+ * @txq: Tx queue being mapped to MSI-X vector
+ * @msix_idx: MSI-X vector index within the function
+ * @itr_idx: ITR index of the interrupt cause
+ *
+ * Configure interrupt on Tx queue by associating Tx queue to MSI-X vector
+ * within the function space.
+ */
+void
+ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ u32 val;
+
+ itr_idx = (itr_idx << QINT_TQCTL_ITR_INDX_S) & QINT_TQCTL_ITR_INDX_M;
+
+ val = QINT_TQCTL_CAUSE_ENA_M | itr_idx |
+ ((msix_idx << QINT_TQCTL_MSIX_INDX_S) & QINT_TQCTL_MSIX_INDX_M);
+
+ wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
+ if (ice_is_xdp_ena_vsi(vsi)) {
+ u32 xdp_txq = txq + vsi->num_xdp_txq;
+
+ wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]),
+ val);
+ }
+ ice_flush(hw);
+}
+
+/**
+ * ice_cfg_rxq_interrupt - configure interrupt on Rx queue
+ * @vsi: the VSI being configured
+ * @rxq: Rx queue being mapped to MSI-X vector
+ * @msix_idx: MSI-X vector index within the function
+ * @itr_idx: ITR index of the interrupt cause
+ *
+ * Configure interrupt on Rx queue by associating Rx queue to MSI-X vector
+ * within the function space.
+ */
+void
+ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ u32 val;
+
+ itr_idx = (itr_idx << QINT_RQCTL_ITR_INDX_S) & QINT_RQCTL_ITR_INDX_M;
+
+ val = QINT_RQCTL_CAUSE_ENA_M | itr_idx |
+ ((msix_idx << QINT_RQCTL_MSIX_INDX_S) & QINT_RQCTL_MSIX_INDX_M);
+
+ wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);
+
+ ice_flush(hw);
+}
+
+/**
+ * ice_trigger_sw_intr - trigger a software interrupt
+ * @hw: pointer to the HW structure
+ * @q_vector: interrupt vector to trigger the software interrupt for
+ */
+void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector)
+{
+ wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx),
+ (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) |
+ GLINT_DYN_CTL_SWINT_TRIG_M |
+ GLINT_DYN_CTL_INTENA_M);
+}
+
+/**
+ * ice_vsi_stop_tx_ring - Disable single Tx ring
+ * @vsi: the VSI being configured
+ * @rst_src: reset source
+ * @rel_vmvf_num: Relative ID of VF/VM
+ * @ring: Tx ring to be stopped
+ * @txq_meta: Meta data of Tx ring to be stopped
+ */
+int
+ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
+ u16 rel_vmvf_num, struct ice_ring *ring,
+ struct ice_txq_meta *txq_meta)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_q_vector *q_vector;
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status;
+ u32 val;
+
+ /* clear cause_ena bit for disabled queues */
+ val = rd32(hw, QINT_TQCTL(ring->reg_idx));
+ val &= ~QINT_TQCTL_CAUSE_ENA_M;
+ wr32(hw, QINT_TQCTL(ring->reg_idx), val);
+
+ /* software is expected to wait for 100 ns */
+ ndelay(100);
+
+ /* trigger a software interrupt for the vector
+ * associated to the queue to schedule NAPI handler
+ */
+ q_vector = ring->q_vector;
+ if (q_vector)
+ ice_trigger_sw_intr(hw, q_vector);
+
+ status = ice_dis_vsi_txq(vsi->port_info, txq_meta->vsi_idx,
+ txq_meta->tc, 1, &txq_meta->q_handle,
+ &txq_meta->q_id, &txq_meta->q_teid, rst_src,
+ rel_vmvf_num, NULL);
+
+ /* if the disable queue command was exercised during an
+ * active reset flow, ICE_ERR_RESET_ONGOING is returned.
+ * This is not an error as the reset operation disables
+ * queues at the hardware level anyway.
+ */
+ if (status == ICE_ERR_RESET_ONGOING) {
+ dev_dbg(&vsi->back->pdev->dev,
+ "Reset in progress. LAN Tx queues already disabled\n");
+ } else if (status == ICE_ERR_DOES_NOT_EXIST) {
+ dev_dbg(&vsi->back->pdev->dev,
+ "LAN Tx queues do not exist, nothing to disable\n");
+ } else if (status) {
+ dev_err(&vsi->back->pdev->dev,
+ "Failed to disable LAN Tx queues, error: %d\n", status);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_fill_txq_meta - Prepare the Tx queue's meta data
+ * @vsi: VSI that ring belongs to
+ * @ring: ring that txq_meta will be based on
+ * @txq_meta: a helper struct that wraps Tx queue's information
+ *
+ * Set up a helper struct that will contain all the fields
+ * needed for stopping the Tx queue
+ */
+void
+ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
+ struct ice_txq_meta *txq_meta)
+{
+ u8 tc;
+
+ if (IS_ENABLED(CONFIG_DCB))
+ tc = ring->dcb_tc;
+ else
+ tc = 0;
+
+ txq_meta->q_id = ring->reg_idx;
+ txq_meta->q_teid = ring->txq_teid;
+ txq_meta->q_handle = ring->q_handle;
+ txq_meta->vsi_idx = vsi->idx;
+ txq_meta->tc = tc;
+}
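As a stand-alone illustration of the contig-then-scatter strategy implemented by __ice_vsi_get_qs_contig() and __ice_vsi_get_qs_sc() above, here is a hedged user-space sketch that swaps the kernel bitmap API and mutex for a plain byte map; every name is illustrative:

    /* Try to grab q_count adjacent free slots; on success mark them used,
     * record them in vsi_map and return 0. */
    static int get_qs_contig(unsigned char *pf_map, int map_size,
                             int q_count, int *vsi_map)
    {
            int i, run = 0;

            for (i = 0; i < map_size; i++) {
                    run = pf_map[i] ? 0 : run + 1;
                    if (run == q_count) {
                            int base = i - q_count + 1, j;

                            for (j = 0; j < q_count; j++) {
                                    pf_map[base + j] = 1;
                                    vsi_map[j] = base + j;
                            }
                            return 0;
                    }
            }
            return -1;
    }

    /* Fallback: take any q_count free slots in index order, undoing the
     * partial assignment if the map runs out, as the scatter path does. */
    static int get_qs_scatter(unsigned char *pf_map, int map_size,
                              int q_count, int *vsi_map)
    {
            int i, idx = 0;

            for (i = 0; i < q_count; i++) {
                    while (idx < map_size && pf_map[idx])
                            idx++;
                    if (idx == map_size) {
                            while (i--)
                                    pf_map[vsi_map[i]] = 0;
                            return -1;
                    }
                    pf_map[idx] = 1;
                    vsi_map[i] = idx++;
            }
            return 0;
    }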
diff --git a/drivers/net/ethernet/intel/ice/ice_base.h b/drivers/net/ethernet/intel/ice/ice_base.h
new file mode 100644
index 000000000000..407995e8e944
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_base.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Intel Corporation. */
+
+#ifndef _ICE_BASE_H_
+#define _ICE_BASE_H_
+
+#include "ice.h"
+
+int ice_setup_rx_ctx(struct ice_ring *ring);
+int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg);
+int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx);
+int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi);
+void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi);
+void ice_vsi_free_q_vectors(struct ice_vsi *vsi);
+int
+ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
+ struct ice_aqc_add_tx_qgrp *qg_buf);
+void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector);
+void
+ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx);
+void
+ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx);
+void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector);
+int
+ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
+ u16 rel_vmvf_num, struct ice_ring *ring,
+ struct ice_txq_meta *txq_meta);
+void
+ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
+ struct ice_txq_meta *txq_meta);
+#endif /* _ICE_BASE_H_ */
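Every prototype above is unconditionally non-static. Before this refactor, ice_lib.c flipped the linkage of several of these helpers with preprocessor guards so they were only exported when SR-IOV code needed them, as the removed hunks later in this patch show, e.g. the opening of the old definition:

    #ifndef CONFIG_PCI_IOV
    static
    #endif /* !CONFIG_PCI_IOV */
    int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)

Centralizing the definitions in ice_base.c retires that pattern.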
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
index 661a6f7bca64..d11a0aab01ac 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
@@ -5,6 +5,7 @@
#define _ICE_DCB_LIB_H_
#include "ice.h"
+#include "ice_base.h"
#include "ice_lib.h"
#ifdef CONFIG_DCB
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 7e23034df955..7e779060069c 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -156,6 +156,7 @@ struct ice_priv_flag {
static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
ICE_PRIV_FLAG("link-down-on-close", ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA),
ICE_PRIV_FLAG("fw-lldp-agent", ICE_FLAG_FW_LLDP_AGENT),
+ ICE_PRIV_FLAG("legacy-rx", ICE_FLAG_LEGACY_RX),
};
#define ICE_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ice_gstrings_priv_flags)
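Once registered here, the new flag is exposed through ethtool's standard private-flags interface; a hypothetical invocation (interface name illustrative) looks like:

    ethtool --show-priv-flags eth0
    ethtool --set-priv-flags eth0 legacy-rx on

Toggling it restarts the VSI so the Rx buffer layout can change, as the ice_set_priv_flags() hunk below shows.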
@@ -623,7 +624,7 @@ static int ice_lbtest_receive_frames(struct ice_ring *rx_ring)
continue;
rx_buf = &rx_ring->rx_buf[i];
- received_buf = page_address(rx_buf->page);
+ received_buf = page_address(rx_buf->page) + rx_buf->page_offset;
if (ice_lbtest_check_frame(received_buf))
valid_frames++;
@@ -1256,6 +1257,11 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
"Fail to enable MIB change events\n");
}
}
+ if (test_bit(ICE_FLAG_LEGACY_RX, change_flags)) {
+ /* bring the VSI down and back up so the Rx cfg changes take effect */
+ ice_down(vsi);
+ ice_up(vsi);
+ }
clear_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags);
return ret;
}
@@ -2577,6 +2583,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
{
struct ice_ring *tx_rings = NULL, *rx_rings = NULL;
struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_ring *xdp_rings = NULL;
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
int i, timeout = 50, err = 0;
@@ -2611,6 +2618,13 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
return 0;
}
+ /* If an AF_XDP UMEM is attached to any of the Rx rings,
+ * disallow changing the number of descriptors, regardless
+ * of whether the netdev is running or not.
+ */
+ if (ice_xsk_any_rx_ring_ena(vsi))
+ return -EBUSY;
+
while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) {
timeout--;
if (!timeout)
@@ -2624,6 +2638,11 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
vsi->tx_rings[i]->count = new_tx_cnt;
for (i = 0; i < vsi->alloc_rxq; i++)
vsi->rx_rings[i]->count = new_rx_cnt;
+ if (ice_is_xdp_ena_vsi(vsi))
+ for (i = 0; i < vsi->num_xdp_txq; i++)
+ vsi->xdp_rings[i]->count = new_tx_cnt;
+ vsi->num_tx_desc = new_tx_cnt;
+ vsi->num_rx_desc = new_rx_cnt;
netdev_dbg(netdev, "Link is down, descriptor count change happens when link is brought up\n");
goto done;
}
@@ -2650,15 +2669,43 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
tx_rings[i].tx_buf = NULL;
err = ice_setup_tx_ring(&tx_rings[i]);
if (err) {
- while (i) {
- i--;
+ while (i--)
ice_clean_tx_ring(&tx_rings[i]);
- }
devm_kfree(&pf->pdev->dev, tx_rings);
goto done;
}
}
+ if (!ice_is_xdp_ena_vsi(vsi))
+ goto process_rx;
+
+ /* alloc updated XDP resources */
+ netdev_info(netdev, "Changing XDP descriptor count from %d to %d\n",
+ vsi->xdp_rings[0]->count, new_tx_cnt);
+
+ xdp_rings = devm_kcalloc(&pf->pdev->dev, vsi->num_xdp_txq,
+ sizeof(*xdp_rings), GFP_KERNEL);
+ if (!xdp_rings) {
+ err = -ENOMEM;
+ goto free_tx;
+ }
+
+ for (i = 0; i < vsi->num_xdp_txq; i++) {
+ /* clone ring and setup updated count */
+ xdp_rings[i] = *vsi->xdp_rings[i];
+ xdp_rings[i].count = new_tx_cnt;
+ xdp_rings[i].desc = NULL;
+ xdp_rings[i].tx_buf = NULL;
+ err = ice_setup_tx_ring(&xdp_rings[i]);
+ if (err) {
+ while (i--)
+ ice_clean_tx_ring(&xdp_rings[i]);
+ devm_kfree(&pf->pdev->dev, xdp_rings);
+ goto free_tx;
+ }
+ ice_set_ring_xdp(&xdp_rings[i]);
+ }
+
process_rx:
if (new_rx_cnt == vsi->rx_rings[0]->count)
goto process_link;
@@ -2737,6 +2784,16 @@ process_link:
devm_kfree(&pf->pdev->dev, rx_rings);
}
+ if (xdp_rings) {
+ for (i = 0; i < vsi->num_xdp_txq; i++) {
+ ice_free_tx_ring(vsi->xdp_rings[i]);
+ *vsi->xdp_rings[i] = xdp_rings[i];
+ }
+ devm_kfree(&pf->pdev->dev, xdp_rings);
+ }
+
+ vsi->num_tx_desc = new_tx_cnt;
+ vsi->num_rx_desc = new_rx_cnt;
ice_up(vsi);
}
goto done;
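The XDP-ring handling added to ice_set_ringparam() follows the driver's existing clone-and-swap pattern for resizing descriptor rings: fully set up shadow rings first so a mid-loop allocation failure leaves the live rings untouched, then commit by freeing the old resources and copying the shadows over. A stripped-down sketch under the assumption of stand-in types and helpers (ring_setup()/ring_clean() play the roles of ice_setup_tx_ring()/ice_clean_tx_ring()):

    struct ring {
            int count;      /* number of descriptors */
            void *desc;     /* descriptor memory owned by the ring */
    };

    extern int ring_setup(struct ring *r);   /* allocate r->desc for r->count */
    extern void ring_clean(struct ring *r);  /* release r->desc */

    /* Resize n live rings to new_cnt; on failure nothing visible changes. */
    static int resize_rings(struct ring **live, struct ring *shadow,
                            int n, int new_cnt)
    {
            int i, err;

            for (i = 0; i < n; i++) {
                    shadow[i] = *live[i];           /* clone configuration */
                    shadow[i].count = new_cnt;
                    shadow[i].desc = NULL;
                    err = ring_setup(&shadow[i]);
                    if (err) {
                            while (i--)
                                    ring_clean(&shadow[i]);
                            return err;
                    }
            }

            for (i = 0; i < n; i++) {               /* commit the swap */
                    ring_clean(live[i]);
                    *live[i] = shadow[i];
            }
            return 0;
    }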
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index cc755382df25..b1e96cac5b1f 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -2,235 +2,11 @@
/* Copyright (c) 2018, Intel Corporation. */
#include "ice.h"
+#include "ice_base.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"
/**
- * ice_setup_rx_ctx - Configure a receive ring context
- * @ring: The Rx ring to configure
- *
- * Configure the Rx descriptor ring in RLAN context.
- */
-static int ice_setup_rx_ctx(struct ice_ring *ring)
-{
- struct ice_vsi *vsi = ring->vsi;
- struct ice_hw *hw = &vsi->back->hw;
- u32 rxdid = ICE_RXDID_FLEX_NIC;
- struct ice_rlan_ctx rlan_ctx;
- u32 regval;
- u16 pf_q;
- int err;
-
- /* what is Rx queue number in global space of 2K Rx queues */
- pf_q = vsi->rxq_map[ring->q_index];
-
- /* clear the context structure first */
- memset(&rlan_ctx, 0, sizeof(rlan_ctx));
-
- rlan_ctx.base = ring->dma >> 7;
-
- rlan_ctx.qlen = ring->count;
-
- /* Receive Packet Data Buffer Size.
- * The Packet Data Buffer Size is defined in 128 byte units.
- */
- rlan_ctx.dbuf = vsi->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
-
- /* use 32 byte descriptors */
- rlan_ctx.dsize = 1;
-
- /* Strip the Ethernet CRC bytes before the packet is posted to host
- * memory.
- */
- rlan_ctx.crcstrip = 1;
-
- /* L2TSEL flag defines the reported L2 Tags in the receive descriptor */
- rlan_ctx.l2tsel = 1;
-
- rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
- rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
- rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;
-
- /* This controls whether VLAN is stripped from inner headers
- * The VLAN in the inner L2 header is stripped to the receive
- * descriptor if enabled by this flag.
- */
- rlan_ctx.showiv = 0;
-
- /* Max packet size for this queue - must not be set to a larger value
- * than 5 x DBUF
- */
- rlan_ctx.rxmax = min_t(u16, vsi->max_frame,
- ICE_MAX_CHAINED_RX_BUFS * vsi->rx_buf_len);
-
- /* Rx queue threshold in units of 64 */
- rlan_ctx.lrxqthresh = 1;
-
- /* Enable Flexible Descriptors in the queue context which
- * allows this driver to select a specific receive descriptor format
- */
- if (vsi->type != ICE_VSI_VF) {
- regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
- regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
- QRXFLXP_CNTXT_RXDID_IDX_M;
-
- /* increasing context priority to pick up profile ID;
- * default is 0x01; setting to 0x03 to ensure profile
- * is programming if prev context is of same priority
- */
- regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
- QRXFLXP_CNTXT_RXDID_PRIO_M;
-
- wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
- }
-
- /* Absolute queue number out of 2K needs to be passed */
- err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
- if (err) {
- dev_err(&vsi->back->pdev->dev,
- "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
- pf_q, err);
- return -EIO;
- }
-
- if (vsi->type == ICE_VSI_VF)
- return 0;
-
- /* init queue specific tail register */
- ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
- writel(0, ring->tail);
- ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring));
-
- return 0;
-}
-
-/**
- * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
- * @ring: The Tx ring to configure
- * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized
- * @pf_q: queue index in the PF space
- *
- * Configure the Tx descriptor ring in TLAN context.
- */
-static void
-ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
-{
- struct ice_vsi *vsi = ring->vsi;
- struct ice_hw *hw = &vsi->back->hw;
-
- tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;
-
- tlan_ctx->port_num = vsi->port_info->lport;
-
- /* Transmit Queue Length */
- tlan_ctx->qlen = ring->count;
-
- ice_set_cgd_num(tlan_ctx, ring);
-
- /* PF number */
- tlan_ctx->pf_num = hw->pf_id;
-
- /* queue belongs to a specific VSI type
- * VF / VM index should be programmed per vmvf_type setting:
- * for vmvf_type = VF, it is VF number between 0-256
- * for vmvf_type = VM, it is VM number between 0-767
- * for PF or EMP this field should be set to zero
- */
- switch (vsi->type) {
- case ICE_VSI_LB:
- /* fall through */
- case ICE_VSI_PF:
- tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
- break;
- case ICE_VSI_VF:
- /* Firmware expects vmvf_num to be absolute VF ID */
- tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id;
- tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
- break;
- default:
- return;
- }
-
- /* make sure the context is associated with the right VSI */
- tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);
-
- tlan_ctx->tso_ena = ICE_TX_LEGACY;
- tlan_ctx->tso_qnum = pf_q;
-
- /* Legacy or Advanced Host Interface:
- * 0: Advanced Host Interface
- * 1: Legacy Host Interface
- */
- tlan_ctx->legacy_int = ICE_TX_LEGACY;
-}
-
-/**
- * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
- * @pf: the PF being configured
- * @pf_q: the PF queue
- * @ena: enable or disable state of the queue
- *
- * This routine will wait for the given Rx queue of the PF to reach the
- * enabled or disabled state.
- * Returns -ETIMEDOUT in case of failing to reach the requested state after
- * multiple retries; else will return 0 in case of success.
- */
-static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
-{
- int i;
-
- for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) {
- if (ena == !!(rd32(&pf->hw, QRX_CTRL(pf_q)) &
- QRX_CTRL_QENA_STAT_M))
- return 0;
-
- usleep_range(20, 40);
- }
-
- return -ETIMEDOUT;
-}
-
-/**
- * ice_vsi_ctrl_rx_ring - Start or stop a VSI's Rx ring
- * @vsi: the VSI being configured
- * @ena: start or stop the Rx rings
- * @rxq_idx: Rx queue index
- */
-#ifndef CONFIG_PCI_IOV
-static
-#endif /* !CONFIG_PCI_IOV */
-int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
-{
- int pf_q = vsi->rxq_map[rxq_idx];
- struct ice_pf *pf = vsi->back;
- struct ice_hw *hw = &pf->hw;
- int ret = 0;
- u32 rx_reg;
-
- rx_reg = rd32(hw, QRX_CTRL(pf_q));
-
- /* Skip if the queue is already in the requested state */
- if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
- return 0;
-
- /* turn on/off the queue */
- if (ena)
- rx_reg |= QRX_CTRL_QENA_REQ_M;
- else
- rx_reg &= ~QRX_CTRL_QENA_REQ_M;
- wr32(hw, QRX_CTRL(pf_q), rx_reg);
-
- /* wait for the change to finish */
- ret = ice_pf_rxq_wait(pf, pf_q, ena);
- if (ret)
- dev_err(&pf->pdev->dev,
- "VSI idx %d Rx ring %d %sable timeout\n",
- vsi->idx, pf_q, (ena ? "en" : "dis"));
-
- return ret;
-}
-
-/**
* ice_vsi_ctrl_rx_rings - Start or stop a VSI's Rx rings
* @vsi: the VSI being configured
* @ena: start or stop the Rx rings
@@ -270,7 +46,8 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
if (!vsi->rx_rings)
goto err_rings;
- vsi->txq_map = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
+ /* XDP will have vsi->alloc_txq Tx queues as well, so double the size */
+ vsi->txq_map = devm_kcalloc(&pf->pdev->dev, (2 * vsi->alloc_txq),
sizeof(*vsi->txq_map), GFP_KERNEL);
if (!vsi->txq_map)
@@ -281,7 +58,6 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
if (!vsi->rxq_map)
goto err_rxq_map;
-
/* There is no need to allocate q_vectors for a loopback VSI. */
if (vsi->type == ICE_VSI_LB)
return 0;
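Doubling txq_map reserves its upper half for XDP: regular Tx queue i keeps txq_map[i], and its paired XDP ring uses txq_map[i + num_xdp_txq], which is exactly how ice_cfg_txq_interrupt() in ice_base.c programs the second interrupt cause. A hypothetical one-liner capturing the pairing (not driver API; u16 assumes the kernel typedef, substitute unsigned short in a stand-alone build):

    /* PF queue backing the XDP ring paired with regular Tx queue txq */
    static u16 xdp_pf_queue(const u16 *txq_map, u16 txq, u16 num_xdp_txq)
    {
            return txq_map[txq + num_xdp_txq];
    }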
@@ -606,88 +382,6 @@ unlock_pf:
}
/**
- * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
- * @qs_cfg: gathered variables needed for PF->VSI queues assignment
- *
- * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap
- */
-static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
-{
- int offset, i;
-
- mutex_lock(qs_cfg->qs_mutex);
- offset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size,
- 0, qs_cfg->q_count, 0);
- if (offset >= qs_cfg->pf_map_size) {
- mutex_unlock(qs_cfg->qs_mutex);
- return -ENOMEM;
- }
-
- bitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count);
- for (i = 0; i < qs_cfg->q_count; i++)
- qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = i + offset;
- mutex_unlock(qs_cfg->qs_mutex);
-
- return 0;
-}
-
-/**
- * __ice_vsi_get_qs_sc - Assign a scattered queues from PF to VSI
- * @qs_cfg: gathered variables needed for pf->vsi queues assignment
- *
- * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap
- */
-static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg)
-{
- int i, index = 0;
-
- mutex_lock(qs_cfg->qs_mutex);
- for (i = 0; i < qs_cfg->q_count; i++) {
- index = find_next_zero_bit(qs_cfg->pf_map,
- qs_cfg->pf_map_size, index);
- if (index >= qs_cfg->pf_map_size)
- goto err_scatter;
- set_bit(index, qs_cfg->pf_map);
- qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = index;
- }
- mutex_unlock(qs_cfg->qs_mutex);
-
- return 0;
-err_scatter:
- for (index = 0; index < i; index++) {
- clear_bit(qs_cfg->vsi_map[index], qs_cfg->pf_map);
- qs_cfg->vsi_map[index + qs_cfg->vsi_map_offset] = 0;
- }
- mutex_unlock(qs_cfg->qs_mutex);
-
- return -ENOMEM;
-}
-
-/**
- * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
- * @qs_cfg: gathered variables needed for pf->vsi queues assignment
- *
- * This function first tries to find contiguous space. If it is not successful,
- * it tries with the scatter approach.
- *
- * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap
- */
-static int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg)
-{
- int ret = 0;
-
- ret = __ice_vsi_get_qs_contig(qs_cfg);
- if (ret) {
- /* contig failed, so try with scatter approach */
- qs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER;
- qs_cfg->q_count = min_t(u16, qs_cfg->q_count,
- qs_cfg->scatter_count);
- ret = __ice_vsi_get_qs_sc(qs_cfg);
- }
- return ret;
-}
-
-/**
* ice_vsi_get_qs - Assign queues from PF to VSI
* @vsi: the VSI to assign queues to
*
@@ -1098,129 +792,6 @@ static int ice_vsi_init(struct ice_vsi *vsi)
}
/**
- * ice_free_q_vector - Free memory allocated for a specific interrupt vector
- * @vsi: VSI having the memory freed
- * @v_idx: index of the vector to be freed
- */
-static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
-{
- struct ice_q_vector *q_vector;
- struct ice_pf *pf = vsi->back;
- struct ice_ring *ring;
-
- if (!vsi->q_vectors[v_idx]) {
- dev_dbg(&pf->pdev->dev, "Queue vector at index %d not found\n",
- v_idx);
- return;
- }
- q_vector = vsi->q_vectors[v_idx];
-
- ice_for_each_ring(ring, q_vector->tx)
- ring->q_vector = NULL;
- ice_for_each_ring(ring, q_vector->rx)
- ring->q_vector = NULL;
-
- /* only VSI with an associated netdev is set up with NAPI */
- if (vsi->netdev)
- netif_napi_del(&q_vector->napi);
-
- devm_kfree(&pf->pdev->dev, q_vector);
- vsi->q_vectors[v_idx] = NULL;
-}
-
-/**
- * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors
- * @vsi: the VSI having memory freed
- */
-void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
-{
- int v_idx;
-
- ice_for_each_q_vector(vsi, v_idx)
- ice_free_q_vector(vsi, v_idx);
-}
-
-/**
- * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
- * @vsi: the VSI being configured
- * @v_idx: index of the vector in the VSI struct
- *
- * We allocate one q_vector. If allocation fails we return -ENOMEM.
- */
-static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
-{
- struct ice_pf *pf = vsi->back;
- struct ice_q_vector *q_vector;
-
- /* allocate q_vector */
- q_vector = devm_kzalloc(&pf->pdev->dev, sizeof(*q_vector), GFP_KERNEL);
- if (!q_vector)
- return -ENOMEM;
-
- q_vector->vsi = vsi;
- q_vector->v_idx = v_idx;
- if (vsi->type == ICE_VSI_VF)
- goto out;
- /* only set affinity_mask if the CPU is online */
- if (cpu_online(v_idx))
- cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
-
- /* This will not be called in the driver load path because the netdev
- * will not be created yet. All other cases with register the NAPI
- * handler here (i.e. resume, reset/rebuild, etc.)
- */
- if (vsi->netdev)
- netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll,
- NAPI_POLL_WEIGHT);
-
-out:
- /* tie q_vector and VSI together */
- vsi->q_vectors[v_idx] = q_vector;
-
- return 0;
-}
-
-/**
- * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
- * @vsi: the VSI being configured
- *
- * We allocate one q_vector per queue interrupt. If allocation fails we
- * return -ENOMEM.
- */
-static int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
-{
- struct ice_pf *pf = vsi->back;
- int v_idx = 0, num_q_vectors;
- int err;
-
- if (vsi->q_vectors[0]) {
- dev_dbg(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
- vsi->vsi_num);
- return -EEXIST;
- }
-
- num_q_vectors = vsi->num_q_vectors;
-
- for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
- err = ice_vsi_alloc_q_vector(vsi, v_idx);
- if (err)
- goto err_out;
- }
-
- return 0;
-
-err_out:
- while (v_idx--)
- ice_free_q_vector(vsi, v_idx);
-
- dev_err(&pf->pdev->dev,
- "Failed to allocate %d q_vector for VSI %d, ret=%d\n",
- vsi->num_q_vectors, vsi->vsi_num, err);
- vsi->num_q_vectors = 0;
- return err;
-}
-
-/**
* ice_vsi_setup_vector_base - Set up the base vector for the given VSI
* @vsi: ptr to the VSI
*
@@ -1341,66 +912,6 @@ err_out:
}
/**
- * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors
- * @vsi: the VSI being configured
- *
- * This function maps descriptor rings to the queue-specific vectors allotted
- * through the MSI-X enabling code. On a constrained vector budget, we map Tx
- * and Rx rings to the vector as "efficiently" as possible.
- */
-#ifdef CONFIG_DCB
-void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
-#else
-static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
-#endif /* CONFIG_DCB */
-{
- int q_vectors = vsi->num_q_vectors;
- int tx_rings_rem, rx_rings_rem;
- int v_id;
-
- /* initially assigning remaining rings count to VSIs num queue value */
- tx_rings_rem = vsi->num_txq;
- rx_rings_rem = vsi->num_rxq;
-
- for (v_id = 0; v_id < q_vectors; v_id++) {
- struct ice_q_vector *q_vector = vsi->q_vectors[v_id];
- int tx_rings_per_v, rx_rings_per_v, q_id, q_base;
-
- /* Tx rings mapping to vector */
- tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id);
- q_vector->num_ring_tx = tx_rings_per_v;
- q_vector->tx.ring = NULL;
- q_vector->tx.itr_idx = ICE_TX_ITR;
- q_base = vsi->num_txq - tx_rings_rem;
-
- for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
- struct ice_ring *tx_ring = vsi->tx_rings[q_id];
-
- tx_ring->q_vector = q_vector;
- tx_ring->next = q_vector->tx.ring;
- q_vector->tx.ring = tx_ring;
- }
- tx_rings_rem -= tx_rings_per_v;
-
- /* Rx rings mapping to vector */
- rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id);
- q_vector->num_ring_rx = rx_rings_per_v;
- q_vector->rx.ring = NULL;
- q_vector->rx.itr_idx = ICE_RX_ITR;
- q_base = vsi->num_rxq - rx_rings_rem;
-
- for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
- struct ice_ring *rx_ring = vsi->rx_rings[q_id];
-
- rx_ring->q_vector = q_vector;
- rx_ring->next = q_vector->rx.ring;
- q_vector->rx.ring = rx_ring;
- }
- rx_rings_rem -= rx_rings_per_v;
- }
-}
-
-/**
* ice_vsi_manage_rss_lut - disable/enable RSS
* @vsi: the VSI being changed
* @ena: boolean value indicating if this is an enable or disable request
@@ -1674,6 +1185,31 @@ int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
}
/**
+ * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
+ * @vsi: VSI
+ */
+void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
+{
+ if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
+ vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
+ vsi->rx_buf_len = ICE_RXBUF_2048;
+#if (PAGE_SIZE < 8192)
+ } else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
+ (vsi->netdev->mtu <= ETH_DATA_LEN)) {
+ vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
+ vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
+#endif
+ } else {
+ vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
+#if (PAGE_SIZE < 8192)
+ vsi->rx_buf_len = ICE_RXBUF_3072;
+#else
+ vsi->rx_buf_len = ICE_RXBUF_2048;
+#endif
+ }
+}
+
+/**
* ice_vsi_cfg_rxqs - Configure the VSI for Rx
* @vsi: the VSI being configured
*
@@ -1687,13 +1223,7 @@ int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
if (vsi->type == ICE_VSI_VF)
goto setup_rings;
- if (vsi->netdev && vsi->netdev->mtu > ETH_DATA_LEN)
- vsi->max_frame = vsi->netdev->mtu +
- ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
- else
- vsi->max_frame = ICE_RXBUF_2048;
-
- vsi->rx_buf_len = ICE_RXBUF_2048;
+ ice_vsi_cfg_frame_size(vsi);
setup_rings:
/* set up individual rings */
for (i = 0; i < vsi->num_rxq; i++) {
@@ -1712,101 +1242,34 @@ setup_rings:
}
/**
- * ice_vsi_cfg_txq - Configure single Tx queue
- * @vsi: the VSI that queue belongs to
- * @ring: Tx ring to be configured
- * @tc_q_idx: queue index within given TC
- * @qg_buf: queue group buffer
- * @tc: TC that Tx ring belongs to
- */
-static int
-ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring, u16 tc_q_idx,
- struct ice_aqc_add_tx_qgrp *qg_buf, u8 tc)
-{
- struct ice_tlan_ctx tlan_ctx = { 0 };
- struct ice_aqc_add_txqs_perq *txq;
- struct ice_pf *pf = vsi->back;
- u8 buf_len = sizeof(*qg_buf);
- enum ice_status status;
- u16 pf_q;
-
- pf_q = ring->reg_idx;
- ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
- /* copy context contents into the qg_buf */
- qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
- ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
- ice_tlan_ctx_info);
-
- /* init queue specific tail reg. It is referred as
- * transmit comm scheduler queue doorbell.
- */
- ring->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
-
- /* Add unique software queue handle of the Tx queue per
- * TC into the VSI Tx ring
- */
- ring->q_handle = tc_q_idx;
-
- status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle,
- 1, qg_buf, buf_len, NULL);
- if (status) {
- dev_err(&pf->pdev->dev,
- "Failed to set LAN Tx queue context, error: %d\n",
- status);
- return -ENODEV;
- }
-
- /* Add Tx Queue TEID into the VSI Tx ring from the
- * response. This will complete configuring and
- * enabling the queue.
- */
- txq = &qg_buf->txqs[0];
- if (pf_q == le16_to_cpu(txq->txq_id))
- ring->txq_teid = le32_to_cpu(txq->q_teid);
-
- return 0;
-}
-
-/**
* ice_vsi_cfg_txqs - Configure the VSI for Tx
* @vsi: the VSI being configured
* @rings: Tx ring array to be configured
- * @offset: offset within vsi->txq_map
*
* Return 0 on success and a negative value on error
* Configure the Tx VSI for operation.
*/
static int
-ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, int offset)
+ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings)
{
struct ice_aqc_add_tx_qgrp *qg_buf;
- struct ice_pf *pf = vsi->back;
- u16 q_idx = 0, i;
+ u16 q_idx = 0;
int err = 0;
- u8 tc;
- qg_buf = devm_kzalloc(&pf->pdev->dev, sizeof(*qg_buf), GFP_KERNEL);
+ qg_buf = kzalloc(sizeof(*qg_buf), GFP_KERNEL);
if (!qg_buf)
return -ENOMEM;
qg_buf->num_txqs = 1;
- /* set up and configure the Tx queues for each enabled TC */
- ice_for_each_traffic_class(tc) {
- if (!(vsi->tc_cfg.ena_tc & BIT(tc)))
- break;
-
- for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) {
- err = ice_vsi_cfg_txq(vsi, rings[q_idx], i + offset,
- qg_buf, tc);
- if (err)
- goto err_cfg_txqs;
-
- q_idx++;
- }
+ for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) {
+ err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
+ if (err)
+ goto err_cfg_txqs;
}
+
err_cfg_txqs:
- devm_kfree(&pf->pdev->dev, qg_buf);
+ kfree(qg_buf);
return err;
}
@@ -1819,159 +1282,46 @@ err_cfg_txqs:
*/
int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
{
- return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, 0);
-}
-
-/**
- * ice_intrl_usec_to_reg - convert interrupt rate limit to register value
- * @intrl: interrupt rate limit in usecs
- * @gran: interrupt rate limit granularity in usecs
- *
- * This function converts a decimal interrupt rate limit in usecs to the format
- * expected by firmware.
- */
-u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
-{
- u32 val = intrl / gran;
-
- if (val)
- return val | GLINT_RATE_INTRL_ENA_M;
- return 0;
-}
-
-/**
- * ice_cfg_itr_gran - set the ITR granularity to 2 usecs if not already set
- * @hw: board specific structure
- */
-static void ice_cfg_itr_gran(struct ice_hw *hw)
-{
- u32 regval = rd32(hw, GLINT_CTL);
-
- /* no need to update global register if ITR gran is already set */
- if (!(regval & GLINT_CTL_DIS_AUTOMASK_M) &&
- (((regval & GLINT_CTL_ITR_GRAN_200_M) >>
- GLINT_CTL_ITR_GRAN_200_S) == ICE_ITR_GRAN_US) &&
- (((regval & GLINT_CTL_ITR_GRAN_100_M) >>
- GLINT_CTL_ITR_GRAN_100_S) == ICE_ITR_GRAN_US) &&
- (((regval & GLINT_CTL_ITR_GRAN_50_M) >>
- GLINT_CTL_ITR_GRAN_50_S) == ICE_ITR_GRAN_US) &&
- (((regval & GLINT_CTL_ITR_GRAN_25_M) >>
- GLINT_CTL_ITR_GRAN_25_S) == ICE_ITR_GRAN_US))
- return;
-
- regval = ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_200_S) &
- GLINT_CTL_ITR_GRAN_200_M) |
- ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_100_S) &
- GLINT_CTL_ITR_GRAN_100_M) |
- ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_50_S) &
- GLINT_CTL_ITR_GRAN_50_M) |
- ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_25_S) &
- GLINT_CTL_ITR_GRAN_25_M);
- wr32(hw, GLINT_CTL, regval);
+ return ice_vsi_cfg_txqs(vsi, vsi->tx_rings);
}
/**
- * ice_cfg_itr - configure the initial interrupt throttle values
- * @hw: pointer to the HW structure
- * @q_vector: interrupt vector that's being configured
- *
- * Configure interrupt throttling values for the ring containers that are
- * associated with the interrupt vector passed in.
- */
-static void
-ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector)
-{
- ice_cfg_itr_gran(hw);
-
- if (q_vector->num_ring_rx) {
- struct ice_ring_container *rc = &q_vector->rx;
-
- /* if this value is set then don't overwrite with default */
- if (!rc->itr_setting)
- rc->itr_setting = ICE_DFLT_RX_ITR;
-
- rc->target_itr = ITR_TO_REG(rc->itr_setting);
- rc->next_update = jiffies + 1;
- rc->current_itr = rc->target_itr;
- wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
- ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
- }
-
- if (q_vector->num_ring_tx) {
- struct ice_ring_container *rc = &q_vector->tx;
-
- /* if this value is set then don't overwrite with default */
- if (!rc->itr_setting)
- rc->itr_setting = ICE_DFLT_TX_ITR;
-
- rc->target_itr = ITR_TO_REG(rc->itr_setting);
- rc->next_update = jiffies + 1;
- rc->current_itr = rc->target_itr;
- wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
- ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
- }
-}
-
-/**
- * ice_cfg_txq_interrupt - configure interrupt on Tx queue
+ * ice_vsi_cfg_xdp_txqs - Configure Tx queues dedicated for XDP in the given VSI
* @vsi: the VSI being configured
- * @txq: Tx queue being mapped to MSI-X vector
- * @msix_idx: MSI-X vector index within the function
- * @itr_idx: ITR index of the interrupt cause
*
- * Configure interrupt on Tx queue by associating Tx queue to MSI-X vector
- * within the function space.
+ * Return 0 on success and a negative value on error
+ * Configure the Tx queues dedicated for XDP in the given VSI for operation.
*/
-#ifdef CONFIG_PCI_IOV
-void
-ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)
-#else
-static void
-ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)
-#endif /* CONFIG_PCI_IOV */
+int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
{
- struct ice_pf *pf = vsi->back;
- struct ice_hw *hw = &pf->hw;
- u32 val;
+ int ret;
+ int i;
- itr_idx = (itr_idx << QINT_TQCTL_ITR_INDX_S) & QINT_TQCTL_ITR_INDX_M;
+ ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings);
+ if (ret)
+ return ret;
- val = QINT_TQCTL_CAUSE_ENA_M | itr_idx |
- ((msix_idx << QINT_TQCTL_MSIX_INDX_S) & QINT_TQCTL_MSIX_INDX_M);
+ for (i = 0; i < vsi->num_xdp_txq; i++)
+ vsi->xdp_rings[i]->xsk_umem = ice_xsk_umem(vsi->xdp_rings[i]);
- wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
+ return ret;
}
/**
- * ice_cfg_rxq_interrupt - configure interrupt on Rx queue
- * @vsi: the VSI being configured
- * @rxq: Rx queue being mapped to MSI-X vector
- * @msix_idx: MSI-X vector index within the function
- * @itr_idx: ITR index of the interrupt cause
+ * ice_intrl_usec_to_reg - convert interrupt rate limit to register value
+ * @intrl: interrupt rate limit in usecs
+ * @gran: interrupt rate limit granularity in usecs
*
- * Configure interrupt on Rx queue by associating Rx queue to MSI-X vector
- * within the function space.
+ * This function converts a decimal interrupt rate limit in usecs to the format
+ * expected by firmware.
*/
-#ifdef CONFIG_PCI_IOV
-void
-ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
-#else
-static void
-ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
-#endif /* CONFIG_PCI_IOV */
+u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
{
- struct ice_pf *pf = vsi->back;
- struct ice_hw *hw = &pf->hw;
- u32 val;
-
- itr_idx = (itr_idx << QINT_RQCTL_ITR_INDX_S) & QINT_RQCTL_ITR_INDX_M;
-
- val = QINT_RQCTL_CAUSE_ENA_M | itr_idx |
- ((msix_idx << QINT_RQCTL_MSIX_INDX_S) & QINT_RQCTL_MSIX_INDX_M);
-
- wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);
+ u32 val = intrl / gran;
- ice_flush(hw);
+ if (val)
+ return val | GLINT_RATE_INTRL_ENA_M;
+ return 0;
}
/**
@@ -2134,109 +1484,6 @@ int ice_vsi_stop_rx_rings(struct ice_vsi *vsi)
}
/**
- * ice_trigger_sw_intr - trigger a software interrupt
- * @hw: pointer to the HW structure
- * @q_vector: interrupt vector to trigger the software interrupt for
- */
-void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector)
-{
- wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx),
- (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) |
- GLINT_DYN_CTL_SWINT_TRIG_M |
- GLINT_DYN_CTL_INTENA_M);
-}
-
-/**
- * ice_vsi_stop_tx_ring - Disable single Tx ring
- * @vsi: the VSI being configured
- * @rst_src: reset source
- * @rel_vmvf_num: Relative ID of VF/VM
- * @ring: Tx ring to be stopped
- * @txq_meta: Meta data of Tx ring to be stopped
- */
-#ifndef CONFIG_PCI_IOV
-static
-#endif /* !CONFIG_PCI_IOV */
-int
-ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
- u16 rel_vmvf_num, struct ice_ring *ring,
- struct ice_txq_meta *txq_meta)
-{
- struct ice_pf *pf = vsi->back;
- struct ice_q_vector *q_vector;
- struct ice_hw *hw = &pf->hw;
- enum ice_status status;
- u32 val;
-
- /* clear cause_ena bit for disabled queues */
- val = rd32(hw, QINT_TQCTL(ring->reg_idx));
- val &= ~QINT_TQCTL_CAUSE_ENA_M;
- wr32(hw, QINT_TQCTL(ring->reg_idx), val);
-
- /* software is expected to wait for 100 ns */
- ndelay(100);
-
- /* trigger a software interrupt for the vector
- * associated to the queue to schedule NAPI handler
- */
- q_vector = ring->q_vector;
- if (q_vector)
- ice_trigger_sw_intr(hw, q_vector);
-
- status = ice_dis_vsi_txq(vsi->port_info, txq_meta->vsi_idx,
- txq_meta->tc, 1, &txq_meta->q_handle,
- &txq_meta->q_id, &txq_meta->q_teid, rst_src,
- rel_vmvf_num, NULL);
-
- /* if the disable queue command was exercised during an
- * active reset flow, ICE_ERR_RESET_ONGOING is returned.
- * This is not an error as the reset operation disables
- * queues at the hardware level anyway.
- */
- if (status == ICE_ERR_RESET_ONGOING) {
- dev_dbg(&vsi->back->pdev->dev,
- "Reset in progress. LAN Tx queues already disabled\n");
- } else if (status == ICE_ERR_DOES_NOT_EXIST) {
- dev_dbg(&vsi->back->pdev->dev,
- "LAN Tx queues do not exist, nothing to disable\n");
- } else if (status) {
- dev_err(&vsi->back->pdev->dev,
- "Failed to disable LAN Tx queues, error: %d\n", status);
- return -ENODEV;
- }
-
- return 0;
-}
-
-/**
- * ice_fill_txq_meta - Prepare the Tx queue's meta data
- * @vsi: VSI that ring belongs to
- * @ring: ring that txq_meta will be based on
- * @txq_meta: a helper struct that wraps Tx queue's information
- *
- * Set up a helper struct that will contain all the necessary fields that
- * are needed for stopping Tx queue
- */
-#ifndef CONFIG_PCI_IOV
-static
-#endif /* !CONFIG_PCI_IOV */
-void
-ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
- struct ice_txq_meta *txq_meta)
-{
- u8 tc = 0;
-
-#ifdef CONFIG_DCB
- tc = ring->dcb_tc;
-#endif /* CONFIG_DCB */
- txq_meta->q_id = ring->reg_idx;
- txq_meta->q_teid = ring->txq_teid;
- txq_meta->q_handle = ring->q_handle;
- txq_meta->vsi_idx = vsi->idx;
- txq_meta->tc = tc;
-}
-
-/**
* ice_vsi_stop_tx_rings - Disable Tx rings
* @vsi: the VSI being configured
* @rst_src: reset source
@@ -2247,34 +1494,24 @@ static int
ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
u16 rel_vmvf_num, struct ice_ring **rings)
{
- u16 i, q_idx = 0;
- int status;
- u8 tc;
+ u16 q_idx;
if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
return -EINVAL;
- /* set up the Tx queue list to be disabled for each enabled TC */
- ice_for_each_traffic_class(tc) {
- if (!(vsi->tc_cfg.ena_tc & BIT(tc)))
- break;
-
- for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) {
- struct ice_txq_meta txq_meta = { };
-
- if (!rings || !rings[q_idx])
- return -EINVAL;
+ for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) {
+ struct ice_txq_meta txq_meta = { };
+ int status;
- ice_fill_txq_meta(vsi, rings[q_idx], &txq_meta);
- status = ice_vsi_stop_tx_ring(vsi, rst_src,
- rel_vmvf_num,
- rings[q_idx], &txq_meta);
+ if (!rings || !rings[q_idx])
+ return -EINVAL;
- if (status)
- return status;
+ ice_fill_txq_meta(vsi, rings[q_idx], &txq_meta);
+ status = ice_vsi_stop_tx_ring(vsi, rst_src, rel_vmvf_num,
+ rings[q_idx], &txq_meta);
- q_idx++;
- }
+ if (status)
+ return status;
}
return 0;
@@ -2294,6 +1531,15 @@ ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
}
/**
+ * ice_vsi_stop_xdp_tx_rings - Disable XDP Tx rings
+ * @vsi: the VSI being configured
+ */
+int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi)
+{
+ return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings);
+}
+
+/**
* ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
* @vsi: VSI to enable or disable VLAN pruning on
* @ena: set to true to enable VLAN pruning and false to disable it
@@ -2690,6 +1936,11 @@ static void ice_vsi_release_msix(struct ice_vsi *vsi)
wr32(hw, GLINT_ITR(ICE_IDX_ITR1, reg_idx), 0);
for (q = 0; q < q_vector->num_ring_tx; q++) {
wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
+ if (ice_is_xdp_ena_vsi(vsi)) {
+ u32 xdp_txq = txq + vsi->num_xdp_txq;
+
+ wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]), 0);
+ }
txq++;
}
@@ -3064,6 +2315,11 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
vsi->base_vector = 0;
}
+ if (ice_is_xdp_ena_vsi(vsi))
+ /* the return value check can be skipped here, as it always
+ * returns 0 while a reset is in progress
+ */
+ ice_destroy_xdp_rings(vsi);
ice_vsi_put_qs(vsi);
ice_vsi_clear_rings(vsi);
ice_vsi_free_arrays(vsi);
@@ -3085,7 +2341,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
if (ret < 0)
goto err_vsi;
-
switch (vsi->type) {
case ICE_VSI_PF:
ret = ice_vsi_alloc_q_vectors(vsi);
@@ -3105,6 +2360,12 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
goto err_vectors;
ice_vsi_map_rings_to_vectors(vsi);
+ if (ice_is_xdp_ena_vsi(vsi)) {
+ vsi->num_xdp_txq = vsi->alloc_txq;
+ ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog);
+ if (ret)
+ goto err_vectors;
+ }
/* Do not exit if configuring RSS had an issue, at least
* receive traffic on first queue. Hence no need to capture
* return value
@@ -3131,9 +2392,13 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
}
/* configure VSI nodes based on number of queues and TC's */
- for (i = 0; i < vsi->tc_cfg.numtc; i++)
+ for (i = 0; i < vsi->tc_cfg.numtc; i++) {
max_txqs[i] = vsi->alloc_txq;
+ if (ice_is_xdp_ena_vsi(vsi))
+ max_txqs[i] += vsi->num_xdp_txq;
+ }
+
status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
if (status) {
@@ -3271,6 +2536,51 @@ char *ice_nvm_version_str(struct ice_hw *hw)
}
/**
+ * ice_update_ring_stats - Update ring statistics
+ * @ring: ring to update
+ * @cont: used to increment per-vector counters
+ * @pkts: number of processed packets
+ * @bytes: number of processed bytes
+ *
+ * This function assumes that the caller holds the u64_stats_sync lock.
+ */
+static void
+ice_update_ring_stats(struct ice_ring *ring, struct ice_ring_container *cont,
+ u64 pkts, u64 bytes)
+{
+ ring->stats.bytes += bytes;
+ ring->stats.pkts += pkts;
+ cont->total_bytes += bytes;
+ cont->total_pkts += pkts;
+}
+
+/**
+ * ice_update_tx_ring_stats - Update Tx ring specific counters
+ * @tx_ring: ring to update
+ * @pkts: number of processed packets
+ * @bytes: number of processed bytes
+ */
+void ice_update_tx_ring_stats(struct ice_ring *tx_ring, u64 pkts, u64 bytes)
+{
+ u64_stats_update_begin(&tx_ring->syncp);
+ ice_update_ring_stats(tx_ring, &tx_ring->q_vector->tx, pkts, bytes);
+ u64_stats_update_end(&tx_ring->syncp);
+}
+
+/**
+ * ice_update_rx_ring_stats - Update Rx ring specific counters
+ * @rx_ring: ring to update
+ * @pkts: number of processed packets
+ * @bytes: number of processed bytes
+ */
+void ice_update_rx_ring_stats(struct ice_ring *rx_ring, u64 pkts, u64 bytes)
+{
+ u64_stats_update_begin(&rx_ring->syncp);
+ ice_update_ring_stats(rx_ring, &rx_ring->q_vector->rx, pkts, bytes);
+ u64_stats_update_end(&rx_ring->syncp);
+}
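The u64_stats_sync usage above has a matching reader side; a minimal sketch of how such counters would be fetched consistently (a hypothetical helper, mirroring the usual u64_stats pattern):

	static void
	ice_fetch_ring_stats(struct ice_ring *ring, u64 *pkts, u64 *bytes)
	{
		unsigned int start;

		/* retry until the writer was not mid-update; this keeps the
		 * 64-bit counters consistent even on 32-bit architectures
		 */
		do {
			start = u64_stats_fetch_begin(&ring->syncp);
			*pkts = ring->stats.pkts;
			*bytes = ring->stats.bytes;
		} while (u64_stats_fetch_retry(&ring->syncp, start));
	}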
+
+/**
* ice_vsi_cfg_mac_fltr - Add or remove a MAC address filter for a VSI
* @vsi: the VSI being configured MAC filter
* @macaddr: the MAC address to be added.
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 47bc033fff20..8d5a7978e066 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -6,19 +6,6 @@
#include "ice.h"
-struct ice_txq_meta {
- /* Tx-scheduler element identifier */
- u32 q_teid;
- /* Entry in VSI's txq_map bitmap */
- u16 q_id;
- /* Relative index of Tx queue within TC */
- u16 q_handle;
- /* VSI index that Tx queue belongs to */
- u16 vsi_idx;
- /* TC number that Tx queue belongs to */
- u8 tc;
-};
-
int
ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
const u8 *macaddr);
@@ -33,24 +20,6 @@ int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi);
void ice_vsi_cfg_msix(struct ice_vsi *vsi);
-#ifdef CONFIG_PCI_IOV
-void
-ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx);
-
-void
-ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx);
-
-int
-ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
- u16 rel_vmvf_num, struct ice_ring *ring,
- struct ice_txq_meta *txq_meta);
-
-void ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
- struct ice_txq_meta *txq_meta);
-
-int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx);
-#endif /* CONFIG_PCI_IOV */
-
int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid);
int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid);
@@ -67,6 +36,10 @@ int
ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
u16 rel_vmvf_num);
+int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi);
+
+int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi);
+
int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc);
void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);
@@ -98,16 +71,8 @@ int ice_vsi_rebuild(struct ice_vsi *vsi);
bool ice_is_reset_in_progress(unsigned long *state);
-void ice_vsi_free_q_vectors(struct ice_vsi *vsi);
-
-void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector);
-
void ice_vsi_put_qs(struct ice_vsi *vsi);
-#ifdef CONFIG_DCB
-void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi);
-#endif /* CONFIG_DCB */
-
void ice_vsi_dis_irq(struct ice_vsi *vsi);
void ice_vsi_free_irq(struct ice_vsi *vsi);
@@ -118,6 +83,12 @@ void ice_vsi_free_tx_rings(struct ice_vsi *vsi);
int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena);
+void ice_update_tx_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes);
+
+void ice_update_rx_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes);
+
+void ice_vsi_cfg_frame_size(struct ice_vsi *vsi);
+
u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran);
char *ice_nvm_version_str(struct ice_hw *hw);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 214cd6eca405..363b284e8aa1 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -6,6 +6,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "ice.h"
+#include "ice_base.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"
@@ -1661,6 +1662,324 @@ free_q_irqs:
}
/**
+ * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP
+ * @vsi: VSI to setup Tx rings used by XDP
+ *
+ * Return 0 on success and a negative value on error
+ */
+static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
+{
+ struct device *dev = &vsi->back->pdev->dev;
+ int i;
+
+ for (i = 0; i < vsi->num_xdp_txq; i++) {
+ u16 xdp_q_idx = vsi->alloc_txq + i;
+ struct ice_ring *xdp_ring;
+
+ xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
+
+ if (!xdp_ring)
+ goto free_xdp_rings;
+
+ xdp_ring->q_index = xdp_q_idx;
+ xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
+ xdp_ring->ring_active = false;
+ xdp_ring->vsi = vsi;
+ xdp_ring->netdev = NULL;
+ xdp_ring->dev = dev;
+ xdp_ring->count = vsi->num_tx_desc;
+ vsi->xdp_rings[i] = xdp_ring;
+ if (ice_setup_tx_ring(xdp_ring))
+ goto free_xdp_rings;
+ ice_set_ring_xdp(xdp_ring);
+ xdp_ring->xsk_umem = ice_xsk_umem(xdp_ring);
+ }
+
+ return 0;
+
+free_xdp_rings:
+ for (; i >= 0; i--)
+ if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
+ ice_free_tx_ring(vsi->xdp_rings[i]);
+ return -ENOMEM;
+}
+
+/**
+ * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI
+ * @vsi: VSI to set the bpf prog on
+ * @prog: the bpf prog pointer
+ */
+static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
+{
+ struct bpf_prog *old_prog;
+ int i;
+
+ old_prog = xchg(&vsi->xdp_prog, prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ ice_for_each_rxq(vsi, i)
+ WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
+}
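The per-ring WRITE_ONCE() above pairs with a READ_ONCE() under rcu_read_lock() on the Rx hot path (see ice_clean_rx_irq() later in this patch), which keeps an in-flight program valid while it is swapped out via xchg()/bpf_prog_put(); the reader side is roughly this fragment:

	struct bpf_prog *xdp_prog;
	u32 act;

	rcu_read_lock();
	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
	if (xdp_prog)
		act = bpf_prog_run_xdp(xdp_prog, &xdp);
	rcu_read_unlock();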
+
+/**
+ * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
+ * @vsi: VSI to bring up Tx rings used by XDP
+ * @prog: bpf program that will be assigned to VSI
+ *
+ * Return 0 on success and a negative value on error
+ */
+int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
+{
+ u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+ int xdp_rings_rem = vsi->num_xdp_txq;
+ struct ice_pf *pf = vsi->back;
+ struct ice_qs_cfg xdp_qs_cfg = {
+ .qs_mutex = &pf->avail_q_mutex,
+ .pf_map = pf->avail_txqs,
+ .pf_map_size = pf->max_pf_txqs,
+ .q_count = vsi->num_xdp_txq,
+ .scatter_count = ICE_MAX_SCATTER_TXQS,
+ .vsi_map = vsi->txq_map,
+ .vsi_map_offset = vsi->alloc_txq,
+ .mapping_mode = ICE_VSI_MAP_CONTIG
+ };
+ enum ice_status status;
+ int i, v_idx;
+
+ vsi->xdp_rings = devm_kcalloc(&pf->pdev->dev, vsi->num_xdp_txq,
+ sizeof(*vsi->xdp_rings), GFP_KERNEL);
+ if (!vsi->xdp_rings)
+ return -ENOMEM;
+
+ vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;
+ if (__ice_vsi_get_qs(&xdp_qs_cfg))
+ goto err_map_xdp;
+
+ if (ice_xdp_alloc_setup_rings(vsi))
+ goto clear_xdp_rings;
+
+ /* follow the logic from ice_vsi_map_rings_to_vectors */
+ ice_for_each_q_vector(vsi, v_idx) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
+ int xdp_rings_per_v, q_id, q_base;
+
+ xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
+ vsi->num_q_vectors - v_idx);
+ q_base = vsi->num_xdp_txq - xdp_rings_rem;
+
+ for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
+ struct ice_ring *xdp_ring = vsi->xdp_rings[q_id];
+
+ xdp_ring->q_vector = q_vector;
+ xdp_ring->next = q_vector->tx.ring;
+ q_vector->tx.ring = xdp_ring;
+ }
+ xdp_rings_rem -= xdp_rings_per_v;
+ }
+
+ /* omit the scheduler update if in reset path; XDP queues will be
+ * taken into account at the end of ice_vsi_rebuild, where
+ * ice_cfg_vsi_lan is being called
+ */
+ if (ice_is_reset_in_progress(pf->state))
+ return 0;
+
+ /* tell the Tx scheduler that right now we have
+ * additional queues
+ */
+ for (i = 0; i < vsi->tc_cfg.numtc; i++)
+ max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq;
+
+ status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
+ max_txqs);
+ if (status) {
+ dev_err(&pf->pdev->dev,
+ "Failed VSI LAN queue config for XDP, error:%d\n",
+ status);
+ goto clear_xdp_rings;
+ }
+ ice_vsi_assign_bpf_prog(vsi, prog);
+
+ return 0;
+clear_xdp_rings:
+ for (i = 0; i < vsi->num_xdp_txq; i++)
+ if (vsi->xdp_rings[i]) {
+ kfree_rcu(vsi->xdp_rings[i], rcu);
+ vsi->xdp_rings[i] = NULL;
+ }
+
+err_map_xdp:
+ mutex_lock(&pf->avail_q_mutex);
+ for (i = 0; i < vsi->num_xdp_txq; i++) {
+ clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
+ vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
+ }
+ mutex_unlock(&pf->avail_q_mutex);
+
+ devm_kfree(&pf->pdev->dev, vsi->xdp_rings);
+ return -ENOMEM;
+}
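The DIV_ROUND_UP() loop above spreads the XDP rings as evenly as possible over the interrupt vectors; a small stand-alone model of the arithmetic (e.g. 8 XDP rings over 3 vectors yields a 3/3/2 split):

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		int num_xdp_txq = 8, num_q_vectors = 3, rem = num_xdp_txq;
		int v;

		for (v = 0; v < num_q_vectors; v++) {
			int per_v = DIV_ROUND_UP(rem, num_q_vectors - v);
			int base = num_xdp_txq - rem;

			/* prints rings 0..2, 3..5, 6..7 */
			printf("vector %d: rings %d..%d\n",
			       v, base, base + per_v - 1);
			rem -= per_v;
		}
		return 0;
	}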
+
+/**
+ * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
+ * @vsi: VSI to remove XDP rings
+ *
+ * Detach XDP rings from irq vectors, clean up the PF bitmap and free
+ * resources
+ */
+int ice_destroy_xdp_rings(struct ice_vsi *vsi)
+{
+ u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+ struct ice_pf *pf = vsi->back;
+ int i, v_idx;
+
+ /* q_vectors are freed in the reset path, so there's no point in
+ * detaching rings; if the rebuild was not triggered by a reset, the
+ * reset bits in pf->state won't be set, so additionally check the
+ * first q_vector against NULL
+ */
+ if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
+ goto free_qmap;
+
+ ice_for_each_q_vector(vsi, v_idx) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
+ struct ice_ring *ring;
+
+ ice_for_each_ring(ring, q_vector->tx)
+ if (!ring->tx_buf || !ice_ring_is_xdp(ring))
+ break;
+
+ /* restore the value of last node prior to XDP setup */
+ q_vector->tx.ring = ring;
+ }
+
+free_qmap:
+ mutex_lock(&pf->avail_q_mutex);
+ for (i = 0; i < vsi->num_xdp_txq; i++) {
+ clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
+ vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
+ }
+ mutex_unlock(&pf->avail_q_mutex);
+
+ for (i = 0; i < vsi->num_xdp_txq; i++)
+ if (vsi->xdp_rings[i]) {
+ if (vsi->xdp_rings[i]->desc)
+ ice_free_tx_ring(vsi->xdp_rings[i]);
+ kfree_rcu(vsi->xdp_rings[i], rcu);
+ vsi->xdp_rings[i] = NULL;
+ }
+
+ devm_kfree(&pf->pdev->dev, vsi->xdp_rings);
+ vsi->xdp_rings = NULL;
+
+ if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
+ return 0;
+
+ ice_vsi_assign_bpf_prog(vsi, NULL);
+
+ /* notify Tx scheduler that we destroyed XDP queues and bring
+ * back the old number of child nodes
+ */
+ for (i = 0; i < vsi->tc_cfg.numtc; i++)
+ max_txqs[i] = vsi->num_txq;
+
+ return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
+ max_txqs);
+}
+
+/**
+ * ice_xdp_setup_prog - Add or remove XDP eBPF program
+ * @vsi: VSI to setup XDP for
+ * @prog: XDP program
+ * @extack: netlink extended ack
+ */
+static int
+ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
+{
+ int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
+ bool if_running = netif_running(vsi->netdev);
+ int ret = 0, xdp_ring_err = 0;
+
+ if (frame_size > vsi->rx_buf_len) {
+ NL_SET_ERR_MSG_MOD(extack, "MTU too large for loading XDP");
+ return -EOPNOTSUPP;
+ }
+
+ /* need to stop netdev while setting up the program for Rx rings */
+ if (if_running && !test_and_set_bit(__ICE_DOWN, vsi->state)) {
+ ret = ice_down(vsi);
+ if (ret) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Preparing device for XDP attach failed");
+ return ret;
+ }
+ }
+
+ if (!ice_is_xdp_ena_vsi(vsi) && prog) {
+ vsi->num_xdp_txq = vsi->alloc_txq;
+ xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
+ if (xdp_ring_err)
+ NL_SET_ERR_MSG_MOD(extack,
+ "Setting up XDP Tx resources failed");
+ } else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
+ xdp_ring_err = ice_destroy_xdp_rings(vsi);
+ if (xdp_ring_err)
+ NL_SET_ERR_MSG_MOD(extack,
+ "Freeing XDP Tx resources failed");
+ } else {
+ ice_vsi_assign_bpf_prog(vsi, prog);
+ }
+
+ if (if_running)
+ ret = ice_up(vsi);
+
+ if (!ret && prog && vsi->xsk_umems) {
+ int i;
+
+ ice_for_each_rxq(vsi, i) {
+ struct ice_ring *rx_ring = vsi->rx_rings[i];
+
+ if (rx_ring->xsk_umem)
+ napi_schedule(&rx_ring->q_vector->napi);
+ }
+ }
+
+ return (ret || xdp_ring_err) ? -ENOMEM : 0;
+}
+
+/**
+ * ice_xdp - implements XDP handler
+ * @dev: netdevice
+ * @xdp: XDP command
+ */
+static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ struct ice_netdev_priv *np = netdev_priv(dev);
+ struct ice_vsi *vsi = np->vsi;
+
+ if (vsi->type != ICE_VSI_PF) {
+ NL_SET_ERR_MSG_MOD(xdp->extack,
+ "XDP can be loaded only on PF VSI");
+ return -EINVAL;
+ }
+
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
+ case XDP_QUERY_PROG:
+ xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0;
+ return 0;
+ case XDP_SETUP_XSK_UMEM:
+ return ice_xsk_umem_setup(vsi, xdp->xsk.umem,
+ xdp->xsk.queue_id);
+ default:
+ return -EINVAL;
+ }
+}
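For context, XDP_SETUP_PROG is reached from user space via the netlink XDP attach path; a hedged loader sketch using the libbpf API of that era (the object file and interface name are placeholders supplied on the command line):

	#include <bpf/libbpf.h>
	#include <net/if.h>

	int main(int argc, char **argv)
	{
		struct bpf_object *obj;
		int ifindex, prog_fd;

		if (argc != 3)
			return 1;
		ifindex = if_nametoindex(argv[2]);
		if (bpf_prog_load(argv[1], BPF_PROG_TYPE_XDP, &obj, &prog_fd))
			return 1;
		/* ends up in ice_xdp() with XDP_SETUP_PROG */
		return bpf_set_link_xdp_fd(ifindex, prog_fd, 0) < 0;
	}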
+
+/**
* ice_ena_misc_vector - enable the non-queue interrupts
* @pf: board private structure
*/
@@ -2219,6 +2538,8 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
status = -ENODEV;
goto unroll_vsi_setup;
}
+ /* netdev has to be configured before setting frame size */
+ ice_vsi_cfg_frame_size(vsi);
/* registering the NAPI handler requires both the queues and
* netdev to be created, which are done in ice_pf_vsi_setup()
@@ -3505,6 +3826,8 @@ int ice_vsi_cfg(struct ice_vsi *vsi)
ice_vsi_cfg_dcb_rings(vsi);
err = ice_vsi_cfg_lan_txqs(vsi);
+ if (!err && ice_is_xdp_ena_vsi(vsi))
+ err = ice_vsi_cfg_xdp_txqs(vsi);
if (!err)
err = ice_vsi_cfg_rxqs(vsi);
@@ -3920,6 +4243,13 @@ int ice_down(struct ice_vsi *vsi)
netdev_err(vsi->netdev,
"Failed stop Tx rings, VSI %d error %d\n",
vsi->vsi_num, tx_err);
+ if (!tx_err && ice_is_xdp_ena_vsi(vsi)) {
+ tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
+ if (tx_err)
+ netdev_err(vsi->netdev,
+ "Failed stop XDP rings, VSI %d error %d\n",
+ vsi->vsi_num, tx_err);
+ }
rx_err = ice_vsi_stop_rx_rings(vsi);
if (rx_err)
@@ -4329,6 +4659,18 @@ clear_recovery:
}
/**
+ * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
+ * @vsi: Pointer to VSI structure
+ */
+static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
+{
+ if (PAGE_SIZE >= 8192 || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
+ return ICE_RXBUF_2048 - XDP_PACKET_HEADROOM;
+ else
+ return ICE_RXBUF_3072;
+}
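A hedged back-of-the-envelope for the resulting limits, assuming XDP_PACKET_HEADROOM is 256 and ICE_ETH_PKT_HDR_PAD is 26 (14 + 4 + 2 * 4, per the definition added in ice_txrx.h below):

	/* 4K pages, no legacy-rx: max frame = ICE_RXBUF_3072 = 3072
	 *   -> max MTU = 3072 - 26 = 3046
	 * 8K+ pages or legacy-rx: max frame = 2048 - 256 = 1792
	 *   -> max MTU = 1792 - 26 = 1766
	 */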
+
+/**
* ice_change_mtu - NDO callback to change the MTU
* @netdev: network interface device structure
* @new_mtu: new value for maximum frame size
@@ -4347,6 +4689,16 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
+ if (ice_is_xdp_ena_vsi(vsi)) {
+ int frame_size = ice_max_xdp_frame_size(vsi);
+
+ if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) {
+ netdev_err(netdev, "max MTU for XDP usage is %d\n",
+ frame_size - ICE_ETH_PKT_HDR_PAD);
+ return -EINVAL;
+ }
+ }
+
if (new_mtu < netdev->min_mtu) {
netdev_err(netdev, "new MTU invalid. min_mtu is %d\n",
netdev->min_mtu);
@@ -4878,4 +5230,7 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_fdb_add = ice_fdb_add,
.ndo_fdb_del = ice_fdb_del,
.ndo_tx_timeout = ice_tx_timeout,
+ .ndo_bpf = ice_xdp,
+ .ndo_xdp_xmit = ice_xdp_xmit,
+ .ndo_xsk_wakeup = ice_xsk_wakeup,
};
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 33dd103035dc..40a29b9d3034 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -5,8 +5,13 @@
#include <linux/prefetch.h>
#include <linux/mm.h>
+#include <linux/bpf_trace.h>
+#include <net/xdp.h>
+#include "ice_txrx_lib.h"
+#include "ice_lib.h"
#include "ice.h"
#include "ice_dcb_lib.h"
+#include "ice_xsk.h"
#define ICE_RX_HDR_SIZE 256
@@ -19,7 +24,10 @@ static void
ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
{
if (tx_buf->skb) {
- dev_kfree_skb_any(tx_buf->skb);
+ if (ice_ring_is_xdp(ring))
+ page_frag_free(tx_buf->raw_buf);
+ else
+ dev_kfree_skb_any(tx_buf->skb);
if (dma_unmap_len(tx_buf, len))
dma_unmap_single(ring->dev,
dma_unmap_addr(tx_buf, dma),
@@ -51,6 +59,11 @@ void ice_clean_tx_ring(struct ice_ring *tx_ring)
{
u16 i;
+ if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_umem) {
+ ice_xsk_clean_xdp_ring(tx_ring);
+ goto tx_skip_free;
+ }
+
/* ring already cleared, nothing to do */
if (!tx_ring->tx_buf)
return;
@@ -59,6 +72,7 @@ void ice_clean_tx_ring(struct ice_ring *tx_ring)
for (i = 0; i < tx_ring->count; i++)
ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);
+tx_skip_free:
memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);
/* Zero out the descriptor ring */
@@ -136,8 +150,11 @@ static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
total_bytes += tx_buf->bytecount;
total_pkts += tx_buf->gso_segs;
- /* free the skb */
- napi_consume_skb(tx_buf->skb, napi_budget);
+ if (ice_ring_is_xdp(tx_ring))
+ page_frag_free(tx_buf->raw_buf);
+ else
+ /* free the skb */
+ napi_consume_skb(tx_buf->skb, napi_budget);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -188,12 +205,11 @@ static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
i += tx_ring->count;
tx_ring->next_to_clean = i;
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->stats.bytes += total_bytes;
- tx_ring->stats.pkts += total_pkts;
- u64_stats_update_end(&tx_ring->syncp);
- tx_ring->q_vector->tx.total_bytes += total_bytes;
- tx_ring->q_vector->tx.total_pkts += total_pkts;
+
+ ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);
+
+ if (ice_ring_is_xdp(tx_ring))
+ return !!budget;
netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
total_bytes);
@@ -273,6 +289,11 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring)
if (!rx_ring->rx_buf)
return;
+ if (rx_ring->xsk_umem) {
+ ice_xsk_clean_rx_ring(rx_ring);
+ goto rx_skip_free;
+ }
+
/* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
@@ -289,10 +310,11 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring)
*/
dma_sync_single_range_for_cpu(dev, rx_buf->dma,
rx_buf->page_offset,
- ICE_RXBUF_2048, DMA_FROM_DEVICE);
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
/* free resources associated with mapping */
- dma_unmap_page_attrs(dev, rx_buf->dma, PAGE_SIZE,
+ dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),
DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
@@ -300,6 +322,7 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring)
rx_buf->page_offset = 0;
}
+rx_skip_free:
memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);
/* Zero out the descriptor ring */
@@ -319,6 +342,10 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring)
void ice_free_rx_ring(struct ice_ring *rx_ring)
{
ice_clean_rx_ring(rx_ring);
+ if (rx_ring->vsi->type == ICE_VSI_PF)
+ if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
+ rx_ring->xdp_prog = NULL;
devm_kfree(rx_ring->dev, rx_ring->rx_buf);
rx_ring->rx_buf = NULL;
@@ -363,6 +390,15 @@ int ice_setup_rx_ring(struct ice_ring *rx_ring)
rx_ring->next_to_use = 0;
rx_ring->next_to_clean = 0;
+
+ if (ice_is_xdp_ena_vsi(rx_ring->vsi))
+ WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);
+
+ if (rx_ring->vsi->type == ICE_VSI_PF &&
+ !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
+ if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
+ rx_ring->q_index))
+ goto err;
return 0;
err:
@@ -372,34 +408,110 @@ err:
}
/**
- * ice_release_rx_desc - Store the new tail and head values
- * @rx_ring: ring to bump
- * @val: new head index
+ * ice_rx_offset - Return expected offset into page to access data
+ * @rx_ring: Ring we are requesting offset of
+ *
+ * Returns the expected offset into the data buffer for the given ring.
*/
-static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
+static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
{
- u16 prev_ntu = rx_ring->next_to_use;
+ if (ice_ring_uses_build_skb(rx_ring))
+ return ICE_SKB_PAD;
+ else if (ice_is_xdp_ena_vsi(rx_ring->vsi))
+ return XDP_PACKET_HEADROOM;
- rx_ring->next_to_use = val;
+ return 0;
+}
- /* update next to alloc since we have filled the ring */
- rx_ring->next_to_alloc = val;
+/**
+ * ice_run_xdp - Executes an XDP program on initialized xdp_buff
+ * @rx_ring: Rx ring
+ * @xdp: xdp_buff used as input to the XDP program
+ * @xdp_prog: XDP program to run
+ *
+ * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
+ */
+static int
+ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
+ struct bpf_prog *xdp_prog)
+{
+ int err, result = ICE_XDP_PASS;
+ struct ice_ring *xdp_ring;
+ u32 act;
- /* QRX_TAIL will be updated with any tail value, but hardware ignores
- * the lower 3 bits. This makes it so we only bump tail on meaningful
- * boundaries. Also, this allows us to bump tail on intervals of 8 up to
- * the budget depending on the current traffic load.
- */
- val &= ~0x7;
- if (prev_ntu != val) {
- /* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
- */
- wmb();
- writel(val, rx_ring->tail);
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ xdp_ring = rx_ring->vsi->xdp_rings[smp_processor_id()];
+ result = ice_xmit_xdp_buff(xdp, xdp_ring);
+ break;
+ case XDP_REDIRECT:
+ err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
+ result = !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ /* fallthrough -- not supported action */
+ case XDP_ABORTED:
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+ /* fallthrough -- handle aborts by dropping frame */
+ case XDP_DROP:
+ result = ICE_XDP_CONSUMED;
+ break;
+ }
+
+ return result;
+}
+
+/**
+ * ice_xdp_xmit - submit packets to XDP ring for transmission
+ * @dev: netdev
+ * @n: number of XDP frames to be transmitted
+ * @frames: XDP frames to be transmitted
+ * @flags: transmit flags
+ *
+ * Returns the number of frames successfully sent. Frames that fail are
+ * freed via the XDP return API.
+ * For error cases, a negative errno code is returned and no frames are
+ * transmitted (the caller must handle freeing the frames).
+ */
+int
+ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags)
+{
+ struct ice_netdev_priv *np = netdev_priv(dev);
+ unsigned int queue_index = smp_processor_id();
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_ring *xdp_ring;
+ int drops = 0, i;
+
+ if (test_bit(__ICE_DOWN, vsi->state))
+ return -ENETDOWN;
+
+ if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq)
+ return -ENXIO;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ xdp_ring = vsi->xdp_rings[queue_index];
+ for (i = 0; i < n; i++) {
+ struct xdp_frame *xdpf = frames[i];
+ int err;
+
+ err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
+ if (err != ICE_XDP_TX) {
+ xdp_return_frame_rx_napi(xdpf);
+ drops++;
+ }
}
+
+ if (unlikely(flags & XDP_XMIT_FLUSH))
+ ice_xdp_ring_update_tail(xdp_ring);
+
+ return n - drops;
}
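ndo_xdp_xmit is typically driven by an XDP_REDIRECT from another (or the same) interface; a minimal sketch of such a program, assuming BTF-style map definitions (the map name and index are illustrative):

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	struct {
		__uint(type, BPF_MAP_TYPE_DEVMAP);
		__uint(max_entries, 1);
		__type(key, __u32);
		__type(value, __u32);
	} tx_port SEC(".maps");

	SEC("xdp")
	int xdp_redirect_prog(struct xdp_md *ctx)
	{
		/* frames redirected here reach the target driver's
		 * ndo_xdp_xmit, i.e. ice_xdp_xmit() for ice
		 */
		return bpf_redirect_map(&tx_port, 0, 0);
	}

	char _license[] SEC("license") = "GPL";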
/**
@@ -423,28 +535,28 @@ ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
}
/* alloc new page for storage */
- page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
+ page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
if (unlikely(!page)) {
rx_ring->rx_stats.alloc_page_failed++;
return false;
}
/* map page for use */
- dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
+ dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring),
DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
/* if mapping failed free memory back to system since
* there isn't much point in holding memory we can't use
*/
if (dma_mapping_error(rx_ring->dev, dma)) {
- __free_pages(page, 0);
+ __free_pages(page, ice_rx_pg_order(rx_ring));
rx_ring->rx_stats.alloc_page_failed++;
return false;
}
bi->dma = dma;
bi->page = page;
- bi->page_offset = 0;
+ bi->page_offset = ice_rx_offset(rx_ring);
page_ref_add(page, USHRT_MAX - 1);
bi->pagecnt_bias = USHRT_MAX;
@@ -486,7 +598,7 @@ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
/* sync the buffer for use by the device */
dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
bi->page_offset,
- ICE_RXBUF_2048,
+ rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
/* Refresh the desc even if buffer_addrs didn't change
@@ -557,9 +669,6 @@ ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
*/
static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
{
-#if (PAGE_SIZE >= 8192)
- unsigned int last_offset = PAGE_SIZE - ICE_RXBUF_2048;
-#endif
unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
struct page *page = rx_buf->page;
@@ -572,7 +681,9 @@ static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
if (unlikely((page_count(page) - pagecnt_bias) > 1))
return false;
#else
- if (rx_buf->page_offset > last_offset)
+#define ICE_LAST_OFFSET \
+ (SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048)
+ if (rx_buf->page_offset > ICE_LAST_OFFSET)
return false;
#endif /* PAGE_SIZE < 8192) */
@@ -590,6 +701,7 @@ static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
/**
* ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
+ * @rx_ring: Rx descriptor ring to transact packets on
* @rx_buf: buffer containing page to add
* @skb: sk_buff to place the data into
* @size: packet length from rx_desc
@@ -599,13 +711,13 @@ static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
* The function will then update the page offset.
*/
static void
-ice_add_rx_frag(struct ice_rx_buf *rx_buf, struct sk_buff *skb,
- unsigned int size)
+ice_add_rx_frag(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
+ struct sk_buff *skb, unsigned int size)
{
#if (PAGE_SIZE >= 8192)
- unsigned int truesize = SKB_DATA_ALIGN(size);
+ unsigned int truesize = SKB_DATA_ALIGN(size + ice_rx_offset(rx_ring));
#else
- unsigned int truesize = ICE_RXBUF_2048;
+ unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#endif
if (!size)
@@ -679,10 +791,64 @@ ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
}
/**
+ * ice_build_skb - Build skb around an existing buffer
+ * @rx_ring: Rx descriptor ring to transact packets on
+ * @rx_buf: Rx buffer to pull data from
+ * @xdp: xdp_buff pointing to the data
+ *
+ * This function builds an skb around an existing Rx buffer, taking care
+ * to set up the skb correctly and avoid any memcpy overhead.
+ */
+static struct sk_buff *
+ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
+ struct xdp_buff *xdp)
+{
+ unsigned int metasize = xdp->data - xdp->data_meta;
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
+#else
+ unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+ SKB_DATA_ALIGN(xdp->data_end -
+ xdp->data_hard_start);
+#endif
+ struct sk_buff *skb;
+
+ /* Prefetch first cache line of first page. If xdp->data_meta
+ * is unused, this points exactly to xdp->data, otherwise we
+ * likely have a consumer accessing the first few bytes of
+ * metadata, and then the actual data.
+ */
+ prefetch(xdp->data_meta);
+#if L1_CACHE_BYTES < 128
+ prefetch((void *)(xdp->data + L1_CACHE_BYTES));
+#endif
+ /* build an skb around the page buffer */
+ skb = build_skb(xdp->data_hard_start, truesize);
+ if (unlikely(!skb))
+ return NULL;
+
+ /* must record the Rx queue, otherwise OS features such as
+ * symmetric queues won't work
+ */
+ skb_record_rx_queue(skb, rx_ring->q_index);
+
+ /* update pointers within the skb to store the data */
+ skb_reserve(skb, xdp->data - xdp->data_hard_start);
+ __skb_put(skb, xdp->data_end - xdp->data);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
+
+ /* buffer is used by skb, update page_offset */
+ ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
+
+ return skb;
+}
+
+/**
* ice_construct_skb - Allocate skb and populate it
* @rx_ring: Rx descriptor ring to transact packets on
* @rx_buf: Rx buffer to pull data from
- * @size: the length of the packet
+ * @xdp: xdp_buff pointing to the data
*
* This function allocates an skb. It then populates it with the page
* data from the current receive descriptor, taking care to set up the
@@ -690,16 +856,16 @@ ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
*/
static struct sk_buff *
ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
- unsigned int size)
+ struct xdp_buff *xdp)
{
- void *va = page_address(rx_buf->page) + rx_buf->page_offset;
+ unsigned int size = xdp->data_end - xdp->data;
unsigned int headlen;
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(va);
+ prefetch(xdp->data);
#if L1_CACHE_BYTES < 128
- prefetch((u8 *)va + L1_CACHE_BYTES);
+ prefetch((void *)(xdp->data + L1_CACHE_BYTES));
#endif /* L1_CACHE_BYTES */
/* allocate a skb to store the frags */
@@ -712,10 +878,11 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
/* Determine available headroom for copy */
headlen = size;
if (headlen > ICE_RX_HDR_SIZE)
- headlen = eth_get_headlen(skb->dev, va, ICE_RX_HDR_SIZE);
+ headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);
/* align pull length to size of long to optimize memcpy performance */
- memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
+ memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen,
+ sizeof(long)));
/* if we exhaust the linear part then add what is left as a frag */
size -= headlen;
@@ -723,7 +890,7 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
#if (PAGE_SIZE >= 8192)
unsigned int truesize = SKB_DATA_ALIGN(size);
#else
- unsigned int truesize = ICE_RXBUF_2048;
+ unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#endif
skb_add_rx_frag(skb, 0, rx_buf->page,
rx_buf->page_offset + headlen, size, truesize);
@@ -745,11 +912,18 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
* @rx_ring: Rx descriptor ring to transact packets on
* @rx_buf: Rx buffer to pull data from
*
- * This function will clean up the contents of the rx_buf. It will
- * either recycle the buffer or unmap it and free the associated resources.
+ * This function will update next_to_clean and then clean up the contents
+ * of the rx_buf. It will either recycle the buffer or unmap it and free
+ * the associated resources.
*/
static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
{
+ u32 ntc = rx_ring->next_to_clean + 1;
+
+ /* fetch, update, and store next to clean */
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+
if (!rx_buf)
return;
@@ -759,8 +933,9 @@ static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
rx_ring->rx_stats.page_reuse_count++;
} else {
/* we are not reusing the buffer so unmap it */
- dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, PAGE_SIZE,
- DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
+ dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
+ ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
+ ICE_RX_DMA_ATTR);
__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
}
@@ -791,206 +966,31 @@ static bool ice_cleanup_headers(struct sk_buff *skb)
}
/**
- * ice_test_staterr - tests bits in Rx descriptor status and error fields
- * @rx_desc: pointer to receive descriptor (in le64 format)
- * @stat_err_bits: value to mask
- *
- * This function does some fast chicanery in order to return the
- * value of the mask which is really only used for boolean tests.
- * The status_error_len doesn't need to be shifted because it begins
- * at offset zero.
- */
-static bool
-ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, const u16 stat_err_bits)
-{
- return !!(rx_desc->wb.status_error0 &
- cpu_to_le16(stat_err_bits));
-}
-
-/**
* ice_is_non_eop - process handling of non-EOP buffers
* @rx_ring: Rx ring being processed
* @rx_desc: Rx descriptor for current buffer
* @skb: Current socket buffer containing buffer in progress
*
- * This function updates next to clean. If the buffer is an EOP buffer
- * this function exits returning false, otherwise it will place the
- * sk_buff in the next buffer to be chained and return true indicating
- * that this is in fact a non-EOP buffer.
+ * If the buffer is an EOP buffer, this function exits returning false,
+ * otherwise return true indicating that this is in fact a non-EOP buffer.
*/
static bool
ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
struct sk_buff *skb)
{
- u32 ntc = rx_ring->next_to_clean + 1;
-
- /* fetch, update, and store next to clean */
- ntc = (ntc < rx_ring->count) ? ntc : 0;
- rx_ring->next_to_clean = ntc;
-
- prefetch(ICE_RX_DESC(rx_ring, ntc));
-
/* if we are the last buffer then there is nothing else to do */
#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF)))
return false;
/* place skb in next buffer to be received */
- rx_ring->rx_buf[ntc].skb = skb;
+ rx_ring->rx_buf[rx_ring->next_to_clean].skb = skb;
rx_ring->rx_stats.non_eop_descs++;
return true;
}
/**
- * ice_ptype_to_htype - get a hash type
- * @ptype: the ptype value from the descriptor
- *
- * Returns a hash type to be used by skb_set_hash
- */
-static enum pkt_hash_types ice_ptype_to_htype(u8 __always_unused ptype)
-{
- return PKT_HASH_TYPE_NONE;
-}
-
-/**
- * ice_rx_hash - set the hash value in the skb
- * @rx_ring: descriptor ring
- * @rx_desc: specific descriptor
- * @skb: pointer to current skb
- * @rx_ptype: the ptype value from the descriptor
- */
-static void
-ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
- struct sk_buff *skb, u8 rx_ptype)
-{
- struct ice_32b_rx_flex_desc_nic *nic_mdid;
- u32 hash;
-
- if (!(rx_ring->netdev->features & NETIF_F_RXHASH))
- return;
-
- if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)
- return;
-
- nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
- hash = le32_to_cpu(nic_mdid->rss_hash);
- skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
-}
-
-/**
- * ice_rx_csum - Indicate in skb if checksum is good
- * @ring: the ring we care about
- * @skb: skb currently being received and modified
- * @rx_desc: the receive descriptor
- * @ptype: the packet type decoded by hardware
- *
- * skb->protocol must be set before this function is called
- */
-static void
-ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,
- union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
-{
- struct ice_rx_ptype_decoded decoded;
- u32 rx_error, rx_status;
- bool ipv4, ipv6;
-
- rx_status = le16_to_cpu(rx_desc->wb.status_error0);
- rx_error = rx_status;
-
- decoded = ice_decode_rx_desc_ptype(ptype);
-
- /* Start with CHECKSUM_NONE and by default csum_level = 0 */
- skb->ip_summed = CHECKSUM_NONE;
- skb_checksum_none_assert(skb);
-
- /* check if Rx checksum is enabled */
- if (!(ring->netdev->features & NETIF_F_RXCSUM))
- return;
-
- /* check if HW has decoded the packet and checksum */
- if (!(rx_status & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
- return;
-
- if (!(decoded.known && decoded.outer_ip))
- return;
-
- ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
- (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
- ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
- (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);
-
- if (ipv4 && (rx_error & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
- BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
- goto checksum_fail;
- else if (ipv6 && (rx_status &
- (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
- goto checksum_fail;
-
- /* check for L4 errors and handle packets that were not able to be
- * checksummed due to arrival speed
- */
- if (rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
- goto checksum_fail;
-
- /* Only report checksum unnecessary for TCP, UDP, or SCTP */
- switch (decoded.inner_prot) {
- case ICE_RX_PTYPE_INNER_PROT_TCP:
- case ICE_RX_PTYPE_INNER_PROT_UDP:
- case ICE_RX_PTYPE_INNER_PROT_SCTP:
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- default:
- break;
- }
- return;
-
-checksum_fail:
- ring->vsi->back->hw_csum_rx_error++;
-}
-
-/**
- * ice_process_skb_fields - Populate skb header fields from Rx descriptor
- * @rx_ring: Rx descriptor ring packet is being transacted on
- * @rx_desc: pointer to the EOP Rx descriptor
- * @skb: pointer to current skb being populated
- * @ptype: the packet type decoded by hardware
- *
- * This function checks the ring, descriptor, and packet information in
- * order to populate the hash, checksum, VLAN, protocol, and
- * other fields within the skb.
- */
-static void
-ice_process_skb_fields(struct ice_ring *rx_ring,
- union ice_32b_rx_flex_desc *rx_desc,
- struct sk_buff *skb, u8 ptype)
-{
- ice_rx_hash(rx_ring, rx_desc, skb, ptype);
-
- /* modifies the skb - consumes the enet header */
- skb->protocol = eth_type_trans(skb, rx_ring->netdev);
-
- ice_rx_csum(rx_ring, skb, rx_desc, ptype);
-}
-
-/**
- * ice_receive_skb - Send a completed packet up the stack
- * @rx_ring: Rx ring in play
- * @skb: packet to send up
- * @vlan_tag: VLAN tag for packet
- *
- * This function sends the completed packet (via. skb) up the stack using
- * gro receive functions (with/without VLAN tag)
- */
-static void
-ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
-{
- if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
- (vlan_tag & VLAN_VID_MASK))
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
- napi_gro_receive(&rx_ring->q_vector->napi, skb);
-}
-
-/**
* ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
* @rx_ring: Rx descriptor ring to transact packets on
* @budget: Total limit on number of packets to process
@@ -1006,8 +1006,13 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
+ unsigned int xdp_res, xdp_xmit = 0;
+ struct bpf_prog *xdp_prog = NULL;
+ struct xdp_buff xdp;
bool failure;
+ xdp.rxq = &rx_ring->xdp_rxq;
+
/* start the loop to process Rx packets bounded by 'budget' */
while (likely(total_rx_pkts < (unsigned int)budget)) {
union ice_32b_rx_flex_desc *rx_desc;
@@ -1042,10 +1047,57 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
/* retrieve a buffer from the ring */
rx_buf = ice_get_rx_buf(rx_ring, &skb, size);
+ if (!size) {
+ xdp.data = NULL;
+ xdp.data_end = NULL;
+ xdp.data_hard_start = NULL;
+ xdp.data_meta = NULL;
+ goto construct_skb;
+ }
+
+ xdp.data = page_address(rx_buf->page) + rx_buf->page_offset;
+ xdp.data_hard_start = xdp.data - ice_rx_offset(rx_ring);
+ xdp.data_meta = xdp.data;
+ xdp.data_end = xdp.data + size;
+
+ rcu_read_lock();
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+ if (!xdp_prog) {
+ rcu_read_unlock();
+ goto construct_skb;
+ }
+
+ xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog);
+ rcu_read_unlock();
+ if (!xdp_res)
+ goto construct_skb;
+ if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
+ unsigned int truesize;
+
+#if (PAGE_SIZE < 8192)
+ truesize = ice_rx_pg_size(rx_ring) / 2;
+#else
+ truesize = SKB_DATA_ALIGN(ice_rx_offset(rx_ring) +
+ size);
+#endif
+ xdp_xmit |= xdp_res;
+ ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
+ } else {
+ rx_buf->pagecnt_bias++;
+ }
+ total_rx_bytes += size;
+ total_rx_pkts++;
+
+ cleaned_count++;
+ ice_put_rx_buf(rx_ring, rx_buf);
+ continue;
+construct_skb:
if (skb)
- ice_add_rx_frag(rx_buf, skb, size);
+ ice_add_rx_frag(rx_ring, rx_buf, skb, size);
+ else if (ice_ring_uses_build_skb(rx_ring))
+ skb = ice_build_skb(rx_ring, rx_buf, &xdp);
else
- skb = ice_construct_skb(rx_ring, rx_buf, size);
+ skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
/* exit if we failed to retrieve a buffer */
if (!skb) {
@@ -1099,13 +1151,10 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
/* return up to cleaned_count buffers to hardware */
failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);
- /* update queue and vector specific stats */
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->stats.pkts += total_rx_pkts;
- rx_ring->stats.bytes += total_rx_bytes;
- u64_stats_update_end(&rx_ring->syncp);
- rx_ring->q_vector->rx.total_pkts += total_rx_pkts;
- rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
+ if (xdp_prog)
+ ice_finalize_xdp_rx(rx_ring, xdp_xmit);
+
+ ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes);
/* guarantee a trip back through this routine if there was a failure */
return failure ? budget : (int)total_rx_pkts;
@@ -1483,9 +1532,14 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
/* Since the actual Tx work is minimal, we can give the Tx a larger
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
- ice_for_each_ring(ring, q_vector->tx)
- if (!ice_clean_tx_irq(ring, budget))
+ ice_for_each_ring(ring, q_vector->tx) {
+ bool wd = ring->xsk_umem ?
+ ice_clean_tx_irq_zc(ring, budget) :
+ ice_clean_tx_irq(ring, budget);
+
+ if (!wd)
clean_complete = false;
+ }
/* Handle case where we are called by netpoll with a budget of 0 */
if (unlikely(budget <= 0))
@@ -1505,7 +1559,13 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
ice_for_each_ring(ring, q_vector->rx) {
int cleaned;
- cleaned = ice_clean_rx_irq(ring, budget_per_ring);
+ /* A dedicated path for zero-copy lets us make a single
+ * comparison in IRQ context instead of many inside
+ * ice_clean_rx_irq, and keeps the codebase cleaner.
+ */
+ cleaned = ring->xsk_umem ?
+ ice_clean_rx_irq_zc(ring, budget_per_ring) :
+ ice_clean_rx_irq(ring, budget_per_ring);
work_done += cleaned;
/* if we clean as many as budgeted, we must not be done */
if (cleaned >= budget_per_ring)
@@ -1527,17 +1587,6 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
return min_t(int, work_done, budget - 1);
}
-/* helper function for building cmd/type/offset */
-static __le64
-build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
-{
- return cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
- (td_cmd << ICE_TXD_QW1_CMD_S) |
- (td_offset << ICE_TXD_QW1_OFFSET_S) |
- ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
- (td_tag << ICE_TXD_QW1_L2TAG1_S));
-}
-
/**
* __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
* @tx_ring: the ring to be checked
@@ -1689,9 +1738,9 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
i = 0;
/* write last descriptor with RS and EOP bits */
- td_cmd |= (u64)(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS);
- tx_desc->cmd_type_offset_bsz =
- build_ctob(td_cmd, td_offset, size, td_tag);
+ td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD;
+ tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, size,
+ td_tag);
/* Force memory writes to complete before letting h/w know there
* are new descriptors to fetch.
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 94a9280193e2..a84cc0e6dd27 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -4,8 +4,12 @@
#ifndef _ICE_TXRX_H_
#define _ICE_TXRX_H_
+#include "ice_type.h"
+
#define ICE_DFLT_IRQ_WORK 256
+#define ICE_RXBUF_3072 3072
#define ICE_RXBUF_2048 2048
+#define ICE_RXBUF_1536 1536
#define ICE_MAX_CHAINED_RX_BUFS 5
#define ICE_MAX_BUF_TXD 8
#define ICE_MIN_TX_LEN 17
@@ -22,6 +26,71 @@
#define ICE_RX_BUF_WRITE 16 /* Must be power of 2 */
#define ICE_MAX_TXQ_PER_TXQG 128
+/* Attempt to maximize the headroom available for incoming frames. We use a 2K
+ * buffer for MTUs <= 1500 and need 1536/1534 to store the data for the frame.
+ * This leaves us with 512 bytes of room. From that we need to deduct the
+ * space needed for the shared info and the padding needed to IP align the
+ * frame.
+ *
+ * Note: For cache line sizes 256 or larger this value is going to end
+ * up negative. In these cases we should fall back to the legacy
+ * receive path.
+ */
+#if (PAGE_SIZE < 8192)
+#define ICE_2K_TOO_SMALL_WITH_PADDING \
+((NET_SKB_PAD + ICE_RXBUF_1536) > SKB_WITH_OVERHEAD(ICE_RXBUF_2048))
+
+/**
+ * ice_compute_pad - compute the padding
+ * @rx_buf_len: buffer length
+ *
+ * Round the given buffer length up to a half-page boundary, then subtract
+ * the skb_shared_info overhead and the buffer length itself; what remains
+ * is the space available for padding
+ */
+static inline int ice_compute_pad(int rx_buf_len)
+{
+ int half_page_size;
+
+ half_page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
+ return SKB_WITH_OVERHEAD(half_page_size) - rx_buf_len;
+}
+
+/**
+ * ice_skb_pad - determine the padding that we can supply
+ *
+ * Figure out the right Rx buffer size and based on that calculate the
+ * padding
+ */
+static inline int ice_skb_pad(void)
+{
+ int rx_buf_len;
+
+ /* If a 2K buffer cannot handle a standard Ethernet frame then
+ * optimize padding for a 3K buffer instead of a 1.5K buffer.
+ *
+ * For a 3K buffer we need to add enough padding to allow for
+ * tailroom due to NET_IP_ALIGN possibly shifting us out of
+ * cache-line alignment.
+ */
+ if (ICE_2K_TOO_SMALL_WITH_PADDING)
+ rx_buf_len = ICE_RXBUF_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
+ else
+ rx_buf_len = ICE_RXBUF_1536;
+
+ /* if needed make room for NET_IP_ALIGN */
+ rx_buf_len -= NET_IP_ALIGN;
+
+ return ice_compute_pad(rx_buf_len);
+}
+
+#define ICE_SKB_PAD ice_skb_pad()
+#else
+#define ICE_2K_TOO_SMALL_WITH_PADDING false
+#define ICE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+#endif
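A hedged worked example for the common case (x86-64: 4K pages, 64-byte cache lines, NET_IP_ALIGN == 0, and SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) assumed to be 320):

	/* SKB_WITH_OVERHEAD(2048)      = 2048 - 320 = 1728
	 * NET_SKB_PAD + ICE_RXBUF_1536 =   64 + 1536 = 1600 <= 1728
	 *   -> ICE_2K_TOO_SMALL_WITH_PADDING is false
	 * ICE_SKB_PAD = ice_compute_pad(1536)
	 *             = SKB_WITH_OVERHEAD(2048) - 1536 = 192
	 */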
+
/* We are assuming that the cache line is always 64 Bytes here for ice.
* In order to make sure that is a correct assumption there is a check in probe
* to print a warning if the read from GLPCI_CNF2 tells us that the cache line
@@ -49,12 +118,24 @@
#define ICE_TX_FLAGS_VLAN_PR_S 29
#define ICE_TX_FLAGS_VLAN_S 16
+#define ICE_XDP_PASS 0
+#define ICE_XDP_CONSUMED BIT(0)
+#define ICE_XDP_TX BIT(1)
+#define ICE_XDP_REDIR BIT(2)
+
#define ICE_RX_DMA_ATTR \
(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+#define ICE_ETH_PKT_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
+
+#define ICE_TXD_LAST_DESC_CMD (ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS)
+
struct ice_tx_buf {
struct ice_tx_desc *next_to_watch;
- struct sk_buff *skb;
+ union {
+ struct sk_buff *skb;
+ void *raw_buf; /* used for XDP */
+ };
unsigned int bytecount;
unsigned short gso_segs;
u32 tx_flags;
@@ -76,9 +157,17 @@ struct ice_tx_offload_params {
struct ice_rx_buf {
struct sk_buff *skb;
dma_addr_t dma;
- struct page *page;
- unsigned int page_offset;
- u16 pagecnt_bias;
+ union {
+ struct {
+ struct page *page;
+ unsigned int page_offset;
+ u16 pagecnt_bias;
+ };
+ struct {
+ void *addr;
+ u64 handle;
+ };
+ };
};
struct ice_q_stats {
@@ -198,18 +287,44 @@ struct ice_ring {
};
struct rcu_head rcu; /* to avoid race on free */
+ struct bpf_prog *xdp_prog;
+ struct xdp_umem *xsk_umem;
+ struct zero_copy_allocator zca;
+ /* CL3 - 3rd cacheline starts here */
+ struct xdp_rxq_info xdp_rxq;
/* CLX - the below items are only accessed infrequently and should be
* in their own cache line if possible
*/
+#define ICE_TX_FLAGS_RING_XDP BIT(0)
+#define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1)
+ u8 flags;
dma_addr_t dma; /* physical address of ring */
unsigned int size; /* length of descriptor ring in bytes */
u32 txq_teid; /* Added Tx queue TEID */
u16 rx_buf_len;
-#ifdef CONFIG_DCB
u8 dcb_tc; /* Traffic class of ring */
-#endif /* CONFIG_DCB */
} ____cacheline_internodealigned_in_smp;
+static inline bool ice_ring_uses_build_skb(struct ice_ring *ring)
+{
+ return !!(ring->flags & ICE_RX_FLAGS_RING_BUILD_SKB);
+}
+
+static inline void ice_set_ring_build_skb_ena(struct ice_ring *ring)
+{
+ ring->flags |= ICE_RX_FLAGS_RING_BUILD_SKB;
+}
+
+static inline void ice_clear_ring_build_skb_ena(struct ice_ring *ring)
+{
+ ring->flags &= ~ICE_RX_FLAGS_RING_BUILD_SKB;
+}
+
+static inline bool ice_ring_is_xdp(struct ice_ring *ring)
+{
+ return !!(ring->flags & ICE_TX_FLAGS_RING_XDP);
+}
+
struct ice_ring_container {
/* head of linked-list of rings */
struct ice_ring *ring;
@@ -230,6 +345,19 @@ struct ice_ring_container {
#define ice_for_each_ring(pos, head) \
for (pos = (head).ring; pos; pos = pos->next)
+static inline unsigned int ice_rx_pg_order(struct ice_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+ if (ring->rx_buf_len > (PAGE_SIZE / 2))
+ return 1;
+#endif
+ return 0;
+}
+
+#define ice_rx_pg_size(_ring) (PAGE_SIZE << ice_rx_pg_order(_ring))
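In practice (a sketch, assuming 4K pages) the order bump preserves the half-page flipping scheme for the larger XDP buffers:

	/* PAGE_SIZE == 4096:
	 *   rx_buf_len == 2048 -> order 0, ice_rx_pg_size == 4096
	 *   rx_buf_len == 3072 -> order 1, ice_rx_pg_size == 8192
	 * each buffer occupies half of the (compound) page, so the
	 * page-flipping reuse logic is unchanged
	 */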
+
+union ice_32b_rx_flex_desc;
+
bool ice_alloc_rx_bufs(struct ice_ring *rxr, u16 cleaned_count);
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev);
void ice_clean_tx_ring(struct ice_ring *tx_ring);
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
new file mode 100644
index 000000000000..35bbc4ff603c
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019, Intel Corporation. */
+
+#include "ice_txrx_lib.h"
+
+/**
+ * ice_release_rx_desc - Store the new tail and head values
+ * @rx_ring: ring to bump
+ * @val: new head index
+ */
+void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
+{
+ u16 prev_ntu = rx_ring->next_to_use;
+
+ rx_ring->next_to_use = val;
+
+ /* update next to alloc since we have filled the ring */
+ rx_ring->next_to_alloc = val;
+
+ /* QRX_TAIL will be updated with any tail value, but hardware ignores
+ * the lower 3 bits. This makes it so we only bump tail on meaningful
+ * boundaries. Also, this allows us to bump tail on intervals of 8 up to
+ * the budget depending on the current traffic load.
+ */
+ val &= ~0x7;
+ if (prev_ntu != val) {
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(val, rx_ring->tail);
+ }
+}
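Since hardware ignores the low 3 bits of QRX_TAIL, the masked value only ever advances on 8-descriptor boundaries; a quick illustration of the batching effect:

	/* e.g. val == 13 -> 13 & ~0x7 == 8; val == 21 -> 16; the tail
	 * register therefore only ever sees multiples of 8, and the MMIO
	 * write is skipped when that masked value matches next_to_use
	 * from before the refill
	 */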
+
+/**
+ * ice_ptype_to_htype - get a hash type
+ * @ptype: the ptype value from the descriptor
+ *
+ * Returns a hash type to be used by skb_set_hash
+ */
+static enum pkt_hash_types ice_ptype_to_htype(u8 __always_unused ptype)
+{
+ return PKT_HASH_TYPE_NONE;
+}
+
+/**
+ * ice_rx_hash - set the hash value in the skb
+ * @rx_ring: descriptor ring
+ * @rx_desc: specific descriptor
+ * @skb: pointer to current skb
+ * @rx_ptype: the ptype value from the descriptor
+ */
+static void
+ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
+ struct sk_buff *skb, u8 rx_ptype)
+{
+ struct ice_32b_rx_flex_desc_nic *nic_mdid;
+ u32 hash;
+
+ if (!(rx_ring->netdev->features & NETIF_F_RXHASH))
+ return;
+
+ if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)
+ return;
+
+ nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
+ hash = le32_to_cpu(nic_mdid->rss_hash);
+ skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
+}
+
+/**
+ * ice_rx_csum - Indicate in skb if checksum is good
+ * @ring: the ring we care about
+ * @skb: skb currently being received and modified
+ * @rx_desc: the receive descriptor
+ * @ptype: the packet type decoded by hardware
+ *
+ * skb->protocol must be set before this function is called
+ */
+static void
+ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,
+ union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
+{
+ struct ice_rx_ptype_decoded decoded;
+ u32 rx_error, rx_status;
+ bool ipv4, ipv6;
+
+ rx_status = le16_to_cpu(rx_desc->wb.status_error0);
+ rx_error = rx_status;
+
+ decoded = ice_decode_rx_desc_ptype(ptype);
+
+ /* Start with CHECKSUM_NONE and by default csum_level = 0 */
+ skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
+
+ /* check if Rx checksum is enabled */
+ if (!(ring->netdev->features & NETIF_F_RXCSUM))
+ return;
+
+ /* check if HW has decoded the packet and checksum */
+ if (!(rx_status & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
+ return;
+
+ if (!(decoded.known && decoded.outer_ip))
+ return;
+
+ ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
+ (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
+ ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
+ (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);
+
+ if (ipv4 && (rx_error & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
+ BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
+ goto checksum_fail;
+ else if (ipv6 && (rx_status &
+ (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
+ goto checksum_fail;
+
+ /* check for L4 errors and handle packets that were not able to be
+ * checksummed due to arrival speed
+ */
+ if (rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
+ goto checksum_fail;
+
+ /* Only report checksum unnecessary for TCP, UDP, or SCTP */
+ switch (decoded.inner_prot) {
+ case ICE_RX_PTYPE_INNER_PROT_TCP:
+ case ICE_RX_PTYPE_INNER_PROT_UDP:
+ case ICE_RX_PTYPE_INNER_PROT_SCTP:
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ default:
+ break;
+ }
+ return;
+
+checksum_fail:
+ ring->vsi->back->hw_csum_rx_error++;
+}
+
+/**
+ * ice_process_skb_fields - Populate skb header fields from Rx descriptor
+ * @rx_ring: Rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being populated
+ * @ptype: the packet type decoded by hardware
+ *
+ * This function checks the ring, descriptor, and packet information in
+ * order to populate the hash, checksum, VLAN, protocol, and
+ * other fields within the skb.
+ */
+void
+ice_process_skb_fields(struct ice_ring *rx_ring,
+ union ice_32b_rx_flex_desc *rx_desc,
+ struct sk_buff *skb, u8 ptype)
+{
+ ice_rx_hash(rx_ring, rx_desc, skb, ptype);
+
+ /* modifies the skb - consumes the enet header */
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+
+ ice_rx_csum(rx_ring, skb, rx_desc, ptype);
+}
+
+/**
+ * ice_receive_skb - Send a completed packet up the stack
+ * @rx_ring: Rx ring in play
+ * @skb: packet to send up
+ * @vlan_tag: VLAN tag for packet
+ *
+ * This function sends the completed packet (via skb) up the stack using
+ * GRO receive functions (with/without VLAN tag)
+ */
+void
+ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
+{
+ if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ (vlan_tag & VLAN_VID_MASK))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+ napi_gro_receive(&rx_ring->q_vector->napi, skb);
+}
+
+/**
+ * ice_xmit_xdp_ring - submit single packet to XDP ring for transmission
+ * @data: packet data pointer
+ * @size: packet data size
+ * @xdp_ring: XDP ring for transmission
+ */
+int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring)
+{
+ u16 i = xdp_ring->next_to_use;
+ struct ice_tx_desc *tx_desc;
+ struct ice_tx_buf *tx_buf;
+ dma_addr_t dma;
+
+ if (!unlikely(ICE_DESC_UNUSED(xdp_ring))) {
+ xdp_ring->tx_stats.tx_busy++;
+ return ICE_XDP_CONSUMED;
+ }
+
+ dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(xdp_ring->dev, dma))
+ return ICE_XDP_CONSUMED;
+
+ tx_buf = &xdp_ring->tx_buf[i];
+ tx_buf->bytecount = size;
+ tx_buf->gso_segs = 1;
+ tx_buf->raw_buf = data;
+
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_buf, len, size);
+ dma_unmap_addr_set(tx_buf, dma, dma);
+
+ tx_desc = ICE_TX_DESC(xdp_ring, i);
+ tx_desc->buf_addr = cpu_to_le64(dma);
+ tx_desc->cmd_type_offset_bsz = build_ctob(ICE_TXD_LAST_DESC_CMD, 0,
+ size, 0);
+
+ /* Make certain all of the status bits have been updated
+ * before next_to_watch is written.
+ */
+ smp_wmb();
+
+ i++;
+ if (i == xdp_ring->count)
+ i = 0;
+
+ tx_buf->next_to_watch = tx_desc;
+ xdp_ring->next_to_use = i;
+
+ return ICE_XDP_TX;
+}
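ICE_DESC_UNUSED() is defined in ice_txrx.h, outside this hunk; a sketch of the one-slot-open ring accounting it is assumed to implement:

#include <stdio.h>

/* Assumed shape of ICE_DESC_UNUSED(): one slot is kept open so that
 * next_to_use == next_to_clean unambiguously means "ring empty".
 */
static unsigned int ring_unused(unsigned int ntc, unsigned int ntu,
				unsigned int count)
{
	return ((ntc > ntu) ? 0 : count) + ntc - ntu - 1;
}

int main(void)
{
	printf("%u\n", ring_unused(0, 0, 512));		/* 511: empty ring */
	printf("%u\n", ring_unused(10, 9, 512));	/* 0: ring full */
	return 0;
}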
+
+/**
+ * ice_xmit_xdp_buff - convert an XDP buffer to an XDP frame and send it
+ * @xdp: XDP buffer
+ * @xdp_ring: XDP Tx ring
+ *
+ * Returns negative on failure, 0 on success.
+ */
+int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring)
+{
+ struct xdp_frame *xdpf = convert_to_xdp_frame(xdp);
+
+ if (unlikely(!xdpf))
+ return ICE_XDP_CONSUMED;
+
+ return ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
+}
+
+/**
+ * ice_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map
+ * @rx_ring: Rx ring
+ * @xdp_res: Result of the receive batch
+ *
+ * This function bumps the XDP Tx tail and/or flushes the redirect map.
+ * It should be called once a batch of packets has been processed in
+ * the NAPI loop.
+ */
+void ice_finalize_xdp_rx(struct ice_ring *rx_ring, unsigned int xdp_res)
+{
+ if (xdp_res & ICE_XDP_REDIR)
+ xdp_do_flush_map();
+
+ if (xdp_res & ICE_XDP_TX) {
+ struct ice_ring *xdp_ring =
+ rx_ring->vsi->xdp_rings[rx_ring->q_index];
+
+ ice_xdp_ring_update_tail(xdp_ring);
+ }
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
new file mode 100644
index 000000000000..ba9164dad9ae
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Intel Corporation. */
+
+#ifndef _ICE_TXRX_LIB_H_
+#define _ICE_TXRX_LIB_H_
+#include "ice.h"
+
+/**
+ * ice_test_staterr - tests bits in Rx descriptor status and error fields
+ * @rx_desc: pointer to receive descriptor (in le64 format)
+ * @stat_err_bits: value to mask
+ *
+ * This function does some fast chicanery in order to return the
+ * value of the mask which is really only used for boolean tests.
+ * The status_error_len doesn't need to be shifted because it begins
+ * at offset zero.
+ */
+static inline bool
+ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, const u16 stat_err_bits)
+{
+ return !!(rx_desc->wb.status_error0 & cpu_to_le16(stat_err_bits));
+}
+
+static inline __le64
+build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
+{
+ return cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
+ (td_cmd << ICE_TXD_QW1_CMD_S) |
+ (td_offset << ICE_TXD_QW1_OFFSET_S) |
+ ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
+ (td_tag << ICE_TXD_QW1_L2TAG1_S));
+}
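As a worked example of the quad word build_ctob() packs: with the field positions assumed here to mirror the i40e layout (DTYPE = 0 for data, CMD at bit 4, OFFSET at bit 16, buffer size at bit 34, L2TAG1 at bit 48; see ice_lan_tx_rx.h for the authoritative values), a 1500-byte buffer with td_cmd = 0x1 yields:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t td_cmd = 0x1, td_offset = 0, size = 1500, td_tag = 0;
	/* assumed shifts; the real ICE_TXD_QW1_* constants may differ */
	uint64_t qw1 = (td_cmd << 4) | (td_offset << 16) |
		       (size << 34) | (td_tag << 48);

	printf("qw1 = 0x%016llx\n", (unsigned long long)qw1);
	return 0;	/* prints qw1 = 0x0000177000000010 */
}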
+
+/**
+ * ice_xdp_ring_update_tail - Updates the XDP Tx ring tail register
+ * @xdp_ring: XDP Tx ring
+ *
+ * This function updates the XDP Tx ring tail register.
+ */
+static inline void ice_xdp_ring_update_tail(struct ice_ring *xdp_ring)
+{
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch.
+ */
+ wmb();
+ writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);
+}
+
+void ice_finalize_xdp_rx(struct ice_ring *rx_ring, unsigned int xdp_res);
+int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring);
+int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring);
+void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val);
+void
+ice_process_skb_fields(struct ice_ring *rx_ring,
+ union ice_32b_rx_flex_desc *rx_desc,
+ struct sk_buff *skb, u8 ptype);
+void
+ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag);
+#endif /* !_ICE_TXRX_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index b45797f39b2f..ad757412bb04 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2018, Intel Corporation. */
#include "ice.h"
+#include "ice_base.h"
#include "ice_lib.h"
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
new file mode 100644
index 000000000000..fcffad0069d6
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -0,0 +1,1181 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019, Intel Corporation. */
+
+#include <linux/bpf_trace.h>
+#include <net/xdp_sock.h>
+#include <net/xdp.h>
+#include "ice.h"
+#include "ice_base.h"
+#include "ice_type.h"
+#include "ice_xsk.h"
+#include "ice_txrx.h"
+#include "ice_txrx_lib.h"
+#include "ice_lib.h"
+
+/**
+ * ice_qp_reset_stats - Resets all stats for rings of given index
+ * @vsi: VSI that contains rings of interest
+ * @q_idx: ring index in array
+ */
+static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
+{
+ memset(&vsi->rx_rings[q_idx]->rx_stats, 0,
+ sizeof(vsi->rx_rings[q_idx]->rx_stats));
+ memset(&vsi->tx_rings[q_idx]->stats, 0,
+ sizeof(vsi->tx_rings[q_idx]->stats));
+ if (ice_is_xdp_ena_vsi(vsi))
+ memset(&vsi->xdp_rings[q_idx]->stats, 0,
+ sizeof(vsi->xdp_rings[q_idx]->stats));
+}
+
+/**
+ * ice_qp_clean_rings - Cleans all the rings of a given index
+ * @vsi: VSI that contains rings of interest
+ * @q_idx: ring index in array
+ */
+static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
+{
+ ice_clean_tx_ring(vsi->tx_rings[q_idx]);
+ if (ice_is_xdp_ena_vsi(vsi))
+ ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
+ ice_clean_rx_ring(vsi->rx_rings[q_idx]);
+}
+
+/**
+ * ice_qvec_toggle_napi - Enables/disables NAPI for a given q_vector
+ * @vsi: VSI that has netdev
+ * @q_vector: q_vector that has NAPI context
+ * @enable: true for enable, false for disable
+ */
+static void
+ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
+ bool enable)
+{
+ if (!vsi->netdev || !q_vector)
+ return;
+
+ if (enable)
+ napi_enable(&q_vector->napi);
+ else
+ napi_disable(&q_vector->napi);
+}
+
+/**
+ * ice_qvec_dis_irq - Mask off queue interrupt generation on given ring
+ * @vsi: the VSI that contains queue vector being un-configured
+ * @rx_ring: Rx ring that will have its IRQ disabled
+ * @q_vector: queue vector
+ */
+static void
+ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_ring *rx_ring,
+ struct ice_q_vector *q_vector)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ int base = vsi->base_vector;
+ u16 reg;
+ u32 val;
+
+ /* QINT_TQCTL is cleared in ice_vsi_stop_tx_ring, so only
+ * QINT_RQCTL needs to be handled here
+ */
+ reg = rx_ring->reg_idx;
+ val = rd32(hw, QINT_RQCTL(reg));
+ val &= ~QINT_RQCTL_CAUSE_ENA_M;
+ wr32(hw, QINT_RQCTL(reg), val);
+
+ if (q_vector) {
+ u16 v_idx = q_vector->v_idx;
+
+ wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx), 0);
+ ice_flush(hw);
+ synchronize_irq(pf->msix_entries[v_idx + base].vector);
+ }
+}
+
+/**
+ * ice_qvec_cfg_msix - Enable IRQ for given queue vector
+ * @vsi: the VSI that contains queue vector
+ * @q_vector: queue vector
+ */
+static void
+ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
+{
+ u16 reg_idx = q_vector->reg_idx;
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ struct ice_ring *ring;
+
+ ice_cfg_itr(hw, q_vector);
+
+ wr32(hw, GLINT_RATE(reg_idx),
+ ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran));
+
+ ice_for_each_ring(ring, q_vector->tx)
+ ice_cfg_txq_interrupt(vsi, ring->reg_idx, reg_idx,
+ q_vector->tx.itr_idx);
+
+ ice_for_each_ring(ring, q_vector->rx)
+ ice_cfg_rxq_interrupt(vsi, ring->reg_idx, reg_idx,
+ q_vector->rx.itr_idx);
+
+ ice_flush(hw);
+}
+
+/**
+ * ice_qvec_ena_irq - Enable IRQ for given queue vector
+ * @vsi: the VSI that contains queue vector
+ * @q_vector: queue vector
+ */
+static void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+
+ ice_irq_dynamic_ena(hw, vsi, q_vector);
+
+ ice_flush(hw);
+}
+
+/**
+ * ice_qp_dis - Disables a queue pair
+ * @vsi: VSI of interest
+ * @q_idx: ring index in array
+ *
+ * Returns 0 on success, negative on failure.
+ */
+static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
+{
+ struct ice_txq_meta txq_meta = { };
+ struct ice_ring *tx_ring, *rx_ring;
+ struct ice_q_vector *q_vector;
+ int timeout = 50;
+ int err;
+
+ if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
+ return -EINVAL;
+
+ tx_ring = vsi->tx_rings[q_idx];
+ rx_ring = vsi->rx_rings[q_idx];
+ q_vector = rx_ring->q_vector;
+
+ while (test_and_set_bit(__ICE_CFG_BUSY, vsi->state)) {
+ timeout--;
+ if (!timeout)
+ return -EBUSY;
+ usleep_range(1000, 2000);
+ }
+ netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
+
+ ice_qvec_dis_irq(vsi, rx_ring, q_vector);
+
+ ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
+ err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
+ if (err)
+ return err;
+ if (ice_is_xdp_ena_vsi(vsi)) {
+ struct ice_ring *xdp_ring = vsi->xdp_rings[q_idx];
+
+ memset(&txq_meta, 0, sizeof(txq_meta));
+ ice_fill_txq_meta(vsi, xdp_ring, &txq_meta);
+ err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, xdp_ring,
+ &txq_meta);
+ if (err)
+ return err;
+ }
+ err = ice_vsi_ctrl_rx_ring(vsi, false, q_idx);
+ if (err)
+ return err;
+
+ ice_qvec_toggle_napi(vsi, q_vector, false);
+ ice_qp_clean_rings(vsi, q_idx);
+ ice_qp_reset_stats(vsi, q_idx);
+
+ return 0;
+}
+
+/**
+ * ice_qp_ena - Enables a queue pair
+ * @vsi: VSI of interest
+ * @q_idx: ring index in array
+ *
+ * Returns 0 on success, negative on failure.
+ */
+static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
+{
+ struct ice_aqc_add_tx_qgrp *qg_buf;
+ struct ice_ring *tx_ring, *rx_ring;
+ struct ice_q_vector *q_vector;
+ int err;
+
+ if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
+ return -EINVAL;
+
+ qg_buf = kzalloc(sizeof(*qg_buf), GFP_KERNEL);
+ if (!qg_buf)
+ return -ENOMEM;
+
+ qg_buf->num_txqs = 1;
+
+ tx_ring = vsi->tx_rings[q_idx];
+ rx_ring = vsi->rx_rings[q_idx];
+ q_vector = rx_ring->q_vector;
+
+ err = ice_vsi_cfg_txq(vsi, tx_ring, qg_buf);
+ if (err)
+ goto free_buf;
+
+ if (ice_is_xdp_ena_vsi(vsi)) {
+ struct ice_ring *xdp_ring = vsi->xdp_rings[q_idx];
+
+ memset(qg_buf, 0, sizeof(*qg_buf));
+ qg_buf->num_txqs = 1;
+ err = ice_vsi_cfg_txq(vsi, xdp_ring, qg_buf);
+ if (err)
+ goto free_buf;
+ ice_set_ring_xdp(xdp_ring);
+ xdp_ring->xsk_umem = ice_xsk_umem(xdp_ring);
+ }
+
+ err = ice_setup_rx_ctx(rx_ring);
+ if (err)
+ goto free_buf;
+
+ ice_qvec_cfg_msix(vsi, q_vector);
+
+ err = ice_vsi_ctrl_rx_ring(vsi, true, q_idx);
+ if (err)
+ goto free_buf;
+
+ clear_bit(__ICE_CFG_BUSY, vsi->state);
+ ice_qvec_toggle_napi(vsi, q_vector, true);
+ ice_qvec_ena_irq(vsi, q_vector);
+
+ netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
+free_buf:
+ kfree(qg_buf);
+ return err;
+}
+
+/**
+ * ice_xsk_alloc_umems - allocate the array of UMEM pointers for a VSI
+ * @vsi: VSI to allocate the UMEM pointer array on
+ *
+ * Returns 0 on success, negative on error
+ */
+static int ice_xsk_alloc_umems(struct ice_vsi *vsi)
+{
+ if (vsi->xsk_umems)
+ return 0;
+
+ vsi->xsk_umems = kcalloc(vsi->num_xsk_umems, sizeof(*vsi->xsk_umems),
+ GFP_KERNEL);
+
+ if (!vsi->xsk_umems) {
+ vsi->num_xsk_umems = 0;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_xsk_add_umem - add a UMEM region for XDP sockets
+ * @vsi: VSI to which the UMEM will be added
+ * @umem: pointer to a requested UMEM region
+ * @qid: queue ID
+ *
+ * Returns 0 on success, negative on error
+ */
+static int ice_xsk_add_umem(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
+{
+ int err;
+
+ err = ice_xsk_alloc_umems(vsi);
+ if (err)
+ return err;
+
+ vsi->xsk_umems[qid] = umem;
+ vsi->num_xsk_umems_used++;
+
+ return 0;
+}
+
+/**
+ * ice_xsk_remove_umem - Remove a UMEM for a certain ring/qid
+ * @vsi: VSI from which the UMEM will be removed
+ * @qid: Ring/qid associated with the UMEM
+ */
+static void ice_xsk_remove_umem(struct ice_vsi *vsi, u16 qid)
+{
+ vsi->xsk_umems[qid] = NULL;
+ vsi->num_xsk_umems_used--;
+
+ if (vsi->num_xsk_umems_used == 0) {
+ kfree(vsi->xsk_umems);
+ vsi->xsk_umems = NULL;
+ vsi->num_xsk_umems = 0;
+ }
+}
+
+/**
+ * ice_xsk_umem_dma_map - DMA map UMEM region for XDP sockets
+ * @vsi: VSI to map the UMEM region
+ * @umem: UMEM to map
+ *
+ * Returns 0 on success, negative on error
+ */
+static int ice_xsk_umem_dma_map(struct ice_vsi *vsi, struct xdp_umem *umem)
+{
+ struct ice_pf *pf = vsi->back;
+ struct device *dev;
+ unsigned int i;
+
+ dev = &pf->pdev->dev;
+ for (i = 0; i < umem->npgs; i++) {
+ dma_addr_t dma = dma_map_page_attrs(dev, umem->pgs[i], 0,
+ PAGE_SIZE,
+ DMA_BIDIRECTIONAL,
+ ICE_RX_DMA_ATTR);
+ if (dma_mapping_error(dev, dma)) {
+ dev_dbg(dev,
+ "XSK UMEM DMA mapping error on page num %d", i);
+ goto out_unmap;
+ }
+
+ umem->pages[i].dma = dma;
+ }
+
+ return 0;
+
+out_unmap:
+ /* unwind: unmap only the pages that were successfully mapped */
+ while (i--) {
+ dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
+ DMA_BIDIRECTIONAL, ICE_RX_DMA_ATTR);
+ umem->pages[i].dma = 0;
+ }
+
+ return -EFAULT;
+}
+
+/**
+ * ice_xsk_umem_dma_unmap - DMA unmap UMEM region for XDP sockets
+ * @vsi: VSI from which the UMEM will be unmapped
+ * @umem: UMEM to unmap
+ */
+static void ice_xsk_umem_dma_unmap(struct ice_vsi *vsi, struct xdp_umem *umem)
+{
+ struct ice_pf *pf = vsi->back;
+ struct device *dev;
+ unsigned int i;
+
+ dev = &pf->pdev->dev;
+ for (i = 0; i < umem->npgs; i++) {
+ dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
+ DMA_BIDIRECTIONAL, ICE_RX_DMA_ATTR);
+
+ umem->pages[i].dma = 0;
+ }
+}
+
+/**
+ * ice_xsk_umem_disable - disable a UMEM region
+ * @vsi: Current VSI
+ * @qid: queue ID
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int ice_xsk_umem_disable(struct ice_vsi *vsi, u16 qid)
+{
+ if (!vsi->xsk_umems || qid >= vsi->num_xsk_umems ||
+ !vsi->xsk_umems[qid])
+ return -EINVAL;
+
+ ice_xsk_umem_dma_unmap(vsi, vsi->xsk_umems[qid]);
+ ice_xsk_remove_umem(vsi, qid);
+
+ return 0;
+}
+
+/**
+ * ice_xsk_umem_enable - enable a UMEM region
+ * @vsi: Current VSI
+ * @umem: pointer to a requested UMEM region
+ * @qid: queue ID
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int
+ice_xsk_umem_enable(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
+{
+ struct xdp_umem_fq_reuse *reuseq;
+ int err;
+
+ if (vsi->type != ICE_VSI_PF)
+ return -EINVAL;
+
+ vsi->num_xsk_umems = min_t(u16, vsi->num_rxq, vsi->num_txq);
+ if (qid >= vsi->num_xsk_umems)
+ return -EINVAL;
+
+ if (vsi->xsk_umems && vsi->xsk_umems[qid])
+ return -EBUSY;
+
+ reuseq = xsk_reuseq_prepare(vsi->rx_rings[0]->count);
+ if (!reuseq)
+ return -ENOMEM;
+
+ xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));
+
+ err = ice_xsk_umem_dma_map(vsi, umem);
+ if (err)
+ return err;
+
+ err = ice_xsk_add_umem(vsi, umem, qid);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/**
+ * ice_xsk_umem_setup - enable/disable a UMEM region depending on its state
+ * @vsi: Current VSI
+ * @umem: UMEM to enable/associate to a ring, NULL to disable
+ * @qid: queue ID
+ *
+ * Returns 0 on success, negative on failure
+ */
+int ice_xsk_umem_setup(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
+{
+ bool if_running, umem_present = !!umem;
+ int ret = 0, umem_failure = 0;
+
+ if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);
+
+ if (if_running) {
+ ret = ice_qp_dis(vsi, qid);
+ if (ret) {
+ netdev_err(vsi->netdev, "ice_qp_dis error = %d", ret);
+ goto xsk_umem_if_up;
+ }
+ }
+
+ umem_failure = umem_present ? ice_xsk_umem_enable(vsi, umem, qid) :
+ ice_xsk_umem_disable(vsi, qid);
+
+xsk_umem_if_up:
+ if (if_running) {
+ ret = ice_qp_ena(vsi, qid);
+ if (!ret && umem_present)
+ napi_schedule(&vsi->xdp_rings[qid]->q_vector->napi);
+ else if (ret)
+ netdev_err(vsi->netdev, "ice_qp_ena error = %d", ret);
+ }
+
+ if (umem_failure) {
+ netdev_err(vsi->netdev, "Could not %sable UMEM, error = %d",
+ umem_present ? "en" : "dis", umem_failure);
+ return umem_failure;
+ }
+
+ return ret;
+}
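For context, ice_xsk_umem_setup() is reached through the driver's ndo_bpf hook; a hedged sketch of that dispatch (the real handler lives in ice_main.c and is not part of this hunk; names are reproduced from memory):

/* Sketch of the XDP_SETUP_XSK_UMEM dispatch that lands here. */
static int ice_xdp_umem_sketch(struct net_device *dev, struct netdev_bpf *xdp)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	struct ice_vsi *vsi = np->vsi;

	switch (xdp->command) {
	case XDP_SETUP_XSK_UMEM:
		/* a NULL umem disables zero-copy on that queue */
		return ice_xsk_umem_setup(vsi, xdp->xsk.umem,
					  xdp->xsk.queue_id);
	default:
		return -EINVAL;
	}
}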
+
+/**
+ * ice_zca_free - Callback for MEM_TYPE_ZERO_COPY allocations
+ * @zca: zero-cpoy allocator
+ * @handle: Buffer handle
+ */
+void ice_zca_free(struct zero_copy_allocator *zca, unsigned long handle)
+{
+ struct ice_rx_buf *rx_buf;
+ struct ice_ring *rx_ring;
+ struct xdp_umem *umem;
+ u64 hr, mask;
+ u16 nta;
+
+ rx_ring = container_of(zca, struct ice_ring, zca);
+ umem = rx_ring->xsk_umem;
+ hr = umem->headroom + XDP_PACKET_HEADROOM;
+
+ mask = umem->chunk_mask;
+
+ nta = rx_ring->next_to_alloc;
+ rx_buf = &rx_ring->rx_buf[nta];
+
+ nta++;
+ rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+ handle &= mask;
+
+ rx_buf->dma = xdp_umem_get_dma(umem, handle);
+ rx_buf->dma += hr;
+
+ rx_buf->addr = xdp_umem_get_data(umem, handle);
+ rx_buf->addr += hr;
+
+ rx_buf->handle = (u64)handle + umem->headroom;
+}
+
+/**
+ * ice_alloc_buf_fast_zc - Retrieve buffer address from XDP umem
+ * @rx_ring: ring with an xdp_umem bound to it
+ * @rx_buf: buffer to which xsk page address will be assigned
+ *
+ * This function allocates an Rx buffer in the hot path.
+ * The buffer can come from fill queue or recycle queue.
+ *
+ * Returns true if an assignment was successful, false if not.
+ */
+static __always_inline bool
+ice_alloc_buf_fast_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
+{
+ struct xdp_umem *umem = rx_ring->xsk_umem;
+ void *addr = rx_buf->addr;
+ u64 handle, hr;
+
+ if (addr) {
+ rx_ring->rx_stats.page_reuse_count++;
+ return true;
+ }
+
+ if (!xsk_umem_peek_addr(umem, &handle)) {
+ rx_ring->rx_stats.alloc_page_failed++;
+ return false;
+ }
+
+ hr = umem->headroom + XDP_PACKET_HEADROOM;
+
+ rx_buf->dma = xdp_umem_get_dma(umem, handle);
+ rx_buf->dma += hr;
+
+ rx_buf->addr = xdp_umem_get_data(umem, handle);
+ rx_buf->addr += hr;
+
+ rx_buf->handle = handle + umem->headroom;
+
+ xsk_umem_discard_addr(umem);
+ return true;
+}
+
+/**
+ * ice_alloc_buf_slow_zc - Retrieve buffer address from XDP umem
+ * @rx_ring: ring with an xdp_umem bound to it
+ * @rx_buf: buffer to which xsk page address will be assigned
+ *
+ * This function allocates an Rx buffer in the slow path.
+ * The buffer can come from fill queue or recycle queue.
+ *
+ * Returns true if an assignment was successful, false if not.
+ */
+static __always_inline bool
+ice_alloc_buf_slow_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
+{
+ struct xdp_umem *umem = rx_ring->xsk_umem;
+ u64 handle, headroom;
+
+ if (!xsk_umem_peek_addr_rq(umem, &handle)) {
+ rx_ring->rx_stats.alloc_page_failed++;
+ return false;
+ }
+
+ handle &= umem->chunk_mask;
+ headroom = umem->headroom + XDP_PACKET_HEADROOM;
+
+ rx_buf->dma = xdp_umem_get_dma(umem, handle);
+ rx_buf->dma += headroom;
+
+ rx_buf->addr = xdp_umem_get_data(umem, handle);
+ rx_buf->addr += headroom;
+
+ rx_buf->handle = handle + umem->headroom;
+
+ xsk_umem_discard_addr_rq(umem);
+ return true;
+}
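The handle arithmetic in both allocators is easier to see with numbers; a user-space sketch assuming 2 KiB chunks, zero UMEM headroom, and the usual 256-byte XDP_PACKET_HEADROOM:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t chunk_mask = ~(2048ULL - 1);	/* assumed 2 KiB chunk size */
	uint64_t hr = 0 + 256;			/* umem->headroom + XDP headroom */
	uint64_t handle = 0x5000 + 64;		/* an offset inside chunk 10 */

	uint64_t base = handle & chunk_mask;	/* 0x5000: start of the chunk */
	uint64_t data = base + hr;		/* 0x5100: where frame data lands */

	printf("base = 0x%llx, data = 0x%llx\n",
	       (unsigned long long)base, (unsigned long long)data);
	return 0;
}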
+
+/**
+ * ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
+ * @rx_ring: Rx ring
+ * @count: The number of buffers to allocate
+ * @alloc: the function pointer to call for allocation
+ *
+ * This function allocates a number of Rx buffers from the fill ring
+ * or the internal recycle mechanism and places them on the Rx ring.
+ *
+ * Returns false if all allocations were successful, true if any fail.
+ */
+static bool
+ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, int count,
+ bool alloc(struct ice_ring *, struct ice_rx_buf *))
+{
+ union ice_32b_rx_flex_desc *rx_desc;
+ u16 ntu = rx_ring->next_to_use;
+ struct ice_rx_buf *rx_buf;
+ bool ret = false;
+
+ if (!count)
+ return false;
+
+ rx_desc = ICE_RX_DESC(rx_ring, ntu);
+ rx_buf = &rx_ring->rx_buf[ntu];
+
+ do {
+ if (!alloc(rx_ring, rx_buf)) {
+ ret = true;
+ break;
+ }
+
+ dma_sync_single_range_for_device(rx_ring->dev, rx_buf->dma, 0,
+ rx_ring->rx_buf_len,
+ DMA_BIDIRECTIONAL);
+
+ rx_desc->read.pkt_addr = cpu_to_le64(rx_buf->dma);
+ rx_desc->wb.status_error0 = 0;
+
+ rx_desc++;
+ rx_buf++;
+ ntu++;
+
+ if (unlikely(ntu == rx_ring->count)) {
+ rx_desc = ICE_RX_DESC(rx_ring, 0);
+ rx_buf = rx_ring->rx_buf;
+ ntu = 0;
+ }
+ } while (--count);
+
+ if (rx_ring->next_to_use != ntu)
+ ice_release_rx_desc(rx_ring, ntu);
+
+ return ret;
+}
+
+/**
+ * ice_alloc_rx_bufs_fast_zc - allocate zero copy bufs in the hot path
+ * @rx_ring: Rx ring
+ * @count: number of bufs to allocate
+ *
+ * Returns false on success, true on failure.
+ */
+static bool ice_alloc_rx_bufs_fast_zc(struct ice_ring *rx_ring, u16 count)
+{
+ return ice_alloc_rx_bufs_zc(rx_ring, count,
+ ice_alloc_buf_fast_zc);
+}
+
+/**
+ * ice_alloc_rx_bufs_slow_zc - allocate zero copy bufs in the slow path
+ * @rx_ring: Rx ring
+ * @count: number of bufs to allocate
+ *
+ * Returns false on success, true on failure.
+ */
+bool ice_alloc_rx_bufs_slow_zc(struct ice_ring *rx_ring, u16 count)
+{
+ return ice_alloc_rx_bufs_zc(rx_ring, count,
+ ice_alloc_buf_slow_zc);
+}
+
+/**
+ * ice_bump_ntc - Bump the next_to_clean counter of an Rx ring
+ * @rx_ring: Rx ring
+ */
+static void ice_bump_ntc(struct ice_ring *rx_ring)
+{
+ int ntc = rx_ring->next_to_clean + 1;
+
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+ prefetch(ICE_RX_DESC(rx_ring, ntc));
+}
+
+/**
+ * ice_get_rx_buf_zc - Fetch the current Rx buffer
+ * @rx_ring: Rx ring
+ * @size: size of a buffer
+ *
+ * This function returns the currently received Rx buffer and
+ * synchronizes it for CPU access.
+ *
+ * Returns a pointer to the received Rx buffer.
+ */
+static struct ice_rx_buf *ice_get_rx_buf_zc(struct ice_ring *rx_ring, int size)
+{
+ struct ice_rx_buf *rx_buf;
+
+ rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
+
+ dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma, 0,
+ size, DMA_BIDIRECTIONAL);
+
+ return rx_buf;
+}
+
+/**
+ * ice_reuse_rx_buf_zc - reuse an Rx buffer
+ * @rx_ring: Rx ring
+ * @old_buf: The buffer to recycle
+ *
+ * This function recycles a finished Rx buffer, and places it on the recycle
+ * queue (next_to_alloc).
+ */
+static void
+ice_reuse_rx_buf_zc(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
+{
+ unsigned long mask = (unsigned long)rx_ring->xsk_umem->chunk_mask;
+ u64 hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
+ u16 nta = rx_ring->next_to_alloc;
+ struct ice_rx_buf *new_buf;
+
+ new_buf = &rx_ring->rx_buf[nta++];
+ rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+ new_buf->dma = old_buf->dma & mask;
+ new_buf->dma += hr;
+
+ new_buf->addr = (void *)((unsigned long)old_buf->addr & mask);
+ new_buf->addr += hr;
+
+ new_buf->handle = old_buf->handle & mask;
+ new_buf->handle += rx_ring->xsk_umem->headroom;
+
+ old_buf->addr = NULL;
+}
+
+/**
+ * ice_construct_skb_zc - Create an sk_buff from zero-copy buffer
+ * @rx_ring: Rx ring
+ * @rx_buf: zero-copy Rx buffer
+ * @xdp: XDP buffer
+ *
+ * This function allocates a new skb from a zero-copy Rx buffer.
+ *
+ * Returns the skb on success, NULL on failure.
+ */
+static struct sk_buff *
+ice_construct_skb_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
+ struct xdp_buff *xdp)
+{
+ unsigned int metasize = xdp->data - xdp->data_meta;
+ unsigned int datasize = xdp->data_end - xdp->data;
+ unsigned int datasize_hard = xdp->data_end -
+ xdp->data_hard_start;
+ struct sk_buff *skb;
+
+ skb = __napi_alloc_skb(&rx_ring->q_vector->napi, datasize_hard,
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (unlikely(!skb))
+ return NULL;
+
+ skb_reserve(skb, xdp->data - xdp->data_hard_start);
+ memcpy(__skb_put(skb, datasize), xdp->data, datasize);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
+
+ ice_reuse_rx_buf_zc(rx_ring, rx_buf);
+
+ return skb;
+}
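Note that on XDP_PASS the zero-copy buffer is still copied once into a freshly allocated skb; a quick sizing sketch for a hypothetical 1500-byte frame with 256 bytes of headroom and no metadata:

#include <stdio.h>

int main(void)
{
	unsigned long data_hard_start = 0x1000;	/* hypothetical addresses */
	unsigned long data = data_hard_start + 256;
	unsigned long data_end = data + 1500;

	unsigned long datasize = data_end - data;		   /* 1500 */
	unsigned long datasize_hard = data_end - data_hard_start; /* 1756 */

	printf("alloc %lu bytes, reserve %lu, put %lu\n",
	       datasize_hard, data - data_hard_start, datasize);
	return 0;
}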
+
+/**
+ * ice_run_xdp_zc - Executes an XDP program in zero-copy path
+ * @rx_ring: Rx ring
+ * @xdp: xdp_buff used as input to the XDP program
+ *
+ * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
+ */
+static int
+ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
+{
+ int err, result = ICE_XDP_PASS;
+ struct bpf_prog *xdp_prog;
+ struct ice_ring *xdp_ring;
+ u32 act;
+
+ rcu_read_lock();
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+ if (!xdp_prog) {
+ rcu_read_unlock();
+ return ICE_XDP_PASS;
+ }
+
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
+ xdp->handle += xdp->data - xdp->data_hard_start;
+ switch (act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->q_index];
+ result = ice_xmit_xdp_buff(xdp, xdp_ring);
+ break;
+ case XDP_REDIRECT:
+ err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
+ result = !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ /* fallthrough -- not supported action */
+ case XDP_ABORTED:
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+ /* fallthrough -- handle aborts by dropping frame */
+ case XDP_DROP:
+ result = ICE_XDP_CONSUMED;
+ break;
+ }
+
+ rcu_read_unlock();
+ return result;
+}
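Each verdict handled above comes straight from an XDP program attached to the netdev; a minimal libbpf-style program that exercises the XDP_PASS branch (standalone sketch, built with clang -target bpf, not part of the driver):

/* xdp_pass_kern.c - trivial XDP program; swap the return value for
 * XDP_TX, XDP_REDIRECT or XDP_DROP to hit the other branches above.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_pass_prog(struct xdp_md *ctx)
{
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";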
+
+/**
+ * ice_clean_rx_irq_zc - consumes packets from the hardware ring
+ * @rx_ring: AF_XDP Rx ring
+ * @budget: NAPI budget
+ *
+ * Returns number of processed packets on success, remaining budget on failure.
+ */
+int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
+{
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
+ unsigned int xdp_xmit = 0;
+ struct xdp_buff xdp;
+ bool failure = false;
+
+ xdp.rxq = &rx_ring->xdp_rxq;
+
+ while (likely(total_rx_packets < (unsigned int)budget)) {
+ union ice_32b_rx_flex_desc *rx_desc;
+ unsigned int size, xdp_res = 0;
+ struct ice_rx_buf *rx_buf;
+ struct sk_buff *skb;
+ u16 stat_err_bits;
+ u16 vlan_tag = 0;
+ u8 rx_ptype;
+
+ if (cleaned_count >= ICE_RX_BUF_WRITE) {
+ failure |= ice_alloc_rx_bufs_fast_zc(rx_ring,
+ cleaned_count);
+ cleaned_count = 0;
+ }
+
+ rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
+
+ stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
+ if (!ice_test_staterr(rx_desc, stat_err_bits))
+ break;
+
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we have
+ * verified the descriptor has been written back.
+ */
+ dma_rmb();
+
+ size = le16_to_cpu(rx_desc->wb.pkt_len) &
+ ICE_RX_FLX_DESC_PKT_LEN_M;
+ if (!size)
+ break;
+
+ rx_buf = ice_get_rx_buf_zc(rx_ring, size);
+ if (!rx_buf->addr)
+ break;
+
+ xdp.data = rx_buf->addr;
+ xdp.data_meta = xdp.data;
+ xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
+ xdp.data_end = xdp.data + size;
+ xdp.handle = rx_buf->handle;
+
+ xdp_res = ice_run_xdp_zc(rx_ring, &xdp);
+ if (xdp_res) {
+ if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
+ xdp_xmit |= xdp_res;
+ rx_buf->addr = NULL;
+ } else {
+ ice_reuse_rx_buf_zc(rx_ring, rx_buf);
+ }
+
+ total_rx_bytes += size;
+ total_rx_packets++;
+ cleaned_count++;
+
+ ice_bump_ntc(rx_ring);
+ continue;
+ }
+
+ /* XDP_PASS path */
+ skb = ice_construct_skb_zc(rx_ring, rx_buf, &xdp);
+ if (!skb) {
+ rx_ring->rx_stats.alloc_buf_failed++;
+ break;
+ }
+
+ cleaned_count++;
+ ice_bump_ntc(rx_ring);
+
+ if (eth_skb_pad(skb)) {
+ skb = NULL;
+ continue;
+ }
+
+ total_rx_bytes += skb->len;
+ total_rx_packets++;
+
+ stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
+ if (ice_test_staterr(rx_desc, stat_err_bits))
+ vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);
+
+ rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
+ ICE_RX_FLEX_DESC_PTYPE_M;
+
+ ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
+ ice_receive_skb(rx_ring, skb, vlan_tag);
+ }
+
+ ice_finalize_xdp_rx(rx_ring, xdp_xmit);
+ ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
+
+ return failure ? budget : (int)total_rx_packets;
+}
+
+/**
+ * ice_xmit_zc - Transmit entries from the AF_XDP Tx queue
+ * @xdp_ring: XDP Tx ring
+ * @budget: max number of frames to xmit
+ *
+ * Returns true if cleanup/transmission is done.
+ */
+static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)
+{
+ struct ice_tx_desc *tx_desc = NULL;
+ bool work_done = true;
+ struct xdp_desc desc;
+ dma_addr_t dma;
+
+ while (likely(budget-- > 0)) {
+ struct ice_tx_buf *tx_buf;
+
+ if (unlikely(!ICE_DESC_UNUSED(xdp_ring))) {
+ xdp_ring->tx_stats.tx_busy++;
+ work_done = false;
+ break;
+ }
+
+ tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use];
+
+ if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc))
+ break;
+
+ dma = xdp_umem_get_dma(xdp_ring->xsk_umem, desc.addr);
+
+ dma_sync_single_for_device(xdp_ring->dev, dma, desc.len,
+ DMA_BIDIRECTIONAL);
+
+ tx_buf->bytecount = desc.len;
+
+ tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use);
+ tx_desc->buf_addr = cpu_to_le64(dma);
+ tx_desc->cmd_type_offset_bsz = build_ctob(ICE_TXD_LAST_DESC_CMD,
+ 0, desc.len, 0);
+
+ xdp_ring->next_to_use++;
+ if (xdp_ring->next_to_use == xdp_ring->count)
+ xdp_ring->next_to_use = 0;
+ }
+
+ if (tx_desc) {
+ ice_xdp_ring_update_tail(xdp_ring);
+ xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
+ }
+
+ return budget > 0 && work_done;
+}
+
+/**
+ * ice_clean_xdp_tx_buf - Free and unmap XDP Tx buffer
+ * @xdp_ring: XDP Tx ring
+ * @tx_buf: Tx buffer to clean
+ */
+static void
+ice_clean_xdp_tx_buf(struct ice_ring *xdp_ring, struct ice_tx_buf *tx_buf)
+{
+ xdp_return_frame((struct xdp_frame *)tx_buf->raw_buf);
+ dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buf, len, 0);
+}
+
+/**
+ * ice_clean_tx_irq_zc - Completes AF_XDP entries, and cleans XDP entries
+ * @xdp_ring: XDP Tx ring
+ * @budget: NAPI budget
+ *
+ * Returns true if cleanup/transmission is done.
+ */
+bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget)
+{
+ int total_packets = 0, total_bytes = 0;
+ s16 ntc = xdp_ring->next_to_clean;
+ struct ice_tx_desc *tx_desc;
+ struct ice_tx_buf *tx_buf;
+ bool xmit_done = true;
+ u32 xsk_frames = 0;
+
+ tx_desc = ICE_TX_DESC(xdp_ring, ntc);
+ tx_buf = &xdp_ring->tx_buf[ntc];
+ ntc -= xdp_ring->count;
+
+ do {
+ if (!(tx_desc->cmd_type_offset_bsz &
+ cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
+ break;
+
+ total_bytes += tx_buf->bytecount;
+ total_packets++;
+
+ if (tx_buf->raw_buf) {
+ ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
+ tx_buf->raw_buf = NULL;
+ } else {
+ xsk_frames++;
+ }
+
+ tx_desc->cmd_type_offset_bsz = 0;
+ tx_buf++;
+ tx_desc++;
+ ntc++;
+
+ if (unlikely(!ntc)) {
+ ntc -= xdp_ring->count;
+ tx_buf = xdp_ring->tx_buf;
+ tx_desc = ICE_TX_DESC(xdp_ring, 0);
+ }
+
+ prefetch(tx_desc);
+
+ } while (likely(--budget));
+
+ ntc += xdp_ring->count;
+ xdp_ring->next_to_clean = ntc;
+
+ if (xsk_frames)
+ xsk_umem_complete_tx(xdp_ring->xsk_umem, xsk_frames);
+
+ ice_update_tx_ring_stats(xdp_ring, total_packets, total_bytes);
+ xmit_done = ice_xmit_zc(xdp_ring, ICE_DFLT_IRQ_WORK);
+
+ return budget > 0 && xmit_done;
+}
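The `ntc -= xdp_ring->count` bias above keeps next_to_clean negative inside the loop so the wrap test is a cheap `!ntc`; a small-numbers sketch of the idiom:

#include <stdio.h>

int main(void)
{
	int count = 4;
	int ntc = 2 - count;	/* start cleaning at index 2, biased by -count */
	int steps;

	for (steps = 0; steps < 5; steps++) {
		printf("cleaning descriptor %d\n", ntc + count);
		ntc++;
		if (!ntc)	/* reached the end of the ring */
			ntc -= count;
	}
	return 0;	/* visits 2, 3, 0, 1, 2 */
}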
+
+/**
+ * ice_xsk_wakeup - Implements ndo_xsk_wakeup
+ * @netdev: net_device
+ * @queue_id: queue to wake up
+ * @flags: ignored in our case, since we have Rx and Tx in the same NAPI
+ *
+ * Returns negative on error, zero otherwise.
+ */
+int
+ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
+ u32 __always_unused flags)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_q_vector *q_vector;
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_ring *ring;
+
+ if (test_bit(__ICE_DOWN, vsi->state))
+ return -ENETDOWN;
+
+ if (!ice_is_xdp_ena_vsi(vsi))
+ return -ENXIO;
+
+ if (queue_id >= vsi->num_txq)
+ return -ENXIO;
+
+ if (!vsi->xdp_rings[queue_id]->xsk_umem)
+ return -ENXIO;
+
+ ring = vsi->xdp_rings[queue_id];
+
+ /* The idea here is that if NAPI is running, mark a miss, so
+ * it will run again. If not, trigger an interrupt and
+ * schedule the NAPI from interrupt context. If NAPI would be
+ * scheduled here, the interrupt affinity would not be
+ * honored.
+ */
+ q_vector = ring->q_vector;
+ if (!napi_if_scheduled_mark_missed(&q_vector->napi))
+ ice_trigger_sw_intr(&vsi->back->hw, q_vector);
+
+ return 0;
+}
+
+/**
+ * ice_xsk_any_rx_ring_ena - Checks if Rx rings have AF_XDP UMEM attached
+ * @vsi: VSI to be checked
+ *
+ * Returns true if any of the Rx rings has an AF_XDP UMEM attached
+ */
+bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
+{
+ int i;
+
+ if (!vsi->xsk_umems)
+ return false;
+
+ for (i = 0; i < vsi->num_xsk_umems; i++) {
+ if (vsi->xsk_umems[i])
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ice_xsk_clean_rx_ring - clean UMEM queues connected to a given Rx ring
+ * @rx_ring: ring to be cleaned
+ */
+void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring)
+{
+ u16 i;
+
+ for (i = 0; i < rx_ring->count; i++) {
+ struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
+
+ if (!rx_buf->addr)
+ continue;
+
+ xsk_umem_fq_reuse(rx_ring->xsk_umem, rx_buf->handle);
+ rx_buf->addr = NULL;
+ }
+}
+
+/**
+ * ice_xsk_clean_xdp_ring - Clean the XDP Tx ring and its UMEM queues
+ * @xdp_ring: XDP_Tx ring
+ */
+void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring)
+{
+ u16 ntc = xdp_ring->next_to_clean, ntu = xdp_ring->next_to_use;
+ u32 xsk_frames = 0;
+
+ while (ntc != ntu) {
+ struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[ntc];
+
+ if (tx_buf->raw_buf)
+ ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
+ else
+ xsk_frames++;
+
+ tx_buf->raw_buf = NULL;
+
+ ntc++;
+ if (ntc >= xdp_ring->count)
+ ntc = 0;
+ }
+
+ if (xsk_frames)
+ xsk_umem_complete_tx(xdp_ring->xsk_umem, xsk_frames);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h
new file mode 100644
index 000000000000..3479e1de98fe
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Intel Corporation. */
+
+#ifndef _ICE_XSK_H_
+#define _ICE_XSK_H_
+#include "ice_txrx.h"
+#include "ice.h"
+
+struct ice_vsi;
+
+#ifdef CONFIG_XDP_SOCKETS
+int ice_xsk_umem_setup(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid);
+void ice_zca_free(struct zero_copy_allocator *zca, unsigned long handle);
+int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget);
+bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget);
+int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags);
+bool ice_alloc_rx_bufs_slow_zc(struct ice_ring *rx_ring, u16 count);
+bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi);
+void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring);
+void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring);
+#else
+static inline int
+ice_xsk_umem_setup(struct ice_vsi __always_unused *vsi,
+ struct xdp_umem __always_unused *umem,
+ u16 __always_unused qid)
+{
+ return -ENOTSUPP;
+}
+
+static inline void
+ice_zca_free(struct zero_copy_allocator __always_unused *zca,
+ unsigned long __always_unused handle)
+{
+}
+
+static inline int
+ice_clean_rx_irq_zc(struct ice_ring __always_unused *rx_ring,
+ int __always_unused budget)
+{
+ return 0;
+}
+
+static inline bool
+ice_clean_tx_irq_zc(struct ice_ring __always_unused *xdp_ring,
+ int __always_unused budget)
+{
+ return false;
+}
+
+static inline bool
+ice_alloc_rx_bufs_slow_zc(struct ice_ring __always_unused *rx_ring,
+ u16 __always_unused count)
+{
+ return false;
+}
+
+static inline bool ice_xsk_any_rx_ring_ena(struct ice_vsi __always_unused *vsi)
+{
+ return false;
+}
+
+static inline int
+ice_xsk_wakeup(struct net_device __always_unused *netdev,
+ u32 __always_unused queue_id, u32 __always_unused flags)
+{
+ return -ENOTSUPP;
+}
+
+#define ice_xsk_clean_rx_ring(rx_ring) do {} while (0)
+#define ice_xsk_clean_xdp_ring(xdp_ring) do {} while (0)
+#endif /* CONFIG_XDP_SOCKETS */
+#endif /* !_ICE_XSK_H_ */
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 3ec2ce0725d5..8a6ef3514129 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -466,7 +466,7 @@ static s32 igb_init_mac_params_82575(struct e1000_hw *hw)
? igb_setup_copper_link_82575
: igb_setup_serdes_link_82575;
- if (mac->type == e1000_82580) {
+ if (mac->type == e1000_82580 || mac->type == e1000_i350) {
switch (hw->device_id) {
/* feature not supported on these id's */
case E1000_DEV_ID_DH89XXCC_SGMII:
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index 6ad775b1a4c5..63ec253ac788 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -127,6 +127,7 @@ struct e1000_adv_tx_context_desc {
};
#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
+#define E1000_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */
#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 packet TYPE of SCTP */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 105b0624081a..48a40e4132f9 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -753,7 +753,8 @@ u32 igb_rd32(struct e1000_hw *hw, u32 reg)
struct net_device *netdev = igb->netdev;
hw->hw_addr = NULL;
netdev_err(netdev, "PCIe link lost\n");
- WARN(1, "igb: Failed to read reg 0x%x!\n", reg);
+ WARN(pci_device_is_present(igb->pdev),
+ "igb: Failed to read reg 0x%x!\n", reg);
}
return value;
@@ -2064,7 +2065,8 @@ static void igb_check_swap_media(struct igb_adapter *adapter)
if ((hw->phy.media_type == e1000_media_type_copper) &&
(!(connsw & E1000_CONNSW_AUTOSENSE_EN))) {
swap_now = true;
- } else if (!(connsw & E1000_CONNSW_SERDESD)) {
+ } else if ((hw->phy.media_type != e1000_media_type_copper) &&
+ !(connsw & E1000_CONNSW_SERDESD)) {
/* copper signal takes time to appear */
if (adapter->copper_tries < 4) {
adapter->copper_tries++;
@@ -2370,7 +2372,7 @@ void igb_reset(struct igb_adapter *adapter)
adapter->ei.get_invariants(hw);
adapter->flags &= ~IGB_FLAG_MEDIA_RESET;
}
- if ((mac->type == e1000_82575) &&
+ if ((mac->type == e1000_82575 || mac->type == e1000_i350) &&
(adapter->flags & IGB_FLAG_MAS_ENABLE)) {
igb_enable_mas(adapter);
}
@@ -2516,6 +2518,7 @@ igb_features_check(struct sk_buff *skb, struct net_device *dev,
if (unlikely(mac_hdr_len > IGB_MAX_MAC_HDR_LEN))
return features & ~(NETIF_F_HW_CSUM |
NETIF_F_SCTP_CRC |
+ NETIF_F_GSO_UDP_L4 |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_TSO |
NETIF_F_TSO6);
@@ -2524,6 +2527,7 @@ igb_features_check(struct sk_buff *skb, struct net_device *dev,
if (unlikely(network_hdr_len > IGB_MAX_NETWORK_HDR_LEN))
return features & ~(NETIF_F_HW_CSUM |
NETIF_F_SCTP_CRC |
+ NETIF_F_GSO_UDP_L4 |
NETIF_F_TSO |
NETIF_F_TSO6);
@@ -3120,7 +3124,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_HW_CSUM;
if (hw->mac.type >= e1000_82576)
- netdev->features |= NETIF_F_SCTP_CRC;
+ netdev->features |= NETIF_F_SCTP_CRC | NETIF_F_GSO_UDP_L4;
if (hw->mac.type >= e1000_i350)
netdev->features |= NETIF_F_HW_TC;
@@ -5694,6 +5698,7 @@ static int igb_tso(struct igb_ring *tx_ring,
} ip;
union {
struct tcphdr *tcp;
+ struct udphdr *udp;
unsigned char *hdr;
} l4;
u32 paylen, l4_offset;
@@ -5713,7 +5718,8 @@ static int igb_tso(struct igb_ring *tx_ring,
l4.hdr = skb_checksum_start(skb);
/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
- type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
+ type_tucmd = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ?
+ E1000_ADVTXD_TUCMD_L4T_UDP : E1000_ADVTXD_TUCMD_L4T_TCP;
/* initialize outer IP header fields */
if (ip.v4->version == 4) {
@@ -5741,12 +5747,19 @@ static int igb_tso(struct igb_ring *tx_ring,
/* determine offset of inner transport header */
l4_offset = l4.hdr - skb->data;
- /* compute length of segmentation header */
- *hdr_len = (l4.tcp->doff * 4) + l4_offset;
-
/* remove payload length from inner checksum */
paylen = skb->len - l4_offset;
- csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
+ if (type_tucmd & E1000_ADVTXD_TUCMD_L4T_TCP) {
+ /* compute length of segmentation header */
+ *hdr_len = (l4.tcp->doff * 4) + l4_offset;
+ csum_replace_by_diff(&l4.tcp->check,
+ (__force __wsum)htonl(paylen));
+ } else {
+ /* compute length of segmentation header */
+ *hdr_len = sizeof(*l4.udp) + l4_offset;
+ csum_replace_by_diff(&l4.udp->check,
+ (__force __wsum)htonl(paylen));
+ }
/* update gso size and bytecount with header size */
first->gso_segs = skb_shinfo(skb)->gso_segs;
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 7e16345d836e..0868677d43ed 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -411,7 +411,6 @@ struct igc_adapter {
u32 tx_hwtstamp_timeouts;
u32 tx_hwtstamp_skipped;
u32 rx_hwtstamp_cleared;
- u32 *shadow_vfta;
u32 rss_queues;
u32 rss_indir_tbl_init;
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index f3f2325fe567..f3788f0b95b4 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -282,7 +282,10 @@
#define IGC_RCTL_BAM 0x00008000 /* broadcast enable */
/* Receive Descriptor bit definitions */
-#define IGC_RXD_STAT_EOP 0x02 /* End of Packet */
+#define IGC_RXD_STAT_EOP 0x02 /* End of Packet */
+#define IGC_RXD_STAT_IXSM 0x04 /* Ignore checksum */
+#define IGC_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
+#define IGC_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
#define IGC_RXDEXT_STATERR_CE 0x01000000
#define IGC_RXDEXT_STATERR_SE 0x02000000
@@ -402,4 +405,7 @@
#define IGC_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet Type of TCP */
#define IGC_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 packet TYPE of SCTP */
+/* Maximum size of the MTA register table in all supported adapters */
+#define MAX_MTA_REG 128
+
#endif /* _IGC_DEFINES_H_ */
diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h
index abb2d72911ff..20f710645746 100644
--- a/drivers/net/ethernet/intel/igc/igc_hw.h
+++ b/drivers/net/ethernet/intel/igc/igc_hw.h
@@ -91,6 +91,7 @@ struct igc_mac_info {
u16 mta_reg_count;
u16 uta_reg_count;
+ u32 mta_shadow[MAX_MTA_REG];
u16 rar_entry_count;
u8 forced_speed_duplex;
diff --git a/drivers/net/ethernet/intel/igc/igc_mac.c b/drivers/net/ethernet/intel/igc/igc_mac.c
index 5eeb4c8caf4a..12aa6b5fcb5d 100644
--- a/drivers/net/ethernet/intel/igc/igc_mac.c
+++ b/drivers/net/ethernet/intel/igc/igc_mac.c
@@ -784,3 +784,107 @@ bool igc_enable_mng_pass_thru(struct igc_hw *hw)
out:
return ret_val;
}
+
+/**
+ * igc_hash_mc_addr - Generate a multicast hash value
+ * @hw: pointer to the HW structure
+ * @mc_addr: pointer to a multicast address
+ *
+ * Generates a multicast address hash value which is used to determine
+ * the multicast filter table array address and new table value. See
+ * igc_mta_set()
+ **/
+static u32 igc_hash_mc_addr(struct igc_hw *hw, u8 *mc_addr)
+{
+ u32 hash_value, hash_mask;
+ u8 bit_shift = 0;
+
+ /* Register count multiplied by bits per register */
+ hash_mask = (hw->mac.mta_reg_count * 32) - 1;
+
+ /* For a mc_filter_type of 0, bit_shift is the number of left-shifts
+ * where 0xFF would still fall within the hash mask.
+ */
+ while (hash_mask >> bit_shift != 0xFF)
+ bit_shift++;
+
+ /* The portion of the address that is used for the hash table
+ * is determined by the mc_filter_type setting.
+ * The algorithm is such that there is a total of 8 bits of shifting.
+ * The bit_shift for a mc_filter_type of 0 represents the number of
+ * left-shifts where the MSB of mc_addr[5] would still fall within
+ * the hash_mask. Case 0 does this exactly. Since there are a total
+ * of 8 bits of shifting, then mc_addr[4] will shift right the
+ * remaining number of bits. Thus 8 - bit_shift. The rest of the
+ * cases are a variation of this algorithm...essentially raising the
+ * number of bits to shift mc_addr[5] left, while still keeping the
+ * 8-bit shifting total.
+ *
+ * For example, given the following Destination MAC Address and an
+ * MTA register count of 128 (thus a 4096-bit vector and 0xFFF mask),
+ * we can see that the bit_shift for case 0 is 4. These are the hash
+ * values resulting from each mc_filter_type...
+ * [0] [1] [2] [3] [4] [5]
+ * 01 AA 00 12 34 56
+ * LSB MSB
+ *
+ * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
+ * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
+ * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163
+ * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
+ */
+ switch (hw->mac.mc_filter_type) {
+ default:
+ case 0:
+ break;
+ case 1:
+ bit_shift += 1;
+ break;
+ case 2:
+ bit_shift += 2;
+ break;
+ case 3:
+ bit_shift += 4;
+ break;
+ }
+
+ hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
+ (((u16)mc_addr[5]) << bit_shift)));
+
+ return hash_value;
+}
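The long comment above can be checked mechanically; a user-space sketch reproducing case 0 for the documented address 01:AA:00:12:34:56 with a 128-register MTA, including the register/bit split used by igc_update_mc_addr_list() below:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t mc_addr[6] = { 0x01, 0xAA, 0x00, 0x12, 0x34, 0x56 };
	uint32_t hash_mask = (128 * 32) - 1;	/* 4096-bit vector -> 0xFFF */
	uint8_t bit_shift = 0;

	while (hash_mask >> bit_shift != 0xFF)
		bit_shift++;			/* 4 for a 0xFFF mask */

	uint32_t hash = hash_mask & ((mc_addr[4] >> (8 - bit_shift)) |
				     ((uint16_t)mc_addr[5] << bit_shift));
	uint32_t hash_reg = (hash >> 5) & (128 - 1);
	uint32_t hash_bit = hash & 0x1F;

	printf("hash = 0x%03X\n", hash);	/* 0x563, matching case 0 */
	printf("MTA[%u] |= BIT(%u)\n", hash_reg, hash_bit);	/* MTA[43], bit 3 */
	return 0;
}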
+
+/**
+ * igc_update_mc_addr_list - Update Multicast addresses
+ * @hw: pointer to the HW structure
+ * @mc_addr_list: array of multicast addresses to program
+ * @mc_addr_count: number of multicast addresses to program
+ *
+ * Updates entire Multicast Table Array.
+ * The caller must have a packed mc_addr_list of multicast addresses.
+ **/
+void igc_update_mc_addr_list(struct igc_hw *hw,
+ u8 *mc_addr_list, u32 mc_addr_count)
+{
+ u32 hash_value, hash_bit, hash_reg;
+ int i;
+
+ /* clear mta_shadow */
+ memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
+
+ /* update mta_shadow from mc_addr_list */
+ for (i = 0; (u32)i < mc_addr_count; i++) {
+ hash_value = igc_hash_mc_addr(hw, mc_addr_list);
+
+ hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
+ hash_bit = hash_value & 0x1F;
+
+ hw->mac.mta_shadow[hash_reg] |= BIT(hash_bit);
+ mc_addr_list += ETH_ALEN;
+ }
+
+ /* replace the entire MTA table */
+ for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
+ array_wr32(IGC_MTA, i, hw->mac.mta_shadow[i]);
+ wrfl();
+}
diff --git a/drivers/net/ethernet/intel/igc/igc_mac.h b/drivers/net/ethernet/intel/igc/igc_mac.h
index 782bc995badc..832cccec87cd 100644
--- a/drivers/net/ethernet/intel/igc/igc_mac.h
+++ b/drivers/net/ethernet/intel/igc/igc_mac.h
@@ -29,6 +29,8 @@ s32 igc_get_speed_and_duplex_copper(struct igc_hw *hw, u16 *speed,
u16 *duplex);
bool igc_enable_mng_pass_thru(struct igc_hw *hw);
+void igc_update_mc_addr_list(struct igc_hw *hw,
+ u8 *mc_addr_list, u32 mc_addr_count);
enum igc_mng_mode {
igc_mng_mode_none = 0,
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 63b62d74f961..6105c6d1f3c9 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -795,6 +795,44 @@ static int igc_set_mac(struct net_device *netdev, void *p)
return 0;
}
+/**
+ * igc_write_mc_addr_list - write multicast addresses to MTA
+ * @netdev: network interface device structure
+ *
+ * Writes multicast address list to the MTA hash table.
+ * Returns: -ENOMEM on failure
+ * 0 on no addresses written
+ * X on writing X addresses to MTA
+ **/
+static int igc_write_mc_addr_list(struct net_device *netdev)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_hw *hw = &adapter->hw;
+ struct netdev_hw_addr *ha;
+ u8 *mta_list;
+ int i;
+
+ if (netdev_mc_empty(netdev)) {
+ /* nothing to program, so clear mc list */
+ igc_update_mc_addr_list(hw, NULL, 0);
+ return 0;
+ }
+
+ mta_list = kcalloc(netdev_mc_count(netdev), ETH_ALEN, GFP_ATOMIC);
+ if (!mta_list)
+ return -ENOMEM;
+
+ /* The shared function expects a packed array of only addresses. */
+ i = 0;
+ netdev_for_each_mc_addr(ha, netdev)
+ memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
+
+ igc_update_mc_addr_list(hw, mta_list, i);
+ kfree(mta_list);
+
+ return netdev_mc_count(netdev);
+}
+
static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
struct igc_tx_buffer *first,
u32 vlan_macip_lens, u32 type_tucmd,
@@ -1163,6 +1201,46 @@ static netdev_tx_t igc_xmit_frame(struct sk_buff *skb,
return igc_xmit_frame_ring(skb, igc_tx_queue_mapping(adapter, skb));
}
+static void igc_rx_checksum(struct igc_ring *ring,
+ union igc_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ skb_checksum_none_assert(skb);
+
+ /* Ignore Checksum bit is set */
+ if (igc_test_staterr(rx_desc, IGC_RXD_STAT_IXSM))
+ return;
+
+ /* Rx checksum disabled via ethtool */
+ if (!(ring->netdev->features & NETIF_F_RXCSUM))
+ return;
+
+ /* TCP/UDP checksum error bit is set */
+ if (igc_test_staterr(rx_desc,
+ IGC_RXDEXT_STATERR_TCPE |
+ IGC_RXDEXT_STATERR_IPE)) {
+ /* work around errata with sctp packets where the TCPE aka
+ * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
+ * packets (aka let the stack check the crc32c)
+ */
+ if (!(skb->len == 60 &&
+ test_bit(IGC_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
+ u64_stats_update_begin(&ring->rx_syncp);
+ ring->rx_stats.csum_err++;
+ u64_stats_update_end(&ring->rx_syncp);
+ }
+ /* let the stack verify checksum errors */
+ return;
+ }
+ /* It must be a TCP or UDP packet with a valid checksum */
+ if (igc_test_staterr(rx_desc, IGC_RXD_STAT_TCPCS |
+ IGC_RXD_STAT_UDPCS))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ dev_dbg(ring->dev, "cksum success: bits %08X\n",
+ le32_to_cpu(rx_desc->wb.upper.status_error));
+}
+
static inline void igc_rx_hash(struct igc_ring *ring,
union igc_adv_rx_desc *rx_desc,
struct sk_buff *skb)
@@ -1189,6 +1267,8 @@ static void igc_process_skb_fields(struct igc_ring *rx_ring,
{
igc_rx_hash(rx_ring, rx_desc, skb);
+ igc_rx_checksum(rx_ring, rx_desc, skb);
+
skb_record_rx_queue(skb, rx_ring->queue_index);
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
@@ -2518,6 +2598,110 @@ int igc_del_mac_steering_filter(struct igc_adapter *adapter,
IGC_MAC_STATE_QUEUE_STEERING | flags);
}
+/* Add a MAC filter for 'addr' directing matching traffic to 'queue'.
+ * The match is made on the destination address only.
+ */
+static int igc_add_mac_filter(struct igc_adapter *adapter,
+ const u8 *addr, const u8 queue)
+{
+ struct igc_hw *hw = &adapter->hw;
+ int rar_entries = hw->mac.rar_entry_count;
+ int i;
+
+ if (is_zero_ether_addr(addr))
+ return -EINVAL;
+
+ /* Search for the first empty entry in the MAC table.
+ * Do not touch entries at the end of the table reserved for the VF MAC
+ * addresses.
+ */
+ for (i = 0; i < rar_entries; i++) {
+ if (!igc_mac_entry_can_be_used(&adapter->mac_table[i],
+ addr, 0))
+ continue;
+
+ ether_addr_copy(adapter->mac_table[i].addr, addr);
+ adapter->mac_table[i].queue = queue;
+ adapter->mac_table[i].state |= IGC_MAC_STATE_IN_USE;
+
+ igc_rar_set_index(adapter, i);
+ return i;
+ }
+
+ return -ENOSPC;
+}
+
+/* Remove the MAC filter for 'addr' that directs matching traffic to
+ * 'queue'. The match is made on the destination address only.
+ */
+static int igc_del_mac_filter(struct igc_adapter *adapter,
+ const u8 *addr, const u8 queue)
+{
+ struct igc_hw *hw = &adapter->hw;
+ int rar_entries = hw->mac.rar_entry_count;
+ int i;
+
+ if (is_zero_ether_addr(addr))
+ return -EINVAL;
+
+ /* Search for matching entry in the MAC table based on given address
+ * and queue. Do not touch entries at the end of the table reserved
+ * for the VF MAC addresses.
+ */
+ for (i = 0; i < rar_entries; i++) {
+ if (!(adapter->mac_table[i].state & IGC_MAC_STATE_IN_USE))
+ continue;
+ if (adapter->mac_table[i].queue != queue)
+ continue;
+ if (!ether_addr_equal(adapter->mac_table[i].addr, addr))
+ continue;
+
+ /* When a filter for the default address is "deleted",
+ * we return it to its initial configuration
+ */
+ if (adapter->mac_table[i].state & IGC_MAC_STATE_DEFAULT) {
+ adapter->mac_table[i].state =
+ IGC_MAC_STATE_DEFAULT | IGC_MAC_STATE_IN_USE;
+ adapter->mac_table[i].queue = 0;
+ } else {
+ adapter->mac_table[i].state = 0;
+ adapter->mac_table[i].queue = 0;
+ memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+ }
+
+ igc_rar_set_index(adapter, i);
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+static int igc_uc_sync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ int ret;
+
+ ret = igc_add_mac_filter(adapter, addr, adapter->num_rx_queues);
+
+ return min_t(int, ret, 0);
+}
+
+static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
+ igc_del_mac_filter(adapter, addr, adapter->num_rx_queues);
+
+ return 0;
+}
+
/**
* igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
* @netdev: network interface device structure
@@ -2529,6 +2713,44 @@ int igc_del_mac_steering_filter(struct igc_adapter *adapter,
*/
static void igc_set_rx_mode(struct net_device *netdev)
{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_hw *hw = &adapter->hw;
+ u32 rctl = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
+ int count;
+
+ /* Check for Promiscuous and All Multicast modes */
+ if (netdev->flags & IFF_PROMISC) {
+ rctl |= IGC_RCTL_UPE | IGC_RCTL_MPE;
+ } else {
+ if (netdev->flags & IFF_ALLMULTI) {
+ rctl |= IGC_RCTL_MPE;
+ } else {
+ /* Write addresses to the MTA; if the attempt fails,
+ * just turn on promiscuous mode so that we can at
+ * least receive multicast traffic.
+ */
+ count = igc_write_mc_addr_list(netdev);
+ if (count < 0)
+ rctl |= IGC_RCTL_MPE;
+ }
+ }
+
+ /* Write addresses to available RAR registers; if there is not
+ * sufficient space to store all the addresses, enable
+ * unicast promiscuous mode.
+ */
+ if (__dev_uc_sync(netdev, igc_uc_sync, igc_uc_unsync))
+ rctl |= IGC_RCTL_UPE;
+
+ /* update state of unicast and multicast */
+ rctl |= rd32(IGC_RCTL) & ~(IGC_RCTL_UPE | IGC_RCTL_MPE);
+ wr32(IGC_RCTL, rctl);
+
+#if (PAGE_SIZE < 8192)
+ if (adapter->max_frame_size <= IGC_MAX_FRAME_BUILD_SKB)
+ rlpml = IGC_MAX_FRAME_BUILD_SKB;
+#endif
+ wr32(IGC_RLPML, rlpml);
}
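
The RCTL update above is a read-modify-write: only the UPE/MPE bits are recomputed, and every other bit of the live register is preserved. A minimal sketch of the mask-merge idiom (register access simulated, bit positions illustrative):

#include <stdio.h>
#include <stdint.h>

#define UPE 0x008	/* unicast promiscuous (illustrative bit) */
#define MPE 0x010	/* multicast promiscuous */

static uint32_t hw_reg = 0x125;	/* pretend current RCTL value */

int main(void)
{
	uint32_t rctl = MPE;		/* bits recomputed this pass */

	rctl |= hw_reg & ~(UPE | MPE);	/* preserve unrelated bits */
	hw_reg = rctl;
	printf("RCTL = 0x%03x\n", (unsigned)hw_reg); /* 0x135 */
	return 0;
}
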
/**
@@ -3982,6 +4204,7 @@ static const struct net_device_ops igc_netdev_ops = {
.ndo_open = igc_open,
.ndo_stop = igc_close,
.ndo_start_xmit = igc_xmit_frame,
+ .ndo_set_rx_mode = igc_set_rx_mode,
.ndo_set_mac_address = igc_set_mac,
.ndo_change_mtu = igc_change_mtu,
.ndo_get_stats = igc_get_stats,
@@ -4047,7 +4270,8 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg)
hw->hw_addr = NULL;
netif_device_detach(netdev);
netdev_err(netdev, "PCIe link lost, device now detached\n");
- WARN(1, "igc: Failed to read reg 0x%x!\n", reg);
+ WARN(pci_device_is_present(igc->pdev),
+ "igc: Failed to read reg 0x%x!\n", reg);
}
return value;
@@ -4210,7 +4434,9 @@ static int igc_probe(struct pci_dev *pdev,
goto err_sw_init;
/* Add supported features to the features list */
+ netdev->features |= NETIF_F_RXCSUM;
netdev->features |= NETIF_F_HW_CSUM;
+ netdev->features |= NETIF_F_SCTP_CRC;
/* setup the private structure */
err = igc_sw_init(adapter);
@@ -4348,7 +4574,6 @@ static void igc_remove(struct pci_dev *pdev)
pci_release_mem_regions(pdev);
kfree(adapter->mac_table);
- kfree(adapter->shadow_vfta);
free_netdev(netdev);
pci_disable_pcie_error_reporting(pdev);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index cc3196ae5aea..fd9f5d41b594 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -832,9 +832,9 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
int xdp_count, int xdp_idx,
int rxr_count, int rxr_idx)
{
+ int node = dev_to_node(&adapter->pdev->dev);
struct ixgbe_q_vector *q_vector;
struct ixgbe_ring *ring;
- int node = NUMA_NO_NODE;
int cpu = -1;
int ring_count;
u8 tcs = adapter->hw_tcs;
@@ -845,10 +845,8 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
if (rss_i > 1 && adapter->atr_sample_rate) {
- if (cpu_online(v_idx)) {
- cpu = v_idx;
- node = cpu_to_node(cpu);
- }
+ cpu = cpumask_local_spread(v_idx, node);
+ node = cpu_to_node(cpu);
}
}
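
cpumask_local_spread(v_idx, node) maps the i-th vector to an online CPU, enumerating NUMA-local CPUs before remote ones, which is why the open-coded cpu_online(v_idx) check can go. A userspace sketch of that spreading order, assuming fixed CPU lists:

/* Sketch of the cpumask_local_spread() idea: pick the i-th online
 * CPU, enumerating node-local CPUs before remote ones.  CPU sets
 * are plain arrays here; the kernel uses cpumasks. */
#include <stdio.h>

static const int local_cpus[]  = { 0, 2, 4, 6 };	/* node-local  */
static const int remote_cpus[] = { 1, 3, 5, 7 };	/* other nodes */
#define N(a) (int)(sizeof(a) / sizeof((a)[0]))

static int local_spread(int i)
{
	i %= N(local_cpus) + N(remote_cpus);
	return i < N(local_cpus) ? local_cpus[i]
				 : remote_cpus[i - N(local_cpus)];
}

int main(void)
{
	for (int v_idx = 0; v_idx < 10; v_idx++)
		printf("vector %d -> CPU %d\n", v_idx, local_spread(v_idx));
	return 0;
}
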
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 1ce2397306b9..1129ae70d5fb 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -4310,7 +4310,6 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state))
set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
- clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
continue;
@@ -7946,6 +7945,7 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
} ip;
union {
struct tcphdr *tcp;
+ struct udphdr *udp;
unsigned char *hdr;
} l4;
u32 paylen, l4_offset;
@@ -7969,7 +7969,8 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
l4.hdr = skb_checksum_start(skb);
/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
- type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ type_tucmd = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ?
+ IXGBE_ADVTXD_TUCMD_L4T_UDP : IXGBE_ADVTXD_TUCMD_L4T_TCP;
/* initialize outer IP header fields */
if (ip.v4->version == 4) {
@@ -7999,12 +8000,20 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
/* determine offset of inner transport header */
l4_offset = l4.hdr - skb->data;
- /* compute length of segmentation header */
- *hdr_len = (l4.tcp->doff * 4) + l4_offset;
-
/* remove payload length from inner checksum */
paylen = skb->len - l4_offset;
- csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
+
+ if (type_tucmd & IXGBE_ADVTXD_TUCMD_L4T_TCP) {
+ /* compute length of segmentation header */
+ *hdr_len = (l4.tcp->doff * 4) + l4_offset;
+ csum_replace_by_diff(&l4.tcp->check,
+ (__force __wsum)htonl(paylen));
+ } else {
+ /* compute length of segmentation header */
+ *hdr_len = sizeof(*l4.udp) + l4_offset;
+ csum_replace_by_diff(&l4.udp->check,
+ (__force __wsum)htonl(paylen));
+ }
/* update gso size and bytecount with header size */
first->gso_segs = skb_shinfo(skb)->gso_segs;
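
The split above exists because the two L4 headers encode their length differently: TCP carries it in doff (in 32-bit words), while a UDP header is always 8 bytes. A worked example with illustrative offsets:

/* Worked example of the *hdr_len computation: doff * 4 for TCP,
 * a fixed sizeof(struct udphdr) == 8 for UDP (GSO_UDP_L4). */
#include <stdio.h>

int main(void)
{
	int l4_offset = 14 + 20;	/* Ethernet + IPv4, no options */
	int tcp_doff  = 8;		/* 32 bytes: 20 base + options */

	printf("TCP hdr_len = %d\n", tcp_doff * 4 + l4_offset);	/* 66 */
	printf("UDP hdr_len = %d\n", 8 + l4_offset);		/* 42 */
	return 0;
}
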
@@ -8640,7 +8649,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
adapter->ptp_clock) {
- if (!test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
+ if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
+ !test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
&adapter->state)) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
@@ -10190,6 +10200,7 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
if (unlikely(mac_hdr_len > IXGBE_MAX_MAC_HDR_LEN))
return features & ~(NETIF_F_HW_CSUM |
NETIF_F_SCTP_CRC |
+ NETIF_F_GSO_UDP_L4 |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_TSO |
NETIF_F_TSO6);
@@ -10198,6 +10209,7 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
if (unlikely(network_hdr_len > IXGBE_MAX_NETWORK_HDR_LEN))
return features & ~(NETIF_F_HW_CSUM |
NETIF_F_SCTP_CRC |
+ NETIF_F_GSO_UDP_L4 |
NETIF_F_TSO |
NETIF_F_TSO6);
@@ -10907,7 +10919,7 @@ skip_sriov:
IXGBE_GSO_PARTIAL_FEATURES;
if (hw->mac.type >= ixgbe_mac_82599EB)
- netdev->features |= NETIF_F_SCTP_CRC;
+ netdev->features |= NETIF_F_SCTP_CRC | NETIF_F_GSO_UDP_L4;
#ifdef CONFIG_IXGBE_IPSEC
#define IXGBE_ESP_FEATURES (NETIF_F_HW_ESP | \
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index fb942167ee54..3d5caea096fb 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -61,6 +61,7 @@ config MVNETA
depends on ARCH_MVEBU || COMPILE_TEST
select MVMDIO
select PHYLINK
+ select PAGE_POOL
---help---
This driver supports the network interface units in the
Marvell ARMADA XP, ARMADA 370, ARMADA 38x and
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 82ea55ae5053..d5b644131cff 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2959,15 +2959,16 @@ static void set_params(struct mv643xx_eth_private *mp,
static int get_phy_mode(struct mv643xx_eth_private *mp)
{
struct device *dev = mp->dev->dev.parent;
- int iface = -1;
+ phy_interface_t iface;
+ int err;
if (dev->of_node)
- iface = of_get_phy_mode(dev->of_node);
+ err = of_get_phy_mode(dev->of_node, &iface);
/* Historical default if unspecified. We could also read/write
* the interface state in the PSC1
*/
- if (iface < 0)
+ if (!dev->of_node || err)
iface = PHY_INTERFACE_MODE_GMII;
return iface;
}
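
The new of_get_phy_mode() returns 0 or a negative errno and hands the mode back through an out parameter, so callers fall back to a default on error instead of testing for a negative mode value. A standalone sketch of that calling convention (function and enum names hypothetical):

#include <stdio.h>

enum phy_mode { MODE_GMII, MODE_RGMII };

/* hypothetical stand-in for of_get_phy_mode(node, &mode) */
static int get_mode(int have_node, enum phy_mode *mode)
{
	if (!have_node)
		return -19;	/* -ENODEV */
	*mode = MODE_RGMII;
	return 0;
}

int main(void)
{
	enum phy_mode mode;

	if (get_mode(0, &mode))		/* lookup failed...          */
		mode = MODE_GMII;	/* ...use historical default */
	printf("mode = %d\n", mode);
	return 0;
}
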
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index e49820675c8c..274ac39c0f0f 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -37,6 +37,8 @@
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tso.h>
+#include <net/page_pool.h>
+#include <linux/bpf_trace.h>
/* Registers */
#define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2))
@@ -322,6 +324,13 @@
ETH_HLEN + ETH_FCS_LEN, \
cache_line_size())
+#define MVNETA_SKB_HEADROOM (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + \
+ NET_IP_ALIGN)
+#define MVNETA_SKB_PAD (SKB_DATA_ALIGN(sizeof(struct skb_shared_info) + \
+ MVNETA_SKB_HEADROOM))
+#define MVNETA_SKB_SIZE(len) (SKB_DATA_ALIGN(len) + MVNETA_SKB_PAD)
+#define MVNETA_MAX_RX_BUF_SIZE (PAGE_SIZE - MVNETA_SKB_PAD)
+
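
These macros budget one page per buffer: headroom for XDP plus the trailing skb_shared_info are carved out first, and MVNETA_MAX_RX_BUF_SIZE is what remains for packet data. A worked computation, assuming a 4 KiB page, 64-byte cache lines, XDP_PACKET_HEADROOM of 256, NET_SKB_PAD of 32, NET_IP_ALIGN of 2 and a 320-byte skb_shared_info (all of these are config-dependent):

#include <stdio.h>

#define ALIGN64(x)	(((x) + 63) & ~63)	/* SKB_DATA_ALIGN */
#define HEADROOM	(256 > 32 ? 256 + 2 : 32 + 2)
#define SHINFO_SZ	320			/* assumed size */
#define SKB_PAD		ALIGN64(SHINFO_SZ + HEADROOM)
#define MAX_RX_BUF_SIZE	(4096 - SKB_PAD)

int main(void)
{
	printf("headroom     = %d\n", HEADROOM);	/* 258  */
	printf("skb pad      = %d\n", SKB_PAD);		/* 640  */
	printf("max rx bytes = %d\n", MAX_RX_BUF_SIZE);	/* 3456 */
	return 0;
}
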
#define IS_TSO_HEADER(txq, addr) \
((addr >= txq->tso_hdrs_phys) && \
(addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))
@@ -346,6 +355,11 @@ struct mvneta_statistic {
#define T_REG_64 64
#define T_SW 1
+#define MVNETA_XDP_PASS BIT(0)
+#define MVNETA_XDP_DROPPED BIT(1)
+#define MVNETA_XDP_TX BIT(2)
+#define MVNETA_XDP_REDIR BIT(3)
+
static const struct mvneta_statistic mvneta_statistics[] = {
{ 0x3000, T_REG_64, "good_octets_received", },
{ 0x3010, T_REG_32, "good_frames_received", },
@@ -425,6 +439,8 @@ struct mvneta_port {
u32 cause_rx_tx;
struct napi_struct napi;
+ struct bpf_prog *xdp_prog;
+
/* Core clock */
struct clk *clk;
/* AXI clock */
@@ -545,6 +561,20 @@ struct mvneta_rx_desc {
};
#endif
+enum mvneta_tx_buf_type {
+ MVNETA_TYPE_SKB,
+ MVNETA_TYPE_XDP_TX,
+ MVNETA_TYPE_XDP_NDO,
+};
+
+struct mvneta_tx_buf {
+ enum mvneta_tx_buf_type type;
+ union {
+ struct xdp_frame *xdpf;
+ struct sk_buff *skb;
+ };
+};
+
struct mvneta_tx_queue {
/* Number of this TX queue, in the range 0-7 */
u8 id;
@@ -560,8 +590,8 @@ struct mvneta_tx_queue {
int tx_stop_threshold;
int tx_wake_threshold;
- /* Array of transmitted skb */
- struct sk_buff **tx_skb;
+ /* Array of transmitted buffers */
+ struct mvneta_tx_buf *buf;
/* Index of last TX DMA descriptor that was inserted */
int txq_put_index;
@@ -603,6 +633,10 @@ struct mvneta_rx_queue {
u32 pkts_coal;
u32 time_coal;
+ /* page_pool */
+ struct page_pool *page_pool;
+ struct xdp_rxq_info xdp_rxq;
+
/* Virtual address of the RX buffer */
void **buf_virt_addr;
@@ -641,7 +675,6 @@ static int txq_number = 8;
static int rxq_def;
static int rx_copybreak __read_mostly = 256;
-static int rx_header_size __read_mostly = 128;
/* HW BM requires that each port be identified by a unique ID */
static int global_port_id;
@@ -1761,24 +1794,25 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
int i;
for (i = 0; i < num; i++) {
+ struct mvneta_tx_buf *buf = &txq->buf[txq->txq_get_index];
struct mvneta_tx_desc *tx_desc = txq->descs +
txq->txq_get_index;
- struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];
-
- if (skb) {
- bytes_compl += skb->len;
- pkts_compl++;
- }
mvneta_txq_inc_get(txq);
- if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
+ if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr) &&
+ buf->type != MVNETA_TYPE_XDP_TX)
dma_unmap_single(pp->dev->dev.parent,
tx_desc->buf_phys_addr,
tx_desc->data_size, DMA_TO_DEVICE);
- if (!skb)
- continue;
- dev_kfree_skb_any(skb);
+ if (buf->type == MVNETA_TYPE_SKB && buf->skb) {
+ bytes_compl += buf->skb->len;
+ pkts_compl++;
+ dev_kfree_skb_any(buf->skb);
+ } else if (buf->type == MVNETA_TYPE_XDP_TX ||
+ buf->type == MVNETA_TYPE_XDP_NDO) {
+ xdp_return_frame(buf->xdpf);
+ }
}
netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
@@ -1812,23 +1846,21 @@ static int mvneta_rx_refill(struct mvneta_port *pp,
struct mvneta_rx_queue *rxq,
gfp_t gfp_mask)
{
+ enum dma_data_direction dma_dir;
dma_addr_t phys_addr;
struct page *page;
- page = __dev_alloc_page(gfp_mask);
+ page = page_pool_alloc_pages(rxq->page_pool,
+ gfp_mask | __GFP_NOWARN);
if (!page)
return -ENOMEM;
- /* map page for use */
- phys_addr = dma_map_page(pp->dev->dev.parent, page, 0, PAGE_SIZE,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
- __free_page(page);
- return -ENOMEM;
- }
-
- phys_addr += pp->rx_offset_correction;
+ phys_addr = page_pool_get_dma_addr(page) + pp->rx_offset_correction;
+ dma_dir = page_pool_get_dma_dir(rxq->page_pool);
+ dma_sync_single_for_device(pp->dev->dev.parent, phys_addr,
+ MVNETA_MAX_RX_BUF_SIZE, dma_dir);
mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq);
+
return 0;
}
@@ -1894,10 +1926,29 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
if (!data || !(rx_desc->buf_phys_addr))
continue;
- dma_unmap_page(pp->dev->dev.parent, rx_desc->buf_phys_addr,
- PAGE_SIZE, DMA_FROM_DEVICE);
- __free_page(data);
+ page_pool_put_page(rxq->page_pool, data, false);
+ }
+ if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
+ xdp_rxq_info_unreg(&rxq->xdp_rxq);
+ page_pool_destroy(rxq->page_pool);
+ rxq->page_pool = NULL;
+}
+
+static void
+mvneta_update_stats(struct mvneta_port *pp, u32 pkts,
+ u32 len, bool tx)
+{
+ struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+
+ u64_stats_update_begin(&stats->syncp);
+ if (tx) {
+ stats->tx_packets += pkts;
+ stats->tx_bytes += len;
+ } else {
+ stats->rx_packets += pkts;
+ stats->rx_bytes += len;
}
+ u64_stats_update_end(&stats->syncp);
}
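
u64_stats_update_begin()/end() wrap the counters in a sequence count so 64-bit statistics can be read tear-free on 32-bit machines. A hedged userspace analogue of the seqcount protocol (single-threaded demo; real readers and writers run concurrently):

/* Writer bumps the sequence around updates; a reader retries if it
 * observes an odd or changed sequence (a torn read). */
#include <stdio.h>
#include <stdint.h>

static volatile unsigned int seq;
static uint64_t rx_packets, rx_bytes;

static void update(uint64_t pkts, uint64_t bytes)
{
	seq++;				/* begin: seq becomes odd */
	rx_packets += pkts;
	rx_bytes   += bytes;
	seq++;				/* end: seq becomes even  */
}

static void read_stats(uint64_t *pkts, uint64_t *bytes)
{
	unsigned int start;

	do {
		start  = seq;
		*pkts  = rx_packets;
		*bytes = rx_bytes;
	} while (start & 1 || start != seq);	/* retry on torn read */
}

int main(void)
{
	uint64_t p, b;

	update(3, 4096);
	read_stats(&p, &b);
	printf("%llu pkts, %llu bytes\n",
	       (unsigned long long)p, (unsigned long long)b);
	return 0;
}
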
static inline
@@ -1925,43 +1976,292 @@ int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
return i;
}
+static int
+mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq,
+ struct xdp_frame *xdpf, bool dma_map)
+{
+ struct mvneta_tx_desc *tx_desc;
+ struct mvneta_tx_buf *buf;
+ dma_addr_t dma_addr;
+
+ if (txq->count >= txq->tx_stop_threshold)
+ return MVNETA_XDP_DROPPED;
+
+ tx_desc = mvneta_txq_next_desc_get(txq);
+
+ buf = &txq->buf[txq->txq_put_index];
+ if (dma_map) {
+ /* ndo_xdp_xmit */
+ dma_addr = dma_map_single(pp->dev->dev.parent, xdpf->data,
+ xdpf->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(pp->dev->dev.parent, dma_addr)) {
+ mvneta_txq_desc_put(txq);
+ return MVNETA_XDP_DROPPED;
+ }
+ buf->type = MVNETA_TYPE_XDP_NDO;
+ } else {
+ struct page *page = virt_to_page(xdpf->data);
+
+ dma_addr = page_pool_get_dma_addr(page) +
+ sizeof(*xdpf) + xdpf->headroom;
+ dma_sync_single_for_device(pp->dev->dev.parent, dma_addr,
+ xdpf->len, DMA_BIDIRECTIONAL);
+ buf->type = MVNETA_TYPE_XDP_TX;
+ }
+ buf->xdpf = xdpf;
+
+ tx_desc->command = MVNETA_TXD_FLZ_DESC;
+ tx_desc->buf_phys_addr = dma_addr;
+ tx_desc->data_size = xdpf->len;
+
+ mvneta_update_stats(pp, 1, xdpf->len, true);
+ mvneta_txq_inc_put(txq);
+ txq->pending++;
+ txq->count++;
+
+ return MVNETA_XDP_TX;
+}
+
+static int
+mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp)
+{
+ struct mvneta_tx_queue *txq;
+ struct netdev_queue *nq;
+ struct xdp_frame *xdpf;
+ int cpu;
+ u32 ret;
+
+ xdpf = convert_to_xdp_frame(xdp);
+ if (unlikely(!xdpf))
+ return MVNETA_XDP_DROPPED;
+
+ cpu = smp_processor_id();
+ txq = &pp->txqs[cpu % txq_number];
+ nq = netdev_get_tx_queue(pp->dev, txq->id);
+
+ __netif_tx_lock(nq, cpu);
+ ret = mvneta_xdp_submit_frame(pp, txq, xdpf, false);
+ if (ret == MVNETA_XDP_TX)
+ mvneta_txq_pend_desc_add(pp, txq, 0);
+ __netif_tx_unlock(nq);
+
+ return ret;
+}
+
+static int
+mvneta_xdp_xmit(struct net_device *dev, int num_frame,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+ int cpu = smp_processor_id();
+ struct mvneta_tx_queue *txq;
+ struct netdev_queue *nq;
+ int i, drops = 0;
+ u32 ret;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ txq = &pp->txqs[cpu % txq_number];
+ nq = netdev_get_tx_queue(pp->dev, txq->id);
+
+ __netif_tx_lock(nq, cpu);
+ for (i = 0; i < num_frame; i++) {
+ ret = mvneta_xdp_submit_frame(pp, txq, frames[i], true);
+ if (ret != MVNETA_XDP_TX) {
+ xdp_return_frame_rx_napi(frames[i]);
+ drops++;
+ }
+ }
+
+ if (unlikely(flags & XDP_XMIT_FLUSH))
+ mvneta_txq_pend_desc_add(pp, txq, 0);
+ __netif_tx_unlock(nq);
+
+ return num_frame - drops;
+}
+
+static int
+mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
+ struct bpf_prog *prog, struct xdp_buff *xdp)
+{
+ u32 ret, act = bpf_prog_run_xdp(prog, xdp);
+
+ switch (act) {
+ case XDP_PASS:
+ ret = MVNETA_XDP_PASS;
+ break;
+ case XDP_REDIRECT: {
+ int err;
+
+ err = xdp_do_redirect(pp->dev, xdp, prog);
+ if (err) {
+ ret = MVNETA_XDP_DROPPED;
+ xdp_return_buff(xdp);
+ } else {
+ ret = MVNETA_XDP_REDIR;
+ }
+ break;
+ }
+ case XDP_TX:
+ ret = mvneta_xdp_xmit_back(pp, xdp);
+ if (ret != MVNETA_XDP_TX)
+ xdp_return_buff(xdp);
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ /* fall through */
+ case XDP_ABORTED:
+ trace_xdp_exception(pp->dev, prog, act);
+ /* fall through */
+ case XDP_DROP:
+ page_pool_recycle_direct(rxq->page_pool,
+ virt_to_head_page(xdp->data));
+ ret = MVNETA_XDP_DROPPED;
+ break;
+ }
+
+ return ret;
+}
+
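
mvneta_run_xdp() dispatches on the verdict returned by the attached BPF program; on the BPF side that verdict is just a return code. A minimal XDP program for reference (built with clang -O2 -target bpf; section naming conventions depend on the loader):

#include <linux/bpf.h>

#ifndef __section
#define __section(x) __attribute__((section(x), used))
#endif

__section("xdp")
int xdp_pass_all(struct xdp_md *ctx)
{
	(void)ctx;
	return XDP_PASS;	/* or XDP_DROP / XDP_TX / XDP_REDIRECT */
}

char _license[] __section("license") = "GPL";
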
+static int
+mvneta_swbm_rx_frame(struct mvneta_port *pp,
+ struct mvneta_rx_desc *rx_desc,
+ struct mvneta_rx_queue *rxq,
+ struct xdp_buff *xdp,
+ struct bpf_prog *xdp_prog,
+ struct page *page, u32 *xdp_ret)
+{
+ unsigned char *data = page_address(page);
+ int data_len = -MVNETA_MH_SIZE, len;
+ struct net_device *dev = pp->dev;
+ enum dma_data_direction dma_dir;
+
+ if (MVNETA_SKB_SIZE(rx_desc->data_size) > PAGE_SIZE) {
+ len = MVNETA_MAX_RX_BUF_SIZE;
+ data_len += len;
+ } else {
+ len = rx_desc->data_size;
+ data_len += len - ETH_FCS_LEN;
+ }
+
+ dma_dir = page_pool_get_dma_dir(rxq->page_pool);
+ dma_sync_single_for_cpu(dev->dev.parent,
+ rx_desc->buf_phys_addr,
+ len, dma_dir);
+
+ /* Prefetch header */
+ prefetch(data);
+
+ xdp->data_hard_start = data;
+ xdp->data = data + MVNETA_SKB_HEADROOM + MVNETA_MH_SIZE;
+ xdp->data_end = xdp->data + data_len;
+ xdp_set_data_meta_invalid(xdp);
+
+ if (xdp_prog) {
+ u32 ret;
+
+ ret = mvneta_run_xdp(pp, rxq, xdp_prog, xdp);
+ if (ret != MVNETA_XDP_PASS) {
+ mvneta_update_stats(pp, 1,
+ xdp->data_end - xdp->data,
+ false);
+ rx_desc->buf_phys_addr = 0;
+ *xdp_ret |= ret;
+ return ret;
+ }
+ }
+
+ rxq->skb = build_skb(xdp->data_hard_start, PAGE_SIZE);
+ if (unlikely(!rxq->skb)) {
+ netdev_err(dev,
+ "Can't allocate skb on queue %d\n",
+ rxq->id);
+ dev->stats.rx_dropped++;
+ rxq->skb_alloc_err++;
+ return -ENOMEM;
+ }
+ page_pool_release_page(rxq->page_pool, page);
+
+ skb_reserve(rxq->skb,
+ xdp->data - xdp->data_hard_start);
+ skb_put(rxq->skb, xdp->data_end - xdp->data);
+ mvneta_rx_csum(pp, rx_desc->status, rxq->skb);
+
+ rxq->left_size = rx_desc->data_size - len;
+ rx_desc->buf_phys_addr = 0;
+
+ return 0;
+}
+
+static void
+mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
+ struct mvneta_rx_desc *rx_desc,
+ struct mvneta_rx_queue *rxq,
+ struct page *page)
+{
+ struct net_device *dev = pp->dev;
+ enum dma_data_direction dma_dir;
+ int data_len, len;
+
+ if (rxq->left_size > MVNETA_MAX_RX_BUF_SIZE) {
+ len = MVNETA_MAX_RX_BUF_SIZE;
+ data_len = len;
+ } else {
+ len = rxq->left_size;
+ data_len = len - ETH_FCS_LEN;
+ }
+ dma_dir = page_pool_get_dma_dir(rxq->page_pool);
+ dma_sync_single_for_cpu(dev->dev.parent,
+ rx_desc->buf_phys_addr,
+ len, dma_dir);
+ if (data_len > 0) {
+ /* refill descriptor with new buffer later */
+ skb_add_rx_frag(rxq->skb,
+ skb_shinfo(rxq->skb)->nr_frags,
+ page, MVNETA_SKB_HEADROOM, data_len,
+ PAGE_SIZE);
+ }
+ page_pool_release_page(rxq->page_pool, page);
+ rx_desc->buf_phys_addr = 0;
+ rxq->left_size -= len;
+}
+
/* Main rx processing when using software buffer management */
static int mvneta_rx_swbm(struct napi_struct *napi,
struct mvneta_port *pp, int budget,
struct mvneta_rx_queue *rxq)
{
+ int rcvd_pkts = 0, rcvd_bytes = 0, rx_proc = 0;
struct net_device *dev = pp->dev;
- int rx_todo, rx_proc;
- int refill = 0;
- u32 rcvd_pkts = 0;
- u32 rcvd_bytes = 0;
+ struct bpf_prog *xdp_prog;
+ struct xdp_buff xdp_buf;
+ int rx_todo, refill;
+ u32 xdp_ret = 0;
/* Get number of received packets */
rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
- rx_proc = 0;
+
+ rcu_read_lock();
+ xdp_prog = READ_ONCE(pp->xdp_prog);
+ xdp_buf.rxq = &rxq->xdp_rxq;
/* Fairness NAPI loop */
- while ((rcvd_pkts < budget) && (rx_proc < rx_todo)) {
+ while (rx_proc < budget && rx_proc < rx_todo) {
struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
- unsigned char *data;
- struct page *page;
- dma_addr_t phys_addr;
u32 rx_status, index;
- int rx_bytes, skb_size, copy_size;
- int frag_num, frag_size, frag_offset;
+ struct page *page;
index = rx_desc - rxq->descs;
page = (struct page *)rxq->buf_virt_addr[index];
- data = page_address(page);
- /* Prefetch header */
- prefetch(data);
- phys_addr = rx_desc->buf_phys_addr;
rx_status = rx_desc->status;
rx_proc++;
rxq->refill_num++;
if (rx_status & MVNETA_RXD_FIRST_DESC) {
+ int err;
+
/* Check errors only for FIRST descriptor */
if (rx_status & MVNETA_RXD_ERR_SUMMARY) {
mvneta_rx_error(pp, rx_desc);
@@ -1969,85 +2269,18 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
/* leave the descriptor untouched */
continue;
}
- rx_bytes = rx_desc->data_size -
- (ETH_FCS_LEN + MVNETA_MH_SIZE);
- /* Allocate small skb for each new packet */
- skb_size = max(rx_copybreak, rx_header_size);
- rxq->skb = netdev_alloc_skb_ip_align(dev, skb_size);
- if (unlikely(!rxq->skb)) {
- netdev_err(dev,
- "Can't allocate skb on queue %d\n",
- rxq->id);
- dev->stats.rx_dropped++;
- rxq->skb_alloc_err++;
+ err = mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf,
+ xdp_prog, page, &xdp_ret);
+ if (err)
continue;
- }
- copy_size = min(skb_size, rx_bytes);
-
- /* Copy data from buffer to SKB, skip Marvell header */
- memcpy(rxq->skb->data, data + MVNETA_MH_SIZE,
- copy_size);
- skb_put(rxq->skb, copy_size);
- rxq->left_size = rx_bytes - copy_size;
-
- mvneta_rx_csum(pp, rx_status, rxq->skb);
- if (rxq->left_size == 0) {
- int size = copy_size + MVNETA_MH_SIZE;
-
- dma_sync_single_range_for_cpu(dev->dev.parent,
- phys_addr, 0,
- size,
- DMA_FROM_DEVICE);
-
- /* leave the descriptor and buffer untouched */
- } else {
- /* refill descriptor with new buffer later */
- rx_desc->buf_phys_addr = 0;
-
- frag_num = 0;
- frag_offset = copy_size + MVNETA_MH_SIZE;
- frag_size = min(rxq->left_size,
- (int)(PAGE_SIZE - frag_offset));
- skb_add_rx_frag(rxq->skb, frag_num, page,
- frag_offset, frag_size,
- PAGE_SIZE);
- dma_unmap_page(dev->dev.parent, phys_addr,
- PAGE_SIZE, DMA_FROM_DEVICE);
- rxq->left_size -= frag_size;
- }
} else {
- /* Middle or Last descriptor */
if (unlikely(!rxq->skb)) {
pr_debug("no skb for rx_status 0x%x\n",
rx_status);
continue;
}
- if (!rxq->left_size) {
- /* last descriptor has only FCS */
- /* and can be discarded */
- dma_sync_single_range_for_cpu(dev->dev.parent,
- phys_addr, 0,
- ETH_FCS_LEN,
- DMA_FROM_DEVICE);
- /* leave the descriptor and buffer untouched */
- } else {
- /* refill descriptor with new buffer later */
- rx_desc->buf_phys_addr = 0;
-
- frag_num = skb_shinfo(rxq->skb)->nr_frags;
- frag_offset = 0;
- frag_size = min(rxq->left_size,
- (int)(PAGE_SIZE - frag_offset));
- skb_add_rx_frag(rxq->skb, frag_num, page,
- frag_offset, frag_size,
- PAGE_SIZE);
-
- dma_unmap_page(dev->dev.parent, phys_addr,
- PAGE_SIZE, DMA_FROM_DEVICE);
-
- rxq->left_size -= frag_size;
- }
+ mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, page);
} /* Middle or Last descriptor */
if (!(rx_status & MVNETA_RXD_LAST_DESC))
@@ -2072,17 +2305,14 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
/* clean incomplete skb pointer in queue */
rxq->skb = NULL;
- rxq->left_size = 0;
}
+ rcu_read_unlock();
- if (rcvd_pkts) {
- struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+ if (xdp_ret & MVNETA_XDP_REDIR)
+ xdp_do_flush_map();
- u64_stats_update_begin(&stats->syncp);
- stats->rx_packets += rcvd_pkts;
- stats->rx_bytes += rcvd_bytes;
- u64_stats_update_end(&stats->syncp);
- }
+ if (rcvd_pkts)
+ mvneta_update_stats(pp, rcvd_pkts, rcvd_bytes, false);
/* return some buffers to hardware queue, one at a time is too slow */
refill = mvneta_rx_refill_queue(pp, rxq);
@@ -2206,14 +2436,8 @@ err_drop_frame:
napi_gro_receive(napi, skb);
}
- if (rcvd_pkts) {
- struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
-
- u64_stats_update_begin(&stats->syncp);
- stats->rx_packets += rcvd_pkts;
- stats->rx_bytes += rcvd_bytes;
- u64_stats_update_end(&stats->syncp);
- }
+ if (rcvd_pkts)
+ mvneta_update_stats(pp, rcvd_pkts, rcvd_bytes, false);
/* Update rxq management counters */
mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
@@ -2225,16 +2449,19 @@ static inline void
mvneta_tso_put_hdr(struct sk_buff *skb,
struct mvneta_port *pp, struct mvneta_tx_queue *txq)
{
- struct mvneta_tx_desc *tx_desc;
int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
+ struct mvneta_tx_desc *tx_desc;
- txq->tx_skb[txq->txq_put_index] = NULL;
tx_desc = mvneta_txq_next_desc_get(txq);
tx_desc->data_size = hdr_len;
tx_desc->command = mvneta_skb_tx_csum(pp, skb);
tx_desc->command |= MVNETA_TXD_F_DESC;
tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
txq->txq_put_index * TSO_HEADER_SIZE;
+ buf->type = MVNETA_TYPE_SKB;
+ buf->skb = NULL;
+
mvneta_txq_inc_put(txq);
}
@@ -2243,6 +2470,7 @@ mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
struct sk_buff *skb, char *data, int size,
bool last_tcp, bool is_last)
{
+ struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
struct mvneta_tx_desc *tx_desc;
tx_desc = mvneta_txq_next_desc_get(txq);
@@ -2256,7 +2484,8 @@ mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
}
tx_desc->command = 0;
- txq->tx_skb[txq->txq_put_index] = NULL;
+ buf->type = MVNETA_TYPE_SKB;
+ buf->skb = NULL;
if (last_tcp) {
/* last descriptor in the TCP packet */
@@ -2264,7 +2493,7 @@ mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
/* last descriptor in SKB */
if (is_last)
- txq->tx_skb[txq->txq_put_index] = skb;
+ buf->skb = skb;
}
mvneta_txq_inc_put(txq);
return 0;
@@ -2349,6 +2578,7 @@ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
int i, nr_frags = skb_shinfo(skb)->nr_frags;
for (i = 0; i < nr_frags; i++) {
+ struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
void *addr = skb_frag_address(frag);
@@ -2368,12 +2598,13 @@ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
if (i == nr_frags - 1) {
/* Last descriptor */
tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
- txq->tx_skb[txq->txq_put_index] = skb;
+ buf->skb = skb;
} else {
/* Descriptor in the middle: Not First, Not Last */
tx_desc->command = 0;
- txq->tx_skb[txq->txq_put_index] = NULL;
+ buf->skb = NULL;
}
+ buf->type = MVNETA_TYPE_SKB;
mvneta_txq_inc_put(txq);
}
@@ -2401,6 +2632,7 @@ static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
struct mvneta_port *pp = netdev_priv(dev);
u16 txq_id = skb_get_queue_mapping(skb);
struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
+ struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
struct mvneta_tx_desc *tx_desc;
int len = skb->len;
int frags = 0;
@@ -2433,16 +2665,17 @@ static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
goto out;
}
+ buf->type = MVNETA_TYPE_SKB;
if (frags == 1) {
/* First and Last descriptor */
tx_cmd |= MVNETA_TXD_FLZ_DESC;
tx_desc->command = tx_cmd;
- txq->tx_skb[txq->txq_put_index] = skb;
+ buf->skb = skb;
mvneta_txq_inc_put(txq);
} else {
/* First but not Last */
tx_cmd |= MVNETA_TXD_F_DESC;
- txq->tx_skb[txq->txq_put_index] = NULL;
+ buf->skb = NULL;
mvneta_txq_inc_put(txq);
tx_desc->command = tx_cmd;
/* Continue with other skb fragments */
@@ -2459,7 +2692,6 @@ static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
out:
if (frags > 0) {
- struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
netdev_tx_sent_queue(nq, len);
@@ -2474,10 +2706,7 @@ out:
else
txq->pending += frags;
- u64_stats_update_begin(&stats->syncp);
- stats->tx_packets++;
- stats->tx_bytes += len;
- u64_stats_update_end(&stats->syncp);
+ mvneta_update_stats(pp, 1, len, true);
} else {
dev->stats.tx_dropped++;
dev_kfree_skb_any(skb);
@@ -2830,11 +3059,55 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
return rx_done;
}
+static int mvneta_create_page_pool(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq, int size)
+{
+ struct bpf_prog *xdp_prog = READ_ONCE(pp->xdp_prog);
+ struct page_pool_params pp_params = {
+ .order = 0,
+ .flags = PP_FLAG_DMA_MAP,
+ .pool_size = size,
+ .nid = cpu_to_node(0),
+ .dev = pp->dev->dev.parent,
+ .dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
+ };
+ int err;
+
+ rxq->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(rxq->page_pool)) {
+ err = PTR_ERR(rxq->page_pool);
+ rxq->page_pool = NULL;
+ return err;
+ }
+
+ err = xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id);
+ if (err < 0)
+ goto err_free_pp;
+
+ err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
+ rxq->page_pool);
+ if (err)
+ goto err_unregister_rxq;
+
+ return 0;
+
+err_unregister_rxq:
+ xdp_rxq_info_unreg(&rxq->xdp_rxq);
+err_free_pp:
+ page_pool_destroy(rxq->page_pool);
+ rxq->page_pool = NULL;
+ return err;
+}
+
/* Handle rxq fill: allocates rxq skbs; called when initializing a port */
static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
int num)
{
- int i;
+ int i, err;
+
+ err = mvneta_create_page_pool(pp, rxq, num);
+ if (err < 0)
+ return err;
for (i = 0; i < num; i++) {
memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
@@ -2908,7 +3181,7 @@ static void mvneta_rxq_hw_init(struct mvneta_port *pp,
/* Set Offset */
mvneta_rxq_offset_set(pp, rxq, 0);
mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ?
- PAGE_SIZE :
+ MVNETA_MAX_RX_BUF_SIZE :
MVNETA_RX_BUF_SIZE(pp->pkt_size));
mvneta_rxq_bm_disable(pp, rxq);
mvneta_rxq_fill(pp, rxq, rxq->size);
@@ -2989,9 +3262,8 @@ static int mvneta_txq_sw_init(struct mvneta_port *pp,
txq->last_desc = txq->size - 1;
- txq->tx_skb = kmalloc_array(txq->size, sizeof(*txq->tx_skb),
- GFP_KERNEL);
- if (!txq->tx_skb) {
+ txq->buf = kmalloc_array(txq->size, sizeof(*txq->buf), GFP_KERNEL);
+ if (!txq->buf) {
dma_free_coherent(pp->dev->dev.parent,
txq->size * MVNETA_DESC_ALIGNED_SIZE,
txq->descs, txq->descs_phys);
@@ -3003,7 +3275,7 @@ static int mvneta_txq_sw_init(struct mvneta_port *pp,
txq->size * TSO_HEADER_SIZE,
&txq->tso_hdrs_phys, GFP_KERNEL);
if (!txq->tso_hdrs) {
- kfree(txq->tx_skb);
+ kfree(txq->buf);
dma_free_coherent(pp->dev->dev.parent,
txq->size * MVNETA_DESC_ALIGNED_SIZE,
txq->descs, txq->descs_phys);
@@ -3056,7 +3328,7 @@ static void mvneta_txq_sw_deinit(struct mvneta_port *pp,
{
struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
- kfree(txq->tx_skb);
+ kfree(txq->buf);
if (txq->tso_hdrs)
dma_free_coherent(pp->dev->dev.parent,
@@ -3263,6 +3535,11 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
}
+ if (pp->xdp_prog && mtu > MVNETA_MAX_RX_BUF_SIZE) {
+ netdev_info(dev, "Illegal MTU value %d for XDP mode\n", mtu);
+ return -EINVAL;
+ }
+
dev->mtu = mtu;
if (!netif_running(dev)) {
@@ -3932,6 +4209,47 @@ static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return phylink_mii_ioctl(pp->phylink, ifr, cmd);
}
+static int mvneta_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
+{
+ bool need_update, running = netif_running(dev);
+ struct mvneta_port *pp = netdev_priv(dev);
+ struct bpf_prog *old_prog;
+
+ if (prog && dev->mtu > MVNETA_MAX_RX_BUF_SIZE) {
+ NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported on XDP");
+ return -EOPNOTSUPP;
+ }
+
+ need_update = !!pp->xdp_prog != !!prog;
+ if (running && need_update)
+ mvneta_stop(dev);
+
+ old_prog = xchg(&pp->xdp_prog, prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ if (running && need_update)
+ return mvneta_open(dev);
+
+ return 0;
+}
+
+static int mvneta_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return mvneta_xdp_setup(dev, xdp->prog, xdp->extack);
+ case XDP_QUERY_PROG:
+ xdp->prog_id = pp->xdp_prog ? pp->xdp_prog->aux->id : 0;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
/* Ethtool methods */
/* Set link ksettings (phy address, speed) for ethtools */
@@ -4328,6 +4646,8 @@ static const struct net_device_ops mvneta_netdev_ops = {
.ndo_fix_features = mvneta_fix_features,
.ndo_get_stats64 = mvneta_get_stats64,
.ndo_do_ioctl = mvneta_ioctl,
+ .ndo_bpf = mvneta_xdp,
+ .ndo_xdp_xmit = mvneta_xdp_xmit,
};
static const struct ethtool_ops mvneta_eth_tool_ops = {
@@ -4477,9 +4797,9 @@ static int mvneta_probe(struct platform_device *pdev)
struct phy *comphy;
const char *dt_mac_addr;
char hw_mac_addr[ETH_ALEN];
+ phy_interface_t phy_mode;
const char *mac_from;
int tx_csum_limit;
- int phy_mode;
int err;
int cpu;
@@ -4492,10 +4812,9 @@ static int mvneta_probe(struct platform_device *pdev)
if (dev->irq == 0)
return -EINVAL;
- phy_mode = of_get_phy_mode(dn);
- if (phy_mode < 0) {
+ err = of_get_phy_mode(dn, &phy_mode);
+ if (err) {
dev_err(&pdev->dev, "incorrect phy-mode\n");
- err = -EINVAL;
goto err_free_irq;
}
@@ -4618,7 +4937,7 @@ static int mvneta_probe(struct platform_device *pdev)
SET_NETDEV_DEV(dev, &pdev->dev);
pp->id = global_port_id++;
- pp->rx_offset_correction = 0; /* not relevant for SW BM */
+ pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
/* Obtain access to BM resources if enabled and already initialized */
bm_node = of_parse_phandle(dn, "buffer-manager", 0);
diff --git a/drivers/net/ethernet/marvell/mvneta_bm.h b/drivers/net/ethernet/marvell/mvneta_bm.h
index c8425d35c049..e47783ce77e0 100644
--- a/drivers/net/ethernet/marvell/mvneta_bm.h
+++ b/drivers/net/ethernet/marvell/mvneta_bm.h
@@ -160,16 +160,23 @@ static inline u32 mvneta_bm_pool_get_bp(struct mvneta_bm *priv,
(bm_pool->id << MVNETA_BM_POOL_ACCESS_OFFS));
}
#else
-void mvneta_bm_pool_destroy(struct mvneta_bm *priv,
- struct mvneta_bm_pool *bm_pool, u8 port_map) {}
-void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
- u8 port_map) {}
-int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf) { return 0; }
-int mvneta_bm_pool_refill(struct mvneta_bm *priv,
- struct mvneta_bm_pool *bm_pool) {return 0; }
-struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,
- enum mvneta_bm_type type, u8 port_id,
- int pkt_size) { return NULL; }
+static inline void mvneta_bm_pool_destroy(struct mvneta_bm *priv,
+ struct mvneta_bm_pool *bm_pool,
+ u8 port_map) {}
+static inline void mvneta_bm_bufs_free(struct mvneta_bm *priv,
+ struct mvneta_bm_pool *bm_pool,
+ u8 port_map) {}
+static inline int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf)
+{ return 0; }
+static inline int mvneta_bm_pool_refill(struct mvneta_bm *priv,
+ struct mvneta_bm_pool *bm_pool)
+{ return 0; }
+static inline struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv,
+ u8 pool_id,
+ enum mvneta_bm_type type,
+ u8 port_id,
+ int pkt_size)
+{ return NULL; }
static inline void mvneta_bm_pool_put_bp(struct mvneta_bm *priv,
struct mvneta_bm_pool *bm_pool,
@@ -178,7 +185,8 @@ static inline void mvneta_bm_pool_put_bp(struct mvneta_bm *priv,
static inline u32 mvneta_bm_pool_get_bp(struct mvneta_bm *priv,
struct mvneta_bm_pool *bm_pool)
{ return 0; }
-struct mvneta_bm *mvneta_bm_get(struct device_node *node) { return NULL; }
-void mvneta_bm_put(struct mvneta_bm *priv) {}
+static inline struct mvneta_bm *mvneta_bm_get(struct device_node *node)
+{ return NULL; }
+static inline void mvneta_bm_put(struct mvneta_bm *priv) {}
#endif /* CONFIG_MVNETA_BM */
#endif
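
The stubs above must be static inline: a plain function definition in a header is emitted by every translation unit that includes it, and linking two such units then fails with multiple-definition errors, while static inline gives each unit a private, usually optimized-away copy. A sketch of the pattern (CONFIG_FEATURE is a placeholder symbol):

#ifdef CONFIG_FEATURE
int feature_init(void);			/* real version in a .c file */
#else
static inline int feature_init(void)	/* no-op stub, one private   */
{					/* copy per translation unit */
	return 0;
}
#endif
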
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 111b3b8239e1..17e24c1e1c2b 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -2863,7 +2863,7 @@ static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
skb->ip_summed = CHECKSUM_NONE;
}
-/* Reuse skb if possible, or allocate a new skb and add it to BM pool */
+/* Allocate a new skb and add it to BM pool */
static int mvpp2_rx_refill(struct mvpp2_port *port,
struct mvpp2_bm_pool *bm_pool, int pool)
{
@@ -2871,7 +2871,6 @@ static int mvpp2_rx_refill(struct mvpp2_port *port,
phys_addr_t phys_addr;
void *buf;
- /* No recycle or too many buffers are in use, so allocate a new skb */
buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr,
GFP_ATOMIC);
if (!buf)
@@ -2957,14 +2956,13 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
* by the hardware, and the information about the buffer is
* comprised by the RX descriptor.
*/
- if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
-err_drop_frame:
- dev->stats.rx_errors++;
- mvpp2_rx_error(port, rx_desc);
- /* Return the buffer to the pool */
- mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
- continue;
- }
+ if (rx_status & MVPP2_RXD_ERR_SUMMARY)
+ goto err_drop_frame;
+
+ dma_sync_single_for_cpu(dev->dev.parent, dma_addr,
+ rx_bytes + MVPP2_MH_SIZE,
+ DMA_FROM_DEVICE);
+ prefetch(data);
if (bm_pool->frag_size > PAGE_SIZE)
frag_size = 0;
@@ -2983,8 +2981,9 @@ err_drop_frame:
goto err_drop_frame;
}
- dma_unmap_single(dev->dev.parent, dma_addr,
- bm_pool->buf_size, DMA_FROM_DEVICE);
+ dma_unmap_single_attrs(dev->dev.parent, dma_addr,
+ bm_pool->buf_size, DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
rcvd_pkts++;
rcvd_bytes += rx_bytes;
@@ -2995,6 +2994,13 @@ err_drop_frame:
mvpp2_rx_csum(port, rx_status, skb);
napi_gro_receive(napi, skb);
+ continue;
+
+err_drop_frame:
+ dev->stats.rx_errors++;
+ mvpp2_rx_error(port, rx_desc);
+ /* Return the buffer to the pool */
+ mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
}
if (rcvd_pkts) {
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 51b77c2de400..3fb7ee3d4d13 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1489,8 +1489,10 @@ static int pxa168_eth_probe(struct platform_device *pdev)
goto err_netdev;
}
of_property_read_u32(np, "reg", &pep->phy_addr);
- pep->phy_intf = of_get_phy_mode(pdev->dev.of_node);
of_node_put(np);
+ err = of_get_phy_mode(pdev->dev.of_node, &pep->phy_intf);
+ if (err && err != -ENODEV)
+ goto err_netdev;
}
/* Hardware supports only 3 ports */
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_path.c b/drivers/net/ethernet/mediatek/mtk_eth_path.c
index ef11cf3d1ccc..0fe97155dd8f 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_path.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_path.c
@@ -57,7 +57,7 @@ static int set_mux_gdm1_to_gmac1_esw(struct mtk_eth *eth, int path)
default:
updated = false;
break;
- };
+ }
if (updated) {
val = mtk_r32(eth, MTK_MAC_MISC);
@@ -143,7 +143,7 @@ static int set_mux_gmac1_gmac2_to_sgmii_rgmii(struct mtk_eth *eth, int path)
default:
updated = false;
break;
- };
+ }
if (updated)
regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
@@ -174,7 +174,7 @@ static int set_mux_gmac12_to_gephy_sgmii(struct mtk_eth *eth, int path)
break;
default:
updated = false;
- };
+ }
if (updated)
regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index c61069340f4f..385a4ab9ec99 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -261,6 +261,7 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
ge_mode = 0;
switch (state->interface) {
case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_GMII:
ge_mode = 1;
break;
case PHY_INTERFACE_MODE_REVMII:
@@ -2757,9 +2758,10 @@ static const struct net_device_ops mtk_netdev_ops = {
static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
{
const __be32 *_id = of_get_property(np, "reg", NULL);
+ phy_interface_t phy_mode;
struct phylink *phylink;
- int phy_mode, id, err;
struct mtk_mac *mac;
+ int id, err;
if (!_id) {
dev_err(eth->dev, "missing mac id\n");
@@ -2804,10 +2806,9 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
/* phylink create */
- phy_mode = of_get_phy_mode(np);
- if (phy_mode < 0) {
+ err = of_get_phy_mode(np, &phy_mode);
+ if (err) {
dev_err(eth->dev, "incorrect phy-mode\n");
- err = -EINVAL;
goto free_netdev;
}
diff --git a/drivers/net/ethernet/mediatek/mtk_sgmii.c b/drivers/net/ethernet/mediatek/mtk_sgmii.c
index 4db27dfc7ec1..32d83421226a 100644
--- a/drivers/net/ethernet/mediatek/mtk_sgmii.c
+++ b/drivers/net/ethernet/mediatek/mtk_sgmii.c
@@ -93,7 +93,7 @@ int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id,
case SPEED_1000:
val |= SGMII_SPEED_1000;
break;
- };
+ }
if (state->duplex == DUPLEX_FULL)
val |= SGMII_DUPLEX_FULL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index fce9b3a24347..22c72fb7206a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -3935,13 +3935,17 @@ static void mlx4_restart_one_down(struct pci_dev *pdev);
static int mlx4_restart_one_up(struct pci_dev *pdev, bool reload,
struct devlink *devlink);
-static int mlx4_devlink_reload_down(struct devlink *devlink,
+static int mlx4_devlink_reload_down(struct devlink *devlink, bool netns_change,
struct netlink_ext_ack *extack)
{
struct mlx4_priv *priv = devlink_priv(devlink);
struct mlx4_dev *dev = &priv->dev;
struct mlx4_dev_persistent *persist = dev->persist;
+ if (netns_change) {
+ NL_SET_ERR_MSG_MOD(extack, "Namespace change is not supported");
+ return -EOPNOTSUPP;
+ }
if (persist->num_vfs)
mlx4_warn(persist->dev, "Reload performed on PF, will cause reset on operating Virtual Functions\n");
mlx4_restart_one_down(persist->pdev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 4356f3a58002..1187ef1375e2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -471,12 +471,31 @@ void mlx4_init_quotas(struct mlx4_dev *dev)
priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}
-static int get_max_gauranteed_vfs_counter(struct mlx4_dev *dev)
+static int
+mlx4_calc_res_counter_guaranteed(struct mlx4_dev *dev,
+ struct resource_allocator *res_alloc,
+ int vf)
{
- /* reduce the sink counter */
- return (dev->caps.max_counters - 1 -
- (MLX4_PF_COUNTERS_PER_PORT * MLX4_MAX_PORTS))
- / MLX4_MAX_PORTS;
+ struct mlx4_active_ports actv_ports;
+ int ports, counters_guaranteed;
+
+ /* For master, only allocate according to the number of phys ports */
+ if (vf == mlx4_master_func_num(dev))
+ return MLX4_PF_COUNTERS_PER_PORT * dev->caps.num_ports;
+
+ /* calculate real number of ports for the VF */
+ actv_ports = mlx4_get_active_ports(dev, vf);
+ ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
+ counters_guaranteed = ports * MLX4_VF_COUNTERS_PER_PORT;
+
+ /* If we do not have enough counters for this VF, do not
+ * allocate any for it. '-1' to reduce the sink counter.
+ */
+ if ((res_alloc->res_reserved + counters_guaranteed) >
+ (dev->caps.max_counters - 1))
+ return 0;
+
+ return counters_guaranteed;
}
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
@@ -484,7 +503,6 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
int i, j;
int t;
- int max_vfs_guarantee_counter = get_max_gauranteed_vfs_counter(dev);
priv->mfunc.master.res_tracker.slave_list =
kcalloc(dev->num_slaves, sizeof(struct slave_list),
@@ -603,16 +621,8 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
break;
case RES_COUNTER:
res_alloc->quota[t] = dev->caps.max_counters;
- if (t == mlx4_master_func_num(dev))
- res_alloc->guaranteed[t] =
- MLX4_PF_COUNTERS_PER_PORT *
- MLX4_MAX_PORTS;
- else if (t <= max_vfs_guarantee_counter)
- res_alloc->guaranteed[t] =
- MLX4_VF_COUNTERS_PER_PORT *
- MLX4_MAX_PORTS;
- else
- res_alloc->guaranteed[t] = 0;
+ res_alloc->guaranteed[t] =
+ mlx4_calc_res_counter_guaranteed(dev, res_alloc, t);
break;
default:
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 5708fcc079ca..a6f390fdb971 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -70,7 +70,7 @@ mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/t
mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o \
steering/dr_matcher.o steering/dr_rule.o \
- steering/dr_icm_pool.o steering/dr_crc32.o \
+ steering/dr_icm_pool.o \
steering/dr_ste.o steering/dr_send.o \
steering/dr_cmd.o steering/dr_fw.o \
steering/dr_action.o steering/fs_dr.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index ea934cd02448..34cba97f7bf4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -866,7 +866,7 @@ static void cmd_work_handler(struct work_struct *work)
if (!ent->page_queue) {
alloc_ret = alloc_ent(cmd);
if (alloc_ret < 0) {
- mlx5_core_err(dev, "failed to allocate command entry\n");
+ mlx5_core_err_rl(dev, "failed to allocate command entry\n");
if (ent->callback) {
ent->callback(-EAGAIN, ent->context);
mlx5_free_cmd_msg(dev, ent->out);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 8d76452cacdc..f1a7bc46f1c0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -345,7 +345,7 @@ struct mlx5e_tx_wqe_info {
u8 num_wqebbs;
u8 num_dma;
#ifdef CONFIG_MLX5_EN_TLS
- skb_frag_t *resync_dump_frag;
+ struct page *resync_dump_frag_page;
#endif
};
@@ -410,6 +410,7 @@ struct mlx5e_txqsq {
struct device *pdev;
__be32 mkey_be;
unsigned long state;
+ unsigned int hw_mtu;
struct hwtstamp_config *tstamp;
struct mlx5_clock *clock;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
index b3a249b2a482..ac44bbe95c5c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
@@ -141,7 +141,7 @@ int mlx5e_hv_vhca_stats_create(struct mlx5e_priv *priv)
"Failed to create hv vhca stats agent, err = %ld\n",
PTR_ERR(agent));
- kfree(priv->stats_agent.buf);
+ kvfree(priv->stats_agent.buf);
return IS_ERR_OR_NULL(agent);
}
@@ -157,5 +157,5 @@ void mlx5e_hv_vhca_stats_destroy(struct mlx5e_priv *priv)
return;
mlx5_hv_vhca_agent_destroy(priv->stats_agent.agent);
- kfree(priv->stats_agent.buf);
+ kvfree(priv->stats_agent.buf);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
index 633b117eb13e..7b672ada63a3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
@@ -175,7 +175,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
* @port_buffer: <output> port receive buffer configuration
* @change: <output>
*
- * Update buffer configuration based on pfc configuraiton and
+ * Update buffer configuration based on pfc configuration and
* priority to buffer mapping.
* Buffer's lossy bit is changed to:
* lossless if there is at least one PFC enabled priority
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
index b860569d4247..6c72b592315b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
@@ -222,7 +222,8 @@ static int mlx5e_rx_reporter_recover_from_ctx(struct mlx5e_err_ctx *err_ctx)
}
static int mlx5e_rx_reporter_recover(struct devlink_health_reporter *reporter,
- void *context)
+ void *context,
+ struct netlink_ext_ack *extack)
{
struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
struct mlx5e_err_ctx *err_ctx = context;
@@ -301,7 +302,8 @@ static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq,
}
static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter,
- struct devlink_fmsg *fmsg)
+ struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
{
struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
struct mlx5e_params *params = &priv->channels.params;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index bfed558637c2..b468549e96ff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -135,7 +135,8 @@ static int mlx5e_tx_reporter_recover_from_ctx(struct mlx5e_err_ctx *err_ctx)
}
static int mlx5e_tx_reporter_recover(struct devlink_health_reporter *reporter,
- void *context)
+ void *context,
+ struct netlink_ext_ack *extack)
{
struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
struct mlx5e_err_ctx *err_ctx = context;
@@ -205,7 +206,8 @@ mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg,
}
static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter,
- struct devlink_fmsg *fmsg)
+ struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
{
struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
struct mlx5e_txqsq *generic_sq = priv->txq2sq[0];
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index f8ee18b4da6f..13af72556987 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -97,15 +97,19 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
if (ret)
return ret;
- if (mlx5_lag_is_multipath(mdev) && rt->rt_gw_family != AF_INET)
+ if (mlx5_lag_is_multipath(mdev) && rt->rt_gw_family != AF_INET) {
+ ip_rt_put(rt);
return -ENETUNREACH;
+ }
#else
return -EOPNOTSUPP;
#endif
ret = get_route_and_out_devs(priv, rt->dst.dev, route_dev, out_dev);
- if (ret < 0)
+ if (ret < 0) {
+ ip_rt_put(rt);
return ret;
+ }
if (!(*out_ttl))
*out_ttl = ip4_dst_hoplimit(&rt->dst);
@@ -149,8 +153,10 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
*out_ttl = ip6_dst_hoplimit(dst);
ret = get_route_and_out_devs(priv, dst->dev, route_dev, out_dev);
- if (ret < 0)
+ if (ret < 0) {
+ dst_release(dst);
return ret;
+ }
#else
return -EOPNOTSUPP;
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index 87be96747902..7c8796d9743f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -15,15 +15,14 @@
#else
/* TLS offload requires additional stop_room for:
* - a resync SKB.
- * kTLS offload requires additional stop_room for:
- * - static params WQE,
- * - progress params WQE, and
- * - resync DUMP per frag.
+ * kTLS offload requires fixed additional stop_room for:
+ * - a static params WQE, and a progress params WQE.
+ * The additional MTU-dependent room for the resync DUMP WQEs
+ * is calculated and added at runtime.
*/
#define MLX5E_SQ_TLS_ROOM \
(MLX5_SEND_WQE_MAX_WQEBBS + \
- MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS + \
- MAX_SKB_FRAGS * MLX5E_KTLS_MAX_DUMP_WQEBBS)
+ MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS)
#endif
#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))
@@ -92,7 +91,7 @@ mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq, struct mlx5_wq_cyc *wq,
/* fill sq frag edge with nops to avoid wqe wrapping two pages */
for (; wi < edge_wi; wi++) {
- wi->skb = NULL;
+ memset(wi, 0, sizeof(*wi));
wi->num_wqebbs = 1;
mlx5e_post_nop(wq, sq->sqn, &sq->pc);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
index d2ff74d52720..46725cd743a3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
@@ -38,7 +38,7 @@ static int mlx5e_ktls_add(struct net_device *netdev, struct sock *sk,
return -ENOMEM;
tx_priv->expected_seq = start_offload_tcp_sn;
- tx_priv->crypto_info = crypto_info;
+ tx_priv->crypto_info = *(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
mlx5e_set_ktls_tx_priv_ctx(tls_ctx, tx_priv);
/* tc and underlay_qpn values are not in use for tls tis */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
index b7298f9ee3d3..a3efa29a4629 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
@@ -21,7 +21,14 @@
MLX5_ST_SZ_BYTES(tls_progress_params))
#define MLX5E_KTLS_PROGRESS_WQEBBS \
(DIV_ROUND_UP(MLX5E_KTLS_PROGRESS_WQE_SZ, MLX5_SEND_WQE_BB))
-#define MLX5E_KTLS_MAX_DUMP_WQEBBS 2
+
+struct mlx5e_dump_wqe {
+ struct mlx5_wqe_ctrl_seg ctrl;
+ struct mlx5_wqe_data_seg data;
+};
+
+#define MLX5E_KTLS_DUMP_WQEBBS \
+ (DIV_ROUND_UP(sizeof(struct mlx5e_dump_wqe), MLX5_SEND_WQE_BB))
enum {
MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD = 0,
@@ -37,7 +44,7 @@ enum {
struct mlx5e_ktls_offload_context_tx {
struct tls_offload_context_tx *tx_ctx;
- struct tls_crypto_info *crypto_info;
+ struct tls12_crypto_info_aes_gcm_128 crypto_info;
u32 expected_seq;
u32 tisn;
u32 key_id;
@@ -86,14 +93,28 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
struct mlx5e_tx_wqe **wqe, u16 *pi);
void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
struct mlx5e_tx_wqe_info *wi,
- struct mlx5e_sq_dma *dma);
-
+ u32 *dma_fifo_cc);
+static inline u8
+mlx5e_ktls_dumps_num_wqebbs(struct mlx5e_txqsq *sq, unsigned int nfrags,
+ unsigned int sync_len)
+{
+ /* Given the MTU and sync_len, calculates an upper bound for the
+ * number of WQEBBs needed for the TX resync DUMP WQEs of a record.
+ */
+ return MLX5E_KTLS_DUMP_WQEBBS *
+ (nfrags + DIV_ROUND_UP(sync_len, sq->hw_mtu));
+}
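
The bound is one DUMP WQE per existing record frag plus one per hw_mtu-sized chunk of the resync length. A worked instance, assuming a single WQEBB per DUMP WQE and a 1500-byte HW MTU:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int dump_wqebbs = 1, nfrags = 3, sync_len = 4000, hw_mtu = 1500;
	int bound = dump_wqebbs * (nfrags + DIV_ROUND_UP(sync_len, hw_mtu));

	printf("stop_room bound = %d WQEBBs\n", bound);	/* 3 + 3 = 6 */
	return 0;
}
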
#else
static inline void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
{
}
+static inline void
+mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
+ struct mlx5e_tx_wqe_info *wi,
+ u32 *dma_fifo_cc) {}
+
#endif
#endif /* __MLX5E_TLS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index d195366461c9..778dab1af8fc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -24,17 +24,12 @@ enum {
static void
fill_static_params_ctx(void *ctx, struct mlx5e_ktls_offload_context_tx *priv_tx)
{
- struct tls_crypto_info *crypto_info = priv_tx->crypto_info;
- struct tls12_crypto_info_aes_gcm_128 *info;
+ struct tls12_crypto_info_aes_gcm_128 *info = &priv_tx->crypto_info;
char *initial_rn, *gcm_iv;
u16 salt_sz, rec_seq_sz;
char *salt, *rec_seq;
u8 tls_version;
- if (WARN_ON(crypto_info->cipher_type != TLS_CIPHER_AES_GCM_128))
- return;
-
- info = (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
EXTRACT_INFO_FIELDS;
gcm_iv = MLX5_ADDR_OF(tls_static_params, ctx, gcm_iv);
@@ -108,16 +103,15 @@ build_progress_params(struct mlx5e_tx_wqe *wqe, u16 pc, u32 sqn,
}
static void tx_fill_wi(struct mlx5e_txqsq *sq,
- u16 pi, u8 num_wqebbs,
- skb_frag_t *resync_dump_frag,
- u32 num_bytes)
+ u16 pi, u8 num_wqebbs, u32 num_bytes,
+ struct page *page)
{
struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];
- wi->skb = NULL;
- wi->num_wqebbs = num_wqebbs;
- wi->resync_dump_frag = resync_dump_frag;
- wi->num_bytes = num_bytes;
+ memset(wi, 0, sizeof(*wi));
+ wi->num_wqebbs = num_wqebbs;
+ wi->num_bytes = num_bytes;
+ wi->resync_dump_frag_page = page;
}
void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
@@ -145,7 +139,7 @@ post_static_params(struct mlx5e_txqsq *sq,
umr_wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_STATIC_UMR_WQE_SZ, &pi);
build_static_params(umr_wqe, sq->pc, sq->sqn, priv_tx, fence);
- tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, NULL, 0);
+ tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, 0, NULL);
sq->pc += MLX5E_KTLS_STATIC_WQEBBS;
}
@@ -159,7 +153,7 @@ post_progress_params(struct mlx5e_txqsq *sq,
wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_PROGRESS_WQE_SZ, &pi);
build_progress_params(wqe, sq->pc, sq->sqn, priv_tx, fence);
- tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, NULL, 0);
+ tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, 0, NULL);
sq->pc += MLX5E_KTLS_PROGRESS_WQEBBS;
}
@@ -169,6 +163,14 @@ mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
bool skip_static_post, bool fence_first_post)
{
bool progress_fence = skip_static_post || !fence_first_post;
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ u16 contig_wqebbs_room, pi;
+
+ pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+ if (unlikely(contig_wqebbs_room <
+ MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS))
+ mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
if (!skip_static_post)
post_static_params(sq, priv_tx, fence_first_post);
@@ -180,29 +182,36 @@ struct tx_sync_info {
u64 rcd_sn;
s32 sync_len;
int nr_frags;
- skb_frag_t *frags[MAX_SKB_FRAGS];
+ skb_frag_t frags[MAX_SKB_FRAGS];
+};
+
+enum mlx5e_ktls_sync_retval {
+ MLX5E_KTLS_SYNC_DONE,
+ MLX5E_KTLS_SYNC_FAIL,
+ MLX5E_KTLS_SYNC_SKIP_NO_DATA,
};
-static bool tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
- u32 tcp_seq, struct tx_sync_info *info)
+static enum mlx5e_ktls_sync_retval
+tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
+ u32 tcp_seq, struct tx_sync_info *info)
{
struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx;
+ enum mlx5e_ktls_sync_retval ret = MLX5E_KTLS_SYNC_DONE;
struct tls_record_info *record;
int remaining, i = 0;
unsigned long flags;
- bool ret = true;
spin_lock_irqsave(&tx_ctx->lock, flags);
record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn);
if (unlikely(!record)) {
- ret = false;
+ ret = MLX5E_KTLS_SYNC_FAIL;
goto out;
}
if (unlikely(tcp_seq < tls_record_start_seq(record))) {
- if (!tls_record_is_start_marker(record))
- ret = false;
+ ret = tls_record_is_start_marker(record) ?
+ MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
goto out;
}
@@ -211,13 +220,13 @@ static bool tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
while (remaining > 0) {
skb_frag_t *frag = &record->frags[i];
- __skb_frag_ref(frag);
+ get_page(skb_frag_page(frag));
remaining -= skb_frag_size(frag);
- info->frags[i++] = frag;
+ info->frags[i++] = *frag;
}
/* reduce the part which will be sent with the original SKB */
if (remaining < 0)
- skb_frag_size_add(info->frags[i - 1], remaining);
+ skb_frag_size_add(&info->frags[i - 1], remaining);
info->nr_frags = i;
out:
spin_unlock_irqrestore(&tx_ctx->lock, flags);
@@ -229,17 +238,12 @@ tx_post_resync_params(struct mlx5e_txqsq *sq,
struct mlx5e_ktls_offload_context_tx *priv_tx,
u64 rcd_sn)
{
- struct tls_crypto_info *crypto_info = priv_tx->crypto_info;
- struct tls12_crypto_info_aes_gcm_128 *info;
+ struct tls12_crypto_info_aes_gcm_128 *info = &priv_tx->crypto_info;
__be64 rn_be = cpu_to_be64(rcd_sn);
bool skip_static_post;
u16 rec_seq_sz;
char *rec_seq;
- if (WARN_ON(crypto_info->cipher_type != TLS_CIPHER_AES_GCM_128))
- return;
-
- info = (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
rec_seq = info->rec_seq;
rec_seq_sz = sizeof(info->rec_seq);
@@ -250,11 +254,6 @@ tx_post_resync_params(struct mlx5e_txqsq *sq,
mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, skip_static_post, true);
}
-struct mlx5e_dump_wqe {
- struct mlx5_wqe_ctrl_seg ctrl;
- struct mlx5_wqe_data_seg data;
-};
-
static int
tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool first)
{
@@ -262,7 +261,6 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool fir
struct mlx5_wqe_data_seg *dseg;
struct mlx5e_dump_wqe *wqe;
dma_addr_t dma_addr = 0;
- u8 num_wqebbs;
u16 ds_cnt;
int fsz;
u16 pi;
@@ -270,7 +268,6 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool fir
wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi);
ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
- num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
cseg = &wqe->ctrl;
dseg = &wqe->data;
@@ -291,24 +288,27 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool fir
dseg->byte_count = cpu_to_be32(fsz);
mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
- tx_fill_wi(sq, pi, num_wqebbs, frag, fsz);
- sq->pc += num_wqebbs;
-
- WARN(num_wqebbs > MLX5E_KTLS_MAX_DUMP_WQEBBS,
- "unexpected DUMP num_wqebbs, %d > %d",
- num_wqebbs, MLX5E_KTLS_MAX_DUMP_WQEBBS);
+ tx_fill_wi(sq, pi, MLX5E_KTLS_DUMP_WQEBBS, fsz, skb_frag_page(frag));
+ sq->pc += MLX5E_KTLS_DUMP_WQEBBS;
return 0;
}
void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
struct mlx5e_tx_wqe_info *wi,
- struct mlx5e_sq_dma *dma)
+ u32 *dma_fifo_cc)
{
- struct mlx5e_sq_stats *stats = sq->stats;
+ struct mlx5e_sq_stats *stats;
+ struct mlx5e_sq_dma *dma;
+
+ if (!wi->resync_dump_frag_page)
+ return;
+
+ dma = mlx5e_dma_get(sq, (*dma_fifo_cc)++);
+ stats = sq->stats;
mlx5e_tx_dma_unmap(sq->pdev, dma);
- __skb_frag_unref(wi->resync_dump_frag);
+ put_page(wi->resync_dump_frag_page);
stats->tls_dump_packets++;
stats->tls_dump_bytes += wi->num_bytes;
}
@@ -318,25 +318,31 @@ static void tx_post_fence_nop(struct mlx5e_txqsq *sq)
struct mlx5_wq_cyc *wq = &sq->wq;
u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- tx_fill_wi(sq, pi, 1, NULL, 0);
+ tx_fill_wi(sq, pi, 1, 0, NULL);
mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
}
-static struct sk_buff *
+static enum mlx5e_ktls_sync_retval
mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
struct mlx5e_txqsq *sq,
- struct sk_buff *skb,
+ int datalen,
u32 seq)
{
struct mlx5e_sq_stats *stats = sq->stats;
struct mlx5_wq_cyc *wq = &sq->wq;
+ enum mlx5e_ktls_sync_retval ret;
struct tx_sync_info info = {};
u16 contig_wqebbs_room, pi;
u8 num_wqebbs;
- int i;
-
- if (!tx_sync_info_get(priv_tx, seq, &info)) {
+ int i = 0;
+
+ ret = tx_sync_info_get(priv_tx, seq, &info);
+ if (unlikely(ret != MLX5E_KTLS_SYNC_DONE)) {
+ if (ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA) {
+ stats->tls_skip_no_sync_data++;
+ return MLX5E_KTLS_SYNC_SKIP_NO_DATA;
+ }
/* We might get here if a retransmission reaches the driver
* after the relevant record is acked.
* It should be safe to drop the packet in this case
@@ -346,13 +352,8 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
}
if (unlikely(info.sync_len < 0)) {
- u32 payload;
- int headln;
-
- headln = skb_transport_offset(skb) + tcp_hdrlen(skb);
- payload = skb->len - headln;
- if (likely(payload <= -info.sync_len))
- return skb;
+ if (likely(datalen <= -info.sync_len))
+ return MLX5E_KTLS_SYNC_DONE;
stats->tls_drop_bypass_req++;
goto err_out;
@@ -360,30 +361,62 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
stats->tls_ooo++;
- num_wqebbs = MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS +
- (info.nr_frags ? info.nr_frags * MLX5E_KTLS_MAX_DUMP_WQEBBS : 1);
+ tx_post_resync_params(sq, priv_tx, info.rcd_sn);
+
+ /* If no dump WQE was sent, we need to have a fence NOP WQE before the
+ * actual data xmit.
+ */
+ if (!info.nr_frags) {
+ tx_post_fence_nop(sq);
+ return MLX5E_KTLS_SYNC_DONE;
+ }
+
+ num_wqebbs = mlx5e_ktls_dumps_num_wqebbs(sq, info.nr_frags, info.sync_len);
pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+
if (unlikely(contig_wqebbs_room < num_wqebbs))
mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
tx_post_resync_params(sq, priv_tx, info.rcd_sn);
- for (i = 0; i < info.nr_frags; i++)
- if (tx_post_resync_dump(sq, info.frags[i], priv_tx->tisn, !i))
- goto err_out;
+ for (; i < info.nr_frags; i++) {
+ unsigned int orig_fsz, frag_offset = 0, n = 0;
+ skb_frag_t *f = &info.frags[i];
- /* If no dump WQE was sent, we need to have a fence NOP WQE before the
- * actual data xmit.
- */
- if (!info.nr_frags)
- tx_post_fence_nop(sq);
+ orig_fsz = skb_frag_size(f);
- return skb;
+ do {
+ bool fence = !(i || frag_offset);
+ unsigned int fsz;
+
+ n++;
+ fsz = min_t(unsigned int, sq->hw_mtu, orig_fsz - frag_offset);
+ skb_frag_size_set(f, fsz);
+ if (tx_post_resync_dump(sq, f, priv_tx->tisn, fence)) {
+ page_ref_add(skb_frag_page(f), n - 1);
+ goto err_out;
+ }
+
+ skb_frag_off_add(f, fsz);
+ frag_offset += fsz;
+ } while (frag_offset < orig_fsz);
+
+ page_ref_add(skb_frag_page(f), n - 1);
+ }
+
+ return MLX5E_KTLS_SYNC_DONE;
err_out:
- dev_kfree_skb_any(skb);
- return NULL;
+ for (; i < info.nr_frags; i++)
+ /* The put_page() here undoes the page ref obtained in tx_sync_info_get().
+ * Page refs obtained for the DUMP WQEs above (by page_ref_add) will be
+ * released only upon their completions (or in mlx5e_free_txqsq_descs,
+ * if channel closes).
+ */
+ put_page(skb_frag_page(&info.frags[i]));
+
+ return MLX5E_KTLS_SYNC_FAIL;
}
struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
@@ -419,10 +452,15 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
seq = ntohl(tcp_hdr(skb)->seq);
if (unlikely(priv_tx->expected_seq != seq)) {
- skb = mlx5e_ktls_tx_handle_ooo(priv_tx, sq, skb, seq);
- if (unlikely(!skb))
+ enum mlx5e_ktls_sync_retval ret =
+ mlx5e_ktls_tx_handle_ooo(priv_tx, sq, datalen, seq);
+
+ if (likely(ret == MLX5E_KTLS_SYNC_DONE))
+ *wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
+ else if (ret == MLX5E_KTLS_SYNC_FAIL)
+ goto err_out;
+ else /* ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA */
goto out;
- *wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
}
priv_tx->expected_seq = seq + datalen;
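The DUMP-posting loop above slices each fragment into hw_mtu-sized chunks and takes n - 1 extra page references per fragment, because every posted chunk drops exactly one reference on completion while tx_sync_info_get() took only one up front. A userspace model of that accounting, with a plain counter standing in for the page refcount:

#include <stdio.h>

static unsigned int post_frag_dumps(unsigned int orig_fsz, unsigned int mtu,
				    unsigned int *page_refs)
{
	unsigned int off = 0, n = 0;

	while (off < orig_fsz) {
		unsigned int fsz = orig_fsz - off;

		if (fsz > mtu)
			fsz = mtu;
		n++;		/* one DUMP WQE posted for this chunk */
		off += fsz;
	}
	*page_refs += n - 1;	/* completions will drop n refs in total */
	return n;
}

int main(void)
{
	unsigned int refs = 1;	/* the ref taken when the frag was captured */
	unsigned int n = post_frag_dumps(4000, 1500, &refs);

	printf("chunks=%u refs=%u\n", n, refs);	/* chunks=3 refs=3 */
	return 0;
}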
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index c5a9c20d7f00..327c93a7bd55 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1021,7 +1021,7 @@ static bool ext_link_mode_requested(const unsigned long *adver)
{
#define MLX5E_MIN_PTYS_EXT_LINK_MODE_BIT ETHTOOL_LINK_MODE_50000baseKR_Full_BIT
int size = __ETHTOOL_LINK_MODE_MASK_NBITS - MLX5E_MIN_PTYS_EXT_LINK_MODE_BIT;
- __ETHTOOL_DECLARE_LINK_MODE_MASK(modes);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(modes) = {0,};
bitmap_set(modes, MLX5E_MIN_PTYS_EXT_LINK_MODE_BIT, size);
return bitmap_intersects(modes, adver, __ETHTOOL_LINK_MODE_MASK_NBITS);
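The en_ethtool.c hunk fixes an uninitialized on-stack link-mode bitmap: without the "= {0,}" the bits below MLX5E_MIN_PTYS_EXT_LINK_MODE_BIT hold stack garbage and the intersection test can fire spuriously. A self-contained sketch of the same pattern, with a hand-rolled 32-bit bitmap and a placeholder bit index standing in for the ethtool macros:

#include <stdio.h>

#define NBITS 96
#define WORDS ((NBITS + 31) / 32)

static void bitmap_set32(unsigned int *map, int start, int len)
{
	for (int i = start; i < start + len; i++)
		map[i / 32] |= 1u << (i % 32);
}

static int bitmap_intersects32(const unsigned int *a, const unsigned int *b)
{
	for (int i = 0; i < WORDS; i++)
		if (a[i] & b[i])
			return 1;
	return 0;
}

int main(void)
{
	unsigned int modes[WORDS] = { 0 };	/* the "= {0,}" from the fix */
	unsigned int adver[WORDS] = { 0 };
	int min_ext_bit = 34;			/* placeholder bit index */

	bitmap_set32(modes, min_ext_bit, NBITS - min_ext_bit);
	bitmap_set32(adver, 3, 1);		/* a legacy, non-ext mode */
	printf("ext requested: %d\n", bitmap_intersects32(modes, adver)); /* 0 */
	return 0;
}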
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 7569287f8f3c..772bfdbdeb9c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1128,6 +1128,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
sq->txq_ix = txq_ix;
sq->uar_map = mdev->mlx5e_res.bfreg.map;
sq->min_inline_mode = params->tx_min_inline_mode;
+ sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
sq->stats = &c->priv->channel_stats[c->ix].sq[tc];
sq->stop_room = MLX5E_SQ_STOP_ROOM;
INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
@@ -1135,10 +1136,14 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
if (MLX5_IPSEC_DEV(c->priv->mdev))
set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
+#ifdef CONFIG_MLX5_EN_TLS
if (mlx5_accel_is_tls_device(c->priv->mdev)) {
set_bit(MLX5E_SQ_STATE_TLS, &sq->state);
- sq->stop_room += MLX5E_SQ_TLS_ROOM;
+ sq->stop_room += MLX5E_SQ_TLS_ROOM +
+ mlx5e_ktls_dumps_num_wqebbs(sq, MAX_SKB_FRAGS,
+ TLS_MAX_PAYLOAD_SIZE);
}
+#endif
param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
@@ -1349,9 +1354,13 @@ static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
/* last doorbell out, godspeed .. */
if (mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1)) {
u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ struct mlx5e_tx_wqe_info *wi;
struct mlx5e_tx_wqe *nop;
- sq->db.wqe_info[pi].skb = NULL;
+ wi = &sq->db.wqe_info[pi];
+
+ memset(wi, 0, sizeof(*wi));
+ wi->num_wqebbs = 1;
nop = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nop->ctrl);
}
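The en_main.c hunk grows the SQ stop room by the worst-case kTLS resync cost, so the xmit path can reserve space once and never underestimate a resync burst. A sketch of the sizing arithmetic; all constants below are placeholders, not the driver's real values:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

enum {
	BASE_STOP_ROOM	= 4,	   /* placeholder for MLX5E_SQ_STOP_ROOM */
	TLS_STOP_ROOM	= 2,	   /* placeholder for MLX5E_SQ_TLS_ROOM */
	DUMP_WQEBBS	= 2,	   /* placeholder per-DUMP-WQE cost */
	MAX_FRAGS	= 17,	   /* stands in for MAX_SKB_FRAGS */
	MAX_PAYLOAD	= 1 << 14, /* stands in for TLS_MAX_PAYLOAD_SIZE */
};

static unsigned int sq_stop_room(unsigned int hw_mtu, int tls)
{
	unsigned int room = BASE_STOP_ROOM;

	if (tls)	/* worst-case resync: every frag, full payload */
		room += TLS_STOP_ROOM +
			DUMP_WQEBBS * (MAX_FRAGS +
				       DIV_ROUND_UP(MAX_PAYLOAD, hw_mtu));
	return room;
}

int main(void)
{
	printf("stop room: %u WQEBBs\n", sq_stop_room(1500, 1)); /* 62 */
	return 0;
}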
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 95892a3b63a1..cd9bb7c7b341 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -611,8 +611,8 @@ static void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
mutex_lock(&esw->offloads.encap_tbl_lock);
encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
- if (e->compl_result || (encap_connected == neigh_connected &&
- ether_addr_equal(e->h_dest, ha)))
+ if (e->compl_result < 0 || (encap_connected == neigh_connected &&
+ ether_addr_equal(e->h_dest, ha)))
goto unlock;
mlx5e_take_all_encap_flows(e, &flow_list);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index d6a547238de0..82cffb3a9964 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1386,8 +1386,11 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
return 0;
- if (rq->cqd.left)
+ if (rq->cqd.left) {
work_done += mlx5e_decompress_cqes_cont(rq, cqwq, 0, budget);
+ if (rq->cqd.left || work_done >= budget)
+ goto out;
+ }
cqe = mlx5_cqwq_get_cqe(cqwq);
if (!cqe) {
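The en_rx.c hunk adds an early exit after draining leftover compressed CQEs: if any are still pending, or the drain already consumed the budget, the poll returns rather than falling through to fetch fresh CQEs and overshooting the budget. A compact model of that control flow, with a counter standing in for the decompression state:

#include <stdio.h>

struct rq_state { int cqd_left; };

static int decompress_cont(struct rq_state *rq, int budget)
{
	int done = rq->cqd_left < budget ? rq->cqd_left : budget;

	rq->cqd_left -= done;
	return done;
}

static int poll_rx_cq(struct rq_state *rq, int budget)
{
	int work_done = 0;

	if (rq->cqd_left) {
		work_done += decompress_cont(rq, budget);
		if (rq->cqd_left || work_done >= budget)
			return work_done;	/* the added early exit */
	}
	/* ... otherwise fetch and handle fresh CQEs up to the budget ... */
	return work_done;
}

int main(void)
{
	struct rq_state rq = { .cqd_left = 80 };

	printf("polled %d of budget 64\n", poll_rx_cq(&rq, 64));
	return 0;
}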
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 840ec945ccba..bbff8d8ded76 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -35,6 +35,7 @@
#include <linux/udp.h>
#include <net/udp.h>
#include "en.h"
+#include "en/port.h"
enum {
MLX5E_ST_LINK_STATE,
@@ -80,22 +81,12 @@ static int mlx5e_test_link_state(struct mlx5e_priv *priv)
static int mlx5e_test_link_speed(struct mlx5e_priv *priv)
{
- u32 out[MLX5_ST_SZ_DW(ptys_reg)];
- u32 eth_proto_oper;
- int i;
+ u32 speed;
if (!netif_carrier_ok(priv->netdev))
return 1;
- if (mlx5_query_port_ptys(priv->mdev, out, sizeof(out), MLX5_PTYS_EN, 1))
- return 1;
-
- eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
- for (i = 0; i < MLX5E_LINK_MODES_NUMBER; i++) {
- if (eth_proto_oper & MLX5E_PROT_MASK(i))
- return 0;
- }
- return 1;
+ return mlx5e_port_linkspeed(priv->mdev, &speed);
}
struct mlx5ehdr {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index ac6fdcda7019..7e6ebd0505cc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -52,11 +52,12 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ctx) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_skip_no_sync_data) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_no_sync_data) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_bypass_req) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
#endif
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
@@ -288,11 +289,12 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->tx_tls_encrypted_bytes += sq_stats->tls_encrypted_bytes;
s->tx_tls_ctx += sq_stats->tls_ctx;
s->tx_tls_ooo += sq_stats->tls_ooo;
+ s->tx_tls_dump_bytes += sq_stats->tls_dump_bytes;
+ s->tx_tls_dump_packets += sq_stats->tls_dump_packets;
s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes;
+ s->tx_tls_skip_no_sync_data += sq_stats->tls_skip_no_sync_data;
s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data;
s->tx_tls_drop_bypass_req += sq_stats->tls_drop_bypass_req;
- s->tx_tls_dump_bytes += sq_stats->tls_dump_bytes;
- s->tx_tls_dump_packets += sq_stats->tls_dump_packets;
#endif
s->tx_cqes += sq_stats->cqes;
}
@@ -1472,10 +1474,12 @@ static const struct counter_desc sq_stats_desc[] = {
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ctx) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
#endif
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 79f261bf86ac..869f3502f631 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -129,11 +129,12 @@ struct mlx5e_sw_stats {
u64 tx_tls_encrypted_bytes;
u64 tx_tls_ctx;
u64 tx_tls_ooo;
+ u64 tx_tls_dump_packets;
+ u64 tx_tls_dump_bytes;
u64 tx_tls_resync_bytes;
+ u64 tx_tls_skip_no_sync_data;
u64 tx_tls_drop_no_sync_data;
u64 tx_tls_drop_bypass_req;
- u64 tx_tls_dump_packets;
- u64 tx_tls_dump_bytes;
#endif
u64 rx_xsk_packets;
@@ -273,11 +274,12 @@ struct mlx5e_sq_stats {
u64 tls_encrypted_bytes;
u64 tls_ctx;
u64 tls_ooo;
+ u64 tls_dump_packets;
+ u64 tls_dump_bytes;
u64 tls_resync_bytes;
+ u64 tls_skip_no_sync_data;
u64 tls_drop_no_sync_data;
u64 tls_drop_bypass_req;
- u64 tls_dump_packets;
- u64 tls_dump_bytes;
#endif
/* less likely accessed in data path */
u64 csum_none;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 3e78a727f3e6..bb970b2ebf8a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1278,8 +1278,10 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
mlx5_eswitch_del_vlan_action(esw, attr);
for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
- if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)
+ if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) {
mlx5e_detach_encap(priv, flow, out_index);
+ kfree(attr->parse_attr->tun_info[out_index]);
+ }
kvfree(attr->parse_attr);
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
@@ -1559,6 +1561,7 @@ static void mlx5e_encap_dealloc(struct mlx5e_priv *priv, struct mlx5e_encap_entr
mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
}
+ kfree(e->tun_info);
kfree(e->encap_header);
kfree_rcu(e, rcu);
}
@@ -2238,13 +2241,14 @@ out_err:
struct mlx5_fields {
u8 field;
- u8 size;
+ u8 field_bsize;
+ u32 field_mask;
u32 offset;
u32 match_offset;
};
-#define OFFLOAD(fw_field, size, field, off, match_field) \
- {MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, size, \
+#define OFFLOAD(fw_field, field_bsize, field_mask, field, off, match_field) \
+ {MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, field_bsize, field_mask, \
offsetof(struct pedit_headers, field) + (off), \
MLX5_BYTE_OFF(fte_match_set_lyr_2_4, match_field)}
@@ -2262,18 +2266,18 @@ struct mlx5_fields {
})
static bool cmp_val_mask(void *valp, void *maskp, void *matchvalp,
- void *matchmaskp, int size)
+ void *matchmaskp, u8 bsize)
{
bool same = false;
- switch (size) {
- case sizeof(u8):
+ switch (bsize) {
+ case 8:
same = SAME_VAL_MASK(u8, valp, maskp, matchvalp, matchmaskp);
break;
- case sizeof(u16):
+ case 16:
same = SAME_VAL_MASK(u16, valp, maskp, matchvalp, matchmaskp);
break;
- case sizeof(u32):
+ case 32:
same = SAME_VAL_MASK(u32, valp, maskp, matchvalp, matchmaskp);
break;
}
@@ -2282,41 +2286,43 @@ static bool cmp_val_mask(void *valp, void *maskp, void *matchvalp,
}
static struct mlx5_fields fields[] = {
- OFFLOAD(DMAC_47_16, 4, eth.h_dest[0], 0, dmac_47_16),
- OFFLOAD(DMAC_15_0, 2, eth.h_dest[4], 0, dmac_15_0),
- OFFLOAD(SMAC_47_16, 4, eth.h_source[0], 0, smac_47_16),
- OFFLOAD(SMAC_15_0, 2, eth.h_source[4], 0, smac_15_0),
- OFFLOAD(ETHERTYPE, 2, eth.h_proto, 0, ethertype),
- OFFLOAD(FIRST_VID, 2, vlan.h_vlan_TCI, 0, first_vid),
-
- OFFLOAD(IP_TTL, 1, ip4.ttl, 0, ttl_hoplimit),
- OFFLOAD(SIPV4, 4, ip4.saddr, 0, src_ipv4_src_ipv6.ipv4_layout.ipv4),
- OFFLOAD(DIPV4, 4, ip4.daddr, 0, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
-
- OFFLOAD(SIPV6_127_96, 4, ip6.saddr.s6_addr32[0], 0,
+ OFFLOAD(DMAC_47_16, 32, U32_MAX, eth.h_dest[0], 0, dmac_47_16),
+ OFFLOAD(DMAC_15_0, 16, U16_MAX, eth.h_dest[4], 0, dmac_15_0),
+ OFFLOAD(SMAC_47_16, 32, U32_MAX, eth.h_source[0], 0, smac_47_16),
+ OFFLOAD(SMAC_15_0, 16, U16_MAX, eth.h_source[4], 0, smac_15_0),
+ OFFLOAD(ETHERTYPE, 16, U16_MAX, eth.h_proto, 0, ethertype),
+ OFFLOAD(FIRST_VID, 16, U16_MAX, vlan.h_vlan_TCI, 0, first_vid),
+
+ OFFLOAD(IP_DSCP, 8, 0xfc, ip4.tos, 0, ip_dscp),
+ OFFLOAD(IP_TTL, 8, U8_MAX, ip4.ttl, 0, ttl_hoplimit),
+ OFFLOAD(SIPV4, 32, U32_MAX, ip4.saddr, 0, src_ipv4_src_ipv6.ipv4_layout.ipv4),
+ OFFLOAD(DIPV4, 32, U32_MAX, ip4.daddr, 0, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+
+ OFFLOAD(SIPV6_127_96, 32, U32_MAX, ip6.saddr.s6_addr32[0], 0,
src_ipv4_src_ipv6.ipv6_layout.ipv6[0]),
- OFFLOAD(SIPV6_95_64, 4, ip6.saddr.s6_addr32[1], 0,
+ OFFLOAD(SIPV6_95_64, 32, U32_MAX, ip6.saddr.s6_addr32[1], 0,
src_ipv4_src_ipv6.ipv6_layout.ipv6[4]),
- OFFLOAD(SIPV6_63_32, 4, ip6.saddr.s6_addr32[2], 0,
+ OFFLOAD(SIPV6_63_32, 32, U32_MAX, ip6.saddr.s6_addr32[2], 0,
src_ipv4_src_ipv6.ipv6_layout.ipv6[8]),
- OFFLOAD(SIPV6_31_0, 4, ip6.saddr.s6_addr32[3], 0,
+ OFFLOAD(SIPV6_31_0, 32, U32_MAX, ip6.saddr.s6_addr32[3], 0,
src_ipv4_src_ipv6.ipv6_layout.ipv6[12]),
- OFFLOAD(DIPV6_127_96, 4, ip6.daddr.s6_addr32[0], 0,
+ OFFLOAD(DIPV6_127_96, 32, U32_MAX, ip6.daddr.s6_addr32[0], 0,
dst_ipv4_dst_ipv6.ipv6_layout.ipv6[0]),
- OFFLOAD(DIPV6_95_64, 4, ip6.daddr.s6_addr32[1], 0,
+ OFFLOAD(DIPV6_95_64, 32, U32_MAX, ip6.daddr.s6_addr32[1], 0,
dst_ipv4_dst_ipv6.ipv6_layout.ipv6[4]),
- OFFLOAD(DIPV6_63_32, 4, ip6.daddr.s6_addr32[2], 0,
+ OFFLOAD(DIPV6_63_32, 32, U32_MAX, ip6.daddr.s6_addr32[2], 0,
dst_ipv4_dst_ipv6.ipv6_layout.ipv6[8]),
- OFFLOAD(DIPV6_31_0, 4, ip6.daddr.s6_addr32[3], 0,
+ OFFLOAD(DIPV6_31_0, 32, U32_MAX, ip6.daddr.s6_addr32[3], 0,
dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
- OFFLOAD(IPV6_HOPLIMIT, 1, ip6.hop_limit, 0, ttl_hoplimit),
+ OFFLOAD(IPV6_HOPLIMIT, 8, U8_MAX, ip6.hop_limit, 0, ttl_hoplimit),
- OFFLOAD(TCP_SPORT, 2, tcp.source, 0, tcp_sport),
- OFFLOAD(TCP_DPORT, 2, tcp.dest, 0, tcp_dport),
- OFFLOAD(TCP_FLAGS, 1, tcp.ack_seq, 5, tcp_flags),
+ OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source, 0, tcp_sport),
+ OFFLOAD(TCP_DPORT, 16, U16_MAX, tcp.dest, 0, tcp_dport),
+ /* in the Linux tcphdr, tcp_flags is 8 bits long */
+ OFFLOAD(TCP_FLAGS, 8, U8_MAX, tcp.ack_seq, 5, tcp_flags),
- OFFLOAD(UDP_SPORT, 2, udp.source, 0, udp_sport),
- OFFLOAD(UDP_DPORT, 2, udp.dest, 0, udp_dport),
+ OFFLOAD(UDP_SPORT, 16, U16_MAX, udp.source, 0, udp_sport),
+ OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest, 0, udp_dport),
};
/* On input attr->max_mod_hdr_actions tells how many HW actions can be parsed at
@@ -2329,19 +2335,17 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs,
struct netlink_ext_ack *extack)
{
struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
- void *headers_c = get_match_headers_criteria(*action_flags,
- &parse_attr->spec);
- void *headers_v = get_match_headers_value(*action_flags,
- &parse_attr->spec);
int i, action_size, nactions, max_actions, first, last, next_z;
- void *s_masks_p, *a_masks_p, *vals_p;
+ void *headers_c, *headers_v, *action, *vals_p;
+ u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
struct mlx5_fields *f;
- u8 cmd, field_bsize;
- u32 s_mask, a_mask;
unsigned long mask;
__be32 mask_be32;
__be16 mask_be16;
- void *action;
+ u8 cmd;
+
+ headers_c = get_match_headers_criteria(*action_flags, &parse_attr->spec);
+ headers_v = get_match_headers_value(*action_flags, &parse_attr->spec);
set_masks = &hdrs[0].masks;
add_masks = &hdrs[1].masks;
@@ -2366,8 +2370,8 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs,
s_masks_p = (void *)set_masks + f->offset;
a_masks_p = (void *)add_masks + f->offset;
- memcpy(&s_mask, s_masks_p, f->size);
- memcpy(&a_mask, a_masks_p, f->size);
+ s_mask = *s_masks_p & f->field_mask;
+ a_mask = *a_masks_p & f->field_mask;
if (!s_mask && !a_mask) /* nothing to offload here */
continue;
@@ -2396,38 +2400,34 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs,
vals_p = (void *)set_vals + f->offset;
/* don't rewrite if we have a match on the same value */
if (cmp_val_mask(vals_p, s_masks_p, match_val,
- match_mask, f->size))
+ match_mask, f->field_bsize))
skip = true;
/* clear to denote we consumed this field */
- memset(s_masks_p, 0, f->size);
+ *s_masks_p &= ~f->field_mask;
} else {
- u32 zero = 0;
-
cmd = MLX5_ACTION_TYPE_ADD;
mask = a_mask;
vals_p = (void *)add_vals + f->offset;
/* add 0 is no change */
- if (!memcmp(vals_p, &zero, f->size))
+ if ((*(u32 *)vals_p & f->field_mask) == 0)
skip = true;
/* clear to denote we consumed this field */
- memset(a_masks_p, 0, f->size);
+ *a_masks_p &= ~f->field_mask;
}
if (skip)
continue;
- field_bsize = f->size * BITS_PER_BYTE;
-
- if (field_bsize == 32) {
+ if (f->field_bsize == 32) {
mask_be32 = *(__be32 *)&mask;
mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
- } else if (field_bsize == 16) {
+ } else if (f->field_bsize == 16) {
mask_be16 = *(__be16 *)&mask;
mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
}
- first = find_first_bit(&mask, field_bsize);
- next_z = find_next_zero_bit(&mask, field_bsize, first);
- last = find_last_bit(&mask, field_bsize);
+ first = find_first_bit(&mask, f->field_bsize);
+ next_z = find_next_zero_bit(&mask, f->field_bsize, first);
+ last = find_last_bit(&mask, f->field_bsize);
if (first < next_z && next_z < last) {
NL_SET_ERR_MSG_MOD(extack,
"rewrite of few sub-fields isn't supported");
@@ -2440,16 +2440,22 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs,
MLX5_SET(set_action_in, action, field, f->field);
if (cmd == MLX5_ACTION_TYPE_SET) {
- MLX5_SET(set_action_in, action, offset, first);
+ int start;
+
+ /* a sub-byte field need not start at bit 0 of its word */
+ start = find_first_bit((unsigned long *)&f->field_mask,
+ f->field_bsize);
+
+ MLX5_SET(set_action_in, action, offset, first - start);
/* length is num of bits to be written, zero means length of 32 */
MLX5_SET(set_action_in, action, length, (last - first + 1));
}
- if (field_bsize == 32)
+ if (f->field_bsize == 32)
MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
- else if (field_bsize == 16)
+ else if (f->field_bsize == 16)
MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
- else if (field_bsize == 8)
+ else if (f->field_bsize == 8)
MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);
action += action_size;
@@ -2972,6 +2978,13 @@ mlx5e_encap_get(struct mlx5e_priv *priv, struct encap_key *key,
return NULL;
}
+static struct ip_tunnel_info *dup_tun_info(const struct ip_tunnel_info *tun_info)
+{
+ size_t tun_size = sizeof(*tun_info) + tun_info->options_len;
+
+ return kmemdup(tun_info, tun_size, GFP_KERNEL);
+}
+
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct net_device *mirred_dev,
@@ -3028,13 +3041,15 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
refcount_set(&e->refcnt, 1);
init_completion(&e->res_ready);
+ tun_info = dup_tun_info(tun_info);
+ if (!tun_info) {
+ err = -ENOMEM;
+ goto out_err_init;
+ }
e->tun_info = tun_info;
err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack);
- if (err) {
- kfree(e);
- e = NULL;
- goto out_err;
- }
+ if (err)
+ goto out_err_init;
INIT_LIST_HEAD(&e->flows);
hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
@@ -3075,6 +3090,12 @@ out_err:
if (e)
mlx5e_encap_put(priv, e);
return err;
+
+out_err_init:
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+ kfree(tun_info);
+ kfree(e);
+ return err;
}
static int parse_tc_vlan_action(struct mlx5e_priv *priv,
@@ -3160,7 +3181,7 @@ static int add_vlan_pop_action(struct mlx5e_priv *priv,
struct mlx5_esw_flow_attr *attr,
u32 *action)
{
- int nest_level = vlan_get_encap_level(attr->parse_attr->filter_dev);
+ int nest_level = attr->parse_attr->filter_dev->lower_level;
struct flow_action_entry vlan_act = {
.id = FLOW_ACTION_VLAN_POP,
};
@@ -3295,7 +3316,9 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
} else if (encap) {
parse_attr->mirred_ifindex[attr->out_count] =
out_dev->ifindex;
- parse_attr->tun_info[attr->out_count] = info;
+ parse_attr->tun_info[attr->out_count] = dup_tun_info(info);
+ if (!parse_attr->tun_info[attr->out_count])
+ return -ENOMEM;
encap = false;
attr->dests[attr->out_count].flags |=
MLX5_ESW_DEST_ENCAP;
@@ -3423,6 +3446,12 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
}
+ if (!(attr->action &
+ (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
+ NL_SET_ERR_MSG(extack, "Rule must have at least one forward/drop action");
+ return -EOPNOTSUPP;
+ }
+
if (attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
NL_SET_ERR_MSG_MOD(extack,
"current firmware doesn't support split rule for port mirroring");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index d3a67a9b4eba..66951ff975f4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -403,7 +403,10 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
static void mlx5e_dump_error_cqe(struct mlx5e_txqsq *sq,
struct mlx5_err_cqe *err_cqe)
{
- u32 ci = mlx5_cqwq_get_ci(&sq->cq.wq);
+ struct mlx5_cqwq *wq = &sq->cq.wq;
+ u32 ci;
+
+ ci = mlx5_cqwq_ctr2ix(wq, wq->cc - 1);
netdev_err(sq->channel->netdev,
"Error cqe on cqn 0x%x, ci 0x%x, sqn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
@@ -458,8 +461,14 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) {
if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING,
&sq->state)) {
+ struct mlx5e_tx_wqe_info *wi;
+ u16 ci;
+
+ ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
+ wi = &sq->db.wqe_info[ci];
mlx5e_dump_error_cqe(sq,
(struct mlx5_err_cqe *)cqe);
+ mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
queue_work(cq->channel->priv->wq,
&sq->recover_work);
}
@@ -479,14 +488,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
skb = wi->skb;
if (unlikely(!skb)) {
-#ifdef CONFIG_MLX5_EN_TLS
- if (wi->resync_dump_frag) {
- struct mlx5e_sq_dma *dma =
- mlx5e_dma_get(sq, dma_fifo_cc++);
-
- mlx5e_ktls_tx_handle_resync_dump_comp(sq, wi, dma);
- }
-#endif
+ mlx5e_ktls_tx_handle_resync_dump_comp(sq, wi, &dma_fifo_cc);
sqcc += wi->num_wqebbs;
continue;
}
@@ -542,29 +544,38 @@ void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
{
struct mlx5e_tx_wqe_info *wi;
struct sk_buff *skb;
+ u32 dma_fifo_cc;
+ u16 sqcc;
u16 ci;
int i;
- while (sq->cc != sq->pc) {
- ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc);
+ sqcc = sq->cc;
+ dma_fifo_cc = sq->dma_fifo_cc;
+
+ while (sqcc != sq->pc) {
+ ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
wi = &sq->db.wqe_info[ci];
skb = wi->skb;
- if (!skb) { /* nop */
- sq->cc++;
+ if (!skb) {
+ mlx5e_ktls_tx_handle_resync_dump_comp(sq, wi, &dma_fifo_cc);
+ sqcc += wi->num_wqebbs;
continue;
}
for (i = 0; i < wi->num_dma; i++) {
struct mlx5e_sq_dma *dma =
- mlx5e_dma_get(sq, sq->dma_fifo_cc++);
+ mlx5e_dma_get(sq, dma_fifo_cc++);
mlx5e_tx_dma_unmap(sq->pdev, dma);
}
dev_kfree_skb_any(skb);
- sq->cc += wi->num_wqebbs;
+ sqcc += wi->num_wqebbs;
}
+
+ sq->dma_fifo_cc = dma_fifo_cc;
+ sq->cc = sqcc;
}
#ifdef CONFIG_MLX5_CORE_IPOIB
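The en_tx.c cleanup rework walks the SQ with local copies of the consumer cursors and commits them once at the end; it also advances over non-SKB slots by wi->num_wqebbs and lets the kTLS completion handler release DUMP pages, instead of special-casing nops with cc++. A userspace model of the cursor pattern, with a toy 16-slot ring:

#include <stdio.h>

struct wqe_info { unsigned int num_wqebbs; unsigned int num_dma; };

struct sq {
	unsigned int cc, pc, dma_fifo_cc;
	struct wqe_info db[16];
};

static void free_txqsq_descs(struct sq *sq)
{
	unsigned int sqcc = sq->cc;
	unsigned int dma_cc = sq->dma_fifo_cc;

	while (sqcc != sq->pc) {
		struct wqe_info *wi = &sq->db[sqcc % 16];

		dma_cc += wi->num_dma;	/* unmap wi->num_dma fifo entries */
		sqcc += wi->num_wqebbs;	/* advance by the whole WQE */
	}
	sq->dma_fifo_cc = dma_cc;	/* single commit of both cursors */
	sq->cc = sqcc;
}

int main(void)
{
	struct sq sq = { .cc = 0, .pc = 3 };

	sq.db[0] = (struct wqe_info){ .num_wqebbs = 1 };		/* nop */
	sq.db[1] = (struct wqe_info){ .num_wqebbs = 2, .num_dma = 2 };
	free_txqsq_descs(&sq);
	printf("cc=%u dma_fifo_cc=%u\n", sq.cc, sq.dma_fifo_cc); /* 3, 2 */
	return 0;
}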
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 30aae76b6a1d..89a2806eceb8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1831,6 +1831,15 @@ static void mlx5_eswitch_event_handlers_unregister(struct mlx5_eswitch *esw)
flush_workqueue(esw->work_queue);
}
+static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
+{
+ struct mlx5_vport *vport;
+ int i;
+
+ mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
+ memset(&vport->info, 0, sizeof(vport->info));
+}
+
/* Public E-Switch API */
#define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))
@@ -1923,7 +1932,7 @@ abort:
return err;
}
-void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
+void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf)
{
int old_mode;
@@ -1952,6 +1961,8 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
}
+ if (clear_vf)
+ mlx5_eswitch_clear_vf_vports_info(esw);
}
int mlx5_eswitch_init(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 6bd6f5895244..804a7ed2b969 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -270,7 +270,7 @@ int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode);
-void mlx5_eswitch_disable(struct mlx5_eswitch *esw);
+void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf);
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
u16 vport, u8 mac[ETH_ALEN]);
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
@@ -603,7 +603,7 @@ void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw);
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode) { return 0; }
-static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {}
+static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) {}
static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; }
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 00d71db15f22..2276bb183705 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -285,7 +285,6 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
mlx5_eswitch_set_rule_source_port(esw, spec, attr);
- spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
if (attr->outer_match_level != MLX5_MATCH_NONE)
spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
@@ -1370,7 +1369,7 @@ static int esw_offloads_start(struct mlx5_eswitch *esw,
return -EINVAL;
}
- mlx5_eswitch_disable(esw);
+ mlx5_eswitch_disable(esw, false);
mlx5_eswitch_update_num_of_vfs(esw, esw->dev->priv.sriov.num_vfs);
err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
if (err) {
@@ -2196,7 +2195,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
{
int err, err1;
- mlx5_eswitch_disable(esw);
+ mlx5_eswitch_disable(esw, false);
err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
index 1d55a324a17e..7879e1746297 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
@@ -177,22 +177,32 @@ mlx5_eswitch_termtbl_actions_move(struct mlx5_flow_act *src,
memset(&src->vlan[1], 0, sizeof(src->vlan[1]));
}
+static bool mlx5_eswitch_offload_is_uplink_port(const struct mlx5_eswitch *esw,
+ const struct mlx5_flow_spec *spec)
+{
+ u32 port_mask, port_value;
+
+ if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
+ return spec->flow_context.flow_source == MLX5_VPORT_UPLINK;
+
+ port_mask = MLX5_GET(fte_match_param, spec->match_criteria,
+ misc_parameters.source_port);
+ port_value = MLX5_GET(fte_match_param, spec->match_value,
+ misc_parameters.source_port);
+ return (port_mask & port_value & 0xffff) == MLX5_VPORT_UPLINK;
+}
+
bool
mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
struct mlx5_flow_act *flow_act,
struct mlx5_flow_spec *spec)
{
- u32 port_mask = MLX5_GET(fte_match_param, spec->match_criteria,
- misc_parameters.source_port);
- u32 port_value = MLX5_GET(fte_match_param, spec->match_value,
- misc_parameters.source_port);
-
if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, termination_table))
return false;
/* push vlan on RX */
return (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) &&
- ((port_mask & port_value) == MLX5_VPORT_UPLINK);
+ mlx5_eswitch_offload_is_uplink_port(esw, spec);
}
struct mlx5_flow_handle *
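The termtbl hunk makes the uplink test capability-aware: devices that report a flow_source field are checked directly against the uplink source, and only older devices fall back to the masked source_port match. A sketch of both paths; the 0xffff uplink vport number follows the mlx5 convention but should be treated as a placeholder here:

#include <stdio.h>

#define VPORT_UPLINK 0xffffu	/* placeholder for MLX5_VPORT_UPLINK */

static int is_uplink(int has_flow_source, unsigned int flow_source,
		     unsigned int port_mask, unsigned int port_value)
{
	if (has_flow_source)	/* capability present: trust the metadata */
		return flow_source == VPORT_UPLINK;
	/* legacy path: masked match on the 16-bit source port */
	return (port_mask & port_value & 0xffff) == VPORT_UPLINK;
}

int main(void)
{
	printf("%d\n", is_uplink(0, 0, 0xffff, VPORT_UPLINK));	/* 1 */
	printf("%d\n", is_uplink(1, VPORT_UPLINK, 0, 0));	/* 1 */
	return 0;
}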
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h
index eb8b0fe0b4e1..11621d265d7e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h
@@ -35,11 +35,11 @@
#include <linux/mlx5/driver.h>
-enum mlx5_fpga_device_id {
- MLX5_FPGA_DEVICE_UNKNOWN = 0,
- MLX5_FPGA_DEVICE_KU040 = 1,
- MLX5_FPGA_DEVICE_KU060 = 2,
- MLX5_FPGA_DEVICE_KU060_2 = 3,
+enum mlx5_fpga_id {
+ MLX5_FPGA_NEWTON = 0,
+ MLX5_FPGA_EDISON = 1,
+ MLX5_FPGA_MORSE = 2,
+ MLX5_FPGA_MORSEQ = 3,
};
enum mlx5_fpga_image {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
index 4c50efe4e7f1..61021133029e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
@@ -464,8 +464,10 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
}
err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn, &irqn);
- if (err)
+ if (err) {
+ kvfree(in);
goto err_cqwq;
+ }
cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
MLX5_SET(cqc, cqc, log_cq_size, ilog2(cq_size));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
index d046d1ec2a86..2ce4241459ce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
@@ -81,19 +81,28 @@ static const char *mlx5_fpga_image_name(enum mlx5_fpga_image image)
}
}
-static const char *mlx5_fpga_device_name(u32 device)
+static const char *mlx5_fpga_name(u32 fpga_id)
{
- switch (device) {
- case MLX5_FPGA_DEVICE_KU040:
- return "ku040";
- case MLX5_FPGA_DEVICE_KU060:
- return "ku060";
- case MLX5_FPGA_DEVICE_KU060_2:
- return "ku060_2";
- case MLX5_FPGA_DEVICE_UNKNOWN:
- default:
- return "unknown";
+ static char ret[32];
+
+ switch (fpga_id) {
+ case MLX5_FPGA_NEWTON:
+ return "Newton";
+ case MLX5_FPGA_EDISON:
+ return "Edison";
+ case MLX5_FPGA_MORSE:
+ return "Morse";
+ case MLX5_FPGA_MORSEQ:
+ return "MorseQ";
}
+
+ snprintf(ret, sizeof(ret), "Unknown %d", fpga_id);
+ return ret;
+}
+
+static int mlx5_is_fpga_lookaside(u32 fpga_id)
+{
+ return fpga_id != MLX5_FPGA_NEWTON && fpga_id != MLX5_FPGA_EDISON;
}
static int mlx5_fpga_device_load_check(struct mlx5_fpga_device *fdev)
@@ -110,8 +119,12 @@ static int mlx5_fpga_device_load_check(struct mlx5_fpga_device *fdev)
fdev->last_admin_image = query.admin_image;
fdev->last_oper_image = query.oper_image;
- mlx5_fpga_dbg(fdev, "Status %u; Admin image %u; Oper image %u\n",
- query.status, query.admin_image, query.oper_image);
+ mlx5_fpga_info(fdev, "Status %u; Admin image %u; Oper image %u\n",
+ query.status, query.admin_image, query.oper_image);
+
+ /* for FPGA lookaside projects, the FPGA load status is not important */
+ if (mlx5_is_fpga_lookaside(MLX5_CAP_FPGA(fdev->mdev, fpga_id)))
+ return 0;
if (query.status != MLX5_FPGA_STATUS_SUCCESS) {
mlx5_fpga_err(fdev, "%s image failed to load; status %u\n",
@@ -167,25 +180,30 @@ int mlx5_fpga_device_start(struct mlx5_core_dev *mdev)
struct mlx5_fpga_device *fdev = mdev->fpga;
unsigned int max_num_qps;
unsigned long flags;
- u32 fpga_device_id;
+ u32 fpga_id;
int err;
if (!fdev)
return 0;
- err = mlx5_fpga_device_load_check(fdev);
+ err = mlx5_fpga_caps(fdev->mdev);
if (err)
goto out;
- err = mlx5_fpga_caps(fdev->mdev);
+ err = mlx5_fpga_device_load_check(fdev);
if (err)
goto out;
- fpga_device_id = MLX5_CAP_FPGA(fdev->mdev, fpga_device);
- mlx5_fpga_info(fdev, "%s:%u; %s image, version %u; SBU %06x:%04x version %d\n",
- mlx5_fpga_device_name(fpga_device_id),
- fpga_device_id,
+ fpga_id = MLX5_CAP_FPGA(fdev->mdev, fpga_id);
+ mlx5_fpga_info(fdev, "FPGA card %s:%u\n", mlx5_fpga_name(fpga_id), fpga_id);
+
+ /* No QPs if FPGA does not participate in net processing */
+ if (mlx5_is_fpga_lookaside(fpga_id))
+ goto out;
+
+ mlx5_fpga_info(fdev, "%s(%d): image, version %u; SBU %06x:%04x version %d\n",
mlx5_fpga_image_name(fdev->last_oper_image),
+ fdev->last_oper_image,
MLX5_CAP_FPGA(fdev->mdev, image_version),
MLX5_CAP_FPGA(fdev->mdev, ieee_vendor_id),
MLX5_CAP_FPGA(fdev->mdev, sandbox_product_id),
@@ -264,6 +282,9 @@ void mlx5_fpga_device_stop(struct mlx5_core_dev *mdev)
if (!fdev)
return;
+ if (mlx5_is_fpga_lookaside(MLX5_CAP_FPGA(fdev->mdev, fpga_id)))
+ return;
+
spin_lock_irqsave(&fdev->state_lock, flags);
if (fdev->state != MLX5_FPGA_STATUS_SUCCESS) {
spin_unlock_irqrestore(&fdev->state_lock, flags);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 579c306caa7b..3c816e81f8d9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -507,7 +507,8 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(dest_format_struct, in_dests,
destination_eswitch_owner_vhca_id,
dst->dest_attr.vport.vhca_id);
- if (extended_dest) {
+ if (extended_dest &&
+ dst->dest_attr.vport.pkt_reformat) {
MLX5_SET(dest_format_struct, in_dests,
packet_reformat,
!!(dst->dest_attr.vport.flags &
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 3bbb49354829..0246f5cdd355 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -531,9 +531,16 @@ static void del_hw_fte(struct fs_node *node)
}
}
+static void del_sw_fte_rcu(struct rcu_head *head)
+{
+ struct fs_fte *fte = container_of(head, struct fs_fte, rcu);
+ struct mlx5_flow_steering *steering = get_steering(&fte->node);
+
+ kmem_cache_free(steering->ftes_cache, fte);
+}
+
static void del_sw_fte(struct fs_node *node)
{
- struct mlx5_flow_steering *steering = get_steering(node);
struct mlx5_flow_group *fg;
struct fs_fte *fte;
int err;
@@ -546,7 +553,8 @@ static void del_sw_fte(struct fs_node *node)
rhash_fte);
WARN_ON(err);
ida_simple_remove(&fg->fte_allocator, fte->index - fg->start_index);
- kmem_cache_free(steering->ftes_cache, fte);
+
+ call_rcu(&fte->rcu, del_sw_fte_rcu);
}
static void del_hw_flow_group(struct fs_node *node)
@@ -1623,22 +1631,47 @@ static u64 matched_fgs_get_version(struct list_head *match_head)
}
static struct fs_fte *
-lookup_fte_locked(struct mlx5_flow_group *g,
- const u32 *match_value,
- bool take_write)
+lookup_fte_for_write_locked(struct mlx5_flow_group *g, const u32 *match_value)
{
struct fs_fte *fte_tmp;
- if (take_write)
- nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
- else
- nested_down_read_ref_node(&g->node, FS_LOCK_PARENT);
- fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value,
- rhash_fte);
+ nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
+
+ fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value, rhash_fte);
+ if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
+ fte_tmp = NULL;
+ goto out;
+ }
+
+ if (!fte_tmp->node.active) {
+ tree_put_node(&fte_tmp->node, false);
+ fte_tmp = NULL;
+ goto out;
+ }
+ nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
+
+out:
+ up_write_ref_node(&g->node, false);
+ return fte_tmp;
+}
+
+static struct fs_fte *
+lookup_fte_for_read_locked(struct mlx5_flow_group *g, const u32 *match_value)
+{
+ struct fs_fte *fte_tmp;
+
+ if (!tree_get_node(&g->node))
+ return NULL;
+
+ rcu_read_lock();
+ fte_tmp = rhashtable_lookup(&g->ftes_hash, match_value, rhash_fte);
if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
+ rcu_read_unlock();
fte_tmp = NULL;
goto out;
}
+ rcu_read_unlock();
+
if (!fte_tmp->node.active) {
tree_put_node(&fte_tmp->node, false);
fte_tmp = NULL;
@@ -1646,14 +1679,21 @@ lookup_fte_locked(struct mlx5_flow_group *g,
}
nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
+
out:
- if (take_write)
- up_write_ref_node(&g->node, false);
- else
- up_read_ref_node(&g->node);
+ tree_put_node(&g->node, false);
return fte_tmp;
}
+static struct fs_fte *
+lookup_fte_locked(struct mlx5_flow_group *g, const u32 *match_value, bool write)
+{
+ if (write)
+ return lookup_fte_for_write_locked(g, match_value);
+ else
+ return lookup_fte_for_read_locked(g, match_value);
+}
+
static struct mlx5_flow_handle *
try_add_to_existing_fg(struct mlx5_flow_table *ft,
struct list_head *match_head,
@@ -1814,6 +1854,13 @@ search_again_locked:
return rule;
}
+ fte = alloc_fte(ft, spec, flow_act);
+ if (IS_ERR(fte)) {
+ up_write_ref_node(&ft->node, false);
+ err = PTR_ERR(fte);
+ goto err_alloc_fte;
+ }
+
nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
up_write_ref_node(&ft->node, false);
@@ -1821,17 +1868,9 @@ search_again_locked:
if (err)
goto err_release_fg;
- fte = alloc_fte(ft, spec, flow_act);
- if (IS_ERR(fte)) {
- err = PTR_ERR(fte);
- goto err_release_fg;
- }
-
err = insert_fte(g, fte);
- if (err) {
- kmem_cache_free(steering->ftes_cache, fte);
+ if (err)
goto err_release_fg;
- }
nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
up_write_ref_node(&g->node, false);
@@ -1843,6 +1882,8 @@ search_again_locked:
err_release_fg:
up_write_ref_node(&g->node, false);
+ kmem_cache_free(steering->ftes_cache, fte);
+err_alloc_fte:
tree_put_node(&g->node, false);
return ERR_PTR(err);
}
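The fs_core.c change frees software FTEs through call_rcu() so that the new lockless read path (rhashtable_lookup() under rcu_read_lock()) can never dereference freed memory; the callback recovers the object from its embedded rcu_head with container_of. A userspace model of that shape; the call_rcu stand-in below runs immediately, whereas the kernel defers it past a grace period:

#include <stdio.h>
#include <stddef.h>

struct rcu_head { void (*func)(struct rcu_head *); };

struct fs_fte {
	int index;
	struct rcu_head rcu;	/* embedded cookie, as in the diff */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void del_sw_fte_rcu(struct rcu_head *head)
{
	struct fs_fte *fte = container_of(head, struct fs_fte, rcu);

	printf("freeing fte %d after the grace period\n", fte->index);
}

static void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *))
{
	head->func = func;
	head->func(head);	/* immediate here; deferred in the kernel */
}

int main(void)
{
	struct fs_fte fte = { .index = 7 };

	call_rcu(&fte.rcu, del_sw_fte_rcu);
	return 0;
}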
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 00717eba2256..f278298b0f6e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -202,6 +202,7 @@ struct fs_fte {
enum fs_fte_status status;
struct mlx5_fc *counter;
struct rhash_head hash;
+ struct rcu_head rcu;
int modify_mask;
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index d685122d9ff7..e718170a80c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -390,7 +390,8 @@ static void print_health_info(struct mlx5_core_dev *dev)
static int
mlx5_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
- struct devlink_fmsg *fmsg)
+ struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter);
struct mlx5_core_health *health = &dev->priv.health;
@@ -491,7 +492,8 @@ mlx5_fw_reporter_heath_buffer_data_put(struct mlx5_core_dev *dev,
static int
mlx5_fw_reporter_dump(struct devlink_health_reporter *reporter,
- struct devlink_fmsg *fmsg, void *priv_ctx)
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter);
int err;
@@ -545,7 +547,8 @@ static const struct devlink_health_reporter_ops mlx5_fw_reporter_ops = {
static int
mlx5_fw_fatal_reporter_recover(struct devlink_health_reporter *reporter,
- void *priv_ctx)
+ void *priv_ctx,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter);
@@ -555,7 +558,8 @@ mlx5_fw_fatal_reporter_recover(struct devlink_health_reporter *reporter,
#define MLX5_CR_DUMP_CHUNK_SIZE 256
static int
mlx5_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter,
- struct devlink_fmsg *fmsg, void *priv_ctx)
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter);
u32 crdump_size = dev->priv.health.crdump_size;
@@ -572,7 +576,7 @@ mlx5_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter,
return -ENOMEM;
err = mlx5_crdump_collect(dev, cr_data);
if (err)
- return err;
+ goto free_data;
if (priv_ctx) {
struct mlx5_fw_reporter_ctx *fw_reporter_ctx = priv_ctx;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index c5ef2ff26465..fc0d9583475d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -145,34 +145,35 @@ static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
{
*port1 = 1;
*port2 = 2;
- if (!tracker->netdev_state[0].tx_enabled ||
- !tracker->netdev_state[0].link_up) {
+ if (!tracker->netdev_state[MLX5_LAG_P1].tx_enabled ||
+ !tracker->netdev_state[MLX5_LAG_P1].link_up) {
*port1 = 2;
return;
}
- if (!tracker->netdev_state[1].tx_enabled ||
- !tracker->netdev_state[1].link_up)
+ if (!tracker->netdev_state[MLX5_LAG_P2].tx_enabled ||
+ !tracker->netdev_state[MLX5_LAG_P2].link_up)
*port2 = 1;
}
void mlx5_modify_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker)
{
- struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
+ struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
u8 v2p_port1, v2p_port2;
int err;
mlx5_infer_tx_affinity_mapping(tracker, &v2p_port1,
&v2p_port2);
- if (v2p_port1 != ldev->v2p_map[0] ||
- v2p_port2 != ldev->v2p_map[1]) {
- ldev->v2p_map[0] = v2p_port1;
- ldev->v2p_map[1] = v2p_port2;
+ if (v2p_port1 != ldev->v2p_map[MLX5_LAG_P1] ||
+ v2p_port2 != ldev->v2p_map[MLX5_LAG_P2]) {
+ ldev->v2p_map[MLX5_LAG_P1] = v2p_port1;
+ ldev->v2p_map[MLX5_LAG_P2] = v2p_port2;
mlx5_core_info(dev0, "modify lag map port 1:%d port 2:%d",
- ldev->v2p_map[0], ldev->v2p_map[1]);
+ ldev->v2p_map[MLX5_LAG_P1],
+ ldev->v2p_map[MLX5_LAG_P2]);
err = mlx5_cmd_modify_lag(dev0, v2p_port1, v2p_port2);
if (err)
@@ -185,16 +186,17 @@ void mlx5_modify_lag(struct mlx5_lag *ldev,
static int mlx5_create_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker)
{
- struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
+ struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
int err;
- mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[0],
- &ldev->v2p_map[1]);
+ mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[MLX5_LAG_P1],
+ &ldev->v2p_map[MLX5_LAG_P2]);
mlx5_core_info(dev0, "lag map port 1:%d port 2:%d",
- ldev->v2p_map[0], ldev->v2p_map[1]);
+ ldev->v2p_map[MLX5_LAG_P1], ldev->v2p_map[MLX5_LAG_P2]);
- err = mlx5_cmd_create_lag(dev0, ldev->v2p_map[0], ldev->v2p_map[1]);
+ err = mlx5_cmd_create_lag(dev0, ldev->v2p_map[MLX5_LAG_P1],
+ ldev->v2p_map[MLX5_LAG_P2]);
if (err)
mlx5_core_err(dev0,
"Failed to create LAG (%d)\n",
@@ -207,7 +209,7 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
u8 flags)
{
bool roce_lag = !!(flags & MLX5_LAG_FLAG_ROCE);
- struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
+ struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
int err;
err = mlx5_create_lag(ldev, tracker);
@@ -229,7 +231,7 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
{
- struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
+ struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
bool roce_lag = __mlx5_lag_is_roce(ldev);
int err;
@@ -252,14 +254,15 @@ static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
{
- if (!ldev->pf[0].dev || !ldev->pf[1].dev)
+ if (!ldev->pf[MLX5_LAG_P1].dev || !ldev->pf[MLX5_LAG_P2].dev)
return false;
#ifdef CONFIG_MLX5_ESWITCH
- return mlx5_esw_lag_prereq(ldev->pf[0].dev, ldev->pf[1].dev);
+ return mlx5_esw_lag_prereq(ldev->pf[MLX5_LAG_P1].dev,
+ ldev->pf[MLX5_LAG_P2].dev);
#else
- return (!mlx5_sriov_is_enabled(ldev->pf[0].dev) &&
- !mlx5_sriov_is_enabled(ldev->pf[1].dev));
+ return (!mlx5_sriov_is_enabled(ldev->pf[MLX5_LAG_P1].dev) &&
+ !mlx5_sriov_is_enabled(ldev->pf[MLX5_LAG_P2].dev));
#endif
}
@@ -285,8 +288,8 @@ static void mlx5_lag_remove_ib_devices(struct mlx5_lag *ldev)
static void mlx5_do_bond(struct mlx5_lag *ldev)
{
- struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
- struct mlx5_core_dev *dev1 = ldev->pf[1].dev;
+ struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
struct lag_tracker tracker;
bool do_bond, roce_lag;
int err;
@@ -692,10 +695,11 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
goto unlock;
if (ldev->tracker.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
- ndev = ldev->tracker.netdev_state[0].tx_enabled ?
- ldev->pf[0].netdev : ldev->pf[1].netdev;
+ ndev = ldev->tracker.netdev_state[MLX5_LAG_P1].tx_enabled ?
+ ldev->pf[MLX5_LAG_P1].netdev :
+ ldev->pf[MLX5_LAG_P2].netdev;
} else {
- ndev = ldev->pf[0].netdev;
+ ndev = ldev->pf[MLX5_LAG_P1].netdev;
}
if (ndev)
dev_hold(ndev);
@@ -717,7 +721,8 @@ bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv)
return true;
ldev = mlx5_lag_dev_get(dev);
- if (!ldev || !__mlx5_lag_is_roce(ldev) || ldev->pf[0].dev == dev)
+ if (!ldev || !__mlx5_lag_is_roce(ldev) ||
+ ldev->pf[MLX5_LAG_P1].dev == dev)
return true;
/* If bonded, we do not add an IB device for PF1. */
@@ -746,11 +751,11 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
ldev = mlx5_lag_dev_get(dev);
if (ldev && __mlx5_lag_is_roce(ldev)) {
num_ports = MLX5_MAX_PORTS;
- mdev[0] = ldev->pf[0].dev;
- mdev[1] = ldev->pf[1].dev;
+ mdev[MLX5_LAG_P1] = ldev->pf[MLX5_LAG_P1].dev;
+ mdev[MLX5_LAG_P2] = ldev->pf[MLX5_LAG_P2].dev;
} else {
num_ports = 1;
- mdev[0] = dev;
+ mdev[MLX5_LAG_P1] = dev;
}
for (i = 0; i < num_ports; ++i) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag.h
index 1dea0b1c9826..f1068aac6406 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.h
@@ -8,6 +8,11 @@
#include "lag_mp.h"
enum {
+ MLX5_LAG_P1,
+ MLX5_LAG_P2,
+};
+
+enum {
MLX5_LAG_FLAG_ROCE = 1 << 0,
MLX5_LAG_FLAG_SRIOV = 1 << 1,
MLX5_LAG_FLAG_MULTIPATH = 1 << 2,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
index 5d20d615663e..b70afa310ad2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
@@ -11,10 +11,11 @@
static bool mlx5_lag_multipath_check_prereq(struct mlx5_lag *ldev)
{
- if (!ldev->pf[0].dev || !ldev->pf[1].dev)
+ if (!ldev->pf[MLX5_LAG_P1].dev || !ldev->pf[MLX5_LAG_P2].dev)
return false;
- return mlx5_esw_multipath_prereq(ldev->pf[0].dev, ldev->pf[1].dev);
+ return mlx5_esw_multipath_prereq(ldev->pf[MLX5_LAG_P1].dev,
+ ldev->pf[MLX5_LAG_P2].dev);
}
static bool __mlx5_lag_is_multipath(struct mlx5_lag *ldev)
@@ -43,7 +44,8 @@ bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev)
* 2 - set affinity to port 2.
*
**/
-static void mlx5_lag_set_port_affinity(struct mlx5_lag *ldev, int port)
+static void mlx5_lag_set_port_affinity(struct mlx5_lag *ldev,
+ enum mlx5_lag_port_affinity port)
{
struct lag_tracker tracker;
@@ -51,37 +53,37 @@ static void mlx5_lag_set_port_affinity(struct mlx5_lag *ldev, int port)
return;
switch (port) {
- case 0:
- tracker.netdev_state[0].tx_enabled = true;
- tracker.netdev_state[1].tx_enabled = true;
- tracker.netdev_state[0].link_up = true;
- tracker.netdev_state[1].link_up = true;
+ case MLX5_LAG_NORMAL_AFFINITY:
+ tracker.netdev_state[MLX5_LAG_P1].tx_enabled = true;
+ tracker.netdev_state[MLX5_LAG_P2].tx_enabled = true;
+ tracker.netdev_state[MLX5_LAG_P1].link_up = true;
+ tracker.netdev_state[MLX5_LAG_P2].link_up = true;
break;
- case 1:
- tracker.netdev_state[0].tx_enabled = true;
- tracker.netdev_state[0].link_up = true;
- tracker.netdev_state[1].tx_enabled = false;
- tracker.netdev_state[1].link_up = false;
+ case MLX5_LAG_P1_AFFINITY:
+ tracker.netdev_state[MLX5_LAG_P1].tx_enabled = true;
+ tracker.netdev_state[MLX5_LAG_P1].link_up = true;
+ tracker.netdev_state[MLX5_LAG_P2].tx_enabled = false;
+ tracker.netdev_state[MLX5_LAG_P2].link_up = false;
break;
- case 2:
- tracker.netdev_state[0].tx_enabled = false;
- tracker.netdev_state[0].link_up = false;
- tracker.netdev_state[1].tx_enabled = true;
- tracker.netdev_state[1].link_up = true;
+ case MLX5_LAG_P2_AFFINITY:
+ tracker.netdev_state[MLX5_LAG_P1].tx_enabled = false;
+ tracker.netdev_state[MLX5_LAG_P1].link_up = false;
+ tracker.netdev_state[MLX5_LAG_P2].tx_enabled = true;
+ tracker.netdev_state[MLX5_LAG_P2].link_up = true;
break;
default:
- mlx5_core_warn(ldev->pf[0].dev, "Invalid affinity port %d",
- port);
+ mlx5_core_warn(ldev->pf[MLX5_LAG_P1].dev,
+ "Invalid affinity port %d", port);
return;
}
- if (tracker.netdev_state[0].tx_enabled)
- mlx5_notifier_call_chain(ldev->pf[0].dev->priv.events,
+ if (tracker.netdev_state[MLX5_LAG_P1].tx_enabled)
+ mlx5_notifier_call_chain(ldev->pf[MLX5_LAG_P1].dev->priv.events,
MLX5_DEV_EVENT_PORT_AFFINITY,
(void *)0);
- if (tracker.netdev_state[1].tx_enabled)
- mlx5_notifier_call_chain(ldev->pf[1].dev->priv.events,
+ if (tracker.netdev_state[MLX5_LAG_P2].tx_enabled)
+ mlx5_notifier_call_chain(ldev->pf[MLX5_LAG_P2].dev->priv.events,
MLX5_DEV_EVENT_PORT_AFFINITY,
(void *)0);
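For context on the new affinity enum: each state reduces to a pair of tx_enabled/link_up flags per port. A minimal userspace sketch of the same state table (the struct and names here are stand-ins, not the kernel's lag_tracker):

#include <stdbool.h>
#include <stdio.h>

enum lag_port { P1, P2, PORT_NUM };
enum lag_affinity { NORMAL_AFFINITY, P1_AFFINITY, P2_AFFINITY };

struct port_state { bool tx_enabled; bool link_up; };

/* Condenses the switch in mlx5_lag_set_port_affinity(): NORMAL enables
 * both ports, P1/P2 affinity enables exactly one and downs the other.
 */
static void set_affinity(struct port_state s[PORT_NUM], enum lag_affinity a)
{
	s[P1].tx_enabled = s[P1].link_up = (a != P2_AFFINITY);
	s[P2].tx_enabled = s[P2].link_up = (a != P1_AFFINITY);
}

int main(void)
{
	struct port_state s[PORT_NUM];

	set_affinity(s, P2_AFFINITY);
	printf("P1 tx=%d P2 tx=%d\n", s[P1].tx_enabled, s[P2].tx_enabled); /* P1 tx=0 P2 tx=1 */
	return 0;
}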
@@ -141,11 +143,12 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
/* Verify next hops are ports of the same hca */
fib_nh0 = fib_info_nh(fi, 0);
fib_nh1 = fib_info_nh(fi, 1);
- if (!(fib_nh0->fib_nh_dev == ldev->pf[0].netdev &&
- fib_nh1->fib_nh_dev == ldev->pf[1].netdev) &&
- !(fib_nh0->fib_nh_dev == ldev->pf[1].netdev &&
- fib_nh1->fib_nh_dev == ldev->pf[0].netdev)) {
- mlx5_core_warn(ldev->pf[0].dev, "Multipath offload requires two ports of the same HCA\n");
+ if (!(fib_nh0->fib_nh_dev == ldev->pf[MLX5_LAG_P1].netdev &&
+ fib_nh1->fib_nh_dev == ldev->pf[MLX5_LAG_P2].netdev) &&
+ !(fib_nh0->fib_nh_dev == ldev->pf[MLX5_LAG_P2].netdev &&
+ fib_nh1->fib_nh_dev == ldev->pf[MLX5_LAG_P1].netdev)) {
+ mlx5_core_warn(ldev->pf[MLX5_LAG_P1].dev,
+ "Multipath offload require two ports of the same HCA\n");
return;
}
@@ -157,7 +160,7 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
mlx5_activate_lag(ldev, &tracker, MLX5_LAG_FLAG_MULTIPATH);
}
- mlx5_lag_set_port_affinity(ldev, 0);
+ mlx5_lag_set_port_affinity(ldev, MLX5_LAG_NORMAL_AFFINITY);
mp->mfi = fi;
}
@@ -182,7 +185,7 @@ static void mlx5_lag_fib_nexthop_event(struct mlx5_lag *ldev,
}
} else if (event == FIB_EVENT_NH_ADD &&
fib_info_num_path(fi) == 2) {
- mlx5_lag_set_port_affinity(ldev, 0);
+ mlx5_lag_set_port_affinity(ldev, MLX5_LAG_NORMAL_AFFINITY);
}
}
@@ -248,9 +251,6 @@ static int mlx5_lag_fib_event(struct notifier_block *nb,
struct net_device *fib_dev;
struct fib_info *fi;
- if (!net_eq(info->net, &init_net))
- return NOTIFY_DONE;
-
if (info->family != AF_INET)
return NOTIFY_DONE;
@@ -270,8 +270,8 @@ static int mlx5_lag_fib_event(struct notifier_block *nb,
return notifier_from_errno(-EINVAL);
}
fib_dev = fib_info_nh(fen_info->fi, 0)->fib_nh_dev;
- if (fib_dev != ldev->pf[0].netdev &&
- fib_dev != ldev->pf[1].netdev) {
+ if (fib_dev != ldev->pf[MLX5_LAG_P1].netdev &&
+ fib_dev != ldev->pf[MLX5_LAG_P2].netdev) {
return NOTIFY_DONE;
}
fib_work = mlx5_lag_init_fib_work(ldev, event);
@@ -311,8 +311,8 @@ int mlx5_lag_mp_init(struct mlx5_lag *ldev)
return 0;
mp->fib_nb.notifier_call = mlx5_lag_fib_event;
- err = register_fib_notifier(&mp->fib_nb,
- mlx5_lag_fib_event_flush);
+ err = register_fib_notifier(&init_net, &mp->fib_nb,
+ mlx5_lag_fib_event_flush, NULL);
if (err)
mp->fib_nb.notifier_call = NULL;
@@ -326,6 +326,6 @@ void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev)
if (!mp->fib_nb.notifier_call)
return;
- unregister_fib_notifier(&mp->fib_nb);
+ unregister_fib_notifier(&init_net, &mp->fib_nb);
mp->fib_nb.notifier_call = NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h
index 6d14b1100be9..79be89e9c7a4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h
@@ -7,6 +7,12 @@
#include "lag.h"
#include "mlx5_core.h"
+enum mlx5_lag_port_affinity {
+ MLX5_LAG_NORMAL_AFFINITY,
+ MLX5_LAG_P1_AFFINITY,
+ MLX5_LAG_P2_AFFINITY,
+};
+
struct lag_mp {
struct notifier_block fib_nb;
struct fib_info *mfi; /* used in tracking fib events */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index e47dd7c1b909..c9a091d3226c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1228,8 +1228,6 @@ function_teardown:
static int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup)
{
- int err = 0;
-
if (cleanup) {
mlx5_unregister_device(dev);
mlx5_drain_health_wq(dev);
@@ -1257,7 +1255,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup)
mlx5_function_teardown(dev, cleanup);
out:
mutex_unlock(&dev->intf_state_mutex);
- return err;
+ return 0;
}
static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index 9231b39d18b2..c501bf2a0252 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -112,17 +112,11 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
u32 out[MLX5_ST_SZ_DW(destroy_mkey_out)] = {0};
u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)] = {0};
struct xarray *mkeys = &dev->priv.mkey_table;
- struct mlx5_core_mkey *deleted_mkey;
unsigned long flags;
xa_lock_irqsave(mkeys, flags);
- deleted_mkey = __xa_erase(mkeys, mlx5_base_mkey(mkey->key));
+ __xa_erase(mkeys, mlx5_base_mkey(mkey->key));
xa_unlock_irqrestore(mkeys, flags);
- if (!deleted_mkey) {
- mlx5_core_dbg(dev, "failed xarray delete of mkey 0x%x\n",
- mlx5_base_mkey(mkey->key));
- return -ENOENT;
- }
MLX5_SET(destroy_mkey_in, in, opcode, MLX5_CMD_OP_DESTROY_MKEY);
MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey->key));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index 61fcfd8b39b4..f641f1336402 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -108,7 +108,7 @@ enable_vfs_hca:
return 0;
}
-static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
+static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev, bool clear_vf)
{
struct mlx5_core_sriov *sriov = &dev->priv.sriov;
int num_vfs = pci_num_vf(dev->pdev);
@@ -127,7 +127,7 @@ static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
}
if (MLX5_ESWITCH_MANAGER(dev))
- mlx5_eswitch_disable(dev->priv.eswitch);
+ mlx5_eswitch_disable(dev->priv.eswitch, clear_vf);
if (mlx5_wait_for_pages(dev, &dev->priv.vfs_pages))
mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
@@ -147,7 +147,7 @@ static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs)
err = pci_enable_sriov(pdev, num_vfs);
if (err) {
mlx5_core_warn(dev, "pci_enable_sriov failed : %d\n", err);
- mlx5_device_disable_sriov(dev);
+ mlx5_device_disable_sriov(dev, true);
}
return err;
}
@@ -157,7 +157,7 @@ static void mlx5_sriov_disable(struct pci_dev *pdev)
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
pci_disable_sriov(pdev);
- mlx5_device_disable_sriov(dev);
+ mlx5_device_disable_sriov(dev, true);
}
int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
@@ -192,7 +192,7 @@ void mlx5_sriov_detach(struct mlx5_core_dev *dev)
if (!mlx5_core_is_pf(dev))
return;
- mlx5_device_disable_sriov(dev);
+ mlx5_device_disable_sriov(dev, false);
}
static u16 mlx5_get_max_vfs(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_crc32.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_crc32.c
deleted file mode 100644
index 9e2eccbb1eb8..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_crc32.c
+++ /dev/null
@@ -1,98 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/* Copyright (c) 2019 Mellanox Technologies. */
-
-/* Copyright (c) 2011-2015 Stephan Brumme. All rights reserved.
- * Slicing-by-16 contributed by Bulat Ziganshin
- *
- * This software is provided 'as-is', without any express or implied warranty.
- * In no event will the author be held liable for any damages arising from the
- * use of this software.
- *
- * Permission is granted to anyone to use this software for any purpose,
- * including commercial applications, and to alter it and redistribute it
- * freely, subject to the following restrictions:
- *
- * 1. The origin of this software must not be misrepresented; you must not
- * claim that you wrote the original software.
- * 2. If you use this software in a product, an acknowledgment in the product
- * documentation would be appreciated but is not required.
- * 3. Altered source versions must be plainly marked as such, and must not be
- * misrepresented as being the original software.
- *
- * Taken from http://create.stephan-brumme.com/crc32/ and adapted.
- */
-
-#include "dr_types.h"
-
-#define DR_STE_CRC_POLY 0xEDB88320L
-
-static u32 dr_ste_crc_tab32[8][256];
-
-static void dr_crc32_calc_lookup_entry(u32 (*tbl)[256], u8 i, u8 j)
-{
- tbl[i][j] = (tbl[i - 1][j] >> 8) ^ tbl[0][tbl[i - 1][j] & 0xff];
-}
-
-void mlx5dr_crc32_init_table(void)
-{
- u32 crc, i, j;
-
- for (i = 0; i < 256; i++) {
- crc = i;
- for (j = 0; j < 8; j++) {
- if (crc & 0x00000001L)
- crc = (crc >> 1) ^ DR_STE_CRC_POLY;
- else
- crc = crc >> 1;
- }
- dr_ste_crc_tab32[0][i] = crc;
- }
-
- /* Init CRC lookup tables according to crc_slice_8 algorithm */
- for (i = 0; i < 256; i++) {
- dr_crc32_calc_lookup_entry(dr_ste_crc_tab32, 1, i);
- dr_crc32_calc_lookup_entry(dr_ste_crc_tab32, 2, i);
- dr_crc32_calc_lookup_entry(dr_ste_crc_tab32, 3, i);
- dr_crc32_calc_lookup_entry(dr_ste_crc_tab32, 4, i);
- dr_crc32_calc_lookup_entry(dr_ste_crc_tab32, 5, i);
- dr_crc32_calc_lookup_entry(dr_ste_crc_tab32, 6, i);
- dr_crc32_calc_lookup_entry(dr_ste_crc_tab32, 7, i);
- }
-}
-
-/* Compute CRC32 (Slicing-by-8 algorithm) */
-u32 mlx5dr_crc32_slice8_calc(const void *input_data, size_t length)
-{
- const u32 *curr = (const u32 *)input_data;
- const u8 *curr_char;
- u32 crc = 0, one, two;
-
- if (!input_data)
- return 0;
-
- /* Process eight bytes at once (Slicing-by-8) */
- while (length >= 8) {
- one = *curr++ ^ crc;
- two = *curr++;
-
- crc = dr_ste_crc_tab32[0][(two >> 24) & 0xff]
- ^ dr_ste_crc_tab32[1][(two >> 16) & 0xff]
- ^ dr_ste_crc_tab32[2][(two >> 8) & 0xff]
- ^ dr_ste_crc_tab32[3][two & 0xff]
- ^ dr_ste_crc_tab32[4][(one >> 24) & 0xff]
- ^ dr_ste_crc_tab32[5][(one >> 16) & 0xff]
- ^ dr_ste_crc_tab32[6][(one >> 8) & 0xff]
- ^ dr_ste_crc_tab32[7][one & 0xff];
-
- length -= 8;
- }
-
- curr_char = (const u8 *)curr;
- /* Remaining 1 to 7 bytes (standard algorithm) */
- while (length-- != 0)
- crc = (crc >> 8) ^ dr_ste_crc_tab32[0][(crc & 0xff)
- ^ *curr_char++];
-
- return ((crc >> 24) & 0xff) | ((crc << 8) & 0xff0000) |
- ((crc >> 8) & 0xff00) | ((crc << 24) & 0xff000000);
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
index 5b24732b18c0..a9da961d4d2f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
@@ -326,9 +326,6 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
goto uninit_resourses;
}
- /* Init CRC table for htbl CRC calculation */
- mlx5dr_crc32_init_table();
-
return dmn;
uninit_resourses:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
index 913f1e5aaaf2..d7c7467e2d53 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
@@ -137,7 +137,8 @@ dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool,
icm_mr->icm_start_addr = icm_mr->dm.addr;
- align_diff = icm_mr->icm_start_addr % align_base;
+ /* align_base is always a power of 2 */
+ align_diff = icm_mr->icm_start_addr & (align_base - 1);
if (align_diff)
icm_mr->used_length = align_base - align_diff;
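The masked form works because align_base is a power of two: x % n equals x & (n - 1) whenever n is a power of two, since the mask keeps exactly the low log2(n) bits. A runnable check (the address and alignment values are illustrative, not taken from the driver):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t align_base = 4096;	/* power of 2, as the new comment guarantees */
	uint64_t addr = 0x12345678abcULL;
	uint64_t diff_mod = addr % align_base;
	uint64_t diff_mask = addr & (align_base - 1);

	assert(diff_mod == diff_mask);
	printf("align_diff=%llu used_length=%llu\n",
	       (unsigned long long)diff_mask,
	       diff_mask ? (unsigned long long)(align_base - diff_mask) : 0);
	return 0;
}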
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
index 67dea7698fc9..5db947df8763 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
@@ -146,17 +146,15 @@ dr_matcher_supp_flex_parser_vxlan_gpe(struct mlx5dr_domain *dmn)
int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher,
struct mlx5dr_matcher_rx_tx *nic_matcher,
- bool ipv6)
+ enum mlx5dr_ipv outer_ipv,
+ enum mlx5dr_ipv inner_ipv)
{
- if (ipv6) {
- nic_matcher->ste_builder = nic_matcher->ste_builder6;
- nic_matcher->num_of_builders = nic_matcher->num_of_builders6;
- } else {
- nic_matcher->ste_builder = nic_matcher->ste_builder4;
- nic_matcher->num_of_builders = nic_matcher->num_of_builders4;
- }
+ nic_matcher->ste_builder =
+ nic_matcher->ste_builder_arr[outer_ipv][inner_ipv];
+ nic_matcher->num_of_builders =
+ nic_matcher->num_of_builders_arr[outer_ipv][inner_ipv];
- if (!nic_matcher->num_of_builders) {
+ if (!nic_matcher->ste_builder) {
mlx5dr_dbg(matcher->tbl->dmn,
"Rule not supported on this matcher due to IP related fields\n");
return -EINVAL;
@@ -167,26 +165,19 @@ int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher,
static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
struct mlx5dr_matcher_rx_tx *nic_matcher,
- bool ipv6)
+ enum mlx5dr_ipv outer_ipv,
+ enum mlx5dr_ipv inner_ipv)
{
struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
struct mlx5dr_match_param mask = {};
struct mlx5dr_match_misc3 *misc3;
struct mlx5dr_ste_build *sb;
- u8 *num_of_builders;
bool inner, rx;
int idx = 0;
int ret, i;
- if (ipv6) {
- sb = nic_matcher->ste_builder6;
- num_of_builders = &nic_matcher->num_of_builders6;
- } else {
- sb = nic_matcher->ste_builder4;
- num_of_builders = &nic_matcher->num_of_builders4;
- }
-
+ sb = nic_matcher->ste_builder_arr[outer_ipv][inner_ipv];
rx = nic_dmn->ste_type == MLX5DR_STE_TYPE_RX;
/* Create a temporary mask to track and clear used mask fields */
@@ -249,7 +240,7 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
if (DR_MASK_IS_L2_DST(mask.outer, mask.misc, outer))
mlx5dr_ste_build_eth_l2_dst(&sb[idx++], &mask, inner, rx);
- if (ipv6) {
+ if (outer_ipv == DR_RULE_IPV6) {
if (dr_mask_is_dst_addr_set(&mask.outer))
mlx5dr_ste_build_eth_l3_ipv6_dst(&sb[idx++], &mask,
inner, rx);
@@ -325,7 +316,7 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
if (DR_MASK_IS_L2_DST(mask.inner, mask.misc, inner))
mlx5dr_ste_build_eth_l2_dst(&sb[idx++], &mask, inner, rx);
- if (ipv6) {
+ if (inner_ipv == DR_RULE_IPV6) {
if (dr_mask_is_dst_addr_set(&mask.inner))
mlx5dr_ste_build_eth_l3_ipv6_dst(&sb[idx++], &mask,
inner, rx);
@@ -373,7 +364,8 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
}
}
- *num_of_builders = idx;
+ nic_matcher->ste_builder = sb;
+ nic_matcher->num_of_builders_arr[outer_ipv][inner_ipv] = idx;
return 0;
}
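The [outer][inner] tables give the matcher a distinct builder list for each of the four IP version combinations, replacing the old v4/v6 pair. A toy sketch of the lookup shape (the array sizes and builder type are placeholders, not the driver's):

#include <stdio.h>

enum ipv { IPV4, IPV6, IPV_MAX };
#define MAX_STES 8

struct builder { int id; };

struct nic_matcher {
	struct builder arr[IPV_MAX][IPV_MAX][MAX_STES];
	unsigned char num[IPV_MAX][IPV_MAX];
	struct builder *ste_builder;
	unsigned char num_of_builders;
};

/* Mirrors mlx5dr_matcher_select_builders(): pick the precomputed list
 * for this (outer, inner) pair; an empty list means "unsupported".
 */
static int select_builders(struct nic_matcher *m, enum ipv outer, enum ipv inner)
{
	m->ste_builder = m->arr[outer][inner];
	m->num_of_builders = m->num[outer][inner];
	return m->num_of_builders ? 0 : -1;
}

int main(void)
{
	struct nic_matcher m = { .num = { [IPV6] = { [IPV4] = 3 } } };

	if (!select_builders(&m, IPV6, IPV4))
		printf("v6 outer / v4 inner: %d builders\n", m.num_of_builders);
	return 0;
}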
@@ -524,24 +516,33 @@ static void dr_matcher_uninit(struct mlx5dr_matcher *matcher)
}
}
-static int dr_matcher_init_nic(struct mlx5dr_matcher *matcher,
- struct mlx5dr_matcher_rx_tx *nic_matcher)
+static int dr_matcher_set_all_ste_builders(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_matcher_rx_tx *nic_matcher)
{
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
- int ret, ret_v4, ret_v6;
- ret_v4 = dr_matcher_set_ste_builders(matcher, nic_matcher, false);
- ret_v6 = dr_matcher_set_ste_builders(matcher, nic_matcher, true);
+ dr_matcher_set_ste_builders(matcher, nic_matcher, DR_RULE_IPV4, DR_RULE_IPV4);
+ dr_matcher_set_ste_builders(matcher, nic_matcher, DR_RULE_IPV4, DR_RULE_IPV6);
+ dr_matcher_set_ste_builders(matcher, nic_matcher, DR_RULE_IPV6, DR_RULE_IPV4);
+ dr_matcher_set_ste_builders(matcher, nic_matcher, DR_RULE_IPV6, DR_RULE_IPV6);
- if (ret_v4 && ret_v6) {
+ if (!nic_matcher->ste_builder) {
mlx5dr_dbg(dmn, "Cannot generate IPv4 or IPv6 rules with given mask\n");
return -EINVAL;
}
- if (!ret_v4)
- nic_matcher->ste_builder = nic_matcher->ste_builder4;
- else
- nic_matcher->ste_builder = nic_matcher->ste_builder6;
+ return 0;
+}
+
+static int dr_matcher_init_nic(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_matcher_rx_tx *nic_matcher)
+{
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ int ret;
+
+ ret = dr_matcher_set_all_ste_builders(matcher, nic_matcher);
+ if (ret)
+ return ret;
nic_matcher->e_anchor = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
DR_CHUNK_SIZE_1,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
index 4187f2b112b8..90c79a133692 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
@@ -788,12 +788,10 @@ again:
* it means that all the previous stes are the same,
* if so, this rule is duplicated.
*/
- if (mlx5dr_ste_is_last_in_rule(nic_matcher,
- matched_ste->ste_chain_location)) {
- mlx5dr_info(dmn, "Duplicate rule inserted, aborting!!\n");
- return NULL;
- }
- return matched_ste;
+ if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste_location))
+ return matched_ste;
+
+ mlx5dr_dbg(dmn, "Duplicate rule inserted\n");
}
if (!skip_rehash && dr_rule_need_enlarge_hash(cur_htbl, dmn, nic_dmn)) {
@@ -956,12 +954,12 @@ static int dr_rule_destroy_rule(struct mlx5dr_rule *rule)
return 0;
}
-static bool dr_rule_is_ipv6(struct mlx5dr_match_param *param)
+static enum mlx5dr_ipv dr_rule_get_ipv(struct mlx5dr_match_spec *spec)
{
- return (param->outer.ip_version == 6 ||
- param->inner.ip_version == 6 ||
- param->outer.ethertype == ETH_P_IPV6 ||
- param->inner.ethertype == ETH_P_IPV6);
+ if (spec->ip_version == 6 || spec->ethertype == ETH_P_IPV6)
+ return DR_RULE_IPV6;
+
+ return DR_RULE_IPV4;
}
static bool dr_rule_skip(enum mlx5dr_domain_type domain,
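With the IPv decision made per header, a rule with an IPv6 outer and an IPv4 inner now selects the right builder list instead of forcing both to v6. A runnable sketch of the classification (ETH_P_IPV6 carries its usual if_ether.h value):

#include <stdio.h>

#define ETH_P_IPV6 0x86DD

enum ipv { RULE_IPV4, RULE_IPV6 };

struct match_spec { unsigned int ip_version; unsigned int ethertype; };

/* Mirrors dr_rule_get_ipv(): a spec is IPv6 if either the L3 version
 * field or the L2 ethertype says so; everything else is IPv4.
 */
static enum ipv get_ipv(const struct match_spec *spec)
{
	return (spec->ip_version == 6 || spec->ethertype == ETH_P_IPV6) ?
	       RULE_IPV6 : RULE_IPV4;
}

int main(void)
{
	struct match_spec outer = { .ethertype = ETH_P_IPV6 };
	struct match_spec inner = { .ip_version = 4 };

	printf("outer=%s inner=%s\n",
	       get_ipv(&outer) == RULE_IPV6 ? "v6" : "v4",
	       get_ipv(&inner) == RULE_IPV6 ? "v6" : "v4");
	return 0;
}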
@@ -1025,7 +1023,8 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
ret = mlx5dr_matcher_select_builders(matcher,
nic_matcher,
- dr_rule_is_ipv6(param));
+ dr_rule_get_ipv(&param->outer),
+ dr_rule_get_ipv(&param->inner));
if (ret)
goto out_err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
index 4efe1b0be4a8..7e9d6cfc356f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2019 Mellanox Technologies. */
#include <linux/types.h>
+#include <linux/crc32.h>
#include "dr_types.h"
#define DR_STE_CRC_POLY 0xEDB88320L
@@ -107,6 +108,13 @@ struct dr_hw_ste_format {
u8 mask[DR_STE_SIZE_MASK];
};
+static u32 dr_ste_crc32_calc(const void *input_data, size_t length)
+{
+ u32 crc = crc32(0, input_data, length);
+
+ return htonl(crc);
+}
+
u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
{
struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
@@ -128,7 +136,7 @@ u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
bit = bit >> 1;
}
- crc32 = mlx5dr_crc32_slice8_calc(masked, DR_STE_SIZE_TAG);
+ crc32 = dr_ste_crc32_calc(masked, DR_STE_SIZE_TAG);
index = crc32 & (htbl->chunk->num_of_entries - 1);
return index;
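The open-coded slice-by-8 tables are gone: the hash is now the library crc32() byte-swapped via htonl(), masked to the power-of-two table size. A userspace sketch of the index step with a minimal bitwise CRC-32 (same 0xEDB88320 polynomial; the kernel's crc32() seeds differently, so the values here are illustrative only):

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>	/* htonl(), as in dr_ste_crc32_calc() */

static uint32_t crc32_bitwise(const void *data, size_t len)
{
	const uint8_t *p = data;
	uint32_t crc = 0;

	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320u : 0);
	}
	return crc;
}

int main(void)
{
	uint8_t tag[16] = "masked-ste-tag";
	uint32_t crc = htonl(crc32_bitwise(tag, sizeof(tag)));
	uint32_t num_of_entries = 1u << 10;	/* htbl size, power of 2 */

	printf("index = %u\n", (unsigned int)(crc & (num_of_entries - 1)));
	return 0;
}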
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 1cb3769d4e3c..c1f45a60ee6b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -106,6 +106,12 @@ enum mlx5dr_action_type {
DR_ACTION_TYP_MAX,
};
+enum mlx5dr_ipv {
+ DR_RULE_IPV4,
+ DR_RULE_IPV6,
+ DR_RULE_IPV_MAX,
+};
+
struct mlx5dr_icm_pool;
struct mlx5dr_icm_chunk;
struct mlx5dr_icm_bucket;
@@ -679,11 +685,11 @@ struct mlx5dr_matcher_rx_tx {
struct mlx5dr_ste_htbl *s_htbl;
struct mlx5dr_ste_htbl *e_anchor;
struct mlx5dr_ste_build *ste_builder;
- struct mlx5dr_ste_build ste_builder4[DR_RULE_MAX_STES];
- struct mlx5dr_ste_build ste_builder6[DR_RULE_MAX_STES];
+ struct mlx5dr_ste_build ste_builder_arr[DR_RULE_IPV_MAX]
+ [DR_RULE_IPV_MAX]
+ [DR_RULE_MAX_STES];
u8 num_of_builders;
- u8 num_of_builders4;
- u8 num_of_builders6;
+ u8 num_of_builders_arr[DR_RULE_IPV_MAX][DR_RULE_IPV_MAX];
u64 default_icm_addr;
struct mlx5dr_table_rx_tx *nic_tbl;
};
@@ -812,7 +818,8 @@ mlx5dr_matcher_supp_flex_parser_icmp_v6(struct mlx5dr_cmd_caps *caps)
int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher,
struct mlx5dr_matcher_rx_tx *nic_matcher,
- bool ipv6);
+ enum mlx5dr_ipv outer_ipv,
+ enum mlx5dr_ipv inner_ipv);
static inline u32
mlx5dr_icm_pool_chunk_size_to_entries(enum mlx5dr_icm_chunk_size chunk_size)
@@ -962,9 +969,6 @@ void mlx5dr_ste_copy_param(u8 match_criteria,
struct mlx5dr_match_param *set_param,
struct mlx5dr_match_parameters *mask);
-void mlx5dr_crc32_init_table(void);
-u32 mlx5dr_crc32_slice8_calc(const void *input_data, size_t length);
-
struct mlx5dr_qp {
struct mlx5_core_dev *mdev;
struct mlx5_wq_qp wq;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
index dd2315ce4441..f2a0e72285ba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -34,26 +34,6 @@
#include "wq.h"
#include "mlx5_core.h"
-u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
-{
- return (u32)wq->fbc.sz_m1 + 1;
-}
-
-u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
-{
- return wq->fbc.sz_m1 + 1;
-}
-
-u8 mlx5_cqwq_get_log_stride_size(struct mlx5_cqwq *wq)
-{
- return wq->fbc.log_stride;
-}
-
-u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq)
-{
- return (u32)wq->fbc.sz_m1 + 1;
-}
-
static u32 wq_get_byte_sz(u8 log_sz, u8 log_stride)
{
return ((u32)1 << log_sz) << log_stride;
@@ -96,6 +76,24 @@ err_db_free:
return err;
}
+void mlx5_wq_cyc_wqe_dump(struct mlx5_wq_cyc *wq, u16 ix, u8 nstrides)
+{
+ size_t len;
+ void *wqe;
+
+ if (!net_ratelimit())
+ return;
+
+ nstrides = max_t(u8, nstrides, 1);
+
+ len = nstrides << wq->fbc.log_stride;
+ wqe = mlx5_wq_cyc_get_wqe(wq, ix);
+
+ pr_info("WQE DUMP: WQ size %d WQ cur size %d, WQE index 0x%x, len: %ld\n",
+ mlx5_wq_cyc_get_size(wq), wq->cur_sz, ix, len);
+ print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, wqe, len, false);
+}
+
int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *qpc, struct mlx5_wq_qp *wq,
struct mlx5_wq_ctrl *wq_ctrl)
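The new dump helper ratelimits itself, clamps nstrides to at least one, and derives the byte count from the queue's log_stride. A userspace analogue of the length math and hex dump (print_hex_dump() and net_ratelimit() are kernel-only, so stand-ins are used):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for print_hex_dump(): 16 bytes per row. */
static void hex_dump(const uint8_t *buf, size_t len)
{
	for (size_t i = 0; i < len; i++)
		printf("%02x%c", buf[i], (i % 16 == 15 || i == len - 1) ? '\n' : ' ');
}

int main(void)
{
	uint8_t log_stride = 6;			/* 64-byte basic block */
	uint8_t nstrides = 0;
	uint8_t wqe[2 * 64] = { 0xde, 0xad };
	size_t len;

	if (nstrides < 1)			/* mirrors max_t(u8, nstrides, 1) */
		nstrides = 1;
	len = (size_t)nstrides << log_stride;
	printf("WQE dump, len: %zu\n", len);
	hex_dump(wqe, len);
	return 0;
}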
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
index 55791f71a778..d9a94bc223c0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -79,7 +79,7 @@ struct mlx5_wq_ll {
int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *wqc, struct mlx5_wq_cyc *wq,
struct mlx5_wq_ctrl *wq_ctrl);
-u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);
+void mlx5_wq_cyc_wqe_dump(struct mlx5_wq_cyc *wq, u16 ix, u8 nstrides);
int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *qpc, struct mlx5_wq_qp *wq,
@@ -88,16 +88,18 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *cqc, struct mlx5_cqwq *wq,
struct mlx5_wq_ctrl *wq_ctrl);
-u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq);
-u8 mlx5_cqwq_get_log_stride_size(struct mlx5_cqwq *wq);
int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *wqc, struct mlx5_wq_ll *wq,
struct mlx5_wq_ctrl *wq_ctrl);
-u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq);
void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl);
+static inline u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
+{
+ return (u32)wq->fbc.sz_m1 + 1;
+}
+
static inline int mlx5_wq_cyc_is_full(struct mlx5_wq_cyc *wq)
{
return wq->cur_sz == wq->sz;
@@ -168,6 +170,16 @@ static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2)
return !equal && !smaller;
}
+static inline u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
+{
+ return wq->fbc.sz_m1 + 1;
+}
+
+static inline u8 mlx5_cqwq_get_log_stride_size(struct mlx5_cqwq *wq)
+{
+ return wq->fbc.log_stride;
+}
+
static inline u32 mlx5_cqwq_ctr2ix(struct mlx5_cqwq *wq, u32 ctr)
{
return ctr & wq->fbc.sz_m1;
@@ -224,6 +236,11 @@ static inline struct mlx5_cqe64 *mlx5_cqwq_get_cqe(struct mlx5_cqwq *wq)
return cqe;
}
+static inline u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq)
+{
+ return (u32)wq->fbc.sz_m1 + 1;
+}
+
static inline int mlx5_wq_ll_is_full(struct mlx5_wq_ll *wq)
{
return wq->cur_sz == wq->fbc.sz_m1;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 14dcc786926d..e1a90f5bddd0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -127,6 +127,16 @@ bool mlxsw_core_res_query_enabled(const struct mlxsw_core *mlxsw_core)
}
EXPORT_SYMBOL(mlxsw_core_res_query_enabled);
+bool
+mlxsw_core_fw_rev_minor_subminor_validate(const struct mlxsw_fw_rev *rev,
+ const struct mlxsw_fw_rev *req_rev)
+{
+ return rev->minor > req_rev->minor ||
+ (rev->minor == req_rev->minor &&
+ rev->subminor >= req_rev->subminor);
+}
+EXPORT_SYMBOL(mlxsw_core_fw_rev_minor_subminor_validate);
+
struct mlxsw_rx_listener_item {
struct list_head list;
struct mlxsw_rx_listener rxl;
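The helper is a lexicographic comparison on (minor, subminor); major is deliberately left out because it encodes the chip type. A runnable check of the predicate:

#include <stdbool.h>
#include <stdio.h>

struct fw_rev { unsigned int minor, subminor; };

/* Same predicate as mlxsw_core_fw_rev_minor_subminor_validate():
 * rev >= req_rev under lexicographic (minor, subminor) ordering.
 */
static bool rev_ok(const struct fw_rev *rev, const struct fw_rev *req)
{
	return rev->minor > req->minor ||
	       (rev->minor == req->minor && rev->subminor >= req->subminor);
}

int main(void)
{
	struct fw_rev req = { 2000, 1886 };
	struct fw_rev older = { 2000, 1310 };
	struct fw_rev newer = { 2008, 0 };

	printf("%d %d\n", rev_ok(&older, &req), rev_ok(&newer, &req)); /* 0 1 */
	return 0;
}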
@@ -985,6 +995,7 @@ mlxsw_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
static int
mlxsw_devlink_core_bus_device_reload_down(struct devlink *devlink,
+ bool netns_change,
struct netlink_ext_ack *extack)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
@@ -1005,7 +1016,7 @@ mlxsw_devlink_core_bus_device_reload_up(struct devlink *devlink,
return mlxsw_core_bus_device_register(mlxsw_core->bus_info,
mlxsw_core->bus,
mlxsw_core->bus_priv, true,
- devlink);
+ devlink, extack);
}
static int mlxsw_devlink_flash_update(struct devlink *devlink,
@@ -1098,7 +1109,8 @@ static int
__mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
const struct mlxsw_bus *mlxsw_bus,
void *bus_priv, bool reload,
- struct devlink *devlink)
+ struct devlink *devlink,
+ struct netlink_ext_ack *extack)
{
const char *device_kind = mlxsw_bus_info->device_kind;
struct mlxsw_core *mlxsw_core;
@@ -1172,7 +1184,7 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
}
if (mlxsw_driver->init) {
- err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info);
+ err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info, extack);
if (err)
goto err_driver_init;
}
@@ -1186,7 +1198,7 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (err)
goto err_thermal_init;
- if (mlxsw_driver->params_register && !reload)
+ if (mlxsw_driver->params_register)
devlink_params_publish(devlink);
return 0;
@@ -1223,14 +1235,16 @@ err_devlink_alloc:
int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
const struct mlxsw_bus *mlxsw_bus,
void *bus_priv, bool reload,
- struct devlink *devlink)
+ struct devlink *devlink,
+ struct netlink_ext_ack *extack)
{
bool called_again = false;
int err;
again:
err = __mlxsw_core_bus_device_register(mlxsw_bus_info, mlxsw_bus,
- bus_priv, reload, devlink);
+ bus_priv, reload,
+ devlink, extack);
/* -EAGAIN is returned in case the FW was updated. FW needs
* a reset, so let's try to call __mlxsw_core_bus_device_register()
* again.
@@ -1259,7 +1273,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
return;
}
- if (mlxsw_core->driver->params_unregister && !reload)
+ if (mlxsw_core->driver->params_unregister)
devlink_params_unpublish(devlink);
mlxsw_thermal_fini(mlxsw_core->thermal);
mlxsw_hwmon_fini(mlxsw_core->hwmon);
@@ -2003,6 +2017,35 @@ mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_core_port_devlink_port_get);
+int mlxsw_core_module_max_width(struct mlxsw_core *mlxsw_core, u8 module)
+{
+ enum mlxsw_reg_pmtm_module_type module_type;
+ char pmtm_pl[MLXSW_REG_PMTM_LEN];
+ int err;
+
+ mlxsw_reg_pmtm_pack(pmtm_pl, module);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtm), pmtm_pl);
+ if (err)
+ return err;
+ mlxsw_reg_pmtm_unpack(pmtm_pl, &module_type);
+
+ /* Here we need to get the module width according to the module type. */
+
+ switch (module_type) {
+ case MLXSW_REG_PMTM_MODULE_TYPE_BP_4X: /* fall through */
+ case MLXSW_REG_PMTM_MODULE_TYPE_BP_QSFP:
+ return 4;
+ case MLXSW_REG_PMTM_MODULE_TYPE_BP_2X:
+ return 2;
+ case MLXSW_REG_PMTM_MODULE_TYPE_BP_SFP: /* fall through */
+ case MLXSW_REG_PMTM_MODULE_TYPE_BP_1X:
+ return 1;
+ default:
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL(mlxsw_core_module_max_width);
+
static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
const char *buf, size_t size)
{
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 5d7d2ab6d155..0d18bee6d140 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -11,6 +11,7 @@
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
+#include <linux/net_namespace.h>
#include <net/devlink.h>
#include "trap.h"
@@ -23,6 +24,7 @@ struct mlxsw_core_port;
struct mlxsw_driver;
struct mlxsw_bus;
struct mlxsw_bus_info;
+struct mlxsw_fw_rev;
unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core);
@@ -30,13 +32,18 @@ void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core);
bool mlxsw_core_res_query_enabled(const struct mlxsw_core *mlxsw_core);
+bool
+mlxsw_core_fw_rev_minor_subminor_validate(const struct mlxsw_fw_rev *rev,
+ const struct mlxsw_fw_rev *req_rev);
+
int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver);
void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver);
int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
const struct mlxsw_bus *mlxsw_bus,
void *bus_priv, bool reload,
- struct devlink *devlink);
+ struct devlink *devlink,
+ struct netlink_ext_ack *extack);
void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, bool reload);
struct mlxsw_tx_info {
@@ -193,6 +200,7 @@ enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
struct devlink_port *
mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
u8 local_port);
+int mlxsw_core_module_max_width(struct mlxsw_core *mlxsw_core, u8 module);
int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay);
bool mlxsw_core_schedule_work(struct work_struct *work);
@@ -252,7 +260,8 @@ struct mlxsw_driver {
const char *kind;
size_t priv_size;
int (*init)(struct mlxsw_core *mlxsw_core,
- const struct mlxsw_bus_info *mlxsw_bus_info);
+ const struct mlxsw_bus_info *mlxsw_bus_info,
+ struct netlink_ext_ack *extack);
void (*fini)(struct mlxsw_core *mlxsw_core);
int (*basic_trap_groups_set)(struct mlxsw_core *mlxsw_core);
int (*port_type_set)(struct mlxsw_core *mlxsw_core, u8 local_port,
@@ -350,6 +359,11 @@ u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core,
#define MLXSW_CORE_RES_GET(mlxsw_core, short_res_id) \
mlxsw_core_res_get(mlxsw_core, MLXSW_RES_ID_##short_res_id)
+static inline struct net *mlxsw_core_net(struct mlxsw_core *mlxsw_core)
+{
+ return devlink_net(priv_to_devlink(mlxsw_core));
+}
+
#define MLXSW_BUS_F_TXRX BIT(0)
#define MLXSW_BUS_F_RESET BIT(1)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
index d2c7ce67c300..08215fed193d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
@@ -50,6 +50,7 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module,
char eeprom_tmp[MLXSW_REG_MCIA_EEPROM_SIZE];
char mcia_pl[MLXSW_REG_MCIA_LEN];
u16 i2c_addr;
+ u8 page = 0;
int status;
int err;
@@ -62,11 +63,21 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module,
i2c_addr = MLXSW_REG_MCIA_I2C_ADDR_LOW;
if (offset >= MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH) {
- i2c_addr = MLXSW_REG_MCIA_I2C_ADDR_HIGH;
- offset -= MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH;
+ page = MLXSW_REG_MCIA_PAGE_GET(offset);
+ offset -= MLXSW_REG_MCIA_EEPROM_UP_PAGE_LENGTH * page;
+ /* When reading upper pages 1, 2 and 3 the offset starts at
+ * 128. Please refer to "QSFP+ Memory Map" figure in SFF-8436
+ * specification for graphical depiction.
+ * MCIA register accepts buffer size <= 48. Page of size 128
+ * should be read by chunks of size 48, 48, 32. Align the size
+ * of the last chunk to avoid reading after the end of the
+ * page.
+ */
+ if (offset + size > MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH)
+ size = MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH - offset;
}
- mlxsw_reg_mcia_pack(mcia_pl, module, 0, 0, offset, size, i2c_addr);
+ mlxsw_reg_mcia_pack(mcia_pl, module, 0, page, offset, size, i2c_addr);
err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcia), mcia_pl);
if (err)
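Worked example of the upper-page arithmetic: reading flat offsets 256..383 (page 1) in MCIA-sized chunks yields the 48, 48, 32 split described in the comment, because the last chunk is clamped at the 256-byte page boundary. A runnable walk-through (the constants mirror the reg.h definitions):

#include <stdio.h>

#define EEPROM_PAGE_LENGTH	256
#define EEPROM_UP_PAGE_LENGTH	128
#define MCIA_MAX_CHUNK		48
#define PAGE_GET(off) (((off) - EEPROM_PAGE_LENGTH) / EEPROM_UP_PAGE_LENGTH + 1)

int main(void)
{
	/* Walk upper page 1; prints "page 1" rows of size 48, 48, 32. */
	for (unsigned int off = EEPROM_PAGE_LENGTH; off < 384; ) {
		unsigned int size = MCIA_MAX_CHUNK;
		unsigned int page = PAGE_GET(off);
		unsigned int reg_off = off - EEPROM_UP_PAGE_LENGTH * page;

		if (reg_off + size > EEPROM_PAGE_LENGTH)
			size = EEPROM_PAGE_LENGTH - reg_off;
		printf("page %u reg_off %u size %u\n", page, reg_off, size);
		off += size;
	}
	return 0;
}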
@@ -168,7 +179,7 @@ int mlxsw_env_get_module_info(struct mlxsw_core *mlxsw_core, int module,
switch (module_id) {
case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP:
modinfo->type = ETH_MODULE_SFF_8436;
- modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
break;
case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_PLUS: /* fall-through */
case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP28:
@@ -176,10 +187,10 @@ int mlxsw_env_get_module_info(struct mlxsw_core *mlxsw_core, int module,
module_rev_id >=
MLXSW_REG_MCIA_EEPROM_MODULE_INFO_REV_ID_8636) {
modinfo->type = ETH_MODULE_SFF_8636;
- modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
} else {
modinfo->type = ETH_MODULE_SFF_8436;
- modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
}
break;
case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_SFP:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
index 5b00726c4346..9bf8da5f6daf 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
@@ -41,7 +41,7 @@ struct mlxsw_hwmon {
struct mlxsw_hwmon_attr hwmon_attrs[MLXSW_HWMON_ATTR_COUNT];
unsigned int attrs_count;
u8 sensor_count;
- u8 module_sensor_count;
+ u8 module_sensor_max;
};
static ssize_t mlxsw_hwmon_temp_show(struct device *dev,
@@ -56,7 +56,7 @@ static ssize_t mlxsw_hwmon_temp_show(struct device *dev,
int err;
index = mlxsw_hwmon_get_attr_index(mlwsw_hwmon_attr->type_index,
- mlxsw_hwmon->module_sensor_count);
+ mlxsw_hwmon->module_sensor_max);
mlxsw_reg_mtmp_pack(mtmp_pl, index, false, false);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl);
if (err) {
@@ -79,7 +79,7 @@ static ssize_t mlxsw_hwmon_temp_max_show(struct device *dev,
int err;
index = mlxsw_hwmon_get_attr_index(mlwsw_hwmon_attr->type_index,
- mlxsw_hwmon->module_sensor_count);
+ mlxsw_hwmon->module_sensor_max);
mlxsw_reg_mtmp_pack(mtmp_pl, index, false, false);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl);
if (err) {
@@ -109,7 +109,7 @@ static ssize_t mlxsw_hwmon_temp_rst_store(struct device *dev,
return -EINVAL;
index = mlxsw_hwmon_get_attr_index(mlwsw_hwmon_attr->type_index,
- mlxsw_hwmon->module_sensor_count);
+ mlxsw_hwmon->module_sensor_max);
mlxsw_reg_mtmp_pack(mtmp_pl, index, true, true);
err = mlxsw_reg_write(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl);
if (err) {
@@ -336,7 +336,7 @@ mlxsw_hwmon_gbox_temp_label_show(struct device *dev,
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
int index = mlwsw_hwmon_attr->type_index -
- mlxsw_hwmon->module_sensor_count + 1;
+ mlxsw_hwmon->module_sensor_max + 1;
return sprintf(buf, "gearbox %03u\n", index);
}
@@ -528,51 +528,45 @@ static int mlxsw_hwmon_fans_init(struct mlxsw_hwmon *mlxsw_hwmon)
static int mlxsw_hwmon_module_init(struct mlxsw_hwmon *mlxsw_hwmon)
{
- unsigned int module_count = mlxsw_core_max_ports(mlxsw_hwmon->core);
- char pmlp_pl[MLXSW_REG_PMLP_LEN] = {0};
- int i, index;
- u8 width;
- int err;
+ char mgpir_pl[MLXSW_REG_MGPIR_LEN];
+ u8 module_sensor_max;
+ int i, err;
if (!mlxsw_core_res_query_enabled(mlxsw_hwmon->core))
return 0;
+ mlxsw_reg_mgpir_pack(mgpir_pl);
+ err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mgpir), mgpir_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL,
+ &module_sensor_max);
+
/* Add extra attributes for module temperature. Sensor index is
* assigned to sensor_count value, while all indexed before
* sensor_count are already utilized by the sensors connected through
* mtmp register by mlxsw_hwmon_temp_init().
*/
- index = mlxsw_hwmon->sensor_count;
- for (i = 1; i < module_count; i++) {
- mlxsw_reg_pmlp_pack(pmlp_pl, i);
- err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(pmlp),
- pmlp_pl);
- if (err) {
- dev_err(mlxsw_hwmon->bus_info->dev, "Failed to read module index %d\n",
- i);
- return err;
- }
- width = mlxsw_reg_pmlp_width_get(pmlp_pl);
- if (!width)
- continue;
+ mlxsw_hwmon->module_sensor_max = mlxsw_hwmon->sensor_count +
+ module_sensor_max;
+ for (i = mlxsw_hwmon->sensor_count;
+ i < mlxsw_hwmon->module_sensor_max; i++) {
mlxsw_hwmon_attr_add(mlxsw_hwmon,
- MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE, index,
- index);
+ MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE, i, i);
mlxsw_hwmon_attr_add(mlxsw_hwmon,
MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_FAULT,
- index, index);
+ i, i);
mlxsw_hwmon_attr_add(mlxsw_hwmon,
- MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_CRIT,
- index, index);
+ MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_CRIT, i,
+ i);
mlxsw_hwmon_attr_add(mlxsw_hwmon,
MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_EMERG,
- index, index);
+ i, i);
mlxsw_hwmon_attr_add(mlxsw_hwmon,
MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_LABEL,
- index, index);
- index++;
+ i, i);
}
- mlxsw_hwmon->module_sensor_count = index;
return 0;
}
@@ -590,14 +584,14 @@ static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon *mlxsw_hwmon)
if (err)
return err;
- mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, NULL, NULL);
+ mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, NULL, NULL, NULL);
if (!gbox_num)
return 0;
- index = mlxsw_hwmon->module_sensor_count;
- max_index = mlxsw_hwmon->module_sensor_count + gbox_num;
+ index = mlxsw_hwmon->module_sensor_max;
+ max_index = mlxsw_hwmon->module_sensor_max + gbox_num;
while (index < max_index) {
- sensor_index = index % mlxsw_hwmon->module_sensor_count +
+ sensor_index = index % mlxsw_hwmon->module_sensor_max +
MLXSW_REG_MTMP_GBOX_INDEX_MIN;
mlxsw_reg_mtmp_pack(mtmp_pl, sensor_index, true, true);
err = mlxsw_reg_write(mlxsw_hwmon->core,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index 35a1dc89c28a..c721b171bd8d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -112,6 +112,7 @@ struct mlxsw_thermal {
struct mlxsw_thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS];
enum thermal_device_mode mode;
struct mlxsw_thermal_module *tz_module_arr;
+ u8 tz_module_num;
struct mlxsw_thermal_module *tz_gearbox_arr;
u8 tz_gearbox_num;
unsigned int tz_highest_score;
@@ -775,23 +776,10 @@ static void mlxsw_thermal_module_tz_fini(struct thermal_zone_device *tzdev)
static int
mlxsw_thermal_module_init(struct device *dev, struct mlxsw_core *core,
- struct mlxsw_thermal *thermal, u8 local_port)
+ struct mlxsw_thermal *thermal, u8 module)
{
struct mlxsw_thermal_module *module_tz;
- char pmlp_pl[MLXSW_REG_PMLP_LEN];
- u8 width, module;
- int err;
-
- mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
- err = mlxsw_reg_query(core, MLXSW_REG(pmlp), pmlp_pl);
- if (err)
- return err;
- width = mlxsw_reg_pmlp_width_get(pmlp_pl);
- if (!width)
- return 0;
-
- module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
module_tz = &thermal->tz_module_arr[module];
/* Skip if parent is already set (case of port split). */
if (module_tz->parent)
@@ -819,26 +807,34 @@ static int
mlxsw_thermal_modules_init(struct device *dev, struct mlxsw_core *core,
struct mlxsw_thermal *thermal)
{
- unsigned int module_count = mlxsw_core_max_ports(core);
struct mlxsw_thermal_module *module_tz;
+ char mgpir_pl[MLXSW_REG_MGPIR_LEN];
int i, err;
if (!mlxsw_core_res_query_enabled(core))
return 0;
- thermal->tz_module_arr = kcalloc(module_count,
+ mlxsw_reg_mgpir_pack(mgpir_pl);
+ err = mlxsw_reg_query(core, MLXSW_REG(mgpir), mgpir_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL,
+ &thermal->tz_module_num);
+
+ thermal->tz_module_arr = kcalloc(thermal->tz_module_num,
sizeof(*thermal->tz_module_arr),
GFP_KERNEL);
if (!thermal->tz_module_arr)
return -ENOMEM;
- for (i = 1; i < module_count; i++) {
+ for (i = 0; i < thermal->tz_module_num; i++) {
err = mlxsw_thermal_module_init(dev, core, thermal, i);
if (err)
goto err_unreg_tz_module_arr;
}
- for (i = 0; i < module_count - 1; i++) {
+ for (i = 0; i < thermal->tz_module_num; i++) {
module_tz = &thermal->tz_module_arr[i];
if (!module_tz->parent)
continue;
@@ -850,7 +846,7 @@ mlxsw_thermal_modules_init(struct device *dev, struct mlxsw_core *core,
return 0;
err_unreg_tz_module_arr:
- for (i = module_count - 1; i >= 0; i--)
+ for (i = thermal->tz_module_num - 1; i >= 0; i--)
mlxsw_thermal_module_fini(&thermal->tz_module_arr[i]);
kfree(thermal->tz_module_arr);
return err;
@@ -859,13 +855,12 @@ err_unreg_tz_module_arr:
static void
mlxsw_thermal_modules_fini(struct mlxsw_thermal *thermal)
{
- unsigned int module_count = mlxsw_core_max_ports(thermal->core);
int i;
if (!mlxsw_core_res_query_enabled(thermal->core))
return;
- for (i = module_count - 1; i >= 0; i--)
+ for (i = thermal->tz_module_num - 1; i >= 0; i--)
mlxsw_thermal_module_fini(&thermal->tz_module_arr[i]);
kfree(thermal->tz_module_arr);
}
@@ -913,7 +908,8 @@ mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core,
if (err)
return err;
- mlxsw_reg_mgpir_unpack(mgpir_pl, &thermal->tz_gearbox_num, NULL, NULL);
+ mlxsw_reg_mgpir_unpack(mgpir_pl, &thermal->tz_gearbox_num, NULL, NULL,
+ NULL);
if (!thermal->tz_gearbox_num)
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
index 95f408d0e103..34566eb62c47 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
@@ -640,7 +640,7 @@ static int mlxsw_i2c_probe(struct i2c_client *client,
err = mlxsw_core_bus_device_register(&mlxsw_i2c->bus_info,
&mlxsw_i2c_bus, mlxsw_i2c, false,
- NULL);
+ NULL, NULL);
if (err) {
dev_err(&client->dev, "Fail to register core bus\n");
return err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
index 471b0ca6d69a..2b543911ae00 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -16,6 +16,14 @@
static const char mlxsw_m_driver_name[] = "mlxsw_minimal";
+#define MLXSW_M_FWREV_MINOR 2000
+#define MLXSW_M_FWREV_SUBMINOR 1886
+
+static const struct mlxsw_fw_rev mlxsw_m_fw_rev = {
+ .minor = MLXSW_M_FWREV_MINOR,
+ .subminor = MLXSW_M_FWREV_SUBMINOR,
+};
+
struct mlxsw_m_port;
struct mlxsw_m {
@@ -172,6 +180,7 @@ mlxsw_m_port_create(struct mlxsw_m *mlxsw_m, u8 local_port, u8 module)
}
SET_NETDEV_DEV(dev, mlxsw_m->bus_info->dev);
+ dev_net_set(dev, mlxsw_core_net(mlxsw_m->core));
mlxsw_m_port = netdev_priv(dev);
mlxsw_m_port->dev = dev;
mlxsw_m_port->mlxsw_m = mlxsw_m;
@@ -325,8 +334,27 @@ static void mlxsw_m_ports_remove(struct mlxsw_m *mlxsw_m)
kfree(mlxsw_m->ports);
}
+static int mlxsw_m_fw_rev_validate(struct mlxsw_m *mlxsw_m)
+{
+ const struct mlxsw_fw_rev *rev = &mlxsw_m->bus_info->fw_rev;
+
+ /* Validate driver and FW are compatible.
+ * Do not check major version, since it defines chip type, while
+ * driver is supposed to support any type.
+ */
+ if (mlxsw_core_fw_rev_minor_subminor_validate(rev, &mlxsw_m_fw_rev))
+ return 0;
+
+ dev_err(mlxsw_m->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver (required >= %d.%d.%d)\n",
+ rev->major, rev->minor, rev->subminor, rev->major,
+ mlxsw_m_fw_rev.minor, mlxsw_m_fw_rev.subminor);
+
+ return -EINVAL;
+}
+
static int mlxsw_m_init(struct mlxsw_core *mlxsw_core,
- const struct mlxsw_bus_info *mlxsw_bus_info)
+ const struct mlxsw_bus_info *mlxsw_bus_info,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_m *mlxsw_m = mlxsw_core_driver_priv(mlxsw_core);
int err;
@@ -334,6 +362,10 @@ static int mlxsw_m_init(struct mlxsw_core *mlxsw_core,
mlxsw_m->core = mlxsw_core;
mlxsw_m->bus_info = mlxsw_bus_info;
+ err = mlxsw_m_fw_rev_validate(mlxsw_m);
+ if (err)
+ return err;
+
err = mlxsw_m_base_mac_get(mlxsw_m);
if (err) {
dev_err(mlxsw_m->bus_info->dev, "Failed to get base mac\n");
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 615455a21567..914c33e46fb4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -284,15 +284,18 @@ static dma_addr_t __mlxsw_pci_queue_page_get(struct mlxsw_pci_queue *q,
static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
struct mlxsw_pci_queue *q)
{
+ int tclass;
int i;
int err;
q->producer_counter = 0;
q->consumer_counter = 0;
+ tclass = q->num == MLXSW_PCI_SDQ_EMAD_INDEX ? MLXSW_PCI_SDQ_EMAD_TC :
+ MLXSW_PCI_SDQ_CTL_TC;
/* Set CQ of same number of this SDQ. */
mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num);
- mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, 3);
+ mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, tclass);
mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
@@ -963,6 +966,7 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox);
if (num_sdqs + num_rdqs > num_cqs ||
+ num_sdqs < MLXSW_PCI_SDQS_MIN ||
num_cqs > MLXSW_PCI_CQS_MAX || num_eqs != MLXSW_PCI_EQS_COUNT) {
dev_err(&pdev->dev, "Unsupported number of queues\n");
return -EINVAL;
@@ -1520,7 +1524,15 @@ static struct mlxsw_pci_queue *
mlxsw_pci_sdq_pick(struct mlxsw_pci *mlxsw_pci,
const struct mlxsw_tx_info *tx_info)
{
- u8 sdqn = tx_info->local_port % mlxsw_pci_sdq_count(mlxsw_pci);
+ u8 ctl_sdq_count = mlxsw_pci_sdq_count(mlxsw_pci) - 1;
+ u8 sdqn;
+
+ if (tx_info->is_emad) {
+ sdqn = MLXSW_PCI_SDQ_EMAD_INDEX;
+ } else {
+ BUILD_BUG_ON(MLXSW_PCI_SDQ_EMAD_INDEX != 0);
+ sdqn = 1 + (tx_info->local_port % ctl_sdq_count);
+ }
return mlxsw_pci_sdq_get(mlxsw_pci, sdqn);
}
@@ -1790,7 +1802,7 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
&mlxsw_pci_bus, mlxsw_pci, false,
- NULL);
+ NULL, NULL);
if (err) {
dev_err(&pdev->dev, "cannot register bus device\n");
goto err_bus_device_register;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index e57e42e2d2b2..e0d7d2d9a0c8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -27,7 +27,7 @@
#define MLXSW_PCI_SW_RESET 0xF0010
#define MLXSW_PCI_SW_RESET_RST_BIT BIT(0)
-#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 20000
+#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 900000
#define MLXSW_PCI_SW_RESET_WAIT_MSECS 100
#define MLXSW_PCI_FW_READY 0xA1844
#define MLXSW_PCI_FW_READY_MASK 0xFFFF
@@ -51,6 +51,11 @@
#define MLXSW_PCI_EQ_ASYNC_NUM 0
#define MLXSW_PCI_EQ_COMP_NUM 1
+#define MLXSW_PCI_SDQS_MIN 2 /* EMAD and control traffic */
+#define MLXSW_PCI_SDQ_EMAD_INDEX 0
+#define MLXSW_PCI_SDQ_EMAD_TC 0
+#define MLXSW_PCI_SDQ_CTL_TC 3
+
#define MLXSW_PCI_AQ_PAGES 8
#define MLXSW_PCI_AQ_SIZE (MLXSW_PCI_PAGE_SIZE * MLXSW_PCI_AQ_PAGES)
#define MLXSW_PCI_WQE_SIZE 32 /* 32 bytes per element */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h
index a33eeef0b00c..741fd2989d12 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/port.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/port.h
@@ -24,8 +24,6 @@
#define MLXSW_PORT_DONT_CARE 0xFF
-#define MLXSW_PORT_MODULE_MAX_WIDTH 4
-
enum mlxsw_port_admin_status {
MLXSW_PORT_ADMIN_STATUS_UP = 1,
MLXSW_PORT_ADMIN_STATUS_DOWN = 2,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 5494cf93f34c..bec035ee5349 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -3969,6 +3969,7 @@ MLXSW_ITEM32(reg, pmlp, local_port, 0x00, 16, 8);
* 1 - Lane 0 is used.
* 2 - Lanes 0 and 1 are used.
* 4 - Lanes 0, 1, 2 and 3 are used.
+ * 8 - Lanes 0-7 are used.
* Access: RW
*/
MLXSW_ITEM32(reg, pmlp, width, 0x00, 0, 8);
@@ -3983,14 +3984,14 @@ MLXSW_ITEM32_INDEXED(reg, pmlp, module, 0x04, 0, 8, 0x04, 0x00, false);
* Tx Lane. When rxtx field is cleared, this field is used for Rx as well.
* Access: RW
*/
-MLXSW_ITEM32_INDEXED(reg, pmlp, tx_lane, 0x04, 16, 2, 0x04, 0x00, false);
+MLXSW_ITEM32_INDEXED(reg, pmlp, tx_lane, 0x04, 16, 4, 0x04, 0x00, false);
/* reg_pmlp_rx_lane
* Rx Lane. When rxtx field is cleared, this field is ignored and Rx lane is
* equal to Tx lane.
* Access: RW
*/
-MLXSW_ITEM32_INDEXED(reg, pmlp, rx_lane, 0x04, 24, 2, 0x04, 0x00, false);
+MLXSW_ITEM32_INDEXED(reg, pmlp, rx_lane, 0x04, 24, 4, 0x04, 0x00, false);
static inline void mlxsw_reg_pmlp_pack(char *payload, u8 local_port)
{
@@ -4111,6 +4112,7 @@ MLXSW_ITEM32(reg, ptys, an_status, 0x04, 28, 4);
#define MLXSW_REG_PTYS_EXT_ETH_SPEED_CAUI_4_100GBASE_CR4_KR4 BIT(9)
#define MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_2_100GBASE_CR2_KR2 BIT(10)
#define MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4 BIT(12)
+#define MLXSW_REG_PTYS_EXT_ETH_SPEED_400GAUI_8 BIT(15)
/* reg_ptys_ext_eth_proto_cap
* Extended Ethernet port supported speeds and protocols.
@@ -5373,6 +5375,55 @@ static inline void mlxsw_reg_pplr_pack(char *payload, u8 local_port,
MLXSW_REG_PPLR_LB_TYPE_BIT_PHY_LOCAL : 0);
}
+/* PMTM - Port Module Type Mapping Register
+ * ----------------------------------------
+ * The PMTM allows query or configuration of module types.
+ */
+#define MLXSW_REG_PMTM_ID 0x5067
+#define MLXSW_REG_PMTM_LEN 0x10
+
+MLXSW_REG_DEFINE(pmtm, MLXSW_REG_PMTM_ID, MLXSW_REG_PMTM_LEN);
+
+/* reg_pmtm_module
+ * Module number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmtm, module, 0x00, 16, 8);
+
+enum mlxsw_reg_pmtm_module_type {
+ /* Backplane with 4 lanes */
+ MLXSW_REG_PMTM_MODULE_TYPE_BP_4X,
+ /* QSFP */
+ MLXSW_REG_PMTM_MODULE_TYPE_BP_QSFP,
+ /* SFP */
+ MLXSW_REG_PMTM_MODULE_TYPE_BP_SFP,
+ /* Backplane with single lane */
+ MLXSW_REG_PMTM_MODULE_TYPE_BP_1X = 4,
+ /* Backplane with two lanes */
+ MLXSW_REG_PMTM_MODULE_TYPE_BP_2X = 8,
+ /* Chip2Chip */
+ MLXSW_REG_PMTM_MODULE_TYPE_C2C = 10,
+};
+
+/* reg_pmtm_module_type
+ * Module type.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmtm, module_type, 0x04, 0, 4);
+
+static inline void mlxsw_reg_pmtm_pack(char *payload, u8 module)
+{
+ MLXSW_REG_ZERO(pmtm, payload);
+ mlxsw_reg_pmtm_module_set(payload, module);
+}
+
+static inline void
+mlxsw_reg_pmtm_unpack(char *payload,
+ enum mlxsw_reg_pmtm_module_type *module_type)
+{
+ *module_type = mlxsw_reg_pmtm_module_type_get(payload);
+}
+
/* HTGT - Host Trap Group Table
* ----------------------------
* Configures the properties for forwarding to CPU.
@@ -8411,6 +8462,7 @@ MLXSW_ITEM32(reg, mcia, device_address, 0x04, 0, 16);
MLXSW_ITEM32(reg, mcia, size, 0x08, 0, 16);
#define MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH 256
+#define MLXSW_REG_MCIA_EEPROM_UP_PAGE_LENGTH 128
#define MLXSW_REG_MCIA_EEPROM_SIZE 48
#define MLXSW_REG_MCIA_I2C_ADDR_LOW 0x50
#define MLXSW_REG_MCIA_I2C_ADDR_HIGH 0x51
@@ -8446,6 +8498,14 @@ enum mlxsw_reg_mcia_eeprom_module_info {
*/
MLXSW_ITEM_BUF(reg, mcia, eeprom, 0x10, MLXSW_REG_MCIA_EEPROM_SIZE);
+/* This is used to access the optional upper pages (1-3) in the QSFP+
+ * memory map. Page 1 is available on offset 256 through 383, page 2 -
+ * on offset 384 through 511, page 3 - on offset 512 through 639.
+ */
+#define MLXSW_REG_MCIA_PAGE_GET(off) (((off) - \
+ MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH) / \
+ MLXSW_REG_MCIA_EEPROM_UP_PAGE_LENGTH + 1)
+
static inline void mlxsw_reg_mcia_pack(char *payload, u8 module, u8 lock,
u8 page_number, u16 device_addr,
u8 size, u8 i2c_device_addr)
@@ -8670,7 +8730,7 @@ mlxsw_reg_mpat_eth_rspan_l3_ipv6_pack(char *payload, u8 ttl,
* properties.
*/
#define MLXSW_REG_MPAR_ID 0x901B
-#define MLXSW_REG_MPAR_LEN 0x08
+#define MLXSW_REG_MPAR_LEN 0x0C
MLXSW_REG_DEFINE(mpar, MLXSW_REG_MPAR_ID, MLXSW_REG_MPAR_LEN);
@@ -9531,6 +9591,12 @@ MLXSW_ITEM32(reg, mgpir, devices_per_flash, 0x00, 16, 8);
*/
MLXSW_ITEM32(reg, mgpir, num_of_devices, 0x00, 0, 8);
+/* num_of_modules
+ * Number of modules.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mgpir, num_of_modules, 0x04, 0, 8);
+
static inline void mlxsw_reg_mgpir_pack(char *payload)
{
MLXSW_REG_ZERO(mgpir, payload);
@@ -9539,7 +9605,7 @@ static inline void mlxsw_reg_mgpir_pack(char *payload)
static inline void
mlxsw_reg_mgpir_unpack(char *payload, u8 *num_of_devices,
enum mlxsw_reg_mgpir_device_type *device_type,
- u8 *devices_per_flash)
+ u8 *devices_per_flash, u8 *num_of_modules)
{
if (num_of_devices)
*num_of_devices = mlxsw_reg_mgpir_num_of_devices_get(payload);
@@ -9548,6 +9614,8 @@ mlxsw_reg_mgpir_unpack(char *payload, u8 *num_of_devices,
if (devices_per_flash)
*devices_per_flash =
mlxsw_reg_mgpir_devices_per_flash_get(payload);
+ if (num_of_modules)
+ *num_of_modules = mlxsw_reg_mgpir_num_of_modules_get(payload);
}
/* TNGCR - Tunneling NVE General Configuration Register
@@ -10526,6 +10594,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(pbmc),
MLXSW_REG(pspa),
MLXSW_REG(pplr),
+ MLXSW_REG(pmtm),
MLXSW_REG(htgt),
MLXSW_REG(hpkt),
MLXSW_REG(rgcr),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h
index 33a9fc9ef6a4..6534184cb942 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/resources.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h
@@ -26,7 +26,8 @@ enum mlxsw_res_id {
MLXSW_RES_ID_MAX_LAG_MEMBERS,
MLXSW_RES_ID_LOCAL_PORTS_IN_1X,
MLXSW_RES_ID_LOCAL_PORTS_IN_2X,
- MLXSW_RES_ID_MAX_BUFFER_SIZE,
+ MLXSW_RES_ID_LOCAL_PORTS_IN_4X,
+ MLXSW_RES_ID_GUARANTEED_SHARED_BUFFER,
MLXSW_RES_ID_CELL_SIZE,
MLXSW_RES_ID_MAX_HEADROOM_SIZE,
MLXSW_RES_ID_ACL_MAX_TCAM_REGIONS,
@@ -82,7 +83,8 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_MAX_LAG_MEMBERS] = 0x2521,
[MLXSW_RES_ID_LOCAL_PORTS_IN_1X] = 0x2610,
[MLXSW_RES_ID_LOCAL_PORTS_IN_2X] = 0x2611,
- [MLXSW_RES_ID_MAX_BUFFER_SIZE] = 0x2802, /* Bytes */
+ [MLXSW_RES_ID_LOCAL_PORTS_IN_4X] = 0x2612,
+ [MLXSW_RES_ID_GUARANTEED_SHARED_BUFFER] = 0x2805, /* Bytes */
[MLXSW_RES_ID_CELL_SIZE] = 0x2803, /* Bytes */
[MLXSW_RES_ID_MAX_HEADROOM_SIZE] = 0x2811, /* Bytes */
[MLXSW_RES_ID_ACL_MAX_TCAM_REGIONS] = 0x2901,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index dcf9562bce8a..ea4cc2aa99e0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -22,6 +22,7 @@
#include <linux/inetdevice.h>
#include <linux/netlink.h>
#include <linux/jhash.h>
+#include <linux/log2.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
@@ -48,7 +49,7 @@
#define MLXSW_SP1_FWREV_MAJOR 13
#define MLXSW_SP1_FWREV_MINOR 2000
-#define MLXSW_SP1_FWREV_SUBMINOR 1886
+#define MLXSW_SP1_FWREV_SUBMINOR 2308
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
@@ -63,6 +64,21 @@ static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
"." __stringify(MLXSW_SP1_FWREV_MINOR) \
"." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"
+#define MLXSW_SP2_FWREV_MAJOR 29
+#define MLXSW_SP2_FWREV_MINOR 2000
+#define MLXSW_SP2_FWREV_SUBMINOR 2308
+
+static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
+ .major = MLXSW_SP2_FWREV_MAJOR,
+ .minor = MLXSW_SP2_FWREV_MINOR,
+ .subminor = MLXSW_SP2_FWREV_SUBMINOR,
+};
+
+#define MLXSW_SP2_FW_FILENAME \
+ "mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \
+ "." __stringify(MLXSW_SP2_FWREV_MINOR) \
+ "." __stringify(MLXSW_SP2_FWREV_SUBMINOR) ".mfa2"
+
static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";
@@ -409,9 +425,7 @@ static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
}
if (MLXSW_SP_FWREV_MINOR_TO_BRANCH(rev->minor) ==
MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_rev->minor) &&
- (rev->minor > req_rev->minor ||
- (rev->minor == req_rev->minor &&
- rev->subminor >= req_rev->subminor)))
+ mlxsw_core_fw_rev_minor_subminor_validate(rev, req_rev))
return 0;
dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n",
@@ -735,35 +749,69 @@ mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}
-static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
- u8 local_port, u8 *p_module,
- u8 *p_width, u8 *p_lane)
+static int
+mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ struct mlxsw_sp_port_mapping *port_mapping)
{
char pmlp_pl[MLXSW_REG_PMLP_LEN];
+ bool separate_rxtx;
+ u8 module;
+ u8 width;
int err;
+ int i;
mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
if (err)
return err;
- *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
- *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
- *p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
+ module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
+ width = mlxsw_reg_pmlp_width_get(pmlp_pl);
+ separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl);
+
+ if (width && !is_power_of_2(width)) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n",
+ local_port);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < width; i++) {
+ if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n",
+ local_port);
+ return -EINVAL;
+ }
+ if (separate_rxtx &&
+ mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) !=
+ mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n",
+ local_port);
+ return -EINVAL;
+ }
+ if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n",
+ local_port);
+ return -EINVAL;
+ }
+ }
+
+ port_mapping->module = module;
+ port_mapping->width = width;
+ port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
return 0;
}
-static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port,
- u8 module, u8 width, u8 lane)
+static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port)
{
+ struct mlxsw_sp_port_mapping *port_mapping = &mlxsw_sp_port->mapping;
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char pmlp_pl[MLXSW_REG_PMLP_LEN];
int i;
mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
- mlxsw_reg_pmlp_width_set(pmlp_pl, width);
- for (i = 0; i < width; i++) {
- mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
- mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i); /* Rx & Tx */
+ mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width);
+ for (i = 0; i < port_mapping->width; i++) {
+ mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module);
+ mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */
}
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
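[Editor's note: the new PMLP parsing enforces three invariants before a mapping is accepted. Restated as a standalone predicate; the helper name is hypothetical and the arrays stand in for the per-lane register reads:]

/* Sketch of the PMLP validation rules above (hypothetical helper). */
static bool pmlp_mapping_supported(u8 width, const u8 *module,
				   bool separate_rxtx,
				   const u8 *tx_lane, const u8 *rx_lane)
{
	int i;

	if (width && (width & (width - 1)))
		return false;			/* width must be a power of 2 */
	for (i = 0; i < width; i++) {
		if (module[i] != module[0])
			return false;		/* one module per port */
		if (separate_rxtx && tx_lane[i] != rx_lane[i])
			return false;		/* TX and RX on the same lane */
		if (tx_lane[i] != i)
			return false;		/* lanes sequential from 0 */
	}
	return true;
}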
@@ -2914,9 +2962,22 @@ mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4[] = {
#define MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN \
ARRAY_SIZE(mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4)
+static const enum ethtool_link_mode_bit_indices
+mlxsw_sp2_mask_ethtool_400gaui_8[] = {
+ ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT,
+};
+
+#define MLXSW_SP2_MASK_ETHTOOL_400GAUI_8_LEN \
+ ARRAY_SIZE(mlxsw_sp2_mask_ethtool_400gaui_8)
+
#define MLXSW_SP_PORT_MASK_WIDTH_1X BIT(0)
#define MLXSW_SP_PORT_MASK_WIDTH_2X BIT(1)
#define MLXSW_SP_PORT_MASK_WIDTH_4X BIT(2)
+#define MLXSW_SP_PORT_MASK_WIDTH_8X BIT(3)
static u8 mlxsw_sp_port_mask_width_get(u8 width)
{
@@ -2927,6 +2988,8 @@ static u8 mlxsw_sp_port_mask_width_get(u8 width)
return MLXSW_SP_PORT_MASK_WIDTH_2X;
case 4:
return MLXSW_SP_PORT_MASK_WIDTH_4X;
+ case 8:
+ return MLXSW_SP_PORT_MASK_WIDTH_8X;
default:
WARN_ON_ONCE(1);
return 0;
@@ -2948,7 +3011,8 @@ static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN,
.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
MLXSW_SP_PORT_MASK_WIDTH_2X |
- MLXSW_SP_PORT_MASK_WIDTH_4X,
+ MLXSW_SP_PORT_MASK_WIDTH_4X |
+ MLXSW_SP_PORT_MASK_WIDTH_8X,
.speed = SPEED_100,
},
{
@@ -2957,7 +3021,8 @@ static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN,
.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
MLXSW_SP_PORT_MASK_WIDTH_2X |
- MLXSW_SP_PORT_MASK_WIDTH_4X,
+ MLXSW_SP_PORT_MASK_WIDTH_4X |
+ MLXSW_SP_PORT_MASK_WIDTH_8X,
.speed = SPEED_1000,
},
{
@@ -2966,7 +3031,8 @@ static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN,
.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
MLXSW_SP_PORT_MASK_WIDTH_2X |
- MLXSW_SP_PORT_MASK_WIDTH_4X,
+ MLXSW_SP_PORT_MASK_WIDTH_4X |
+ MLXSW_SP_PORT_MASK_WIDTH_8X,
.speed = SPEED_2500,
},
{
@@ -2975,7 +3041,8 @@ static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN,
.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
MLXSW_SP_PORT_MASK_WIDTH_2X |
- MLXSW_SP_PORT_MASK_WIDTH_4X,
+ MLXSW_SP_PORT_MASK_WIDTH_4X |
+ MLXSW_SP_PORT_MASK_WIDTH_8X,
.speed = SPEED_5000,
},
{
@@ -2984,14 +3051,16 @@ static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN,
.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
MLXSW_SP_PORT_MASK_WIDTH_2X |
- MLXSW_SP_PORT_MASK_WIDTH_4X,
+ MLXSW_SP_PORT_MASK_WIDTH_4X |
+ MLXSW_SP_PORT_MASK_WIDTH_8X,
.speed = SPEED_10000,
},
{
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XLAUI_4_XLPPI_4_40G,
.mask_ethtool = mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN,
- .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X,
+ .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X |
+ MLXSW_SP_PORT_MASK_WIDTH_8X,
.speed = SPEED_40000,
},
{
@@ -3000,7 +3069,8 @@ static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN,
.mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
MLXSW_SP_PORT_MASK_WIDTH_2X |
- MLXSW_SP_PORT_MASK_WIDTH_4X,
+ MLXSW_SP_PORT_MASK_WIDTH_4X |
+ MLXSW_SP_PORT_MASK_WIDTH_8X,
.speed = SPEED_25000,
},
{
@@ -3008,7 +3078,8 @@ static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
.mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN,
.mask_width = MLXSW_SP_PORT_MASK_WIDTH_2X |
- MLXSW_SP_PORT_MASK_WIDTH_4X,
+ MLXSW_SP_PORT_MASK_WIDTH_4X |
+ MLXSW_SP_PORT_MASK_WIDTH_8X,
.speed = SPEED_50000,
},
{
@@ -3022,7 +3093,8 @@ static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_CAUI_4_100GBASE_CR4_KR4,
.mask_ethtool = mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN,
- .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X,
+ .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X |
+ MLXSW_SP_PORT_MASK_WIDTH_8X,
.speed = SPEED_100000,
},
{
@@ -3036,9 +3108,17 @@ static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4,
.mask_ethtool = mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN,
- .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X,
+ .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X |
+ MLXSW_SP_PORT_MASK_WIDTH_8X,
.speed = SPEED_200000,
},
+ {
+ .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_400GAUI_8,
+ .mask_ethtool = mlxsw_sp2_mask_ethtool_400gaui_8,
+ .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_400GAUI_8_LEN,
+ .mask_width = MLXSW_SP_PORT_MASK_WIDTH_8X,
+ .speed = SPEED_400000,
+ },
};
#define MLXSW_SP2_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp2_port_link_mode)
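[Editor's note: each table entry's mask_width gates which port widths may use that speed; the new 8X bit is what makes the 400GAUI-8 entry reachable. An illustrative sketch of how a consumer filters the table by width (the real consumers live elsewhere in this file):]

/* Sketch: filter link modes by the port's current width (illustrative). */
static void sketch_modes_for_width(u8 width)
{
	u8 mask_width = mlxsw_sp_port_mask_width_get(width);
	int i;

	for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
		if (!(mlxsw_sp2_port_link_mode[i].mask_width & mask_width))
			continue;	/* speed unsupported at this width */
		/* ...advertise mlxsw_sp2_port_link_mode[i].mask_ethtool... */
	}
}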
@@ -3435,7 +3515,7 @@ static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
};
static int
-mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
+mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
const struct mlxsw_sp_port_type_speed_ops *ops;
@@ -3451,7 +3531,7 @@ mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
&base_speed);
if (err)
return err;
- upper_speed = base_speed * width;
+ upper_speed = base_speed * mlxsw_sp_port->mapping.width;
eth_proto_admin = ops->to_ptys_upper_speed(mlxsw_sp, upper_speed);
ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
@@ -3612,15 +3692,18 @@ static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
- bool split, u8 module, u8 width, u8 lane)
+ u8 split_base_local_port,
+ struct mlxsw_sp_port_mapping *port_mapping)
{
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ bool split = !!split_base_local_port;
struct mlxsw_sp_port *mlxsw_sp_port;
struct net_device *dev;
int err;
err = mlxsw_core_port_init(mlxsw_sp->core, local_port,
- module + 1, split, lane / width,
+ port_mapping->module + 1, split,
+ port_mapping->lane / port_mapping->width,
mlxsw_sp->base_mac,
sizeof(mlxsw_sp->base_mac));
if (err) {
@@ -3635,15 +3718,15 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_alloc_etherdev;
}
SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
+ dev_net_set(dev, mlxsw_sp_net(mlxsw_sp));
mlxsw_sp_port = netdev_priv(dev);
mlxsw_sp_port->dev = dev;
mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
mlxsw_sp_port->local_port = local_port;
mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
mlxsw_sp_port->split = split;
- mlxsw_sp_port->mapping.module = module;
- mlxsw_sp_port->mapping.width = width;
- mlxsw_sp_port->mapping.lane = lane;
+ mlxsw_sp_port->split_base_local_port = split_base_local_port;
+ mlxsw_sp_port->mapping = *port_mapping;
mlxsw_sp_port->link.autoneg = 1;
INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);
@@ -3668,7 +3751,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
- err = mlxsw_sp_port_module_map(mlxsw_sp_port, module, width, lane);
+ err = mlxsw_sp_port_module_map(mlxsw_sp_port);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
mlxsw_sp_port->local_port);
@@ -3710,7 +3793,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_port_system_port_mapping_set;
}
- err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
+ err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
mlxsw_sp_port->local_port);
@@ -3933,14 +4016,13 @@ static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
if (mlxsw_sp_port_created(mlxsw_sp, i))
mlxsw_sp_port_remove(mlxsw_sp, i);
mlxsw_sp_cpu_port_remove(mlxsw_sp);
- kfree(mlxsw_sp->port_to_module);
kfree(mlxsw_sp->ports);
}
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
- u8 module, width, lane;
+ struct mlxsw_sp_port_mapping *port_mapping;
size_t alloc_size;
int i;
int err;
@@ -3950,66 +4032,98 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
if (!mlxsw_sp->ports)
return -ENOMEM;
- mlxsw_sp->port_to_module = kmalloc_array(max_ports, sizeof(int),
- GFP_KERNEL);
- if (!mlxsw_sp->port_to_module) {
- err = -ENOMEM;
- goto err_port_to_module_alloc;
- }
-
err = mlxsw_sp_cpu_port_create(mlxsw_sp);
if (err)
goto err_cpu_port_create;
for (i = 1; i < max_ports; i++) {
- /* Mark as invalid */
- mlxsw_sp->port_to_module[i] = -1;
-
- err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
- &width, &lane);
- if (err)
- goto err_port_module_info_get;
- if (!width)
+ port_mapping = mlxsw_sp->port_mapping[i];
+ if (!port_mapping)
continue;
- mlxsw_sp->port_to_module[i] = module;
- err = mlxsw_sp_port_create(mlxsw_sp, i, false,
- module, width, lane);
+ err = mlxsw_sp_port_create(mlxsw_sp, i, 0, port_mapping);
if (err)
goto err_port_create;
}
return 0;
err_port_create:
-err_port_module_info_get:
for (i--; i >= 1; i--)
if (mlxsw_sp_port_created(mlxsw_sp, i))
mlxsw_sp_port_remove(mlxsw_sp, i);
mlxsw_sp_cpu_port_remove(mlxsw_sp);
err_cpu_port_create:
- kfree(mlxsw_sp->port_to_module);
-err_port_to_module_alloc:
kfree(mlxsw_sp->ports);
return err;
}
-static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
+static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp)
{
- u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;
+ unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
+ struct mlxsw_sp_port_mapping port_mapping;
+ int i;
+ int err;
+
+ mlxsw_sp->port_mapping = kcalloc(max_ports,
+ sizeof(struct mlxsw_sp_port_mapping *),
+ GFP_KERNEL);
+ if (!mlxsw_sp->port_mapping)
+ return -ENOMEM;
+
+ for (i = 1; i < max_ports; i++) {
+ err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &port_mapping);
+ if (err)
+ goto err_port_module_info_get;
+ if (!port_mapping.width)
+ continue;
+
+ mlxsw_sp->port_mapping[i] = kmemdup(&port_mapping,
+ sizeof(port_mapping),
+ GFP_KERNEL);
+ if (!mlxsw_sp->port_mapping[i])
+ goto err_port_module_info_dup;
+ }
+ return 0;
+
+err_port_module_info_get:
+err_port_module_info_dup:
+ for (i--; i >= 1; i--)
+ kfree(mlxsw_sp->port_mapping[i]);
+ kfree(mlxsw_sp->port_mapping);
+ return err;
+}
+
+static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ int i;
+
+ for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
+ kfree(mlxsw_sp->port_mapping[i]);
+ kfree(mlxsw_sp->port_mapping);
+}
+
+static u8 mlxsw_sp_cluster_base_port_get(u8 local_port, unsigned int max_width)
+{
+ u8 offset = (local_port - 1) % max_width;
return local_port - offset;
}
-static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
- u8 module, unsigned int count, u8 offset)
+static int
+mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
+ struct mlxsw_sp_port_mapping *port_mapping,
+ unsigned int count, u8 offset)
{
- u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
+ struct mlxsw_sp_port_mapping split_port_mapping;
int err, i;
+ split_port_mapping = *port_mapping;
+ split_port_mapping.width /= count;
for (i = 0; i < count; i++) {
err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * offset,
- true, module, width, i * width);
+ base_port, &split_port_mapping);
if (err)
goto err_port_create;
+ split_port_mapping.lane += split_port_mapping.width;
}
return 0;
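[Editor's note: walking the loop above through a concrete case: splitting a width-8 port by count=4 gives each subport width 2, with the first lane advancing by the subport width each iteration. A standalone sketch of the arithmetic, with illustrative numbers:]

#include <stdio.h>

int main(void)
{
	unsigned int max_width = 8, count = 4;
	unsigned int width = max_width / count;	/* width of each split port */
	unsigned int lane = 0, i;

	for (i = 0; i < count; i++) {
		printf("subport %u: width %u, first lane %u\n", i, width, lane);
		lane += width;	/* mirrors split_port_mapping.lane += width */
	}
	return 0;	/* first lanes: 0, 2, 4, 6 */
}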
@@ -4022,45 +4136,55 @@ err_port_create:
}
static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
- u8 base_port, unsigned int count)
+ u8 base_port,
+ unsigned int count, u8 offset)
{
- u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
+ struct mlxsw_sp_port_mapping *port_mapping;
int i;
- /* Split by four means we need to re-create two ports, otherwise
- * only one.
- */
- count = count / 2;
-
- for (i = 0; i < count; i++) {
- local_port = base_port + i * 2;
- if (mlxsw_sp->port_to_module[local_port] < 0)
+ /* Go over original unsplit ports in the gap and recreate them. */
+ for (i = 0; i < count * offset; i++) {
+ port_mapping = mlxsw_sp->port_mapping[base_port + i];
+ if (!port_mapping)
continue;
- module = mlxsw_sp->port_to_module[local_port];
-
- mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
- width, 0);
+ mlxsw_sp_port_create(mlxsw_sp, base_port + i, 0, port_mapping);
}
}
+static int mlxsw_sp_local_ports_offset(struct mlxsw_core *mlxsw_core,
+ unsigned int count,
+ unsigned int max_width)
+{
+ enum mlxsw_res_id local_ports_in_x_res_id;
+ int split_width = max_width / count;
+
+ if (split_width == 1)
+ local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_1X;
+ else if (split_width == 2)
+ local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_2X;
+ else if (split_width == 4)
+ local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_4X;
+ else
+ return -EINVAL;
+
+ if (!mlxsw_core_res_valid(mlxsw_core, local_ports_in_x_res_id))
+ return -EINVAL;
+ return mlxsw_core_res_get(mlxsw_core, local_ports_in_x_res_id);
+}
+
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
unsigned int count,
struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
- u8 local_ports_in_1x, local_ports_in_2x, offset;
+ struct mlxsw_sp_port_mapping port_mapping;
struct mlxsw_sp_port *mlxsw_sp_port;
- u8 module, cur_width, base_port;
+ int max_width;
+ u8 base_port;
+ int offset;
int i;
int err;
- if (!MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_1X) ||
- !MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_2X))
- return -EIO;
-
- local_ports_in_1x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_1X);
- local_ports_in_2x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_2X);
-
mlxsw_sp_port = mlxsw_sp->ports[local_port];
if (!mlxsw_sp_port) {
dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
@@ -4069,47 +4193,70 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
return -EINVAL;
}
- module = mlxsw_sp_port->mapping.module;
- cur_width = mlxsw_sp_port->mapping.width;
+ /* Split ports cannot be split. */
+ if (mlxsw_sp_port->split) {
+ netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
+ NL_SET_ERR_MSG_MOD(extack, "Port cannot be split further");
+ return -EINVAL;
+ }
+
+ max_width = mlxsw_core_module_max_width(mlxsw_core,
+ mlxsw_sp_port->mapping.module);
+ if (max_width < 0) {
+ netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module");
+ return max_width;
+ }
- if (count != 2 && count != 4) {
- netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
- NL_SET_ERR_MSG_MOD(extack, "Port can only be split into 2 or 4 ports");
+ /* Split port with non-max and 1 module width cannot be split. */
+ if (mlxsw_sp_port->mapping.width != max_width || max_width == 1) {
+ netdev_err(mlxsw_sp_port->dev, "Port cannot be split\n");
+ NL_SET_ERR_MSG_MOD(extack, "Port cannot be split");
return -EINVAL;
}
- if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
- netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
- NL_SET_ERR_MSG_MOD(extack, "Port cannot be split further");
+ if (count == 1 || !is_power_of_2(count) || count > max_width) {
+ netdev_err(mlxsw_sp_port->dev, "Invalid split count\n");
+ NL_SET_ERR_MSG_MOD(extack, "Invalid split count");
return -EINVAL;
}
- /* Make sure we have enough slave (even) ports for the split. */
- if (count == 2) {
- offset = local_ports_in_2x;
- base_port = local_port;
- if (mlxsw_sp->ports[base_port + local_ports_in_2x]) {
- netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
- NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
- return -EINVAL;
- }
- } else {
- offset = local_ports_in_1x;
- base_port = mlxsw_sp_cluster_base_port_get(local_port);
- if (mlxsw_sp->ports[base_port + 1] ||
- mlxsw_sp->ports[base_port + 3]) {
+ offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width);
+ if (offset < 0) {
+ netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset");
+ return -EINVAL;
+ }
+
+ /* Only in case max split is being done, the local port and
+ * base port may differ.
+ */
+ base_port = count == max_width ?
+ mlxsw_sp_cluster_base_port_get(local_port, max_width) :
+ local_port;
+
+ for (i = 0; i < count * offset; i++) {
+ /* Expect base port to exist and also the one in the middle in
+ * case of maximal split count.
+ */
+ if (i == 0 || (count == max_width && i == count / 2))
+ continue;
+
+ if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) {
netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
return -EINVAL;
}
}
+ port_mapping = mlxsw_sp_port->mapping;
+
for (i = 0; i < count; i++)
if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
- err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count,
- offset);
+ err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, &port_mapping,
+ count, offset);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
goto err_port_split_create;
@@ -4118,7 +4265,7 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
return 0;
err_port_split_create:
- mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
+ mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset);
return err;
}
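[Editor's note: the gap check above expects the base port to exist, and for a maximal split also the port in the middle of the cluster; every other slot in the count * offset range must be free. A worked sketch for count == max_width == 4 with offset 1 (values illustrative):]

#include <stdio.h>

int main(void)
{
	unsigned int count = 4, max_width = 4, offset = 1, base = 1, i;

	for (i = 0; i < count * offset; i++) {
		if (i == 0 || (count == max_width && i == count / 2))
			printf("local port %u: expected to exist\n", base + i);
		else
			printf("local port %u: must be free\n", base + i);
	}
	return 0;	/* ports 1 and 3 exist; 2 and 4 must be free */
}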
@@ -4126,19 +4273,13 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
- u8 local_ports_in_1x, local_ports_in_2x, offset;
struct mlxsw_sp_port *mlxsw_sp_port;
- u8 cur_width, base_port;
unsigned int count;
+ int max_width;
+ u8 base_port;
+ int offset;
int i;
- if (!MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_1X) ||
- !MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_2X))
- return -EIO;
-
- local_ports_in_1x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_1X);
- local_ports_in_2x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_2X);
-
mlxsw_sp_port = mlxsw_sp->ports[local_port];
if (!mlxsw_sp_port) {
dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
@@ -4153,25 +4294,30 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
return -EINVAL;
}
- cur_width = mlxsw_sp_port->mapping.width;
- count = cur_width == 1 ? 4 : 2;
+ max_width = mlxsw_core_module_max_width(mlxsw_core,
+ mlxsw_sp_port->mapping.module);
+ if (max_width < 0) {
+ netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module");
+ return max_width;
+ }
- if (count == 2)
- offset = local_ports_in_2x;
- else
- offset = local_ports_in_1x;
+ count = max_width / mlxsw_sp_port->mapping.width;
- base_port = mlxsw_sp_cluster_base_port_get(local_port);
+ offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width);
+ if (WARN_ON(offset < 0)) {
+ netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset");
+ return -EINVAL;
+ }
- /* Determine which ports to remove. */
- if (count == 2 && local_port >= base_port + 2)
- base_port = base_port + 2;
+ base_port = mlxsw_sp_port->split_base_local_port;
for (i = 0; i < count; i++)
if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
- mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
+ mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset);
return 0;
}
@@ -4738,7 +4884,8 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
unsigned long event, void *ptr);
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
- const struct mlxsw_bus_info *mlxsw_bus_info)
+ const struct mlxsw_bus_info *mlxsw_bus_info,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
int err;
@@ -4831,7 +4978,7 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_acl_init;
}
- err = mlxsw_sp_router_init(mlxsw_sp);
+ err = mlxsw_sp_router_init(mlxsw_sp, extack);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
goto err_router_init;
@@ -4864,7 +5011,8 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
* respin.
*/
mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
- err = register_netdevice_notifier(&mlxsw_sp->netdevice_nb);
+ err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
+ &mlxsw_sp->netdevice_nb);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
goto err_netdev_notifier;
@@ -4876,6 +5024,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_dpipe_init;
}
+ err = mlxsw_sp_port_module_info_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n");
+ goto err_port_module_info_init;
+ }
+
err = mlxsw_sp_ports_create(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
@@ -4885,9 +5039,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
return 0;
err_ports_create:
+ mlxsw_sp_port_module_info_fini(mlxsw_sp);
+err_port_module_info_init:
mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
- unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
+ unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
+ &mlxsw_sp->netdevice_nb);
err_netdev_notifier:
if (mlxsw_sp->clock)
mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
@@ -4924,7 +5081,8 @@ err_fids_init:
}
static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
- const struct mlxsw_bus_info *mlxsw_bus_info)
+ const struct mlxsw_bus_info *mlxsw_bus_info,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
@@ -4944,14 +5102,17 @@ static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->listeners = mlxsw_sp1_listener;
mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
- return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
+ return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
- const struct mlxsw_bus_info *mlxsw_bus_info)
+ const struct mlxsw_bus_info *mlxsw_bus_info,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ mlxsw_sp->req_rev = &mlxsw_sp2_fw_rev;
+ mlxsw_sp->fw_filename = MLXSW_SP2_FW_FILENAME;
mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
@@ -4964,7 +5125,7 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
- return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
+ return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
@@ -4972,8 +5133,10 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
mlxsw_sp_ports_remove(mlxsw_sp);
+ mlxsw_sp_port_module_info_fini(mlxsw_sp);
mlxsw_sp_dpipe_fini(mlxsw_sp);
- unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
+ unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
+ &mlxsw_sp->netdevice_nb);
if (mlxsw_sp->clock) {
mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
@@ -5165,14 +5328,61 @@ static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core)
&kvd_size_params);
}
+static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ struct devlink_resource_size_params span_size_params;
+ u32 max_span;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN))
+ return -EIO;
+
+ max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN);
+ devlink_resource_size_params_init(&span_size_params, max_span, max_span,
+ 1, DEVLINK_RESOURCE_UNIT_ENTRY);
+
+ return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN,
+ max_span, MLXSW_SP_RESOURCE_SPAN,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &span_size_params);
+}
+
static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
{
- return mlxsw_sp1_resources_kvd_register(mlxsw_core);
+ int err;
+
+ err = mlxsw_sp1_resources_kvd_register(mlxsw_core);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_resources_span_register(mlxsw_core);
+ if (err)
+ goto err_resources_span_register;
+
+ return 0;
+
+err_resources_span_register:
+ devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
+ return err;
}
static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
{
- return mlxsw_sp2_resources_kvd_register(mlxsw_core);
+ int err;
+
+ err = mlxsw_sp2_resources_kvd_register(mlxsw_core);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_resources_span_register(mlxsw_core);
+ if (err)
+ goto err_resources_span_register;
+
+ return 0;
+
+err_resources_span_register:
+ devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
+ return err;
}
static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
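[Editor's note: both register paths now unwind with devlink_resources_unregister(devlink, NULL) when the SPAN resource fails; passing a NULL resource drops everything registered so far, which is what makes the shared error label safe. A condensed sketch of the pattern, with placeholder function names:]

/* Sketch: register-then-unwind (register_resource_a/b are placeholders). */
err = register_resource_a(mlxsw_core);		/* e.g. the KVD resources */
if (err)
	return err;
err = register_resource_b(mlxsw_core);		/* e.g. the SPAN agents */
if (err) {
	/* NULL resource: unregister everything registered so far */
	devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
	return err;
}
return 0;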
@@ -6565,3 +6775,4 @@ MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
+MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index b2a0028b1694..347bec9d1ecf 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -14,6 +14,7 @@
#include <linux/dcbnl.h>
#include <linux/in6.h>
#include <linux/notifier.h>
+#include <linux/net_namespace.h>
#include <net/psample.h>
#include <net/pkt_cls.h>
#include <net/red.h>
@@ -31,8 +32,6 @@
#define MLXSW_SP_MID_MAX 7000
-#define MLXSW_SP_PORTS_PER_CLUSTER_MAX 4
-
#define MLXSW_SP_PORT_BASE_SPEED_25G 25000 /* Mb/s */
#define MLXSW_SP_PORT_BASE_SPEED_50G 50000 /* Mb/s */
@@ -47,6 +46,8 @@
#define MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_CHUNKS "chunks"
#define MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_LARGE_CHUNKS "large_chunks"
+#define MLXSW_SP_RESOURCE_NAME_SPAN "span_agents"
+
enum mlxsw_sp_resource_id {
MLXSW_SP_RESOURCE_KVD = 1,
MLXSW_SP_RESOURCE_KVD_LINEAR,
@@ -55,6 +56,7 @@ enum mlxsw_sp_resource_id {
MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
+ MLXSW_SP_RESOURCE_SPAN,
};
struct mlxsw_sp_port;
@@ -139,6 +141,12 @@ struct mlxsw_sp_port_type_speed_ops;
struct mlxsw_sp_ptp_state;
struct mlxsw_sp_ptp_ops;
+struct mlxsw_sp_port_mapping {
+ u8 module;
+ u8 width;
+ u8 lane;
+};
+
struct mlxsw_sp {
struct mlxsw_sp_port **ports;
struct mlxsw_core *core;
@@ -146,7 +154,7 @@ struct mlxsw_sp {
unsigned char base_mac[ETH_ALEN];
const unsigned char *mac_mask;
struct mlxsw_sp_upper *lags;
- int *port_to_module;
+ struct mlxsw_sp_port_mapping **port_mapping;
struct mlxsw_sp_sb *sb;
struct mlxsw_sp_bridge *bridge;
struct mlxsw_sp_router *router;
@@ -255,11 +263,11 @@ struct mlxsw_sp_port {
struct ieee_pfc *pfc;
enum mlxsw_reg_qpts_trust_state trust_state;
} dcb;
- struct {
- u8 module;
- u8 width;
- u8 lane;
- } mapping;
+ struct mlxsw_sp_port_mapping mapping; /* mapping is constant during the
+ * mlxsw_sp_port lifetime, however
+ * the same localport can have
+ * different mapping.
+ */
/* TC handles */
struct list_head mall_tc_list;
struct {
@@ -283,6 +291,7 @@ struct mlxsw_sp_port {
u16 egr_types;
struct mlxsw_sp_ptp_port_stats stats;
} ptp;
+ u8 split_base_local_port;
};
struct mlxsw_sp_port_type_speed_ops {
@@ -524,7 +533,8 @@ union mlxsw_sp_l3addr {
struct in6_addr addr6;
};
-int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
+ struct netlink_ext_ack *extack);
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
unsigned long event, void *ptr);
@@ -982,4 +992,9 @@ int mlxsw_sp_trap_action_set(struct mlxsw_core *mlxsw_core,
int mlxsw_sp_trap_group_init(struct mlxsw_core *mlxsw_core,
const struct devlink_trap_group *group);
+static inline struct net *mlxsw_sp_net(struct mlxsw_sp *mlxsw_sp)
+{
+ return mlxsw_core_net(mlxsw_sp->core);
+}
+
#endif
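[Editor's note: the mlxsw_sp_net() helper above is what lets this series drop the hard-coded &init_net references: notifier registration, FIB listeners and device lookups all key on the namespace the core device is bound to. A condensed before/after of the lookup pattern, taken from the spectrum_nve.c hunk below:]

/* Before: always the initial namespace */
nve_dev = dev_get_by_index(&init_net, nve_ifindex);

/* After: the namespace of this mlxsw core instance */
nve_dev = dev_get_by_index(mlxsw_sp_net(mlxsw_sp), nve_ifindex);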
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index b9eeae37a4dc..968f0902e4fe 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -35,6 +35,7 @@ struct mlxsw_sp_sb_cm {
};
#define MLXSW_SP_SB_INFI -1U
+#define MLXSW_SP_SB_REST -2U
struct mlxsw_sp_sb_pm {
u32 min_buff;
@@ -421,19 +422,16 @@ static void mlxsw_sp_sb_ports_fini(struct mlxsw_sp *mlxsw_sp)
.freeze_size = _freeze_size, \
}
-#define MLXSW_SP1_SB_PR_INGRESS_SIZE 12440000
-#define MLXSW_SP1_SB_PR_EGRESS_SIZE 13232000
#define MLXSW_SP1_SB_PR_CPU_SIZE (256 * 1000)
/* Order according to mlxsw_sp1_sb_pool_dess */
static const struct mlxsw_sp_sb_pr mlxsw_sp1_sb_prs[] = {
- MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
- MLXSW_SP1_SB_PR_INGRESS_SIZE),
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_REST),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
- MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
- MLXSW_SP1_SB_PR_EGRESS_SIZE, true, false),
+ MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_REST,
+ true, false),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
@@ -445,19 +443,16 @@ static const struct mlxsw_sp_sb_pr mlxsw_sp1_sb_prs[] = {
MLXSW_SP1_SB_PR_CPU_SIZE, true, false),
};
-#define MLXSW_SP2_SB_PR_INGRESS_SIZE 35297568
-#define MLXSW_SP2_SB_PR_EGRESS_SIZE 35297568
#define MLXSW_SP2_SB_PR_CPU_SIZE (256 * 1000)
/* Order according to mlxsw_sp2_sb_pool_dess */
static const struct mlxsw_sp_sb_pr mlxsw_sp2_sb_prs[] = {
- MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
- MLXSW_SP2_SB_PR_INGRESS_SIZE),
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_REST),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
- MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
- MLXSW_SP2_SB_PR_EGRESS_SIZE, true, false),
+ MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_REST,
+ true, false),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
@@ -471,11 +466,33 @@ static const struct mlxsw_sp_sb_pr mlxsw_sp2_sb_prs[] = {
static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_sb_pr *prs,
+ const struct mlxsw_sp_sb_pool_des *pool_dess,
size_t prs_len)
{
+ /* Round down, unlike mlxsw_sp_bytes_cells(). */
+ u32 sb_cells = div_u64(mlxsw_sp->sb->sb_size, mlxsw_sp->sb->cell_size);
+ u32 rest_cells[2] = {sb_cells, sb_cells};
int i;
int err;
+ /* Calculate how much space to give to the "REST" pools in either
+ * direction.
+ */
+ for (i = 0; i < prs_len; i++) {
+ enum mlxsw_reg_sbxx_dir dir = pool_dess[i].dir;
+ u32 size = prs[i].size;
+ u32 size_cells;
+
+ if (size == MLXSW_SP_SB_INFI || size == MLXSW_SP_SB_REST)
+ continue;
+
+ size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, size);
+ if (WARN_ON_ONCE(size_cells > rest_cells[dir]))
+ continue;
+
+ rest_cells[dir] -= size_cells;
+ }
+
for (i = 0; i < prs_len; i++) {
u32 size = prs[i].size;
u32 size_cells;
@@ -483,6 +500,10 @@ static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
if (size == MLXSW_SP_SB_INFI) {
err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
0, true);
+ } else if (size == MLXSW_SP_SB_REST) {
+ size_cells = rest_cells[pool_dess[i].dir];
+ err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
+ size_cells, false);
} else {
size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, size);
err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
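[Editor's note: the REST sizing works in two passes per direction: first the total, rounded down to whole cells, has every statically sized pool subtracted; then the remainder goes to each pool marked MLXSW_SP_SB_REST. A standalone arithmetic sketch with illustrative numbers:]

#include <stdio.h>

int main(void)
{
	unsigned int sb_cells = 1000;		/* sb_size / cell_size, rounded down */
	unsigned int fixed[] = { 100, 300 };	/* statically sized ingress pools */
	unsigned int rest = sb_cells;
	unsigned int i;

	for (i = 0; i < 2; i++)
		rest -= fixed[i];		/* first pass: subtract fixed pools */
	printf("ingress REST pool: %u cells\n", rest);	/* 600 */
	return 0;
}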
@@ -904,7 +925,7 @@ int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, CELL_SIZE))
return -EIO;
- if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_BUFFER_SIZE))
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, GUARANTEED_SHARED_BUFFER))
return -EIO;
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_HEADROOM_SIZE))
@@ -915,7 +936,7 @@ int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
return -ENOMEM;
mlxsw_sp->sb->cell_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, CELL_SIZE);
mlxsw_sp->sb->sb_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
- MAX_BUFFER_SIZE);
+ GUARANTEED_SHARED_BUFFER);
max_headroom_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
MAX_HEADROOM_SIZE);
/* Round down, because this limit must not be overstepped. */
@@ -926,6 +947,7 @@ int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
if (err)
goto err_sb_ports_init;
err = mlxsw_sp_sb_prs_init(mlxsw_sp, mlxsw_sp->sb_vals->prs,
+ mlxsw_sp->sb_vals->pool_dess,
mlxsw_sp->sb_vals->pool_count);
if (err)
goto err_sb_prs_init;
@@ -1013,7 +1035,8 @@ int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
mode = (enum mlxsw_reg_sbpr_mode) threshold_type;
pr = &mlxsw_sp->sb_vals->prs[pool_index];
- if (size > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE)) {
+ if (size > MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ GUARANTEED_SHARED_BUFFER)) {
NL_SET_ERR_MSG_MOD(extack, "Exceeded shared buffer size");
return -EINVAL;
}
@@ -1021,12 +1044,12 @@ int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
if (pr->freeze_mode && pr->mode != mode) {
NL_SET_ERR_MSG_MOD(extack, "Changing this pool's threshold type is forbidden");
return -EINVAL;
- };
+ }
if (pr->freeze_size && pr->size != size) {
NL_SET_ERR_MSG_MOD(extack, "Changing this pool's size is forbidden");
return -EINVAL;
- };
+ }
return mlxsw_sp_sb_pr_write(mlxsw_sp, pool_index, mode,
pool_size, false);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
index 17f334b46c40..2153bcc4b585 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
@@ -870,7 +870,7 @@ void mlxsw_sp_nve_fid_disable(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_fid_vni(fid, &vni)))
goto out;
- nve_dev = dev_get_by_index(&init_net, nve_ifindex);
+ nve_dev = dev_get_by_index(mlxsw_sp_net(mlxsw_sp), nve_ifindex);
if (!nve_dev)
goto out;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
index bdf53cf350f6..68cc6737d45c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -305,7 +305,8 @@ mlxsw_sp_qdisc_red_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
p->max);
return -EINVAL;
}
- if (p->max > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE)) {
+ if (p->max > MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ GUARANTEED_SHARED_BUFFER)) {
dev_err(mlxsw_sp->bus_info->dev,
"spectrum: RED: max value %u is too big\n", p->max);
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index a330b369e899..0e99b64450ca 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -16,6 +16,7 @@
#include <linux/if_macvlan.h>
#include <linux/refcount.h>
#include <linux/jhash.h>
+#include <linux/net_namespace.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
@@ -2551,14 +2552,14 @@ static int mlxsw_sp_router_schedule_work(struct net *net,
struct mlxsw_sp_netevent_work *net_work;
struct mlxsw_sp_router *router;
- if (!net_eq(net, &init_net))
+ router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
+ if (!net_eq(net, mlxsw_sp_net(router->mlxsw_sp)))
return NOTIFY_DONE;
net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
if (!net_work)
return NOTIFY_BAD;
- router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
INIT_WORK(&net_work->work, cb);
net_work->mlxsw_sp = router->mlxsw_sp;
mlxsw_core_schedule_work(&net_work->work);
@@ -6019,12 +6020,6 @@ static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
fib_info_put(fib_work->fen_info.fi);
break;
- case FIB_EVENT_RULE_ADD:
- /* if we get here, a rule was added that we do not support.
- * just do the fib_abort
- */
- mlxsw_sp_router_fib_abort(mlxsw_sp);
- break;
case FIB_EVENT_NH_ADD: /* fall through */
case FIB_EVENT_NH_DEL:
mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
@@ -6065,12 +6060,6 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
fib_work->fib6_work.nrt6);
mlxsw_sp_router_fib6_work_fini(&fib_work->fib6_work);
break;
- case FIB_EVENT_RULE_ADD:
- /* if we get here, a rule was added that we do not support.
- * just do the fib_abort
- */
- mlxsw_sp_router_fib_abort(mlxsw_sp);
- break;
}
rtnl_unlock();
kfree(fib_work);
@@ -6112,12 +6101,6 @@ static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
&fib_work->ven_info);
dev_put(fib_work->ven_info.dev);
break;
- case FIB_EVENT_RULE_ADD:
- /* if we get here, a rule was added that we do not support.
- * just do the fib_abort
- */
- mlxsw_sp_router_fib_abort(mlxsw_sp);
- break;
}
rtnl_unlock();
kfree(fib_work);
@@ -6213,7 +6196,7 @@ static int mlxsw_sp_router_fib_rule_event(unsigned long event,
rule = fr_info->rule;
/* Rule only affects locally generated traffic */
- if (rule->iifindex == info->net->loopback_dev->ifindex)
+ if (rule->iifindex == mlxsw_sp_net(mlxsw_sp)->loopback_dev->ifindex)
return 0;
switch (info->family) {
@@ -6250,8 +6233,7 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
struct mlxsw_sp_router *router;
int err;
- if (!net_eq(info->net, &init_net) ||
- (info->family != AF_INET && info->family != AF_INET6 &&
+ if ((info->family != AF_INET && info->family != AF_INET6 &&
info->family != RTNL_FAMILY_IPMR &&
info->family != RTNL_FAMILY_IP6MR))
return NOTIFY_DONE;
@@ -6263,9 +6245,7 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
case FIB_EVENT_RULE_DEL:
err = mlxsw_sp_router_fib_rule_event(event, info,
router->mlxsw_sp);
- if (!err || info->extack)
- return notifier_from_errno(err);
- break;
+ return notifier_from_errno(err);
case FIB_EVENT_ENTRY_ADD:
case FIB_EVENT_ENTRY_REPLACE: /* fall through */
case FIB_EVENT_ENTRY_APPEND: /* fall through */
@@ -7974,9 +7954,10 @@ static void mlxsw_sp_mp_hash_field_set(char *recr2_pl, int field)
mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, field, true);
}
-static void mlxsw_sp_mp4_hash_init(char *recr2_pl)
+static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp, char *recr2_pl)
{
- bool only_l3 = !init_net.ipv4.sysctl_fib_multipath_hash_policy;
+ struct net *net = mlxsw_sp_net(mlxsw_sp);
+ bool only_l3 = !net->ipv4.sysctl_fib_multipath_hash_policy;
mlxsw_sp_mp_hash_header_set(recr2_pl,
MLXSW_REG_RECR2_IPV4_EN_NOT_TCP_NOT_UDP);
@@ -7991,9 +7972,9 @@ static void mlxsw_sp_mp4_hash_init(char *recr2_pl)
mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_DPORT);
}
-static void mlxsw_sp_mp6_hash_init(char *recr2_pl)
+static void mlxsw_sp_mp6_hash_init(struct mlxsw_sp *mlxsw_sp, char *recr2_pl)
{
- bool only_l3 = !ip6_multipath_hash_policy(&init_net);
+ bool only_l3 = !ip6_multipath_hash_policy(mlxsw_sp_net(mlxsw_sp));
mlxsw_sp_mp_hash_header_set(recr2_pl,
MLXSW_REG_RECR2_IPV6_EN_NOT_TCP_NOT_UDP);
@@ -8021,8 +8002,8 @@ static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0);
mlxsw_reg_recr2_pack(recr2_pl, seed);
- mlxsw_sp_mp4_hash_init(recr2_pl);
- mlxsw_sp_mp6_hash_init(recr2_pl);
+ mlxsw_sp_mp4_hash_init(mlxsw_sp, recr2_pl);
+ mlxsw_sp_mp6_hash_init(mlxsw_sp, recr2_pl);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
}
@@ -8053,7 +8034,8 @@ static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
- bool usp = init_net.ipv4.sysctl_ip_fwd_update_priority;
+ struct net *net = mlxsw_sp_net(mlxsw_sp);
+ bool usp = net->ipv4.sysctl_ip_fwd_update_priority;
char rgcr_pl[MLXSW_REG_RGCR_LEN];
u64 max_rifs;
int err;
@@ -8079,7 +8061,8 @@ static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}
-int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
+int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_router *router;
int err;
@@ -8155,8 +8138,9 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
goto err_dscp_init;
mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
- err = register_fib_notifier(&mlxsw_sp->router->fib_nb,
- mlxsw_sp_router_fib_dump_flush);
+ err = register_fib_notifier(mlxsw_sp_net(mlxsw_sp),
+ &mlxsw_sp->router->fib_nb,
+ mlxsw_sp_router_fib_dump_flush, extack);
if (err)
goto err_register_fib_notifier;
@@ -8195,7 +8179,8 @@ err_register_inetaddr_notifier:
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
- unregister_fib_notifier(&mlxsw_sp->router->fib_nb);
+ unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
+ &mlxsw_sp->router->fib_nb);
unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
mlxsw_sp_neigh_fini(mlxsw_sp);
mlxsw_sp_vrs_fini(mlxsw_sp);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index 560a60e522f9..200d324e6d99 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -14,8 +14,23 @@
#include "spectrum_span.h"
#include "spectrum_switchdev.h"
+static u64 mlxsw_sp_span_occ_get(void *priv)
+{
+ const struct mlxsw_sp *mlxsw_sp = priv;
+ u64 occ = 0;
+ int i;
+
+ for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
+ if (mlxsw_sp->span.entries[i].ref_count)
+ occ++;
+ }
+
+ return occ;
+}
+
int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
int i;
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
@@ -36,13 +51,19 @@ int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
curr->id = i;
}
+ devlink_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_SPAN,
+ mlxsw_sp_span_occ_get, mlxsw_sp);
+
return 0;
}
void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
int i;
+ devlink_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_SPAN);
+
for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 5ecb45118400..a3af171c6358 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -2591,7 +2591,7 @@ __mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
- dev = __dev_get_by_index(&init_net, nve_ifindex);
+ dev = __dev_get_by_index(mlxsw_sp_net(mlxsw_sp), nve_ifindex);
if (!dev)
return -EINVAL;
*nve_dev = dev;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
index 899450b28621..7c03b661ae7e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
@@ -99,6 +99,7 @@ static void mlxsw_sp_rx_drop_listener(struct sk_buff *skb, u8 local_port,
devlink = priv_to_devlink(mlxsw_sp->core);
in_devlink_port = mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
local_port);
+ skb_push(skb, ETH_HLEN);
devlink_trap_report(devlink, skb, trap_ctx, in_devlink_port);
consume_skb(skb);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchib.c b/drivers/net/ethernet/mellanox/mlxsw/switchib.c
index 0d9356b3f65d..4ff1e623aa76 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchib.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchib.c
@@ -446,7 +446,8 @@ static int mlxsw_sib_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
}
static int mlxsw_sib_init(struct mlxsw_core *mlxsw_core,
- const struct mlxsw_bus_info *mlxsw_bus_info)
+ const struct mlxsw_bus_info *mlxsw_bus_info,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sib *mlxsw_sib = mlxsw_core_driver_priv(mlxsw_core);
int err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index 1c14c051ee52..de6cb22f68b1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -992,6 +992,7 @@ static int __mlxsw_sx_port_eth_create(struct mlxsw_sx *mlxsw_sx, u8 local_port,
if (!dev)
return -ENOMEM;
SET_NETDEV_DEV(dev, mlxsw_sx->bus_info->dev);
+ dev_net_set(dev, mlxsw_core_net(mlxsw_sx->core));
mlxsw_sx_port = netdev_priv(dev);
mlxsw_sx_port->dev = dev;
mlxsw_sx_port->mlxsw_sx = mlxsw_sx;
@@ -1563,7 +1564,8 @@ static int mlxsw_sx_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
}
static int mlxsw_sx_init(struct mlxsw_core *mlxsw_core,
- const struct mlxsw_bus_info *mlxsw_bus_info)
+ const struct mlxsw_bus_info *mlxsw_bus_info,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core);
int err;
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 4d1bce4389c7..344539c0d3aa 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -261,8 +261,15 @@ static int ocelot_vlan_vid_add(struct net_device *dev, u16 vid, bool pvid,
port->pvid = vid;
/* Untagged egress vlan classification */
- if (untagged)
+ if (untagged && port->vid != vid) {
+ if (port->vid) {
+ dev_err(ocelot->dev,
+ "Port already has a native VLAN: %d\n",
+ port->vid);
+ return -EBUSY;
+ }
port->vid = vid;
+ }
ocelot_vlan_port_apply(ocelot, port);
@@ -934,7 +941,7 @@ end:
static int ocelot_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
u16 vid)
{
- return ocelot_vlan_vid_add(dev, vid, false, true);
+ return ocelot_vlan_vid_add(dev, vid, false, false);
}
static int ocelot_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c
index b063eb78fa0c..723724bdc139 100644
--- a/drivers/net/ethernet/mscc/ocelot_board.c
+++ b/drivers/net/ethernet/mscc/ocelot_board.c
@@ -364,12 +364,12 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
for_each_available_child_of_node(ports, portnp) {
struct device_node *phy_node;
+ phy_interface_t phy_mode;
struct phy_device *phy;
struct resource *res;
struct phy *serdes;
void __iomem *regs;
char res_name[8];
- int phy_mode;
u32 port;
if (of_property_read_u32(portnp, "reg", &port))
@@ -388,20 +388,21 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
continue;
phy = of_phy_find_device(phy_node);
+ of_node_put(phy_node);
if (!phy)
continue;
err = ocelot_probe_port(ocelot, port, regs, phy);
if (err) {
of_node_put(portnp);
- return err;
+ goto out_put_ports;
}
- phy_mode = of_get_phy_mode(portnp);
- if (phy_mode < 0)
- ocelot->ports[port]->phy_mode = PHY_INTERFACE_MODE_NA;
- else
- ocelot->ports[port]->phy_mode = phy_mode;
+ err = of_get_phy_mode(portnp, &phy_mode);
+ if (err && err != -ENODEV)
+ goto out_put_ports;
+
+ ocelot->ports[port]->phy_mode = phy_mode;
switch (ocelot->ports[port]->phy_mode) {
case PHY_INTERFACE_MODE_NA:
@@ -422,7 +423,8 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
"invalid phy mode for port%d, (Q)SGMII only\n",
port);
of_node_put(portnp);
- return -EINVAL;
+ err = -EINVAL;
+ goto out_put_ports;
}
serdes = devm_of_phy_get(ocelot->dev, portnp, NULL);
@@ -435,7 +437,8 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
"missing SerDes phys for port%d\n",
port);
- goto err_probe_ports;
+ of_node_put(portnp);
+ goto out_put_ports;
}
ocelot->ports[port]->serdes = serdes;
@@ -447,9 +450,8 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "Ocelot switch probed\n");
- return 0;
-
-err_probe_ports:
+out_put_ports:
+ of_node_put(ports);
return err;
}
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index 5afcb3c4c2ef..c80bb83c8ac9 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -3952,7 +3952,7 @@ static void nfp_bpf_opt_neg_add_sub(struct nfp_prog *nfp_prog)
static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog)
{
struct nfp_insn_meta *meta1, *meta2;
- const s32 exp_mask[] = {
+ static const s32 exp_mask[] = {
[BPF_B] = 0x000000ffU,
[BPF_H] = 0x0000ffffU,
[BPF_W] = 0xffffffffU,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 61aabffc8888..bcdcd6de7dea 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -872,7 +872,8 @@ nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
/* jump forward, a TX may have gotten lost, need to sync TX */
if (!resync_pending && seq - ntls->next_seq < U32_MAX / 4)
- tls_offload_tx_resync_request(nskb->sk);
+ tls_offload_tx_resync_request(nskb->sk, seq,
+ ntls->next_seq);
*nr_frags = 0;
return nskb;
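[Editor's note: the TLS TX resync request now carries the sequence numbers involved. Judging purely by this call site, the updated helper presumably has a prototype along these lines:]

/* Assumed prototype, inferred from the call site above. */
void tls_offload_tx_resync_request(struct sock *sk, u32 got_seq, u32 exp_seq);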
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index 1eef446036d6..79d72c88bbef 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -299,22 +299,6 @@ static void nfp_repr_clean(struct nfp_repr *repr)
nfp_port_free(repr->port);
}
-static struct lock_class_key nfp_repr_netdev_xmit_lock_key;
-static struct lock_class_key nfp_repr_netdev_addr_lock_key;
-
-static void nfp_repr_set_lockdep_class_one(struct net_device *dev,
- struct netdev_queue *txq,
- void *_unused)
-{
- lockdep_set_class(&txq->_xmit_lock, &nfp_repr_netdev_xmit_lock_key);
-}
-
-static void nfp_repr_set_lockdep_class(struct net_device *dev)
-{
- lockdep_set_class(&dev->addr_list_lock, &nfp_repr_netdev_addr_lock_key);
- netdev_for_each_tx_queue(dev, nfp_repr_set_lockdep_class_one, NULL);
-}
-
int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
u32 cmsg_port_id, struct nfp_port *port,
struct net_device *pf_netdev)
@@ -324,8 +308,6 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
u32 repr_cap = nn->tlv_caps.repr_cap;
int err;
- nfp_repr_set_lockdep_class(netdev);
-
repr->port = port;
repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, GFP_KERNEL);
if (!repr->dst)
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index 2761f3a3ae50..49c7987c2abd 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -1346,10 +1346,9 @@ static int nixge_probe(struct platform_device *pdev)
}
}
- priv->phy_mode = of_get_phy_mode(pdev->dev.of_node);
- if ((int)priv->phy_mode < 0) {
+ err = of_get_phy_mode(pdev->dev.of_node, &priv->phy_mode);
+ if (err) {
netdev_err(ndev, "not find \"phy-mode\" property\n");
- err = -EINVAL;
goto unregister_mdio;
}
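[Editor's note: this is part of the tree-wide of_get_phy_mode() conversion: instead of encoding the mode (or a negative error) in the return value, the helper now returns an errno and writes the mode through an out-parameter, as also seen in the ocelot hunk above.]

/* New-style prototype, as used by the callers in this patch. */
int of_get_phy_mode(struct device_node *np, phy_interface_t *interface);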
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 141571e2ec11..ebb81d6d4ca1 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -15,6 +15,7 @@
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
@@ -391,6 +392,7 @@ struct rx_status_t {
struct netdata_local {
struct platform_device *pdev;
struct net_device *ndev;
+ struct device_node *phy_node;
spinlock_t lock;
void __iomem *net_base;
u32 msg_enable;
@@ -749,22 +751,26 @@ static void lpc_handle_link_change(struct net_device *ndev)
static int lpc_mii_probe(struct net_device *ndev)
{
struct netdata_local *pldat = netdev_priv(ndev);
- struct phy_device *phydev = phy_find_first(pldat->mii_bus);
-
- if (!phydev) {
- netdev_err(ndev, "no PHY found\n");
- return -ENODEV;
- }
+ struct phy_device *phydev;
/* Attach to the PHY */
if (lpc_phy_interface_mode(&pldat->pdev->dev) == PHY_INTERFACE_MODE_MII)
netdev_info(ndev, "using MII interface\n");
else
netdev_info(ndev, "using RMII interface\n");
+
+ if (pldat->phy_node)
+ phydev = of_phy_find_device(pldat->phy_node);
+ else
+ phydev = phy_find_first(pldat->mii_bus);
+ if (!phydev) {
+ netdev_err(ndev, "no PHY found\n");
+ return -ENODEV;
+ }
+
phydev = phy_connect(ndev, phydev_name(phydev),
&lpc_handle_link_change,
lpc_phy_interface_mode(&pldat->pdev->dev));
-
if (IS_ERR(phydev)) {
netdev_err(ndev, "Could not attach to PHY\n");
return PTR_ERR(phydev);
@@ -783,6 +789,7 @@ static int lpc_mii_probe(struct net_device *ndev)
static int lpc_mii_init(struct netdata_local *pldat)
{
+ struct device_node *node;
int err = -ENXIO;
pldat->mii_bus = mdiobus_alloc();
@@ -812,7 +819,10 @@ static int lpc_mii_init(struct netdata_local *pldat)
platform_set_drvdata(pldat->pdev, pldat->mii_bus);
- if (mdiobus_register(pldat->mii_bus))
+ node = of_get_child_by_name(pldat->pdev->dev.of_node, "mdio");
+ err = of_mdiobus_register(pldat->mii_bus, node);
+ of_node_put(node);
+ if (err)
goto err_out_unregister_bus;
if (lpc_mii_probe(pldat->ndev) != 0)
@@ -1345,6 +1355,8 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
netdev_dbg(ndev, "DMA buffer V address :0x%p\n",
pldat->dma_buff_base_v);
+ pldat->phy_node = of_parse_phandle(np, "phy-handle", 0);
+
/* Get MAC address from current HW setting (POR state is all zeros) */
__lpc_get_mac(pldat, ndev->dev_addr);
@@ -1356,9 +1368,6 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
if (!is_valid_ether_addr(ndev->dev_addr))
eth_hw_addr_random(ndev);
- /* Reset the ethernet controller */
- __lpc_eth_reset(pldat);
-
/* then shut everything down to save power */
__lpc_eth_shutdown(pldat);
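
The lpc_eth changes above follow the common devicetree pattern: the MDIO bus is registered against an optional "mdio" child node, and the PHY is resolved through an optional "phy-handle" phandle before falling back to a bus scan. A condensed sketch of that pattern (bus and dev names illustrative):

	struct device_node *mdio_np, *phy_np;
	struct phy_device *phydev;
	int err;

	mdio_np = of_get_child_by_name(dev->of_node, "mdio");
	err = of_mdiobus_register(bus, mdio_np);	/* NULL node falls back to mdiobus_register() */
	of_node_put(mdio_np);
	if (err)
		return err;

	phy_np = of_parse_phandle(dev->of_node, "phy-handle", 0);
	phydev = phy_np ? of_phy_find_device(phy_np) : phy_find_first(bus);
	if (!phydev)
		return -ENODEV;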
diff --git a/drivers/net/ethernet/pensando/Kconfig b/drivers/net/ethernet/pensando/Kconfig
index bd0583e409df..d25b88f53de4 100644
--- a/drivers/net/ethernet/pensando/Kconfig
+++ b/drivers/net/ethernet/pensando/Kconfig
@@ -20,6 +20,7 @@ if NET_VENDOR_PENSANDO
config IONIC
tristate "Pensando Ethernet IONIC Support"
depends on 64BIT && PCI
+ select NET_DEVLINK
help
This enables the support for the Pensando family of Ethernet
adapters. More specific information on this driver can be
diff --git a/drivers/net/ethernet/pensando/ionic/ionic.h b/drivers/net/ethernet/pensando/ionic/ionic.h
index 7a7060677f15..98e102af7756 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic.h
@@ -12,7 +12,7 @@ struct ionic_lif;
#define IONIC_DRV_NAME "ionic"
#define IONIC_DRV_DESCRIPTION "Pensando Ethernet NIC Driver"
-#define IONIC_DRV_VERSION "0.15.0-k"
+#define IONIC_DRV_VERSION "0.18.0-k"
#define PCI_VENDOR_ID_PENSANDO 0x1dd8
@@ -46,6 +46,8 @@ struct ionic {
DECLARE_BITMAP(intrs, IONIC_INTR_CTRL_REGS_MAX);
struct work_struct nb_work;
struct notifier_block nb;
+ struct timer_list watchdog_timer;
+ int watchdog_period;
};
struct ionic_admin_ctx {
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index d168a6435322..5f9d2ec70446 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -11,6 +11,16 @@
#include "ionic_dev.h"
#include "ionic_lif.h"
+static void ionic_watchdog_cb(struct timer_list *t)
+{
+ struct ionic *ionic = from_timer(ionic, t, watchdog_timer);
+
+ mod_timer(&ionic->watchdog_timer,
+ round_jiffies(jiffies + ionic->watchdog_period));
+
+ ionic_heartbeat_check(ionic);
+}
+
void ionic_init_devinfo(struct ionic *ionic)
{
struct ionic_dev *idev = &ionic->idev;
@@ -72,6 +82,11 @@ int ionic_dev_setup(struct ionic *ionic)
return -EFAULT;
}
+ timer_setup(&ionic->watchdog_timer, ionic_watchdog_cb, 0);
+ ionic->watchdog_period = IONIC_WATCHDOG_SECS * HZ;
+ mod_timer(&ionic->watchdog_timer,
+ round_jiffies(jiffies + ionic->watchdog_period));
+
idev->db_pages = bar->vaddr;
idev->phy_db_pages = bar->bus_addr;
@@ -80,10 +95,53 @@ int ionic_dev_setup(struct ionic *ionic)
void ionic_dev_teardown(struct ionic *ionic)
{
- /* place holder */
+ del_timer_sync(&ionic->watchdog_timer);
}
/* Devcmd Interface */
+int ionic_heartbeat_check(struct ionic *ionic)
+{
+ struct ionic_dev *idev = &ionic->idev;
+ unsigned long hb_time;
+ u32 fw_status;
+ u32 hb;
+
+ /* wait at least one watchdog period between heartbeat checks */
+ hb_time = jiffies;
+ if (time_before(hb_time, (idev->last_hb_time + ionic->watchdog_period)))
+ return 0;
+
+ /* firmware is useful only if fw_status is non-zero */
+ fw_status = ioread32(&idev->dev_info_regs->fw_status);
+ if (!fw_status)
+ return -ENXIO;
+
+ /* early FW has no heartbeat, else FW will return non-zero */
+ hb = ioread32(&idev->dev_info_regs->fw_heartbeat);
+ if (!hb)
+ return 0;
+
+ /* are we stalled? */
+ if (hb == idev->last_hb) {
+ /* only complain once for each stall seen */
+ if (idev->last_hb_time != 1) {
+ dev_info(ionic->dev, "FW heartbeat stalled at %d\n",
+ idev->last_hb);
+ idev->last_hb_time = 1;
+ }
+
+ return -ENXIO;
+ }
+
+ if (idev->last_hb_time == 1)
+ dev_info(ionic->dev, "FW heartbeat restored at %d\n", hb);
+
+ idev->last_hb = hb;
+ idev->last_hb_time = hb_time;
+
+ return 0;
+}
+
u8 ionic_dev_cmd_status(struct ionic_dev *idev)
{
return ioread8(&idev->dev_cmd_regs->comp.comp.status);
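
The watchdog added above uses the standard timer_setup()/from_timer() idiom: the timer_list is embedded in struct ionic, and the callback recovers its container from the timer pointer, re-arming itself on a rounded jiffy boundary so periodic wakeups can be batched. The idiom in miniature, with a hypothetical container:

	struct foo {
		struct timer_list timer;
	};

	static void foo_timer_cb(struct timer_list *t)
	{
		struct foo *foo = from_timer(foo, t, timer);

		/* re-arm first, then do the periodic work */
		mod_timer(&foo->timer, round_jiffies(jiffies + 5 * HZ));
	}

	/* at setup time */
	timer_setup(&foo->timer, foo_timer_cb, 0);
	mod_timer(&foo->timer, round_jiffies(jiffies + 5 * HZ));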
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index 9610aeb7d5f4..4665c5dc5324 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -16,6 +16,7 @@
#define IONIC_MIN_TXRX_DESC 16
#define IONIC_DEF_TXRX_DESC 4096
#define IONIC_LIFS_MAX 1024
+#define IONIC_WATCHDOG_SECS 5
#define IONIC_ITR_COAL_USEC_DEFAULT 64
#define IONIC_DEV_CMD_REG_VERSION 1
@@ -123,6 +124,9 @@ struct ionic_dev {
union ionic_dev_info_regs __iomem *dev_info_regs;
union ionic_dev_cmd_regs __iomem *dev_cmd_regs;
+ unsigned long last_hb_time;
+ u32 last_hb;
+
u64 __iomem *db_pages;
dma_addr_t phy_db_pages;
@@ -151,12 +155,19 @@ typedef void (*ionic_desc_cb)(struct ionic_queue *q,
struct ionic_desc_info *desc_info,
struct ionic_cq_info *cq_info, void *cb_arg);
+struct ionic_page_info {
+ struct page *page;
+ dma_addr_t dma_addr;
+};
+
struct ionic_desc_info {
void *desc;
void *sg_desc;
struct ionic_desc_info *next;
unsigned int index;
unsigned int left;
+ unsigned int npages;
+ struct ionic_page_info pages[IONIC_RX_MAX_SG_ELEMS + 1];
ionic_desc_cb cb;
void *cb_arg;
};
@@ -295,5 +306,6 @@ void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb,
void ionic_q_rewind(struct ionic_queue *q, struct ionic_desc_info *start);
void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info,
unsigned int stop_index);
+int ionic_heartbeat_check(struct ionic *ionic);
#endif /* _IONIC_DEV_H_ */
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
index af1647afa4e8..6fb27dcc5787 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
@@ -19,31 +19,30 @@ static int ionic_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
err = devlink_info_driver_name_put(req, IONIC_DRV_NAME);
if (err)
- goto info_out;
+ return err;
err = devlink_info_version_running_put(req,
DEVLINK_INFO_VERSION_GENERIC_FW,
idev->dev_info.fw_version);
if (err)
- goto info_out;
+ return err;
snprintf(buf, sizeof(buf), "0x%x", idev->dev_info.asic_type);
err = devlink_info_version_fixed_put(req,
DEVLINK_INFO_VERSION_GENERIC_ASIC_ID,
buf);
if (err)
- goto info_out;
+ return err;
snprintf(buf, sizeof(buf), "0x%x", idev->dev_info.asic_rev);
err = devlink_info_version_fixed_put(req,
DEVLINK_INFO_VERSION_GENERIC_ASIC_REV,
buf);
if (err)
- goto info_out;
+ return err;
err = devlink_info_serial_number_put(req, idev->dev_info.serial_num);
-info_out:
return err;
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index 7d10265f782a..f778fff034f5 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -254,12 +254,9 @@ static int ionic_set_link_ksettings(struct net_device *netdev,
struct ionic_lif *lif = netdev_priv(netdev);
struct ionic *ionic = lif->ionic;
struct ionic_dev *idev;
- u32 req_rs, req_fc;
- u8 fec_type;
int err = 0;
idev = &lif->ionic->idev;
- fec_type = IONIC_PORT_FEC_TYPE_NONE;
/* set autoneg */
if (ks->base.autoneg != idev->port_info->config.an_enable) {
@@ -281,29 +278,6 @@ static int ionic_set_link_ksettings(struct net_device *netdev,
return err;
}
- /* set FEC */
- req_rs = ethtool_link_ksettings_test_link_mode(ks, advertising, FEC_RS);
- req_fc = ethtool_link_ksettings_test_link_mode(ks, advertising, FEC_BASER);
- if (req_rs && req_fc) {
- netdev_info(netdev, "Only select one FEC mode at a time\n");
- return -EINVAL;
- } else if (req_fc) {
- fec_type = IONIC_PORT_FEC_TYPE_FC;
- } else if (req_rs) {
- fec_type = IONIC_PORT_FEC_TYPE_RS;
- } else if (!(req_rs | req_fc)) {
- fec_type = IONIC_PORT_FEC_TYPE_NONE;
- }
-
- if (fec_type != idev->port_info->config.fec_type) {
- mutex_lock(&ionic->dev_cmd_lock);
- ionic_dev_cmd_port_fec(idev, fec_type);
- err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
- mutex_unlock(&ionic->dev_cmd_lock);
- if (err)
- return err;
- }
-
return 0;
}
@@ -353,6 +327,70 @@ static int ionic_set_pauseparam(struct net_device *netdev,
return 0;
}
+static int ionic_get_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fec)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+
+ switch (lif->ionic->idev.port_info->config.fec_type) {
+ case IONIC_PORT_FEC_TYPE_NONE:
+ fec->active_fec = ETHTOOL_FEC_OFF;
+ break;
+ case IONIC_PORT_FEC_TYPE_RS:
+ fec->active_fec = ETHTOOL_FEC_RS;
+ break;
+ case IONIC_PORT_FEC_TYPE_FC:
+ fec->active_fec = ETHTOOL_FEC_BASER;
+ break;
+ }
+
+ fec->fec = ETHTOOL_FEC_OFF | ETHTOOL_FEC_RS | ETHTOOL_FEC_BASER;
+
+ return 0;
+}
+
+static int ionic_set_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fec)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ u8 fec_type;
+ int ret = 0;
+
+ if (lif->ionic->idev.port_info->config.an_enable) {
+ netdev_err(netdev, "FEC request not allowed while autoneg is enabled\n");
+ return -EINVAL;
+ }
+
+ switch (fec->fec) {
+ case ETHTOOL_FEC_NONE:
+ fec_type = IONIC_PORT_FEC_TYPE_NONE;
+ break;
+ case ETHTOOL_FEC_OFF:
+ fec_type = IONIC_PORT_FEC_TYPE_NONE;
+ break;
+ case ETHTOOL_FEC_RS:
+ fec_type = IONIC_PORT_FEC_TYPE_RS;
+ break;
+ case ETHTOOL_FEC_BASER:
+ fec_type = IONIC_PORT_FEC_TYPE_FC;
+ break;
+ case ETHTOOL_FEC_AUTO:
+ default:
+ netdev_err(netdev, "FEC request 0x%04x not supported\n",
+ fec->fec);
+ return -EINVAL;
+ }
+
+ if (fec_type != lif->ionic->idev.port_info->config.fec_type) {
+ mutex_lock(&lif->ionic->dev_cmd_lock);
+ ionic_dev_cmd_port_fec(&lif->ionic->idev, fec_type);
+ ret = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
+ mutex_unlock(&lif->ionic->dev_cmd_lock);
+ }
+
+ return ret;
+}
+
static int ionic_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *coalesce)
{
@@ -372,7 +410,6 @@ static int ionic_set_coalesce(struct net_device *netdev,
struct ionic_identity *ident;
struct ionic_qcq *qcq;
unsigned int i;
- u32 usecs;
u32 coal;
if (coalesce->rx_max_coalesced_frames ||
@@ -410,26 +447,27 @@ static int ionic_set_coalesce(struct net_device *netdev,
return -EINVAL;
}
+ /* Convert the usec request to a HW usable value. If they asked
+ * for non-zero and it resolved to zero, bump it up
+ */
coal = ionic_coal_usec_to_hw(lif->ionic, coalesce->rx_coalesce_usecs);
-
- if (coal > IONIC_INTR_CTRL_COAL_MAX)
- return -ERANGE;
-
- /* If they asked for non-zero and it resolved to zero, bump it up */
if (!coal && coalesce->rx_coalesce_usecs)
coal = 1;
- /* Convert it back to get device resolution */
- usecs = ionic_coal_hw_to_usec(lif->ionic, coal);
+ if (coal > IONIC_INTR_CTRL_COAL_MAX)
+ return -ERANGE;
- if (usecs != lif->rx_coalesce_usecs) {
- lif->rx_coalesce_usecs = usecs;
+ /* Save the new value */
+ lif->rx_coalesce_usecs = coalesce->rx_coalesce_usecs;
+ if (coal != lif->rx_coalesce_hw) {
+ lif->rx_coalesce_hw = coal;
if (test_bit(IONIC_LIF_UP, lif->state)) {
for (i = 0; i < lif->nxqs; i++) {
qcq = lif->rxqcqs[i].qcq;
ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
- qcq->intr.index, coal);
+ qcq->intr.index,
+ lif->rx_coalesce_hw);
}
}
}
@@ -453,6 +491,7 @@ static int ionic_set_ringparam(struct net_device *netdev,
{
struct ionic_lif *lif = netdev_priv(netdev);
bool running;
+ int err;
if (ring->rx_mini_pending || ring->rx_jumbo_pending) {
netdev_info(netdev, "Changing jumbo or mini descriptors not supported\n");
@@ -470,8 +509,9 @@ static int ionic_set_ringparam(struct net_device *netdev,
ring->rx_pending == lif->nrxq_descs)
return 0;
- if (!ionic_wait_for_bit(lif, IONIC_LIF_QUEUE_RESET))
- return -EBUSY;
+ err = ionic_wait_for_bit(lif, IONIC_LIF_QUEUE_RESET);
+ if (err)
+ return err;
running = test_bit(IONIC_LIF_UP, lif->state);
if (running)
@@ -504,6 +544,7 @@ static int ionic_set_channels(struct net_device *netdev,
{
struct ionic_lif *lif = netdev_priv(netdev);
bool running;
+ int err;
if (!ch->combined_count || ch->other_count ||
ch->rx_count || ch->tx_count)
@@ -512,8 +553,9 @@ static int ionic_set_channels(struct net_device *netdev,
if (ch->combined_count == lif->nxqs)
return 0;
- if (!ionic_wait_for_bit(lif, IONIC_LIF_QUEUE_RESET))
- return -EBUSY;
+ err = ionic_wait_for_bit(lif, IONIC_LIF_QUEUE_RESET);
+ if (err)
+ return err;
running = test_bit(IONIC_LIF_UP, lif->state);
if (running)
@@ -747,6 +789,7 @@ static const struct ethtool_ops ionic_ethtool_ops = {
.get_regs = ionic_get_regs,
.get_link = ethtool_op_get_link,
.get_link_ksettings = ionic_get_link_ksettings,
+ .set_link_ksettings = ionic_set_link_ksettings,
.get_coalesce = ionic_get_coalesce,
.set_coalesce = ionic_set_coalesce,
.get_ringparam = ionic_get_ringparam,
@@ -769,7 +812,8 @@ static const struct ethtool_ops ionic_ethtool_ops = {
.get_module_eeprom = ionic_get_module_eeprom,
.get_pauseparam = ionic_get_pauseparam,
.set_pauseparam = ionic_set_pauseparam,
- .set_link_ksettings = ionic_set_link_ksettings,
+ .get_fecparam = ionic_get_fecparam,
+ .set_fecparam = ionic_set_fecparam,
.nway_reset = ionic_nway_reset,
};
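
With get_fecparam/set_fecparam wired in, FEC selection moves off the link-mode advertising bits and onto ethtool's dedicated FEC interface; assuming a reasonably recent ethtool binary, the new hooks are exercised with:

	# ethtool --show-fec <iface>
	# ethtool --set-fec <iface> encoding rs

Per ionic_set_fecparam() above, "auto" (and any combination of encodings) is rejected with -EINVAL, and no FEC change is accepted while autonegotiation is enabled.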
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h
index 5bfdda19f64d..dbdb7c5ae8f1 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_if.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h
@@ -111,7 +111,7 @@ struct ionic_admin_cmd {
};
/**
- * struct admin_comp - General admin command completion format
+ * struct ionic_admin_comp - General admin command completion format
* @status: The status of the command (enum status_code)
* @comp_index: The index in the descriptor ring for which this
* is the completion.
@@ -134,7 +134,7 @@ static inline u8 color_match(u8 color, u8 done_color)
}
/**
- * struct nop_cmd - NOP command
+ * struct ionic_nop_cmd - NOP command
* @opcode: opcode
*/
struct ionic_nop_cmd {
@@ -143,7 +143,7 @@ struct ionic_nop_cmd {
};
/**
- * struct nop_comp - NOP command completion
+ * struct ionic_nop_comp - NOP command completion
* @status: The status of the command (enum status_code)
*/
struct ionic_nop_comp {
@@ -152,7 +152,7 @@ struct ionic_nop_comp {
};
/**
- * struct dev_init_cmd - Device init command
+ * struct ionic_dev_init_cmd - Device init command
* @opcode: opcode
* @type: device type
*/
@@ -172,7 +172,7 @@ struct ionic_dev_init_comp {
};
/**
- * struct dev_reset_cmd - Device reset command
+ * struct ionic_dev_reset_cmd - Device reset command
* @opcode: opcode
*/
struct ionic_dev_reset_cmd {
@@ -192,7 +192,7 @@ struct ionic_dev_reset_comp {
#define IONIC_IDENTITY_VERSION_1 1
/**
- * struct dev_identify_cmd - Driver/device identify command
+ * struct ionic_dev_identify_cmd - Driver/device identify command
* @opcode: opcode
* @ver: Highest version of identify supported by driver
*/
@@ -284,7 +284,7 @@ enum ionic_lif_type {
};
/**
- * struct lif_identify_cmd - lif identify command
+ * struct ionic_lif_identify_cmd - lif identify command
* @opcode: opcode
* @type: lif type (enum lif_type)
* @ver: version of identify returned by device
@@ -297,7 +297,7 @@ struct ionic_lif_identify_cmd {
};
/**
- * struct lif_identify_comp - lif identify command completion
+ * struct ionic_lif_identify_comp - lif identify command completion
* @status: status of the command (enum status_code)
* @ver: version of identify returned by device
*/
@@ -325,7 +325,7 @@ enum ionic_logical_qtype {
};
/**
- * struct lif_logical_qtype - Descriptor of logical to hardware queue type.
+ * struct ionic_lif_logical_qtype - Descriptor of logical to hardware queue type.
* @qtype: Hardware Queue Type.
* @qid_count: Number of Queue IDs of the logical type.
* @qid_base: Minimum Queue ID of the logical type.
@@ -349,7 +349,7 @@ enum ionic_lif_state {
* @name: lif name
* @mtu: mtu
* @mac: station mac address
- * @features: features (enum eth_hw_features)
+ * @features: features (enum ionic_eth_hw_features)
* @queue_count: queue counts per queue-type
*/
union ionic_lif_config {
@@ -367,7 +367,7 @@ union ionic_lif_config {
};
/**
- * struct lif_identity - lif identity information (type-specific)
+ * struct ionic_lif_identity - lif identity information (type-specific)
*
* @capabilities LIF capabilities
*
@@ -441,11 +441,11 @@ union ionic_lif_identity {
};
/**
- * struct lif_init_cmd - LIF init command
+ * struct ionic_lif_init_cmd - LIF init command
* @opcode: opcode
* @type: LIF type (enum lif_type)
* @index: LIF index
- * @info_pa: destination address for lif info (struct lif_info)
+ * @info_pa: destination address for lif info (struct ionic_lif_info)
*/
struct ionic_lif_init_cmd {
u8 opcode;
@@ -457,7 +457,7 @@ struct ionic_lif_init_cmd {
};
/**
- * struct lif_init_comp - LIF init command completion
+ * struct ionic_lif_init_comp - LIF init command completion
* @status: The status of the command (enum status_code)
*/
struct ionic_lif_init_comp {
@@ -468,7 +468,7 @@ struct ionic_lif_init_comp {
};
/**
- * struct q_init_cmd - Queue init command
+ * struct ionic_q_init_cmd - Queue init command
* @opcode: opcode
* @type: Logical queue type
* @ver: Queue version (defines opcode/descriptor scope)
@@ -525,7 +525,7 @@ struct ionic_q_init_cmd {
};
/**
- * struct q_init_comp - Queue init command completion
+ * struct ionic_q_init_comp - Queue init command completion
* @status: The status of the command (enum status_code)
* @ver: Queue version (defines opcode/descriptor scope)
* @comp_index: The index in the descriptor ring for which this
@@ -556,7 +556,7 @@ enum ionic_txq_desc_opcode {
};
/**
- * struct txq_desc - Ethernet Tx queue descriptor format
+ * struct ionic_txq_desc - Ethernet Tx queue descriptor format
* @opcode: Tx operation, see TXQ_DESC_OPCODE_*:
*
* IONIC_TXQ_DESC_OPCODE_CSUM_NONE:
@@ -735,7 +735,7 @@ static inline void decode_txq_desc_cmd(u64 cmd, u8 *opcode, u8 *flags,
#define IONIC_RX_MAX_SG_ELEMS 8
/**
- * struct txq_sg_desc - Transmit scatter-gather (SG) list
+ * struct ionic_txq_sg_desc - Transmit scatter-gather (SG) list
* @addr: DMA address of SG element data buffer
* @len: Length of SG element data buffer, in bytes
*/
@@ -748,7 +748,7 @@ struct ionic_txq_sg_desc {
};
/**
- * struct txq_comp - Ethernet transmit queue completion descriptor
+ * struct ionic_txq_comp - Ethernet transmit queue completion descriptor
* @status: The status of the command (enum status_code)
* @comp_index: The index in the descriptor ring for which this
* is the completion.
@@ -768,7 +768,7 @@ enum ionic_rxq_desc_opcode {
};
/**
- * struct rxq_desc - Ethernet Rx queue descriptor format
+ * struct ionic_rxq_desc - Ethernet Rx queue descriptor format
* @opcode: Rx operation, see RXQ_DESC_OPCODE_*:
*
* RXQ_DESC_OPCODE_SIMPLE:
@@ -789,7 +789,7 @@ struct ionic_rxq_desc {
};
/**
- * struct rxq_sg_desc - Receive scatter-gather (SG) list
+ * struct ionic_rxq_sg_desc - Receive scatter-gather (SG) list
* @addr: DMA address of SG element data buffer
* @len: Length of SG element data buffer, in bytes
*/
@@ -802,7 +802,7 @@ struct ionic_rxq_sg_desc {
};
/**
- * struct rxq_comp - Ethernet receive queue completion descriptor
+ * struct ionic_rxq_comp - Ethernet receive queue completion descriptor
* @status: The status of the command (enum status_code)
* @num_sg_elems: Number of SG elements used by this descriptor
* @comp_index: The index in the descriptor ring for which this
@@ -896,7 +896,7 @@ enum ionic_eth_hw_features {
};
/**
- * struct q_control_cmd - Queue control command
+ * struct ionic_q_control_cmd - Queue control command
* @opcode: opcode
* @type: Queue type
* @lif_index: LIF index
@@ -1033,8 +1033,8 @@ enum ionic_port_loopback_mode {
/**
* Transceiver Status information
- * @state: Transceiver status (enum xcvr_state)
- * @phy: Physical connection type (enum phy_type)
+ * @state: Transceiver status (enum ionic_xcvr_state)
+ * @phy: Physical connection type (enum ionic_phy_type)
* @pid: Transceiver link mode (enum pid)
* @sprom: Transceiver sprom contents
*/
@@ -1051,9 +1051,9 @@ struct ionic_xcvr_status {
* @mtu: mtu
* @state: port admin state (enum port_admin_state)
* @an_enable: autoneg enable
- * @fec_type: fec type (enum port_fec_type)
- * @pause_type: pause type (enum port_pause_type)
- * @loopback_mode: loopback mode (enum port_loopback_mode)
+ * @fec_type: fec type (enum ionic_port_fec_type)
+ * @pause_type: pause type (enum ionic_port_pause_type)
+ * @loopback_mode: loopback mode (enum ionic_port_loopback_mode)
*/
union ionic_port_config {
struct {
@@ -1080,7 +1080,7 @@ union ionic_port_config {
/**
* Port Status information
- * @status: link status (enum port_oper_status)
+ * @status: link status (enum ionic_port_oper_status)
* @id: port id
* @speed: link speed (in Mbps)
* @xcvr: transceiver status
@@ -1094,7 +1094,7 @@ struct ionic_port_status {
};
/**
- * struct port_identify_cmd - Port identify command
+ * struct ionic_port_identify_cmd - Port identify command
* @opcode: opcode
* @index: port index
* @ver: Highest version of identify supported by driver
@@ -1107,7 +1107,7 @@ struct ionic_port_identify_cmd {
};
/**
- * struct port_identify_comp - Port identify command completion
+ * struct ionic_port_identify_comp - Port identify command completion
* @status: The status of the command (enum status_code)
* @ver: Version of identify returned by device
*/
@@ -1118,10 +1118,10 @@ struct ionic_port_identify_comp {
};
/**
- * struct port_init_cmd - Port initialization command
+ * struct ionic_port_init_cmd - Port initialization command
* @opcode: opcode
* @index: port index
- * @info_pa: destination address for port info (struct port_info)
+ * @info_pa: destination address for port info (struct ionic_port_info)
*/
struct ionic_port_init_cmd {
u8 opcode;
@@ -1132,7 +1132,7 @@ struct ionic_port_init_cmd {
};
/**
- * struct port_init_comp - Port initialization command completion
+ * struct ionic_port_init_comp - Port initialization command completion
* @status: The status of the command (enum status_code)
*/
struct ionic_port_init_comp {
@@ -1141,7 +1141,7 @@ struct ionic_port_init_comp {
};
/**
- * struct port_reset_cmd - Port reset command
+ * struct ionic_port_reset_cmd - Port reset command
* @opcode: opcode
* @index: port index
*/
@@ -1152,7 +1152,7 @@ struct ionic_port_reset_cmd {
};
/**
- * struct port_reset_comp - Port reset command completion
+ * struct ionic_port_reset_comp - Port reset command completion
* @status: The status of the command (enum status_code)
*/
struct ionic_port_reset_comp {
@@ -1183,7 +1183,7 @@ enum ionic_port_attr {
};
/**
- * struct port_setattr_cmd - Set port attributes on the NIC
+ * struct ionic_port_setattr_cmd - Set port attributes on the NIC
* @opcode: Opcode
* @index: port index
* @attr: Attribute type (enum ionic_port_attr)
@@ -1207,7 +1207,7 @@ struct ionic_port_setattr_cmd {
};
/**
- * struct port_setattr_comp - Port set attr command completion
+ * struct ionic_port_setattr_comp - Port set attr command completion
* @status: The status of the command (enum status_code)
* @color: Color bit
*/
@@ -1218,7 +1218,7 @@ struct ionic_port_setattr_comp {
};
/**
- * struct port_getattr_cmd - Get port attributes from the NIC
+ * struct ionic_port_getattr_cmd - Get port attributes from the NIC
* @opcode: Opcode
* @index: port index
* @attr: Attribute type (enum ionic_port_attr)
@@ -1231,7 +1231,7 @@ struct ionic_port_getattr_cmd {
};
/**
- * struct port_getattr_comp - Port get attr command completion
+ * struct ionic_port_getattr_comp - Port get attr command completion
* @status: The status of the command (enum status_code)
* @color: Color bit
*/
@@ -1252,10 +1252,10 @@ struct ionic_port_getattr_comp {
};
/**
- * struct lif_status - Lif status register
+ * struct ionic_lif_status - Lif status register
* @eid: most recent NotifyQ event id
* @port_num: port the lif is connected to
- * @link_status: port status (enum port_oper_status)
+ * @link_status: port status (enum ionic_port_oper_status)
* @link_speed: speed of link in Mbps
* @link_down_count: number of times link status changes
*/
@@ -1270,7 +1270,7 @@ struct ionic_lif_status {
};
/**
- * struct lif_reset_cmd - LIF reset command
+ * struct ionic_lif_reset_cmd - LIF reset command
* @opcode: opcode
* @index: LIF index
*/
@@ -1290,7 +1290,7 @@ enum ionic_dev_state {
};
/**
- * enum dev_attr - List of device attributes
+ * enum ionic_dev_attr - List of device attributes
*/
enum ionic_dev_attr {
IONIC_DEV_ATTR_STATE = 0,
@@ -1299,10 +1299,10 @@ enum ionic_dev_attr {
};
/**
- * struct dev_setattr_cmd - Set Device attributes on the NIC
+ * struct ionic_dev_setattr_cmd - Set Device attributes on the NIC
* @opcode: Opcode
- * @attr: Attribute type (enum dev_attr)
- * @state: Device state (enum dev_state)
+ * @attr: Attribute type (enum ionic_dev_attr)
+ * @state: Device state (enum ionic_dev_state)
* @name: The bus info, e.g. PCI slot-device-function, 0 terminated
* @features: Device features
*/
@@ -1319,7 +1319,7 @@ struct ionic_dev_setattr_cmd {
};
/**
- * struct dev_setattr_comp - Device set attr command completion
+ * struct ionic_dev_setattr_comp - Device set attr command completion
* @status: The status of the command (enum status_code)
* @features: Device features
* @color: Color bit
@@ -1335,9 +1335,9 @@ struct ionic_dev_setattr_comp {
};
/**
- * struct dev_getattr_cmd - Get Device attributes from the NIC
+ * struct ionic_dev_getattr_cmd - Get Device attributes from the NIC
* @opcode: opcode
- * @attr: Attribute type (enum dev_attr)
+ * @attr: Attribute type (enum ionic_dev_attr)
*/
struct ionic_dev_getattr_cmd {
u8 opcode;
@@ -1346,7 +1346,7 @@ struct ionic_dev_getattr_cmd {
};
/**
- * struct dev_setattr_comp - Device set attr command completion
+ * struct ionic_dev_setattr_comp - Device set attr command completion
* @status: The status of the command (enum status_code)
* @features: Device features
* @color: Color bit
@@ -1376,7 +1376,7 @@ enum ionic_rss_hash_types {
};
/**
- * enum lif_attr - List of LIF attributes
+ * enum ionic_lif_attr - List of LIF attributes
*/
enum ionic_lif_attr {
IONIC_LIF_ATTR_STATE = 0,
@@ -1389,15 +1389,15 @@ enum ionic_lif_attr {
};
/**
- * struct lif_setattr_cmd - Set LIF attributes on the NIC
+ * struct ionic_lif_setattr_cmd - Set LIF attributes on the NIC
* @opcode: Opcode
- * @type: Attribute type (enum lif_attr)
+ * @type: Attribute type (enum ionic_lif_attr)
* @index: LIF index
* @state: lif state (enum lif_state)
* @name: The netdev name string, 0 terminated
* @mtu: Mtu
* @mac: Station mac
- * @features: Features (enum eth_hw_features)
+ * @features: Features (enum ionic_eth_hw_features)
* @rss: RSS properties
* @types: The hash types to enable (see rss_hash_types).
* @key: The hash secret key.
@@ -1426,11 +1426,11 @@ struct ionic_lif_setattr_cmd {
};
/**
- * struct lif_setattr_comp - LIF set attr command completion
+ * struct ionic_lif_setattr_comp - LIF set attr command completion
* @status: The status of the command (enum status_code)
* @comp_index: The index in the descriptor ring for which this
* is the completion.
- * @features: features (enum eth_hw_features)
+ * @features: features (enum ionic_eth_hw_features)
* @color: Color bit
*/
struct ionic_lif_setattr_comp {
@@ -1445,9 +1445,9 @@ struct ionic_lif_setattr_comp {
};
/**
- * struct lif_getattr_cmd - Get LIF attributes from the NIC
+ * struct ionic_lif_getattr_cmd - Get LIF attributes from the NIC
* @opcode: Opcode
- * @attr: Attribute type (enum lif_attr)
+ * @attr: Attribute type (enum ionic_lif_attr)
* @index: LIF index
*/
struct ionic_lif_getattr_cmd {
@@ -1458,7 +1458,7 @@ struct ionic_lif_getattr_cmd {
};
/**
- * struct lif_getattr_comp - LIF get attr command completion
+ * struct ionic_lif_getattr_comp - LIF get attr command completion
* @status: The status of the command (enum status_code)
* @comp_index: The index in the descriptor ring for which this
* is the completion.
@@ -1466,7 +1466,7 @@ struct ionic_lif_getattr_cmd {
* @name: The netdev name string, 0 terminated
* @mtu: Mtu
* @mac: Station mac
- * @features: Features (enum eth_hw_features)
+ * @features: Features (enum ionic_eth_hw_features)
* @color: Color bit
*/
struct ionic_lif_getattr_comp {
@@ -1492,7 +1492,7 @@ enum ionic_rx_mode {
};
/**
- * struct rx_mode_set_cmd - Set LIF's Rx mode command
+ * struct ionic_rx_mode_set_cmd - Set LIF's Rx mode command
* @opcode: opcode
* @lif_index: LIF index
* @rx_mode: Rx mode flags:
@@ -1519,7 +1519,7 @@ enum ionic_rx_filter_match_type {
};
/**
- * struct rx_filter_add_cmd - Add LIF Rx filter command
+ * struct ionic_rx_filter_add_cmd - Add LIF Rx filter command
* @opcode: opcode
* @qtype: Queue type
* @lif_index: LIF index
@@ -1550,7 +1550,7 @@ struct ionic_rx_filter_add_cmd {
};
/**
- * struct rx_filter_add_comp - Add LIF Rx filter command completion
+ * struct ionic_rx_filter_add_comp - Add LIF Rx filter command completion
* @status: The status of the command (enum status_code)
* @comp_index: The index in the descriptor ring for which this
* is the completion.
@@ -1567,7 +1567,7 @@ struct ionic_rx_filter_add_comp {
};
/**
- * struct rx_filter_del_cmd - Delete LIF Rx filter command
+ * struct ionic_rx_filter_del_cmd - Delete LIF Rx filter command
* @opcode: opcode
* @lif_index: LIF index
* @filter_id: Filter ID
@@ -1583,7 +1583,7 @@ struct ionic_rx_filter_del_cmd {
typedef struct ionic_admin_comp ionic_rx_filter_del_comp;
/**
- * struct qos_identify_cmd - QoS identify command
+ * struct ionic_qos_identify_cmd - QoS identify command
* @opcode: opcode
* @ver: Highest version of identify supported by driver
*
@@ -1595,7 +1595,7 @@ struct ionic_qos_identify_cmd {
};
/**
- * struct qos_identify_comp - QoS identify command completion
+ * struct ionic_qos_identify_comp - QoS identify command completion
* @status: The status of the command (enum status_code)
* @ver: Version of identify returned by device
*/
@@ -1610,7 +1610,7 @@ struct ionic_qos_identify_comp {
#define IONIC_QOS_DSCP_MAX_VALUES 64
/**
- * enum qos_class
+ * enum ionic_qos_class
*/
enum ionic_qos_class {
IONIC_QOS_CLASS_DEFAULT = 0,
@@ -1623,7 +1623,7 @@ enum ionic_qos_class {
};
/**
- * enum qos_class_type - Traffic classification criteria
+ * enum ionic_qos_class_type - Traffic classification criteria
*/
enum ionic_qos_class_type {
IONIC_QOS_CLASS_TYPE_NONE = 0,
@@ -1632,7 +1632,7 @@ enum ionic_qos_class_type {
};
/**
- * enum qos_sched_type - Qos class scheduling type
+ * enum ionic_qos_sched_type - Qos class scheduling type
*/
enum ionic_qos_sched_type {
IONIC_QOS_SCHED_TYPE_STRICT = 0, /* Strict priority */
@@ -1640,15 +1640,15 @@ enum ionic_qos_sched_type {
};
/**
- * union qos_config - Qos configuration structure
+ * union ionic_qos_config - Qos configuration structure
* @flags: Configuration flags
* IONIC_QOS_CONFIG_F_ENABLE enable
* IONIC_QOS_CONFIG_F_DROP drop/nodrop
* IONIC_QOS_CONFIG_F_RW_DOT1Q_PCP enable dot1q pcp rewrite
* IONIC_QOS_CONFIG_F_RW_IP_DSCP enable ip dscp rewrite
- * @sched_type: Qos class scheduling type (enum qos_sched_type)
- * @class_type: Qos class type (enum qos_class_type)
- * @pause_type: Qos pause type (enum qos_pause_type)
+ * @sched_type: Qos class scheduling type (enum ionic_qos_sched_type)
+ * @class_type: Qos class type (enum ionic_qos_class_type)
+ * @pause_type: Qos pause type (enum ionic_qos_pause_type)
* @name: Qos class name
* @mtu: MTU of the class
* @pfc_dot1q_pcp: Pcp value for pause frames (valid iff F_NODROP)
@@ -1697,7 +1697,7 @@ union ionic_qos_config {
};
/**
- * union qos_identity - QoS identity structure
+ * union ionic_qos_identity - QoS identity structure
* @version: Version of the identify structure
* @type: QoS system type
* @nclasses: Number of usable QoS classes
@@ -1730,7 +1730,7 @@ struct ionic_qos_init_cmd {
typedef struct ionic_admin_comp ionic_qos_init_comp;
/**
- * struct qos_reset_cmd - Qos config reset command
+ * struct ionic_qos_reset_cmd - Qos config reset command
* @opcode: Opcode
*/
struct ionic_qos_reset_cmd {
@@ -1742,7 +1742,7 @@ struct ionic_qos_reset_cmd {
typedef struct ionic_admin_comp ionic_qos_reset_comp;
/**
- * struct fw_download_cmd - Firmware download command
+ * struct ionic_fw_download_cmd - Firmware download command
* @opcode: opcode
* @addr: dma address of the firmware buffer
* @offset: offset of the firmware buffer within the full image
@@ -1765,9 +1765,9 @@ enum ionic_fw_control_oper {
};
/**
- * struct fw_control_cmd - Firmware control command
+ * struct ionic_fw_control_cmd - Firmware control command
* @opcode: opcode
- * @oper: firmware control operation (enum fw_control_oper)
+ * @oper: firmware control operation (enum ionic_fw_control_oper)
* @slot: slot to activate
*/
struct ionic_fw_control_cmd {
@@ -1779,7 +1779,7 @@ struct ionic_fw_control_cmd {
};
/**
- * struct fw_control_comp - Firmware control copletion
+ * struct ionic_fw_control_comp - Firmware control completion
* @opcode: opcode
* @slot: slot where the firmware was installed
*/
@@ -1797,13 +1797,13 @@ struct ionic_fw_control_comp {
******************************************************************/
/**
- * struct rdma_reset_cmd - Reset RDMA LIF cmd
+ * struct ionic_rdma_reset_cmd - Reset RDMA LIF cmd
* @opcode: opcode
* @lif_index: lif index
*
* There is no rdma specific dev command completion struct. Completion uses
- * the common struct admin_comp. Only the status is indicated. Nonzero status
- * means the LIF does not support rdma.
+ * the common struct ionic_admin_comp. Only the status is indicated.
+ * Nonzero status means the LIF does not support rdma.
**/
struct ionic_rdma_reset_cmd {
u8 opcode;
@@ -1813,7 +1813,7 @@ struct ionic_rdma_reset_cmd {
};
/**
- * struct rdma_queue_cmd - Create RDMA Queue command
+ * struct ionic_rdma_queue_cmd - Create RDMA Queue command
* @opcode: opcode, 52, 53
* @lif_index lif index
* @qid_ver: (qid | (rdma version << 24))
@@ -1839,7 +1839,7 @@ struct ionic_rdma_reset_cmd {
* memory registration.
*
* There is no rdma specific dev command completion struct. Completion uses
- * the common struct admin_comp. Only the status is indicated.
+ * the common struct ionic_admin_comp. Only the status is indicated.
**/
struct ionic_rdma_queue_cmd {
u8 opcode;
@@ -1860,7 +1860,7 @@ struct ionic_rdma_queue_cmd {
******************************************************************/
/**
- * struct notifyq_event
+ * struct ionic_notifyq_event
* @eid: event number
* @ecode: event code
* @data: unspecified data about the event
@@ -1875,7 +1875,7 @@ struct ionic_notifyq_event {
};
/**
- * struct link_change_event
+ * struct ionic_link_change_event
* @eid: event number
* @ecode: event code = EVENT_OPCODE_LINK_CHANGE
* @link_status: link up or down, with error bits (enum port_status)
@@ -1892,7 +1892,7 @@ struct ionic_link_change_event {
};
/**
- * struct reset_event
+ * struct ionic_reset_event
* @eid: event number
* @ecode: event code = EVENT_OPCODE_RESET
* @reset_code: reset type
@@ -1910,7 +1910,7 @@ struct ionic_reset_event {
};
/**
- * struct heartbeat_event
+ * struct ionic_heartbeat_event
* @eid: event number
* @ecode: event code = EVENT_OPCODE_HEARTBEAT
*
@@ -1923,7 +1923,7 @@ struct ionic_heartbeat_event {
};
/**
- * struct log_event
+ * struct ionic_log_event
* @eid: event number
* @ecode: event code = EVENT_OPCODE_LOG
* @data: log data
@@ -1937,7 +1937,7 @@ struct ionic_log_event {
};
/**
- * struct port_stats
+ * struct ionic_port_stats
*/
struct ionic_port_stats {
__le64 frames_rx_ok;
@@ -2067,7 +2067,7 @@ struct ionic_mgmt_port_stats {
};
/**
- * struct port_identity - port identity structure
+ * struct ionic_port_identity - port identity structure
* @version: identity structure version
* @type: type of port (enum port_type)
* @num_lanes: number of lanes for the port
@@ -2099,7 +2099,7 @@ union ionic_port_identity {
};
/**
- * struct port_info - port info structure
+ * struct ionic_port_info - port info structure
* @port_status: port status
* @port_stats: port stats
*/
@@ -2110,7 +2110,7 @@ struct ionic_port_info {
};
/**
- * struct lif_stats
+ * struct ionic_lif_stats
*/
struct ionic_lif_stats {
/* RX */
@@ -2264,7 +2264,7 @@ struct ionic_lif_stats {
};
/**
- * struct lif_info - lif info structure
+ * struct ionic_lif_info - lif info structure
*/
struct ionic_lif_info {
union ionic_lif_config config;
@@ -2357,7 +2357,7 @@ union ionic_dev_info_regs {
};
/**
- * union dev_cmd_regs - Device command register format (read-write)
+ * union ionic_dev_cmd_regs - Device command register format (read-write)
* @doorbell: Device Cmd Doorbell, write-only.
* Write a 1 to signal device to process cmd,
* poll done for completion.
@@ -2379,7 +2379,7 @@ union ionic_dev_cmd_regs {
};
/**
- * union dev_regs - Device register format in for bar 0 page 0
+ * union ionic_dev_regs - Device register format for bar 0 page 0
* @info: Device info registers
* @devcmd: Device command registers
*/
@@ -2433,7 +2433,7 @@ union ionic_adminq_comp {
#define IONIC_ASIC_TYPE_CAPRI 0
/**
- * struct doorbell - Doorbell register layout
+ * struct ionic_doorbell - Doorbell register layout
* @p_index: Producer index
* @ring: Selects the specific ring of the queue to update.
* Type-specific meaning:
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 72107a0627a9..60fd14df49d7 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
+#include <linux/printk.h>
+#include <linux/dynamic_debug.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
@@ -242,6 +244,21 @@ static int ionic_qcq_disable(struct ionic_qcq *qcq)
return ionic_adminq_post_wait(lif, &ctx);
}
+static void ionic_lif_quiesce(struct ionic_lif *lif)
+{
+ struct ionic_admin_ctx ctx = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
+ .cmd.lif_setattr = {
+ .opcode = IONIC_CMD_LIF_SETATTR,
+ .attr = IONIC_LIF_ATTR_STATE,
+ .index = lif->index,
+ .state = IONIC_LIF_DISABLE
+ },
+ };
+
+ ionic_adminq_post_wait(lif, &ctx);
+}
+
static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
struct ionic_dev *idev = &lif->ionic->idev;
@@ -607,12 +624,14 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
.lif_index = cpu_to_le16(lif->index),
.type = q->type,
.index = cpu_to_le32(q->index),
- .flags = cpu_to_le16(IONIC_QINIT_F_IRQ),
+ .flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
+ IONIC_QINIT_F_SG),
.intr_index = cpu_to_le16(cq->bound_intr->index),
.pid = cpu_to_le16(q->pid),
.ring_size = ilog2(q->num_descs),
.ring_base = cpu_to_le64(q->base_pa),
.cq_ring_base = cpu_to_le64(cq->base_pa),
+ .sg_ring_base = cpu_to_le64(q->sg_base_pa),
},
};
int err;
@@ -1430,7 +1449,6 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
unsigned int flags;
unsigned int i;
int err = 0;
- u32 coal;
flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
for (i = 0; i < lif->nxqs; i++) {
@@ -1446,21 +1464,22 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
lif->txqcqs[i].qcq->stats = lif->txqcqs[i].stats;
}
- flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_INTR;
- coal = ionic_coal_usec_to_hw(lif->ionic, lif->rx_coalesce_usecs);
+ flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG | IONIC_QCQ_F_INTR;
for (i = 0; i < lif->nxqs; i++) {
err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
lif->nrxq_descs,
sizeof(struct ionic_rxq_desc),
sizeof(struct ionic_rxq_comp),
- 0, lif->kern_pid, &lif->rxqcqs[i].qcq);
+ sizeof(struct ionic_rxq_sg_desc),
+ lif->kern_pid, &lif->rxqcqs[i].qcq);
if (err)
goto err_out;
lif->rxqcqs[i].qcq->stats = lif->rxqcqs[i].stats;
ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
- lif->rxqcqs[i].qcq->intr.index, coal);
+ lif->rxqcqs[i].qcq->intr.index,
+ lif->rx_coalesce_hw);
ionic_link_qcq_interrupts(lif->rxqcqs[i].qcq,
lif->txqcqs[i].qcq);
}
@@ -1590,6 +1609,7 @@ int ionic_stop(struct net_device *netdev)
netif_tx_disable(netdev);
ionic_txrx_disable(lif);
+ ionic_lif_quiesce(lif);
ionic_txrx_deinit(lif);
ionic_txrx_free(lif);
@@ -1619,8 +1639,9 @@ int ionic_reset_queues(struct ionic_lif *lif)
/* Put off the next watchdog timeout */
netif_trans_update(lif->netdev);
- if (!ionic_wait_for_bit(lif, IONIC_LIF_QUEUE_RESET))
- return -EBUSY;
+ err = ionic_wait_for_bit(lif, IONIC_LIF_QUEUE_RESET);
+ if (err)
+ return err;
running = netif_running(lif->netdev);
if (running)
@@ -1639,7 +1660,6 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index
struct net_device *netdev;
struct ionic_lif *lif;
int tbl_sz;
- u32 coal;
int err;
netdev = alloc_etherdev_mqs(sizeof(*lif),
@@ -1670,8 +1690,9 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index
lif->nrxq_descs = IONIC_DEF_TXRX_DESC;
/* Convert the default coalesce value to actual hw resolution */
- coal = ionic_coal_usec_to_hw(lif->ionic, IONIC_ITR_COAL_USEC_DEFAULT);
- lif->rx_coalesce_usecs = ionic_coal_hw_to_usec(lif->ionic, coal);
+ lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT;
+ lif->rx_coalesce_hw = ionic_coal_usec_to_hw(lif->ionic,
+ lif->rx_coalesce_usecs);
snprintf(lif->name, sizeof(lif->name), "lif%u", index);
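
ionic_lif_quiesce() above shows the driver's standard admin-command shape: an on-stack ionic_admin_ctx whose completion is initialized with COMPLETION_INITIALIZER_ONSTACK() and posted with ionic_adminq_post_wait(), which sleeps on ctx.work until the AdminQ completion arrives. The same shape for any other LIF attribute (the MTU attribute and new_mtu value here are illustrative):

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.attr = IONIC_LIF_ATTR_MTU,
			.index = cpu_to_le16(lif->index),
			.mtu = cpu_to_le32(new_mtu),
		},
	};

	err = ionic_adminq_post_wait(lif, &ctx);	/* sleeps on ctx.work */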
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index 812190e729c2..a55fd1f8c31b 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -175,25 +175,24 @@ struct ionic_lif {
unsigned long *dbid_inuse;
unsigned int dbid_count;
struct dentry *dentry;
- u32 rx_coalesce_usecs;
+ u32 rx_coalesce_usecs; /* what the user asked for */
+ u32 rx_coalesce_hw; /* what the hw is using */
+
u32 flags;
struct work_struct tx_timeout_work;
};
#define lif_to_txqcq(lif, i) ((lif)->txqcqs[i].qcq)
#define lif_to_rxqcq(lif, i) ((lif)->rxqcqs[i].qcq)
+#define lif_to_txstats(lif, i) ((lif)->txqcqs[i].stats->tx)
+#define lif_to_rxstats(lif, i) ((lif)->rxqcqs[i].stats->rx)
#define lif_to_txq(lif, i) (&lif_to_txqcq((lif), i)->q)
#define lif_to_rxq(lif, i) (&lif_to_txqcq((lif), i)->q)
+/* return 0 if successfully set the bit, else non-zero */
static inline int ionic_wait_for_bit(struct ionic_lif *lif, int bitname)
{
- unsigned long tlimit = jiffies + HZ;
-
- while (test_and_set_bit(bitname, lif->state) &&
- time_before(jiffies, tlimit))
- usleep_range(100, 200);
-
- return test_bit(bitname, lif->state);
+ return wait_on_bit_lock(lif->state, bitname, TASK_INTERRUPTIBLE);
}
static inline u32 ionic_coal_usec_to_hw(struct ionic *ionic, u32 usecs)
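
The ionic_wait_for_bit() rewrite leans on wait_on_bit_lock(), which sleeps until it can atomically take the bit and returns non-zero (typically -EINTR) if the sleep is interrupted; that is why the callers above now propagate its return value instead of collapsing every failure to -EBUSY. A sketch of the full take/release pairing:

	err = wait_on_bit_lock(lif->state, IONIC_LIF_QUEUE_RESET, TASK_INTERRUPTIBLE);
	if (err)
		return err;		/* interrupted before owning the bit */

	/* ... exclusive queue-reset work ... */

	clear_bit_unlock(IONIC_LIF_QUEUE_RESET, lif->state);
	wake_up_bit(lif->state, IONIC_LIF_QUEUE_RESET);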
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index 15e432386b35..3590ea7fd88a 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
+#include <linux/printk.h>
+#include <linux/dynamic_debug.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/utsname.h>
@@ -245,6 +247,10 @@ static int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
goto err_out;
}
+ err = ionic_heartbeat_check(lif->ionic);
+ if (err)
+ goto err_out;
+
memcpy(adminq->head->desc, &ctx->cmd, sizeof(ctx->cmd));
dev_dbg(&lif->netdev->dev, "post admin queue command:\n");
@@ -305,6 +311,14 @@ int ionic_napi(struct napi_struct *napi, int budget, ionic_cq_cb cb,
return work_done;
}
+static void ionic_dev_cmd_clean(struct ionic *ionic)
+{
+ union ionic_dev_cmd_regs *regs = ionic->idev.dev_cmd_regs;
+
+ iowrite32(0, &regs->doorbell);
+ memset_io(&regs->cmd, 0, sizeof(regs->cmd));
+}
+
int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds)
{
struct ionic_dev *idev = &ionic->idev;
@@ -314,6 +328,7 @@ int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds)
int opcode;
int done;
int err;
+ int hb;
WARN_ON(in_interrupt());
@@ -328,7 +343,8 @@ try_again:
if (done)
break;
msleep(20);
- } while (!done && time_before(jiffies, max_wait));
+ hb = ionic_heartbeat_check(ionic);
+ } while (!done && !hb && time_before(jiffies, max_wait));
duration = jiffies - start_time;
opcode = idev->dev_cmd_regs->cmd.cmd.opcode;
@@ -336,7 +352,15 @@ try_again:
ionic_opcode_to_str(opcode), opcode,
done, duration / HZ, duration);
+ if (!done && hb) {
+ ionic_dev_cmd_clean(ionic);
+ dev_warn(ionic->dev, "DEVCMD %s (%d) failed - FW halted\n",
+ ionic_opcode_to_str(opcode), opcode);
+ return -ENXIO;
+ }
+
if (!done && !time_before(jiffies, max_wait)) {
+ ionic_dev_cmd_clean(ionic);
dev_warn(ionic->dev, "DEVCMD %s (%d) timeout after %ld secs\n",
ionic_opcode_to_str(opcode), opcode, max_seconds);
return -ETIMEDOUT;
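
A side effect worth noting: ionic_dev_cmd_wait() now returns distinct errors for a dead firmware versus a slow one, so callers can pick different recovery paths (sketch):

	err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
	if (err == -ENXIO) {
		/* FW heartbeat lost: don't bother retrying, schedule recovery */
	} else if (err == -ETIMEDOUT) {
		/* FW alive but slow: a retry may still succeed */
	}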
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.c b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
index e2907884f843..03916b6d47f2 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_stats.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
@@ -117,7 +117,8 @@ static u64 ionic_sw_stats_get_count(struct ionic_lif *lif)
/* rx stats */
total += MAX_Q(lif) * IONIC_NUM_RX_STATS;
- if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+ if (test_bit(IONIC_LIF_UP, lif->state) &&
+ test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
/* tx debug stats */
total += MAX_Q(lif) * (IONIC_NUM_DBG_CQ_STATS +
IONIC_NUM_TX_Q_STATS +
@@ -149,7 +150,8 @@ static void ionic_sw_stats_get_strings(struct ionic_lif *lif, u8 **buf)
*buf += ETH_GSTRING_LEN;
}
- if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+ if (test_bit(IONIC_LIF_UP, lif->state) &&
+ test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++) {
snprintf(*buf, ETH_GSTRING_LEN,
"txq_%d_%s",
@@ -187,7 +189,8 @@ static void ionic_sw_stats_get_strings(struct ionic_lif *lif, u8 **buf)
*buf += ETH_GSTRING_LEN;
}
- if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+ if (test_bit(IONIC_LIF_UP, lif->state) &&
+ test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) {
snprintf(*buf, ETH_GSTRING_LEN,
"rxq_%d_cq_%s",
@@ -223,6 +226,8 @@ static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
{
struct ionic_lif_sw_stats lif_stats;
struct ionic_qcq *txqcq, *rxqcq;
+ struct ionic_tx_stats *txstats;
+ struct ionic_rx_stats *rxstats;
int i, q_num;
ionic_get_lif_stats(lif, &lif_stats);
@@ -233,15 +238,17 @@ static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
}
for (q_num = 0; q_num < MAX_Q(lif); q_num++) {
- txqcq = lif_to_txqcq(lif, q_num);
+ txstats = &lif_to_txstats(lif, q_num);
for (i = 0; i < IONIC_NUM_TX_STATS; i++) {
- **buf = IONIC_READ_STAT64(&txqcq->stats->tx,
+ **buf = IONIC_READ_STAT64(txstats,
&ionic_tx_stats_desc[i]);
(*buf)++;
}
- if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+ if (test_bit(IONIC_LIF_UP, lif->state) &&
+ test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+ txqcq = lif_to_txqcq(lif, q_num);
for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++) {
**buf = IONIC_READ_STAT64(&txqcq->q,
&ionic_txq_stats_desc[i]);
@@ -258,22 +265,24 @@ static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
(*buf)++;
}
for (i = 0; i < IONIC_MAX_NUM_SG_CNTR; i++) {
- **buf = txqcq->stats->tx.sg_cntr[i];
+ **buf = txstats->sg_cntr[i];
(*buf)++;
}
}
}
for (q_num = 0; q_num < MAX_Q(lif); q_num++) {
- rxqcq = lif_to_rxqcq(lif, q_num);
+ rxstats = &lif_to_rxstats(lif, q_num);
for (i = 0; i < IONIC_NUM_RX_STATS; i++) {
- **buf = IONIC_READ_STAT64(&rxqcq->stats->rx,
+ **buf = IONIC_READ_STAT64(rxstats,
&ionic_rx_stats_desc[i]);
(*buf)++;
}
- if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+ if (test_bit(IONIC_LIF_UP, lif->state) &&
+ test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+ rxqcq = lif_to_rxqcq(lif, q_num);
for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) {
**buf = IONIC_READ_STAT64(&rxqcq->cq,
&ionic_dbg_cq_stats_desc[i]);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index ab6663d94f42..97e79949b359 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -34,52 +34,110 @@ static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
return netdev_get_tx_queue(q->lif->netdev, q->index);
}
-static void ionic_rx_recycle(struct ionic_queue *q, struct ionic_desc_info *desc_info,
- struct sk_buff *skb)
+static struct sk_buff *ionic_rx_skb_alloc(struct ionic_queue *q,
+ unsigned int len, bool frags)
{
- struct ionic_rxq_desc *old = desc_info->desc;
- struct ionic_rxq_desc *new = q->head->desc;
+ struct ionic_lif *lif = q->lif;
+ struct ionic_rx_stats *stats;
+ struct net_device *netdev;
+ struct sk_buff *skb;
+
+ netdev = lif->netdev;
+ stats = q_to_rx_stats(q);
+
+ if (frags)
+ skb = napi_get_frags(&q_to_qcq(q)->napi);
+ else
+ skb = netdev_alloc_skb_ip_align(netdev, len);
- new->addr = old->addr;
- new->len = old->len;
+ if (unlikely(!skb)) {
+ net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
+ netdev->name, q->name);
+ stats->alloc_err++;
+ return NULL;
+ }
- ionic_rxq_post(q, true, ionic_rx_clean, skb);
+ return skb;
}
-static bool ionic_rx_copybreak(struct ionic_queue *q, struct ionic_desc_info *desc_info,
- struct ionic_cq_info *cq_info, struct sk_buff **skb)
+static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
+ struct ionic_desc_info *desc_info,
+ struct ionic_cq_info *cq_info)
{
struct ionic_rxq_comp *comp = cq_info->cq_desc;
- struct ionic_rxq_desc *desc = desc_info->desc;
- struct net_device *netdev = q->lif->netdev;
struct device *dev = q->lif->ionic->dev;
- struct sk_buff *new_skb;
- u16 clen, dlen;
-
- clen = le16_to_cpu(comp->len);
- dlen = le16_to_cpu(desc->len);
- if (clen > q->lif->rx_copybreak) {
- dma_unmap_single(dev, (dma_addr_t)le64_to_cpu(desc->addr),
- dlen, DMA_FROM_DEVICE);
- return false;
- }
+ struct ionic_page_info *page_info;
+ struct sk_buff *skb;
+ unsigned int i;
+ u16 frag_len;
+ u16 len;
- new_skb = netdev_alloc_skb_ip_align(netdev, clen);
- if (!new_skb) {
- dma_unmap_single(dev, (dma_addr_t)le64_to_cpu(desc->addr),
- dlen, DMA_FROM_DEVICE);
- return false;
- }
+ page_info = &desc_info->pages[0];
+ len = le16_to_cpu(comp->len);
- dma_sync_single_for_cpu(dev, (dma_addr_t)le64_to_cpu(desc->addr),
- clen, DMA_FROM_DEVICE);
+ prefetch(page_address(page_info->page) + NET_IP_ALIGN);
- memcpy(new_skb->data, (*skb)->data, clen);
+ skb = ionic_rx_skb_alloc(q, len, true);
+ if (unlikely(!skb))
+ return NULL;
- ionic_rx_recycle(q, desc_info, *skb);
- *skb = new_skb;
+ i = comp->num_sg_elems + 1;
+ do {
+ if (unlikely(!page_info->page)) {
+ struct napi_struct *napi = &q_to_qcq(q)->napi;
- return true;
+ napi->skb = NULL;
+ dev_kfree_skb(skb);
+ return NULL;
+ }
+
+ frag_len = min(len, (u16)PAGE_SIZE);
+ len -= frag_len;
+
+ dma_unmap_page(dev, dma_unmap_addr(page_info, dma_addr),
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ page_info->page, 0, frag_len, PAGE_SIZE);
+ page_info->page = NULL;
+ page_info++;
+ i--;
+ } while (i > 0);
+
+ return skb;
+}
+
+static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
+ struct ionic_desc_info *desc_info,
+ struct ionic_cq_info *cq_info)
+{
+ struct ionic_rxq_comp *comp = cq_info->cq_desc;
+ struct device *dev = q->lif->ionic->dev;
+ struct ionic_page_info *page_info;
+ struct sk_buff *skb;
+ u16 len;
+
+ page_info = &desc_info->pages[0];
+ len = le16_to_cpu(comp->len);
+
+ skb = ionic_rx_skb_alloc(q, len, false);
+ if (unlikely(!skb))
+ return NULL;
+
+ if (unlikely(!page_info->page)) {
+ dev_kfree_skb(skb);
+ return NULL;
+ }
+
+ dma_sync_single_for_cpu(dev, dma_unmap_addr(page_info, dma_addr),
+ len, DMA_FROM_DEVICE);
+ skb_copy_to_linear_data(skb, page_address(page_info->page), len);
+ dma_sync_single_for_device(dev, dma_unmap_addr(page_info, dma_addr),
+ len, DMA_FROM_DEVICE);
+
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, q->lif->netdev);
+
+ return skb;
}
static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_info,
@@ -87,35 +145,34 @@ static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_i
{
struct ionic_rxq_comp *comp = cq_info->cq_desc;
struct ionic_qcq *qcq = q_to_qcq(q);
- struct sk_buff *skb = cb_arg;
struct ionic_rx_stats *stats;
struct net_device *netdev;
+ struct sk_buff *skb;
stats = q_to_rx_stats(q);
netdev = q->lif->netdev;
- if (comp->status) {
- ionic_rx_recycle(q, desc_info, skb);
+ if (comp->status)
return;
- }
- if (unlikely(test_bit(IONIC_LIF_QUEUE_RESET, q->lif->state))) {
- /* no packet processing while resetting */
- ionic_rx_recycle(q, desc_info, skb);
+ /* no packet processing while resetting */
+ if (unlikely(test_bit(IONIC_LIF_QUEUE_RESET, q->lif->state)))
return;
- }
stats->pkts++;
stats->bytes += le16_to_cpu(comp->len);
- ionic_rx_copybreak(q, desc_info, cq_info, &skb);
+ if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
+ skb = ionic_rx_copybreak(q, desc_info, cq_info);
+ else
+ skb = ionic_rx_frags(q, desc_info, cq_info);
- skb_put(skb, le16_to_cpu(comp->len));
- skb->protocol = eth_type_trans(skb, netdev);
+ if (unlikely(!skb))
+ return;
skb_record_rx_queue(skb, q->index);
- if (netdev->features & NETIF_F_RXHASH) {
+ if (likely(netdev->features & NETIF_F_RXHASH)) {
switch (comp->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) {
case IONIC_PKT_TYPE_IPV4:
case IONIC_PKT_TYPE_IPV6:
@@ -132,7 +189,7 @@ static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_i
}
}
- if (netdev->features & NETIF_F_RXCSUM) {
+ if (likely(netdev->features & NETIF_F_RXCSUM)) {
if (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = (__wsum)le16_to_cpu(comp->csum);
@@ -142,18 +199,21 @@ static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_i
stats->csum_none++;
}
- if ((comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
- (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_BAD) ||
- (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD))
+ if (unlikely((comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
+ (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_BAD) ||
+ (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)))
stats->csum_error++;
- if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+ if (likely(netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
if (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
le16_to_cpu(comp->vlan_tci));
}
- napi_gro_receive(&qcq->napi, skb);
+ if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
+ napi_gro_receive(&qcq->napi, skb);
+ else
+ napi_gro_frags(&qcq->napi);
}
static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
@@ -213,66 +273,125 @@ void ionic_rx_flush(struct ionic_cq *cq)
work_done, IONIC_INTR_CRED_RESET_COALESCE);
}
-static struct sk_buff *ionic_rx_skb_alloc(struct ionic_queue *q, unsigned int len,
- dma_addr_t *dma_addr)
+static struct page *ionic_rx_page_alloc(struct ionic_queue *q,
+ dma_addr_t *dma_addr)
{
struct ionic_lif *lif = q->lif;
struct ionic_rx_stats *stats;
struct net_device *netdev;
- struct sk_buff *skb;
struct device *dev;
+ struct page *page;
netdev = lif->netdev;
dev = lif->ionic->dev;
stats = q_to_rx_stats(q);
- skb = netdev_alloc_skb_ip_align(netdev, len);
- if (!skb) {
- net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
- netdev->name, q->name);
+ page = alloc_page(GFP_ATOMIC);
+ if (unlikely(!page)) {
+ net_err_ratelimited("%s: Page alloc failed on %s!\n",
+ netdev->name, q->name);
stats->alloc_err++;
return NULL;
}
- *dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
- if (dma_mapping_error(dev, *dma_addr)) {
- dev_kfree_skb(skb);
- net_warn_ratelimited("%s: DMA single map failed on %s!\n",
- netdev->name, q->name);
+ *dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, *dma_addr))) {
+ __free_page(page);
+ net_err_ratelimited("%s: DMA single map failed on %s!\n",
+ netdev->name, q->name);
stats->dma_map_err++;
return NULL;
}
- return skb;
+ return page;
+}
+
+static void ionic_rx_page_free(struct ionic_queue *q, struct page *page,
+ dma_addr_t dma_addr)
+{
+ struct ionic_lif *lif = q->lif;
+ struct net_device *netdev;
+ struct device *dev;
+
+ netdev = lif->netdev;
+ dev = lif->ionic->dev;
+
+ if (unlikely(!page)) {
+ net_err_ratelimited("%s: Trying to free unallocated buffer on %s!\n",
+ netdev->name, q->name);
+ return;
+ }
+
+ dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
+
+ __free_page(page);
}
-#define IONIC_RX_RING_DOORBELL_STRIDE ((1 << 2) - 1)
+#define IONIC_RX_RING_DOORBELL_STRIDE ((1 << 5) - 1)
+#define IONIC_RX_RING_HEAD_BUF_SZ 2048
void ionic_rx_fill(struct ionic_queue *q)
{
struct net_device *netdev = q->lif->netdev;
+ struct ionic_desc_info *desc_info;
+ struct ionic_page_info *page_info;
+ struct ionic_rxq_sg_desc *sg_desc;
+ struct ionic_rxq_sg_elem *sg_elem;
struct ionic_rxq_desc *desc;
- struct sk_buff *skb;
- dma_addr_t dma_addr;
+ unsigned int nfrags;
bool ring_doorbell;
+ unsigned int i, j;
unsigned int len;
- unsigned int i;
len = netdev->mtu + ETH_HLEN;
+ nfrags = round_up(len, PAGE_SIZE) / PAGE_SIZE;
for (i = ionic_q_space_avail(q); i; i--) {
- skb = ionic_rx_skb_alloc(q, len, &dma_addr);
- if (!skb)
- return;
+ desc_info = q->head;
+ desc = desc_info->desc;
+ sg_desc = desc_info->sg_desc;
+ page_info = &desc_info->pages[0];
+
+ if (page_info->page) { /* recycle the buffer */
+ ring_doorbell = ((q->head->index + 1) &
+ IONIC_RX_RING_DOORBELL_STRIDE) == 0;
+ ionic_rxq_post(q, ring_doorbell, ionic_rx_clean, NULL);
+ continue;
+ }
- desc = q->head->desc;
- desc->addr = cpu_to_le64(dma_addr);
- desc->len = cpu_to_le16(len);
- desc->opcode = IONIC_RXQ_DESC_OPCODE_SIMPLE;
+ /* fill main descriptor - pages[0] */
+ desc->opcode = (nfrags > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
+ IONIC_RXQ_DESC_OPCODE_SIMPLE;
+ desc_info->npages = nfrags;
+ page_info->page = ionic_rx_page_alloc(q, &page_info->dma_addr);
+ if (unlikely(!page_info->page)) {
+ desc->addr = 0;
+ desc->len = 0;
+ return;
+ }
+ desc->addr = cpu_to_le64(page_info->dma_addr);
+ desc->len = cpu_to_le16(PAGE_SIZE);
+ page_info++;
+
+ /* fill sg descriptors - pages[1..n] */
+ for (j = 0; j < nfrags - 1; j++) {
+ if (page_info->page) /* recycle the sg buffer */
+ continue;
+
+ sg_elem = &sg_desc->elems[j];
+ page_info->page = ionic_rx_page_alloc(q, &page_info->dma_addr);
+ if (unlikely(!page_info->page)) {
+ sg_elem->addr = 0;
+ sg_elem->len = 0;
+ return;
+ }
+ sg_elem->addr = cpu_to_le64(page_info->dma_addr);
+ sg_elem->len = cpu_to_le16(PAGE_SIZE);
+ page_info++;
+ }
ring_doorbell = ((q->head->index + 1) &
IONIC_RX_RING_DOORBELL_STRIDE) == 0;
-
- ionic_rxq_post(q, ring_doorbell, ionic_rx_clean, skb);
+ ionic_rxq_post(q, ring_doorbell, ionic_rx_clean, NULL);
}
}
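The rewritten fill loop sizes each descriptor from the MTU and rings the doorbell every 32 postings (the ((1 << 5) - 1) mask). An illustrative userspace sketch of the fragment count and doorbell cadence, assuming 4 KiB pages and a jumbo MTU:

#include <stdio.h>

#define PAGE_SIZE 4096u
#define DOORBELL_STRIDE ((1u << 5) - 1)	/* ring every 32 descriptors */
#define ETH_HLEN 14u

int main(void)
{
	unsigned int mtu = 9000;	/* assumed jumbo MTU */
	unsigned int len = mtu + ETH_HLEN;
	/* round_up(len, PAGE_SIZE) / PAGE_SIZE, as in ionic_rx_fill() */
	unsigned int nfrags = (len + PAGE_SIZE - 1) / PAGE_SIZE;

	printf("frame len %u -> %u page fragment(s) per descriptor\n",
	       len, nfrags);

	for (unsigned int index = 0; index < 100; index++)
		if (((index + 1) & DOORBELL_STRIDE) == 0)
			printf("post #%u rings the doorbell\n", index);
	return 0;
}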
@@ -283,15 +402,24 @@ static void ionic_rx_fill_cb(void *arg)
void ionic_rx_empty(struct ionic_queue *q)
{
- struct device *dev = q->lif->ionic->dev;
struct ionic_desc_info *cur;
struct ionic_rxq_desc *desc;
+ unsigned int i;
for (cur = q->tail; cur != q->head; cur = cur->next) {
desc = cur->desc;
- dma_unmap_single(dev, le64_to_cpu(desc->addr),
- le16_to_cpu(desc->len), DMA_FROM_DEVICE);
- dev_kfree_skb(cur->cb_arg);
+ desc->addr = 0;
+ desc->len = 0;
+
+ for (i = 0; i < cur->npages; i++) {
+ if (likely(cur->pages[i].page)) {
+ ionic_rx_page_free(q, cur->pages[i].page,
+ cur->pages[i].dma_addr);
+ cur->pages[i].page = NULL;
+ cur->pages[i].dma_addr = 0;
+ }
+ }
+
cur->cb_arg = NULL;
}
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index d473b522afc5..9ad568d93ae6 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -37,14 +37,14 @@
#include <linux/slab.h>
#include "qed.h"
-/* Fields of IGU PF CONFIGRATION REGISTER */
+/* Fields of IGU PF CONFIGURATION REGISTER */
#define IGU_PF_CONF_FUNC_EN (0x1 << 0) /* function enable */
#define IGU_PF_CONF_MSI_MSIX_EN (0x1 << 1) /* MSI/MSIX enable */
#define IGU_PF_CONF_INT_LINE_EN (0x1 << 2) /* INT enable */
#define IGU_PF_CONF_ATTN_BIT_EN (0x1 << 3) /* attention enable */
#define IGU_PF_CONF_SINGLE_ISR_EN (0x1 << 4) /* single ISR mode enable */
#define IGU_PF_CONF_SIMD_MODE (0x1 << 5) /* simd all ones mode */
-/* Fields of IGU VF CONFIGRATION REGISTER */
+/* Fields of IGU VF CONFIGURATION REGISTER */
#define IGU_VF_CONF_FUNC_EN (0x1 << 0) /* function enable */
#define IGU_VF_CONF_MSI_MSIX_EN (0x1 << 1) /* MSI/MSIX enable */
#define IGU_VF_CONF_SINGLE_ISR_EN (0x1 << 4) /* single ISR mode enable */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 2ce70097d018..38f7f40b3a4d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -67,10 +67,9 @@
#define QED_ROCE_QPS (8192)
#define QED_ROCE_DPIS (8)
#define QED_RDMA_SRQS QED_ROCE_QPS
-#define QED_NVM_CFG_SET_FLAGS 0xE
-#define QED_NVM_CFG_SET_PF_FLAGS 0x1E
#define QED_NVM_CFG_GET_FLAGS 0xA
#define QED_NVM_CFG_GET_PF_FLAGS 0x1A
+#define QED_NVM_CFG_MAX_ATTRS 50
static char version[] =
"QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";
@@ -2255,6 +2254,7 @@ static int qed_nvm_flash_cfg_write(struct qed_dev *cdev, const u8 **data)
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
u8 entity_id, len, buf[32];
+ bool need_nvm_init = true;
struct qed_ptt *ptt;
u16 cfg_id, count;
int rc = 0, i;
@@ -2271,8 +2271,10 @@ static int qed_nvm_flash_cfg_write(struct qed_dev *cdev, const u8 **data)
DP_VERBOSE(cdev, NETIF_MSG_DRV,
"Read config ids: num_attrs = %0d\n", count);
- /* NVM CFG ID attributes */
- for (i = 0; i < count; i++) {
+ /* NVM CFG ID attributes. Start the loop index at 1 so the batch
+ * boundary test (i % QED_NVM_CFG_MAX_ATTRS) needs no extra arithmetic.
+ */
+ for (i = 1; i <= count; i++) {
cfg_id = *((u16 *)*data);
*data += 2;
entity_id = **data;
@@ -2282,8 +2284,21 @@ static int qed_nvm_flash_cfg_write(struct qed_dev *cdev, const u8 **data)
memcpy(buf, *data, len);
*data += len;
- flags = entity_id ? QED_NVM_CFG_SET_PF_FLAGS :
- QED_NVM_CFG_SET_FLAGS;
+ flags = 0;
+ if (need_nvm_init) {
+ flags |= QED_NVM_CFG_OPTION_INIT;
+ need_nvm_init = false;
+ }
+
+ /* Commit to flash and free the resources */
+ if (!(i % QED_NVM_CFG_MAX_ATTRS) || i == count) {
+ flags |= QED_NVM_CFG_OPTION_COMMIT |
+ QED_NVM_CFG_OPTION_FREE;
+ need_nvm_init = true;
+ }
+
+ if (entity_id)
+ flags |= QED_NVM_CFG_OPTION_ENTITY_SEL;
DP_VERBOSE(cdev, NETIF_MSG_DRV,
"cfg_id = %d entity = %d len = %d\n", cfg_id,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 78f77b712b10..dcb5c917f373 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -2005,7 +2005,7 @@ static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
(qed_iov_validate_active_txq(p_hwfn, vf))) {
vf->b_malicious = true;
DP_NOTICE(p_hwfn,
- "VF [%02x] - considered malicious; Unable to stop RX/TX queuess\n",
+ "VF [%02x] - considered malicious; Unable to stop RX/TX queues\n",
vf->abs_vf_id);
status = PFVF_STATUS_MALICIOUS;
goto out;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
index 9a8fd79611f2..368e88565783 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
@@ -305,7 +305,7 @@ void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn,
/**
* @brief Read sriov related information and allocated resources
- * reads from configuraiton space, shmem, etc.
+ * reads from configuration space, shmem, etc.
*
* @param p_hwfn
*
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index 9a6a9a008714..d6cfe4ffbaf3 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -1298,7 +1298,7 @@ void qede_config_rx_mode(struct net_device *ndev)
rx_mode.type = QED_FILTER_TYPE_RX_MODE;
/* Remove all previous unicast secondary macs and multicast macs
- * (configrue / leave the primary mac)
+ * (configure / leave the primary mac)
*/
rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE,
edev->ndev->dev_addr);
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 457444894d80..b4b8ba00ee01 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -2787,6 +2787,7 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
netdev_err(qdev->ndev,
"PCI mapping failed with error: %d\n",
err);
+ dev_kfree_skb_irq(skb);
ql_free_large_buffers(qdev);
return -ENOMEM;
}
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 74f81fe03810..0704f8bd1df4 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -741,12 +741,6 @@ static void rtl_unlock_config_regs(struct rtl8169_private *tp)
RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
}
-static void rtl_tx_performance_tweak(struct rtl8169_private *tp, u16 force)
-{
- pcie_capability_clear_and_set_word(tp->pci_dev, PCI_EXP_DEVCTL,
- PCI_EXP_DEVCTL_READRQ, force);
-}
-
static bool rtl_is_8125(struct rtl8169_private *tp)
{
return tp->mac_version >= RTL_GIGA_MAC_VER_60;
@@ -1029,6 +1023,10 @@ static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg)
{
int value;
+ /* Work around issue with chip reporting wrong PHY ID */
+ if (reg == MII_PHYSID2)
+ return 0xc912;
+
r8168dp_2_mdio_start(tp);
value = r8169_mdio_read(tp, reg);
@@ -1564,7 +1562,7 @@ static netdev_features_t rtl8169_fix_features(struct net_device *dev,
if (dev->mtu > JUMBO_1K &&
tp->mac_version > RTL_GIGA_MAC_VER_06)
- features &= ~NETIF_F_IP_CSUM;
+ features &= ~(NETIF_F_CSUM_MASK | NETIF_F_ALL_TSO);
return features;
}
@@ -4032,14 +4030,12 @@ static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
{
RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0);
RTL_W8(tp, Config4, RTL_R8(tp, Config4) | Jumbo_En1);
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_512B);
}
static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp)
{
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0);
RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~Jumbo_En1);
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
}
static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp)
@@ -4057,7 +4053,6 @@ static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
RTL_W8(tp, MaxTxPacketSize, 0x3f);
RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0);
RTL_W8(tp, Config4, RTL_R8(tp, Config4) | 0x01);
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_512B);
}
static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
@@ -4065,32 +4060,15 @@ static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
RTL_W8(tp, MaxTxPacketSize, 0x0c);
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0);
RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~0x01);
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
-}
-
-static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp)
-{
- rtl_tx_performance_tweak(tp,
- PCI_EXP_DEVCTL_READRQ_512B | PCI_EXP_DEVCTL_NOSNOOP_EN);
-}
-
-static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp)
-{
- rtl_tx_performance_tweak(tp,
- PCI_EXP_DEVCTL_READRQ_4096B | PCI_EXP_DEVCTL_NOSNOOP_EN);
}
static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp)
{
- r8168b_0_hw_jumbo_enable(tp);
-
RTL_W8(tp, Config4, RTL_R8(tp, Config4) | (1 << 0));
}
static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
{
- r8168b_0_hw_jumbo_disable(tp);
-
RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~(1 << 0));
}
@@ -4098,9 +4076,6 @@ static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
{
rtl_unlock_config_regs(tp);
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_11:
- r8168b_0_hw_jumbo_enable(tp);
- break;
case RTL_GIGA_MAC_VER_12:
case RTL_GIGA_MAC_VER_17:
r8168b_1_hw_jumbo_enable(tp);
@@ -4124,9 +4099,6 @@ static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
{
rtl_unlock_config_regs(tp);
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_11:
- r8168b_0_hw_jumbo_disable(tp);
- break;
case RTL_GIGA_MAC_VER_12:
case RTL_GIGA_MAC_VER_17:
r8168b_1_hw_jumbo_disable(tp);
@@ -4146,6 +4118,14 @@ static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
rtl_lock_config_regs(tp);
}
+static void rtl_jumbo_config(struct rtl8169_private *tp, int mtu)
+{
+ if (mtu > ETH_DATA_LEN)
+ rtl_hw_jumbo_enable(tp);
+ else
+ rtl_hw_jumbo_disable(tp);
+}
+
DECLARE_RTL_COND(rtl_chipcmd_cond)
{
return RTL_R8(tp, ChipCmd) & CmdReset;
@@ -4439,21 +4419,9 @@ static void rtl8168g_set_pause_thresholds(struct rtl8169_private *tp,
rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, high);
}
-static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
+static void rtl_hw_start_8168b(struct rtl8169_private *tp)
{
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
-
- if (tp->dev->mtu <= ETH_DATA_LEN) {
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B |
- PCI_EXP_DEVCTL_NOSNOOP_EN);
- }
-}
-
-static void rtl_hw_start_8168bef(struct rtl8169_private *tp)
-{
- rtl_hw_start_8168bb(tp);
-
- RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~(1 << 0));
}
static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
@@ -4462,9 +4430,6 @@ static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
- if (tp->dev->mtu <= ETH_DATA_LEN)
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
-
rtl_disable_clock_request(tp);
}
@@ -4490,9 +4455,6 @@ static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
rtl_set_def_aspm_entry_latency(tp);
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
-
- if (tp->dev->mtu <= ETH_DATA_LEN)
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
}
static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
@@ -4503,9 +4465,6 @@ static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
/* Magic. */
RTL_W8(tp, DBG_REG, 0x20);
-
- if (tp->dev->mtu <= ETH_DATA_LEN)
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
}
static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
@@ -4556,19 +4515,6 @@ static void rtl_hw_start_8168d(struct rtl8169_private *tp)
rtl_set_def_aspm_entry_latency(tp);
rtl_disable_clock_request(tp);
-
- if (tp->dev->mtu <= ETH_DATA_LEN)
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
-}
-
-static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
-{
- rtl_set_def_aspm_entry_latency(tp);
-
- if (tp->dev->mtu <= ETH_DATA_LEN)
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
-
- rtl_disable_clock_request(tp);
}
static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
@@ -4582,8 +4528,6 @@ static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
rtl_set_def_aspm_entry_latency(tp);
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
-
rtl_ephy_init(tp, e_info_8168d_4);
rtl_enable_clock_request(tp);
@@ -4611,9 +4555,6 @@ static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
rtl_ephy_init(tp, e_info_8168e_1);
- if (tp->dev->mtu <= ETH_DATA_LEN)
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
-
rtl_disable_clock_request(tp);
/* Reset tx FIFO pointer */
@@ -4636,9 +4577,6 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
rtl_ephy_init(tp, e_info_8168e_2);
- if (tp->dev->mtu <= ETH_DATA_LEN)
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
-
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06);
@@ -4664,8 +4602,6 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp)
{
rtl_set_def_aspm_entry_latency(tp);
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
-
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06);
@@ -4728,8 +4664,6 @@ static void rtl_hw_start_8168g(struct rtl8169_private *tp)
rtl_set_def_aspm_entry_latency(tp);
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
-
rtl_reset_packet_filter(tp);
rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f);
@@ -4780,8 +4714,7 @@ static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
rtl_hw_start_8168g(tp);
/* disable aspm and clock request before accessing ephy */
- RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
- RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
+ rtl_hw_aspm_clkreq_enable(tp, false);
rtl_ephy_init(tp, e_info_8168g_2);
}
@@ -4966,8 +4899,6 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
rtl_set_def_aspm_entry_latency(tp);
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
-
rtl_reset_packet_filter(tp);
rtl_eri_set_bits(tp, 0xdc, ERIAR_MASK_1111, BIT(4));
@@ -5025,8 +4956,6 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
rtl_set_def_aspm_entry_latency(tp);
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
-
rtl_reset_packet_filter(tp);
rtl_eri_set_bits(tp, 0xd4, ERIAR_MASK_1111, 0x1f80);
@@ -5129,8 +5058,6 @@ static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
RTL_W8(tp, DBG_REG, FIX_NAK_1);
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
-
RTL_W8(tp, Config1,
LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable);
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
@@ -5146,8 +5073,6 @@ static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
{
rtl_set_def_aspm_entry_latency(tp);
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
-
RTL_W8(tp, Config1, MEMMAP | IOMAP | VPD | PMEnable);
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
}
@@ -5208,8 +5133,6 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
rtl_ephy_init(tp, e_info_8402);
- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
-
rtl_set_fifo_size(tp, 0x00, 0x00, 0x02, 0x06);
rtl_reset_packet_filter(tp);
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
@@ -5363,13 +5286,13 @@ static void rtl_hw_config(struct rtl8169_private *tp)
[RTL_GIGA_MAC_VER_08] = rtl_hw_start_8102e_3,
[RTL_GIGA_MAC_VER_09] = rtl_hw_start_8102e_2,
[RTL_GIGA_MAC_VER_10] = NULL,
- [RTL_GIGA_MAC_VER_11] = rtl_hw_start_8168bb,
- [RTL_GIGA_MAC_VER_12] = rtl_hw_start_8168bef,
+ [RTL_GIGA_MAC_VER_11] = rtl_hw_start_8168b,
+ [RTL_GIGA_MAC_VER_12] = rtl_hw_start_8168b,
[RTL_GIGA_MAC_VER_13] = NULL,
[RTL_GIGA_MAC_VER_14] = NULL,
[RTL_GIGA_MAC_VER_15] = NULL,
[RTL_GIGA_MAC_VER_16] = NULL,
- [RTL_GIGA_MAC_VER_17] = rtl_hw_start_8168bef,
+ [RTL_GIGA_MAC_VER_17] = rtl_hw_start_8168b,
[RTL_GIGA_MAC_VER_18] = rtl_hw_start_8168cp_1,
[RTL_GIGA_MAC_VER_19] = rtl_hw_start_8168c_1,
[RTL_GIGA_MAC_VER_20] = rtl_hw_start_8168c_2,
@@ -5383,7 +5306,7 @@ static void rtl_hw_config(struct rtl8169_private *tp)
[RTL_GIGA_MAC_VER_28] = rtl_hw_start_8168d_4,
[RTL_GIGA_MAC_VER_29] = rtl_hw_start_8105e_1,
[RTL_GIGA_MAC_VER_30] = rtl_hw_start_8105e_2,
- [RTL_GIGA_MAC_VER_31] = rtl_hw_start_8168dp,
+ [RTL_GIGA_MAC_VER_31] = rtl_hw_start_8168d,
[RTL_GIGA_MAC_VER_32] = rtl_hw_start_8168e_1,
[RTL_GIGA_MAC_VER_33] = rtl_hw_start_8168e_1,
[RTL_GIGA_MAC_VER_34] = rtl_hw_start_8168e_2,
@@ -5425,11 +5348,6 @@ static void rtl_hw_start_8125(struct rtl8169_private *tp)
static void rtl_hw_start_8168(struct rtl8169_private *tp)
{
- if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
- tp->mac_version == RTL_GIGA_MAC_VER_16)
- pcie_capability_set_word(tp->pci_dev, PCI_EXP_DEVCTL,
- PCI_EXP_DEVCTL_NOSNOOP_EN);
-
if (rtl_is_8168evl_up(tp))
RTL_W8(tp, MaxTxPacketSize, EarlySize);
else
@@ -5485,6 +5403,8 @@ static void rtl_hw_start(struct rtl8169_private *tp)
rtl_set_rx_tx_desc_registers(tp);
rtl_lock_config_regs(tp);
+ rtl_jumbo_config(tp, tp->dev->mtu);
+
/* Initially a 10 us delay. Turned it into a PCI commit. - FR */
RTL_R16(tp, CPlusCmd);
RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
@@ -5498,10 +5418,7 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
{
struct rtl8169_private *tp = netdev_priv(dev);
- if (new_mtu > ETH_DATA_LEN)
- rtl_hw_jumbo_enable(tp);
- else
- rtl_hw_jumbo_disable(tp);
+ rtl_jumbo_config(tp, new_mtu);
dev->mtu = new_mtu;
netdev_update_features(dev);
@@ -5579,18 +5496,15 @@ static int rtl8169_rx_fill(struct rtl8169_private *tp)
data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i);
if (!data) {
- rtl8169_make_unusable_by_asic(tp->RxDescArray + i);
- goto err_out;
+ rtl8169_rx_clear(tp);
+ return -ENOMEM;
}
tp->Rx_databuff[i] = data;
}
rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
- return 0;
-err_out:
- rtl8169_rx_clear(tp);
- return -ENOMEM;
+ return 0;
}
static int rtl8169_init_ring(struct rtl8169_private *tp)
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index de9aa8c47f1c..5ea14b5fbed8 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -2046,7 +2046,9 @@ static int ravb_probe(struct platform_device *pdev)
spin_lock_init(&priv->lock);
INIT_WORK(&priv->work, ravb_tx_timeout_work);
- priv->phy_interface = of_get_phy_mode(np);
+ error = of_get_phy_mode(np, &priv->phy_interface);
+ if (error && error != -ENODEV)
+ goto out_release;
priv->no_avb_link = of_property_read_bool(np, "renesas,no-ether-link");
priv->avb_link_active_low =
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 7ba35a0bdb29..e19b49c4013e 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -3183,6 +3183,7 @@ static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
{
struct device_node *np = dev->of_node;
struct sh_eth_plat_data *pdata;
+ phy_interface_t interface;
const char *mac_addr;
int ret;
@@ -3190,10 +3191,10 @@ static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
if (!pdata)
return NULL;
- ret = of_get_phy_mode(np);
- if (ret < 0)
+ ret = of_get_phy_mode(np, &interface);
+ if (ret)
return NULL;
- pdata->phy_interface = ret;
+ pdata->phy_interface = interface;
mac_addr = of_get_mac_address(np);
if (!IS_ERR(mac_addr))
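The ravb and sh_eth hunks above (and the sxgbe hunk below) track the of_get_phy_mode() API change: it now returns an errno and writes the mode through a pointer instead of encoding it in the return value, and callers that can live without a "phy-mode" property ignore -ENODEV. A userspace mock of the calling pattern; the stub and its values are hypothetical:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

typedef int phy_interface_t;
#define PHY_INTERFACE_MODE_NA    0
#define PHY_INTERFACE_MODE_RGMII 7	/* arbitrary value for the mock */

/* Stub with the new kernel semantics: 0 on success, -ENODEV if the
 * property is absent, another negative errno on malformed values.
 */
static int of_get_phy_mode(bool have_property, phy_interface_t *interface)
{
	*interface = PHY_INTERFACE_MODE_NA;
	if (!have_property)
		return -ENODEV;
	*interface = PHY_INTERFACE_MODE_RGMII;
	return 0;
}

int main(void)
{
	phy_interface_t mode;
	int err = of_get_phy_mode(false, &mode);

	/* Pattern used by ravb/sxgbe above: only hard errors abort probe. */
	if (err && err != -ENODEV) {
		fprintf(stderr, "probe fails: %d\n", err);
		return 1;
	}
	printf("probe continues, mode=%d\n", mode);
	return 0;
}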
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 786b158bd305..bc4f951315da 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -2189,9 +2189,6 @@ static int rocker_router_fib_event(struct notifier_block *nb,
struct rocker_fib_event_work *fib_work;
struct fib_notifier_info *info = ptr;
- if (!net_eq(info->net, &init_net))
- return NOTIFY_DONE;
-
if (info->family != AF_INET)
return NOTIFY_DONE;
@@ -2994,7 +2991,7 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
* the device, so no need to pass a callback.
*/
rocker->fib_nb.notifier_call = rocker_router_fib_event;
- err = register_fib_notifier(&rocker->fib_nb, NULL);
+ err = register_fib_notifier(&init_net, &rocker->fib_nb, NULL, NULL);
if (err)
goto err_register_fib_notifier;
@@ -3021,7 +3018,7 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err_register_switchdev_blocking_notifier:
unregister_switchdev_notifier(&rocker_switchdev_notifier);
err_register_switchdev_notifier:
- unregister_fib_notifier(&rocker->fib_nb);
+ unregister_fib_notifier(&init_net, &rocker->fib_nb);
err_register_fib_notifier:
rocker_remove_ports(rocker);
err_probe_ports:
@@ -3057,7 +3054,7 @@ static void rocker_remove(struct pci_dev *pdev)
unregister_switchdev_blocking_notifier(nb);
unregister_switchdev_notifier(&rocker_switchdev_notifier);
- unregister_fib_notifier(&rocker->fib_nb);
+ unregister_fib_notifier(&init_net, &rocker->fib_nb);
rocker_remove_ports(rocker);
rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
destroy_workqueue(rocker->rocker_owq);
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
index 2412c87561e0..33f79402850d 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
@@ -30,12 +30,15 @@ static int sxgbe_probe_config_dt(struct platform_device *pdev,
{
struct device_node *np = pdev->dev.of_node;
struct sxgbe_dma_cfg *dma_cfg;
+ int err;
if (!np)
return -ENODEV;
*mac = of_get_mac_address(np);
- plat->interface = of_get_phy_mode(np);
+ err = of_get_phy_mode(np, &plat->interface);
+ if (err && err != -ENODEV)
+ return err;
plat->bus_id = of_alias_get_id(np, "ethernet");
if (plat->bus_id < 0)
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 0ec13f520e90..ad68eb0cb8fd 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -946,8 +946,10 @@ static int efx_ef10_link_piobufs(struct efx_nic *efx)
/* Extra channels, even those with TXQs (PTP), do not require
* PIO resources.
*/
- if (!channel->type->want_pio)
+ if (!channel->type->want_pio ||
+ channel->channel >= efx->xdp_channel_offset)
continue;
+
efx_for_each_channel_tx_queue(tx_queue, channel) {
/* We assign the PIO buffers to queues in
* reverse order to allow for the following
@@ -1296,8 +1298,9 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx)
int rc;
channel_vis = max(efx->n_channels,
- (efx->n_tx_channels + efx->n_extra_tx_channels) *
- EFX_TXQ_TYPES);
+ ((efx->n_tx_channels + efx->n_extra_tx_channels) *
+ EFX_TXQ_TYPES) +
+ efx->n_xdp_channels * efx->xdp_tx_per_channel);
#ifdef EFX_USE_PIO
/* Try to allocate PIO buffers if wanted and if the full
@@ -2434,11 +2437,12 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
/* TSOv2 is a limited resource that can only be configured on a limited
* number of queues. TSO without checksum offload is not really a thing,
* so we only enable it for those queues.
- * TSOv2 cannot be used with Hardware timestamping.
+ * TSOv2 cannot be used with Hardware timestamping, and is never needed
+ * for XDP tx.
*/
if (csum_offload && (nic_data->datapath_caps2 &
(1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN)) &&
- !tx_queue->timestamping) {
+ !tx_queue->timestamping && !tx_queue->xdp_tx) {
tso_v2 = true;
netif_dbg(efx, hw, efx->net_dev, "Using TSOv2 for channel %u\n",
channel->channel);
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 2fef7402233e..0fa9972027db 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -226,6 +226,10 @@ static void efx_fini_napi_channel(struct efx_channel *channel);
static void efx_fini_struct(struct efx_nic *efx);
static void efx_start_all(struct efx_nic *efx);
static void efx_stop_all(struct efx_nic *efx);
+static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog);
+static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp);
+static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
+ u32 flags);
#define EFX_ASSERT_RESET_SERIALISED(efx) \
do { \
@@ -340,6 +344,8 @@ static int efx_poll(struct napi_struct *napi, int budget)
spent = efx_process_channel(channel, budget);
+ xdp_do_flush_map();
+
if (spent < budget) {
if (efx_channel_has_rx_queue(channel) &&
efx->irq_rx_adaptive &&
@@ -579,9 +585,14 @@ efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
int number;
number = channel->channel;
- if (efx->tx_channel_offset == 0) {
+
+ if (number >= efx->xdp_channel_offset &&
+ !WARN_ON_ONCE(!efx->n_xdp_channels)) {
+ type = "-xdp";
+ number -= efx->xdp_channel_offset;
+ } else if (efx->tx_channel_offset == 0) {
type = "";
- } else if (channel->channel < efx->tx_channel_offset) {
+ } else if (number < efx->tx_channel_offset) {
type = "-rx";
} else {
type = "-tx";
@@ -651,7 +662,7 @@ static void efx_start_datapath(struct efx_nic *efx)
efx->rx_dma_len = (efx->rx_prefix_size +
EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
efx->type->rx_buffer_padding);
- rx_buf_len = (sizeof(struct efx_rx_page_state) +
+ rx_buf_len = (sizeof(struct efx_rx_page_state) + XDP_PACKET_HEADROOM +
efx->rx_ip_align + efx->rx_dma_len);
if (rx_buf_len <= PAGE_SIZE) {
efx->rx_scatter = efx->type->always_rx_scatter;
@@ -774,6 +785,7 @@ static void efx_stop_datapath(struct efx_nic *efx)
efx_for_each_possible_channel_tx_queue(tx_queue, channel)
efx_fini_tx_queue(tx_queue);
}
+ efx->xdp_rxq_info_failed = false;
}
static void efx_remove_channel(struct efx_channel *channel)
@@ -798,6 +810,8 @@ static void efx_remove_channels(struct efx_nic *efx)
efx_for_each_channel(channel, efx)
efx_remove_channel(channel);
+
+ kfree(efx->xdp_tx_queues);
}
int
@@ -1435,6 +1449,101 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
return count;
}
+static int efx_allocate_msix_channels(struct efx_nic *efx,
+ unsigned int max_channels,
+ unsigned int extra_channels,
+ unsigned int parallelism)
+{
+ unsigned int n_channels = parallelism;
+ int vec_count;
+ int n_xdp_tx;
+ int n_xdp_ev;
+
+ if (efx_separate_tx_channels)
+ n_channels *= 2;
+ n_channels += extra_channels;
+
+ /* To allow XDP transmit to happen from arbitrary NAPI contexts
+ * we allocate a TX queue per CPU. We share event queues across
+ * multiple tx queues, assuming tx and ev queues are both
+ * maximum size.
+ */
+
+ n_xdp_tx = num_possible_cpus();
+ n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, EFX_TXQ_TYPES);
+
+ /* Check resources.
+ * We need a channel per event queue, plus a VI per tx queue.
+ * This may be more pessimistic than it needs to be.
+ */
+ if (n_channels + n_xdp_ev > max_channels) {
+ netif_err(efx, drv, efx->net_dev,
+ "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
+ n_xdp_ev, n_channels, max_channels);
+ efx->n_xdp_channels = 0;
+ efx->xdp_tx_per_channel = 0;
+ efx->xdp_tx_queue_count = 0;
+ } else {
+ efx->n_xdp_channels = n_xdp_ev;
+ efx->xdp_tx_per_channel = EFX_TXQ_TYPES;
+ efx->xdp_tx_queue_count = n_xdp_tx;
+ n_channels += n_xdp_ev;
+ netif_dbg(efx, drv, efx->net_dev,
+ "Allocating %d TX and %d event queues for XDP\n",
+ n_xdp_tx, n_xdp_ev);
+ }
+
+ n_channels = min(n_channels, max_channels);
+
+ vec_count = pci_msix_vec_count(efx->pci_dev);
+ if (vec_count < 0)
+ return vec_count;
+ if (vec_count < n_channels) {
+ netif_err(efx, drv, efx->net_dev,
+ "WARNING: Insufficient MSI-X vectors available (%d < %u).\n",
+ vec_count, n_channels);
+ netif_err(efx, drv, efx->net_dev,
+ "WARNING: Performance may be reduced.\n");
+ n_channels = vec_count;
+ }
+
+ efx->n_channels = n_channels;
+
+ /* Do not create the PTP TX queue(s) if PTP uses the MC directly. */
+ if (extra_channels && !efx_ptp_use_mac_tx_timestamps(efx))
+ n_channels--;
+
+ /* Ignore XDP tx channels when creating rx channels. */
+ n_channels -= efx->n_xdp_channels;
+
+ if (efx_separate_tx_channels) {
+ efx->n_tx_channels =
+ min(max(n_channels / 2, 1U),
+ efx->max_tx_channels);
+ efx->tx_channel_offset =
+ n_channels - efx->n_tx_channels;
+ efx->n_rx_channels =
+ max(n_channels -
+ efx->n_tx_channels, 1U);
+ } else {
+ efx->n_tx_channels = min(n_channels, efx->max_tx_channels);
+ efx->tx_channel_offset = 0;
+ efx->n_rx_channels = n_channels;
+ }
+
+ if (efx->n_xdp_channels)
+ efx->xdp_channel_offset = efx->tx_channel_offset +
+ efx->n_tx_channels;
+ else
+ efx->xdp_channel_offset = efx->n_channels;
+
+ netif_dbg(efx, drv, efx->net_dev,
+ "Allocating %u RX channels\n",
+ efx->n_rx_channels);
+
+ return efx->n_channels;
+}
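efx_allocate_msix_channels() budgets one XDP TX queue per possible CPU, packs EFX_TXQ_TYPES of them per event queue, and falls back to no XDP channels when resources run short. A sketch of that arithmetic with assumed limits; EFX_TXQ_TYPES = 4 mirrors the sfc headers but is an assumption here:

#include <stdio.h>

#define EFX_TXQ_TYPES 4	/* assumed, per the sfc headers */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int parallelism = 8;	/* assumed RX parallelism */
	unsigned int extra = 1;		/* e.g. a PTP channel */
	unsigned int max_channels = 32;
	unsigned int ncpus = 16;	/* num_possible_cpus() stand-in */

	unsigned int n_channels = parallelism + extra;
	unsigned int n_xdp_tx = ncpus;
	unsigned int n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, EFX_TXQ_TYPES);

	if (n_channels + n_xdp_ev > max_channels) {
		printf("no XDP channels: %u + %u > %u\n",
		       n_channels, n_xdp_ev, max_channels);
		n_xdp_ev = 0;
	} else {
		n_channels += n_xdp_ev;
	}
	printf("%u total channels, %u XDP event queues, %u XDP TX queues\n",
	       n_channels, n_xdp_ev, n_xdp_ev ? n_xdp_tx : 0);
	return 0;
}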
+
/* Probe the number and type of interrupts we are able to obtain, and
* the resulting numbers of channels and RX queues.
*/
@@ -1449,19 +1558,19 @@ static int efx_probe_interrupts(struct efx_nic *efx)
++extra_channels;
if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
+ unsigned int parallelism = efx_wanted_parallelism(efx);
struct msix_entry xentries[EFX_MAX_CHANNELS];
unsigned int n_channels;
- n_channels = efx_wanted_parallelism(efx);
- if (efx_separate_tx_channels)
- n_channels *= 2;
- n_channels += extra_channels;
- n_channels = min(n_channels, efx->max_channels);
-
- for (i = 0; i < n_channels; i++)
- xentries[i].entry = i;
- rc = pci_enable_msix_range(efx->pci_dev,
- xentries, 1, n_channels);
+ rc = efx_allocate_msix_channels(efx, efx->max_channels,
+ extra_channels, parallelism);
+ if (rc >= 0) {
+ n_channels = rc;
+ for (i = 0; i < n_channels; i++)
+ xentries[i].entry = i;
+ rc = pci_enable_msix_range(efx->pci_dev, xentries, 1,
+ n_channels);
+ }
if (rc < 0) {
/* Fall back to single channel MSI */
netif_err(efx, drv, efx->net_dev,
@@ -1480,21 +1589,6 @@ static int efx_probe_interrupts(struct efx_nic *efx)
}
if (rc > 0) {
- efx->n_channels = n_channels;
- if (n_channels > extra_channels)
- n_channels -= extra_channels;
- if (efx_separate_tx_channels) {
- efx->n_tx_channels = min(max(n_channels / 2,
- 1U),
- efx->max_tx_channels);
- efx->n_rx_channels = max(n_channels -
- efx->n_tx_channels,
- 1U);
- } else {
- efx->n_tx_channels = min(n_channels,
- efx->max_tx_channels);
- efx->n_rx_channels = n_channels;
- }
for (i = 0; i < efx->n_channels; i++)
efx_get_channel(efx, i)->irq =
xentries[i].vector;
@@ -1506,6 +1600,8 @@ static int efx_probe_interrupts(struct efx_nic *efx)
efx->n_channels = 1;
efx->n_rx_channels = 1;
efx->n_tx_channels = 1;
+ efx->n_xdp_channels = 0;
+ efx->xdp_channel_offset = efx->n_channels;
rc = pci_enable_msi(efx->pci_dev);
if (rc == 0) {
efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
@@ -1524,12 +1620,14 @@ static int efx_probe_interrupts(struct efx_nic *efx)
efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
efx->n_rx_channels = 1;
efx->n_tx_channels = 1;
+ efx->n_xdp_channels = 0;
+ efx->xdp_channel_offset = efx->n_channels;
efx->legacy_irq = efx->pci_dev->irq;
}
- /* Assign extra channels if possible */
+ /* Assign extra channels if possible, before XDP channels */
efx->n_extra_tx_channels = 0;
- j = efx->n_channels;
+ j = efx->xdp_channel_offset;
for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
if (!efx->extra_channel_type[i])
continue;
@@ -1724,29 +1822,50 @@ static void efx_remove_interrupts(struct efx_nic *efx)
efx->legacy_irq = 0;
}
-static void efx_set_channels(struct efx_nic *efx)
+static int efx_set_channels(struct efx_nic *efx)
{
struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
+ int xdp_queue_number;
efx->tx_channel_offset =
efx_separate_tx_channels ?
efx->n_channels - efx->n_tx_channels : 0;
+ if (efx->xdp_tx_queue_count) {
+ EFX_WARN_ON_PARANOID(efx->xdp_tx_queues);
+
+ /* Allocate array for XDP TX queue lookup. */
+ efx->xdp_tx_queues = kcalloc(efx->xdp_tx_queue_count,
+ sizeof(*efx->xdp_tx_queues),
+ GFP_KERNEL);
+ if (!efx->xdp_tx_queues)
+ return -ENOMEM;
+ }
+
/* We need to mark which channels really have RX and TX
* queues, and adjust the TX queue numbers if we have separate
* RX-only and TX-only channels.
*/
+ xdp_queue_number = 0;
efx_for_each_channel(channel, efx) {
if (channel->channel < efx->n_rx_channels)
channel->rx_queue.core_index = channel->channel;
else
channel->rx_queue.core_index = -1;
- efx_for_each_channel_tx_queue(tx_queue, channel)
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
tx_queue->queue -= (efx->tx_channel_offset *
EFX_TXQ_TYPES);
+
+ if (efx_channel_is_xdp_tx(channel) &&
+ xdp_queue_number < efx->xdp_tx_queue_count) {
+ efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
+ xdp_queue_number++;
+ }
+ }
}
+ return 0;
}
static int efx_probe_nic(struct efx_nic *efx)
@@ -1776,7 +1895,9 @@ static int efx_probe_nic(struct efx_nic *efx)
if (rc)
goto fail1;
- efx_set_channels(efx);
+ rc = efx_set_channels(efx);
+ if (rc)
+ goto fail1;
/* dimension_resources can fail with EAGAIN */
rc = efx->type->dimension_resources(efx);
@@ -2022,6 +2143,10 @@ static void efx_stop_all(struct efx_nic *efx)
static void efx_remove_all(struct efx_nic *efx)
{
+ rtnl_lock();
+ efx_xdp_setup_prog(efx, NULL);
+ rtnl_unlock();
+
efx_remove_channels(efx);
efx_remove_filters(efx);
#ifdef CONFIG_SFC_SRIOV
@@ -2082,6 +2207,8 @@ int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
channel->irq_moderation_us = rx_usecs;
else if (efx_channel_has_tx_queues(channel))
channel->irq_moderation_us = tx_usecs;
+ else if (efx_channel_is_xdp_tx(channel))
+ channel->irq_moderation_us = tx_usecs;
}
return 0;
@@ -2277,6 +2404,17 @@ static void efx_watchdog(struct net_device *net_dev)
efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
}
+static unsigned int efx_xdp_max_mtu(struct efx_nic *efx)
+{
+ /* The maximum MTU that we can fit in a single page, allowing for
+ * framing, overhead and XDP headroom.
+ */
+ int overhead = EFX_MAX_FRAME_LEN(0) + sizeof(struct efx_rx_page_state) +
+ efx->rx_prefix_size + efx->type->rx_buffer_padding +
+ efx->rx_ip_align + XDP_PACKET_HEADROOM;
+
+ return PAGE_SIZE - overhead;
+}
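Because XDP needs each packet to fit in a single page, the maximum MTU is PAGE_SIZE minus all per-buffer overhead. A sketch of the computation; every constant below is an assumed stand-in for the driver's per-NIC values:

#include <stdio.h>

#define PAGE_SIZE 4096
#define XDP_PACKET_HEADROOM 256
/* Assumed stand-ins for the driver's per-NIC values. */
#define FRAME_OVERHEAD 18	/* EFX_MAX_FRAME_LEN(0) stand-in */
#define RX_PAGE_STATE 16	/* sizeof(struct efx_rx_page_state) stand-in */
#define RX_PREFIX 14
#define RX_PADDING 0
#define RX_IP_ALIGN 2

int main(void)
{
	int overhead = FRAME_OVERHEAD + RX_PAGE_STATE + RX_PREFIX +
		       RX_PADDING + RX_IP_ALIGN + XDP_PACKET_HEADROOM;

	printf("XDP max MTU = %d - %d = %d\n",
	       PAGE_SIZE, overhead, PAGE_SIZE - overhead);
	return 0;
}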
/* Context: process, rtnl_lock() held. */
static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
@@ -2288,6 +2426,14 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
if (rc)
return rc;
+ if (rtnl_dereference(efx->xdp_prog) &&
+ new_mtu > efx_xdp_max_mtu(efx)) {
+ netif_err(efx, drv, efx->net_dev,
+ "Requested MTU of %d too big for XDP (max: %d)\n",
+ new_mtu, efx_xdp_max_mtu(efx));
+ return -EINVAL;
+ }
+
netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
efx_device_detach_sync(efx);
@@ -2489,8 +2635,65 @@ static const struct net_device_ops efx_netdev_ops = {
#endif
.ndo_udp_tunnel_add = efx_udp_tunnel_add,
.ndo_udp_tunnel_del = efx_udp_tunnel_del,
+ .ndo_xdp_xmit = efx_xdp_xmit,
+ .ndo_bpf = efx_xdp
};
+static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog)
+{
+ struct bpf_prog *old_prog;
+
+ if (efx->xdp_rxq_info_failed) {
+ netif_err(efx, drv, efx->net_dev,
+ "Unable to bind XDP program due to previous failure of rxq_info\n");
+ return -EINVAL;
+ }
+
+ if (prog && efx->net_dev->mtu > efx_xdp_max_mtu(efx)) {
+ netif_err(efx, drv, efx->net_dev,
+ "Unable to configure XDP with MTU of %d (max: %d)\n",
+ efx->net_dev->mtu, efx_xdp_max_mtu(efx));
+ return -EINVAL;
+ }
+
+ old_prog = rtnl_dereference(efx->xdp_prog);
+ rcu_assign_pointer(efx->xdp_prog, prog);
+ /* Release the reference that was originally passed by the caller. */
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ return 0;
+}
+
+/* Context: process, rtnl_lock() held. */
+static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ struct efx_nic *efx = netdev_priv(dev);
+ struct bpf_prog *xdp_prog;
+
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return efx_xdp_setup_prog(efx, xdp->prog);
+ case XDP_QUERY_PROG:
+ xdp_prog = rtnl_dereference(efx->xdp_prog);
+ xdp->prog_id = xdp_prog ? xdp_prog->aux->id : 0;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
+ u32 flags)
+{
+ struct efx_nic *efx = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ return efx_xdp_tx_buffers(efx, n, xdpfs, flags & XDP_XMIT_FLUSH);
+}
+
static void efx_update_name(struct efx_nic *efx)
{
strcpy(efx->name, efx->net_dev->name);
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 04fed7c06618..45c7ae4114ec 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -322,4 +322,7 @@ static inline bool efx_rwsem_assert_write_locked(struct rw_semaphore *sem)
return true;
}
+int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
+ bool flush);
+
#endif /* EFX_EFX_H */
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 86b965875540..8db593fb9699 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -83,6 +83,10 @@ static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_events),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_packets),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_drops),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_bad_drops),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_tx),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_redirect),
};
#define EFX_ETHTOOL_SW_STAT_COUNT ARRAY_SIZE(efx_sw_stat_desc)
@@ -399,6 +403,19 @@ static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings)
}
}
}
+ if (efx->xdp_tx_queue_count && efx->xdp_tx_queues) {
+ unsigned short xdp;
+
+ for (xdp = 0; xdp < efx->xdp_tx_queue_count; xdp++) {
+ n_stats++;
+ if (strings) {
+ snprintf(strings, ETH_GSTRING_LEN,
+ "tx-xdp-cpu-%hu.tx_packets", xdp);
+ strings += ETH_GSTRING_LEN;
+ }
+ }
+ }
+
return n_stats;
}
@@ -509,6 +526,14 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
data++;
}
}
+ if (efx->xdp_tx_queue_count && efx->xdp_tx_queues) {
+ int xdp;
+
+ for (xdp = 0; xdp < efx->xdp_tx_queue_count; xdp++) {
+ data[0] = efx->xdp_tx_queues[xdp]->tx_packets;
+ data++;
+ }
+ }
efx_ptp_update_stats(efx, data);
}
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 284a1b047ac2..04e49eac7327 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -27,6 +27,7 @@
#include <linux/i2c.h>
#include <linux/mtd/mtd.h>
#include <net/busy_poll.h>
+#include <net/xdp.h>
#include "enum.h"
#include "bitfield.h"
@@ -136,7 +137,8 @@ struct efx_special_buffer {
* struct efx_tx_buffer - buffer state for a TX descriptor
* @skb: When @flags & %EFX_TX_BUF_SKB, the associated socket buffer to be
* freed when descriptor completes
- * @option: When @flags & %EFX_TX_BUF_OPTION, a NIC-specific option descriptor.
+ * @xdpf: When @flags & %EFX_TX_BUF_XDP, the XDP frame information; its @data
+ * member is the associated buffer to drop a page reference on.
* @dma_addr: DMA address of the fragment.
* @flags: Flags for allocation and DMA mapping type
* @len: Length of this fragment.
@@ -146,7 +148,10 @@ struct efx_special_buffer {
* Only valid if @unmap_len != 0.
*/
struct efx_tx_buffer {
- const struct sk_buff *skb;
+ union {
+ const struct sk_buff *skb;
+ struct xdp_frame *xdpf;
+ };
union {
efx_qword_t option;
dma_addr_t dma_addr;
@@ -160,6 +165,7 @@ struct efx_tx_buffer {
#define EFX_TX_BUF_SKB 2 /* buffer is last part of skb */
#define EFX_TX_BUF_MAP_SINGLE 8 /* buffer was mapped with dma_map_single() */
#define EFX_TX_BUF_OPTION 0x10 /* empty buffer for option descriptor */
+#define EFX_TX_BUF_XDP 0x20 /* buffer was sent with XDP */
/**
* struct efx_tx_queue - An Efx TX queue
@@ -189,6 +195,7 @@ struct efx_tx_buffer {
* @piobuf_offset: Buffer offset to be specified in PIO descriptors
* @initialised: Has hardware queue been initialised?
* @timestamping: Is timestamping enabled for this channel?
+ * @xdp_tx: Is this an XDP tx queue?
* @handle_tso: TSO xmit preparation handler. Sets up the TSO metadata and
* may also map tx data, depending on the nature of the TSO implementation.
* @read_count: Current read pointer.
@@ -250,6 +257,7 @@ struct efx_tx_queue {
unsigned int piobuf_offset;
bool initialised;
bool timestamping;
+ bool xdp_tx;
/* Function pointers used in the fast path. */
int (*handle_tso)(struct efx_tx_queue*, struct sk_buff*, bool *);
@@ -363,6 +371,8 @@ struct efx_rx_page_state {
* refill was triggered.
* @recycle_count: RX buffer recycle counter.
* @slow_fill: Timer used to defer efx_nic_generate_fill_event().
+ * @xdp_rxq_info: XDP specific RX queue information.
+ * @xdp_rxq_info_valid: Is xdp_rxq_info valid data?
*/
struct efx_rx_queue {
struct efx_nic *efx;
@@ -394,6 +404,8 @@ struct efx_rx_queue {
unsigned int slow_fill_count;
/* Statistics to supplement MAC stats */
unsigned long rx_packets;
+ struct xdp_rxq_info xdp_rxq_info;
+ bool xdp_rxq_info_valid;
};
enum efx_sync_events_state {
@@ -441,6 +453,10 @@ enum efx_sync_events_state {
* lack of descriptors
* @n_rx_merge_events: Number of RX merged completion events
* @n_rx_merge_packets: Number of RX packets completed by merged events
+ * @n_rx_xdp_drops: Count of RX packets intentionally dropped due to XDP
+ * @n_rx_xdp_bad_drops: Count of RX packets dropped due to XDP errors
+ * @n_rx_xdp_tx: Count of RX packets retransmitted due to XDP
+ * @n_rx_xdp_redirect: Count of RX packets redirected to a different NIC by XDP
* @rx_pkt_n_frags: Number of fragments in next packet to be delivered by
* __efx_rx_packet(), or zero if there is none
* @rx_pkt_index: Ring index of first buffer for next packet to be delivered
@@ -494,6 +510,10 @@ struct efx_channel {
unsigned int n_rx_nodesc_trunc;
unsigned int n_rx_merge_events;
unsigned int n_rx_merge_packets;
+ unsigned int n_rx_xdp_drops;
+ unsigned int n_rx_xdp_bad_drops;
+ unsigned int n_rx_xdp_tx;
+ unsigned int n_rx_xdp_redirect;
unsigned int rx_pkt_n_frags;
unsigned int rx_pkt_index;
@@ -818,6 +838,8 @@ struct efx_async_filter_insertion {
* @msi_context: Context for each MSI
* @extra_channel_types: Types of extra (non-traffic) channels that
* should be allocated for this NIC
+ * @xdp_tx_queue_count: Number of entries in %xdp_tx_queues.
+ * @xdp_tx_queues: Array of pointers to tx queues used for XDP transmit.
* @rxq_entries: Size of receive queues requested by user.
* @txq_entries: Size of transmit queues requested by user.
* @txq_stop_thresh: TX queue fill level at or above which we stop it.
@@ -830,6 +852,9 @@ struct efx_async_filter_insertion {
* @n_rx_channels: Number of channels used for RX (= number of RX queues)
* @n_tx_channels: Number of channels used for TX
* @n_extra_tx_channels: Number of extra channels with TX queues
+ * @n_xdp_channels: Number of channels used for XDP TX
+ * @xdp_channel_offset: Offset of zeroth channel used for XDP TX.
+ * @xdp_tx_per_channel: Max number of TX queues on an XDP TX channel.
* @rx_ip_align: RX DMA address offset to have IP header aligned in
* in accordance with NET_IP_ALIGN
* @rx_dma_len: Current maximum RX DMA length
@@ -894,6 +919,7 @@ struct efx_async_filter_insertion {
* @loopback_mode: Loopback status
* @loopback_modes: Supported loopback mode bitmask
* @loopback_selftest: Offline self-test private state
+ * @xdp_prog: Current XDP programme for this interface
* @filter_sem: Filter table rw_semaphore, protects existence of @filter_state
* @filter_state: Architecture-dependent filter table state
* @rps_mutex: Protects RPS state of all channels
@@ -919,6 +945,8 @@ struct efx_async_filter_insertion {
* @ptp_data: PTP state data
* @ptp_warned: has this NIC seen and warned about unexpected PTP events?
* @vpd_sn: Serial number read from VPD
+ * @xdp_rxq_info_failed: Have any of the rx queues failed to initialise their
+ * xdp_rxq_info structures?
* @monitor_work: Hardware monitor workitem
* @biu_lock: BIU (bus interface unit) lock
* @last_irq_cpu: Last CPU to handle a possible test interrupt. This
@@ -966,6 +994,9 @@ struct efx_nic {
const struct efx_channel_type *
extra_channel_type[EFX_MAX_EXTRA_CHANNELS];
+ unsigned int xdp_tx_queue_count;
+ struct efx_tx_queue **xdp_tx_queues;
+
unsigned rxq_entries;
unsigned txq_entries;
unsigned int txq_stop_thresh;
@@ -984,6 +1015,9 @@ struct efx_nic {
unsigned tx_channel_offset;
unsigned n_tx_channels;
unsigned n_extra_tx_channels;
+ unsigned int n_xdp_channels;
+ unsigned int xdp_channel_offset;
+ unsigned int xdp_tx_per_channel;
unsigned int rx_ip_align;
unsigned int rx_dma_len;
unsigned int rx_buffer_order;
@@ -1053,6 +1087,10 @@ struct efx_nic {
u64 loopback_modes;
void *loopback_selftest;
+ /* We access loopback_selftest immediately before running XDP,
+ * so we want them next to each other.
+ */
+ struct bpf_prog __rcu *xdp_prog;
struct rw_semaphore filter_sem;
void *filter_state;
@@ -1082,6 +1120,7 @@ struct efx_nic {
bool ptp_warned;
char *vpd_sn;
+ bool xdp_rxq_info_failed;
/* The following fields may be written more often */
@@ -1473,10 +1512,24 @@ efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type];
}
+static inline struct efx_channel *
+efx_get_xdp_channel(struct efx_nic *efx, unsigned int index)
+{
+ EFX_WARN_ON_ONCE_PARANOID(index >= efx->n_xdp_channels);
+ return efx->channel[efx->xdp_channel_offset + index];
+}
+
+static inline bool efx_channel_is_xdp_tx(struct efx_channel *channel)
+{
+ return channel->channel - channel->efx->xdp_channel_offset <
+ channel->efx->n_xdp_channels;
+}
+
static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
{
- return channel->type && channel->type->want_txqs &&
- channel->type->want_txqs(channel);
+ return efx_channel_is_xdp_tx(channel) ||
+ (channel->type && channel->type->want_txqs &&
+ channel->type->want_txqs(channel));
}
static inline struct efx_tx_queue *
@@ -1500,7 +1553,8 @@ static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue)
else \
for (_tx_queue = (_channel)->tx_queue; \
_tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES && \
- efx_tx_queue_used(_tx_queue); \
+ (efx_tx_queue_used(_tx_queue) || \
+ efx_channel_is_xdp_tx(_channel)); \
_tx_queue++)
/* Iterate over all possible TX queues belonging to a channel */
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 85ec07f5a674..a7d9841105d8 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -17,6 +17,8 @@
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/checksum.h>
+#include <net/xdp.h>
+#include <linux/bpf_trace.h>
#include "net_driver.h"
#include "efx.h"
#include "filter.h"
@@ -27,6 +29,9 @@
/* Preferred number of descriptors to fill at once */
#define EFX_RX_PREFERRED_BATCH 8U
+/* Maximum rx prefix used by any architecture. */
+#define EFX_MAX_RX_PREFIX_SIZE 16
+
/* Number of RX buffers to recycle pages for. When creating the RX page recycle
* ring, this number is divided by the number of buffers per page to calculate
* the number of pages to store in the RX page recycle ring.
@@ -95,7 +100,7 @@ void efx_rx_config_page_split(struct efx_nic *efx)
EFX_RX_BUF_ALIGNMENT);
efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
- efx->rx_page_buf_step);
+ (efx->rx_page_buf_step + XDP_PACKET_HEADROOM));
efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
efx->rx_bufs_per_page;
efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
@@ -185,6 +190,9 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
page_offset = sizeof(struct efx_rx_page_state);
do {
+ page_offset += XDP_PACKET_HEADROOM;
+ dma_addr += XDP_PACKET_HEADROOM;
+
index = rx_queue->added_count & rx_queue->ptr_mask;
rx_buf = efx_rx_buffer(rx_queue, index);
rx_buf->dma_addr = dma_addr + efx->rx_ip_align;
@@ -635,6 +643,123 @@ static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
netif_receive_skb(skb);
}
+/**
+ * efx_do_xdp - perform XDP processing on a received packet
+ *
+ * Returns true if the packet should still be delivered.
+ */
+static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf, u8 **ehp)
+{
+ u8 rx_prefix[EFX_MAX_RX_PREFIX_SIZE];
+ struct efx_rx_queue *rx_queue;
+ struct bpf_prog *xdp_prog;
+ struct xdp_frame *xdpf;
+ struct xdp_buff xdp;
+ u32 xdp_act;
+ s16 offset;
+ int err;
+
+ rcu_read_lock();
+ xdp_prog = rcu_dereference(efx->xdp_prog);
+ if (!xdp_prog) {
+ rcu_read_unlock();
+ return true;
+ }
+
+ rx_queue = efx_channel_get_rx_queue(channel);
+
+ if (unlikely(channel->rx_pkt_n_frags > 1)) {
+ /* We can't do XDP on fragmented packets - drop. */
+ rcu_read_unlock();
+ efx_free_rx_buffers(rx_queue, rx_buf,
+ channel->rx_pkt_n_frags);
+ if (net_ratelimit())
+ netif_err(efx, rx_err, efx->net_dev,
+ "XDP is not possible with multiple receive fragments (%d)\n",
+ channel->rx_pkt_n_frags);
+ channel->n_rx_xdp_bad_drops++;
+ return false;
+ }
+
+ dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr,
+ rx_buf->len, DMA_FROM_DEVICE);
+
+ /* Save the rx prefix. */
+ EFX_WARN_ON_PARANOID(efx->rx_prefix_size > EFX_MAX_RX_PREFIX_SIZE);
+ memcpy(rx_prefix, *ehp - efx->rx_prefix_size,
+ efx->rx_prefix_size);
+
+ xdp.data = *ehp;
+ xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
+
+ /* No support yet for XDP metadata */
+ xdp_set_data_meta_invalid(&xdp);
+ xdp.data_end = xdp.data + rx_buf->len;
+ xdp.rxq = &rx_queue->xdp_rxq_info;
+
+ xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
+ rcu_read_unlock();
+
+ offset = (u8 *)xdp.data - *ehp;
+
+ switch (xdp_act) {
+ case XDP_PASS:
+ /* Fix up rx prefix. */
+ if (offset) {
+ *ehp += offset;
+ rx_buf->page_offset += offset;
+ rx_buf->len -= offset;
+ memcpy(*ehp - efx->rx_prefix_size, rx_prefix,
+ efx->rx_prefix_size);
+ }
+ break;
+
+ case XDP_TX:
+ /* Buffer ownership passes to tx on success. */
+ xdpf = convert_to_xdp_frame(&xdp);
+ err = efx_xdp_tx_buffers(efx, 1, &xdpf, true);
+ if (unlikely(err != 1)) {
+ efx_free_rx_buffers(rx_queue, rx_buf, 1);
+ if (net_ratelimit())
+ netif_err(efx, rx_err, efx->net_dev,
+ "XDP TX failed (%d)\n", err);
+ channel->n_rx_xdp_bad_drops++;
+ } else {
+ channel->n_rx_xdp_tx++;
+ }
+ break;
+
+ case XDP_REDIRECT:
+ err = xdp_do_redirect(efx->net_dev, &xdp, xdp_prog);
+ if (unlikely(err)) {
+ efx_free_rx_buffers(rx_queue, rx_buf, 1);
+ if (net_ratelimit())
+ netif_err(efx, rx_err, efx->net_dev,
+ "XDP redirect failed (%d)\n", err);
+ channel->n_rx_xdp_bad_drops++;
+ } else {
+ channel->n_rx_xdp_redirect++;
+ }
+ break;
+
+ default:
+ bpf_warn_invalid_xdp_action(xdp_act);
+ efx_free_rx_buffers(rx_queue, rx_buf, 1);
+ channel->n_rx_xdp_bad_drops++;
+ break;
+
+ case XDP_ABORTED:
+ trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
+ /* Fall through */
+ case XDP_DROP:
+ efx_free_rx_buffers(rx_queue, rx_buf, 1);
+ channel->n_rx_xdp_drops++;
+ break;
+ }
+
+ return xdp_act == XDP_PASS;
+}
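The verdict switch above executes whatever an attached XDP program returns. For reference, a minimal XDP program in restricted C that produces two of those verdicts; it assumes libbpf's bpf_helpers.h and builds with clang -O2 -target bpf:

/* Minimal XDP program whose return codes exercise the verdict switch in
 * efx_do_xdp(). Build with: clang -O2 -target bpf -c xdp_min.c
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_min(struct xdp_md *ctx)
{
	unsigned char *data = (void *)(long)ctx->data;
	unsigned char *data_end = (void *)(long)ctx->data_end;

	/* Drop runt frames, pass everything else up the stack. */
	if (data + 14 > data_end)	/* shorter than an Ethernet header */
		return XDP_DROP;
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";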
+
/* Handle a received packet. Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel)
{
@@ -663,6 +788,9 @@ void __efx_rx_packet(struct efx_channel *channel)
goto out;
}
+ if (!efx_do_xdp(efx, channel, rx_buf, &eh))
+ goto out;
+
if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;
@@ -731,6 +859,7 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
struct efx_nic *efx = rx_queue->efx;
unsigned int max_fill, trigger, max_trigger;
+ int rc = 0;
netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
"initialising RX queue %d\n", efx_rx_queue_index(rx_queue));
@@ -764,6 +893,19 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
rx_queue->fast_fill_trigger = trigger;
rx_queue->refill_enabled = true;
+ /* Initialise XDP queue information */
+ rc = xdp_rxq_info_reg(&rx_queue->xdp_rxq_info, efx->net_dev,
+ rx_queue->core_index);
+
+ if (rc) {
+ netif_err(efx, rx_err, efx->net_dev,
+ "Failure to initialise XDP queue information rc=%d\n",
+ rc);
+ efx->xdp_rxq_info_failed = true;
+ } else {
+ rx_queue->xdp_rxq_info_valid = true;
+ }
+
/* Set up RX descriptor ring */
efx_nic_init_rx(rx_queue);
}
@@ -805,6 +947,11 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
}
kfree(rx_queue->page_ring);
rx_queue->page_ring = NULL;
+
+ if (rx_queue->xdp_rxq_info_valid)
+ xdp_rxq_info_unreg(&rx_queue->xdp_rxq_info);
+
+ rx_queue->xdp_rxq_info_valid = false;
}
void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 65e81ec1b314..00c1c4402451 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -95,6 +95,8 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
"TX queue %d transmission id %x complete\n",
tx_queue->queue, tx_queue->read_count);
+ } else if (buffer->flags & EFX_TX_BUF_XDP) {
+ xdp_return_frame_rx_napi(buffer->xdpf);
}
buffer->len = 0;
@@ -597,6 +599,94 @@ err:
return NETDEV_TX_OK;
}
+static void efx_xdp_return_frames(int n, struct xdp_frame **xdpfs)
+{
+ int i;
+
+ for (i = 0; i < n; i++)
+ xdp_return_frame_rx_napi(xdpfs[i]);
+}
+
+/* Transmit a packet from an XDP buffer
+ *
+ * Returns number of packets sent on success, error code otherwise.
+ * Runs in NAPI context, either in our own poll (for XDP TX) or in another
+ * NIC's poll (for XDP redirect).
+ */
+int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
+ bool flush)
+{
+ struct efx_tx_buffer *tx_buffer;
+ struct efx_tx_queue *tx_queue;
+ struct xdp_frame *xdpf;
+ dma_addr_t dma_addr;
+ unsigned int len;
+ int space;
+ int cpu;
+ int i;
+
+ cpu = raw_smp_processor_id();
+
+ if (!efx->xdp_tx_queue_count ||
+ unlikely(cpu >= efx->xdp_tx_queue_count))
+ return -EINVAL;
+
+ tx_queue = efx->xdp_tx_queues[cpu];
+ if (unlikely(!tx_queue))
+ return -EINVAL;
+
+ if (unlikely(n && !xdpfs))
+ return -EINVAL;
+
+ if (!n)
+ return 0;
+
+ /* Check for available space. We should never need multiple
+ * descriptors per frame.
+ */
+ space = efx->txq_entries +
+ tx_queue->read_count - tx_queue->insert_count;
+
+ for (i = 0; i < n; i++) {
+ xdpf = xdpfs[i];
+
+ if (i >= space)
+ break;
+
+ /* We'll want a descriptor for this tx. */
+ prefetchw(__efx_tx_queue_get_insert_buffer(tx_queue));
+
+ len = xdpf->len;
+
+ /* Map for DMA. */
+ dma_addr = dma_map_single(&efx->pci_dev->dev,
+ xdpf->data, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&efx->pci_dev->dev, dma_addr))
+ break;
+
+ /* Create descriptor and set up for unmapping DMA. */
+ tx_buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);
+ tx_buffer->xdpf = xdpf;
+ tx_buffer->flags = EFX_TX_BUF_XDP |
+ EFX_TX_BUF_MAP_SINGLE;
+ tx_buffer->dma_offset = 0;
+ tx_buffer->unmap_len = len;
+ tx_queue->tx_packets++;
+ }
+
+ /* Pass mapped frames to hardware. */
+ if (flush && i > 0)
+ efx_nic_push_buffers(tx_queue);
+
+ if (i == 0)
+ return -EIO;
+
+ efx_xdp_return_frames(n - i, xdpfs + i);
+
+ return i;
+}
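Free space in the TX ring is derived from free-running counters, entries + read_count - insert_count, which stays correct across unsigned wraparound. A quick demonstration:

#include <stdio.h>

/* Free-running ring counters, as in efx_xdp_tx_buffers(): the space
 * computation remains correct even after the unsigned counters wrap.
 */
static unsigned int ring_space(unsigned int entries,
			       unsigned int read_count,
			       unsigned int insert_count)
{
	return entries + read_count - insert_count;
}

int main(void)
{
	unsigned int entries = 512;

	printf("fresh ring: %u\n", ring_space(entries, 0, 0));
	printf("100 queued: %u\n", ring_space(entries, 0, 100));
	/* Both counters near the UINT_MAX wraparound point. */
	printf("wrapped:    %u\n",
	       ring_space(entries, 0xfffffff0u, 0xfffffff0u + 100));
	return 0;
}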
+
/* Remove packets from the TX queue
*
* This removes packets from the TX queue, up to and including the
@@ -857,6 +947,8 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
tx_queue->completed_timestamp_major = 0;
tx_queue->completed_timestamp_minor = 0;
+ tx_queue->xdp_tx = efx_channel_is_xdp_tx(tx_queue->channel);
+
/* Set up default function pointers. These may get replaced by
* efx_nic_init_tx() based off NIC/queue capabilities.
*/
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index deb636d653f3..d242906ae233 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -48,7 +48,7 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
-#include <linux/dma-direct.h>
+#include <linux/dma-mapping.h>
#include <net/ip.h>
@@ -89,6 +89,7 @@ struct ioc3_private {
struct device *dma_dev;
u32 *ssram;
unsigned long *rxr; /* pointer to receiver ring */
+ void *tx_ring;
struct ioc3_etxd *txr;
dma_addr_t rxr_dma;
dma_addr_t txr_dma;
@@ -1173,26 +1174,14 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct ioc3 *ioc3;
unsigned long ioc3_base, ioc3_size;
u32 vendor, model, rev;
- int err, pci_using_dac;
+ int err;
/* Configure DMA attributes. */
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
- if (!err) {
- pci_using_dac = 1;
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- if (err < 0) {
- pr_err("%s: Unable to obtain 64 bit DMA for consistent allocations\n",
- pci_name(pdev));
- goto out;
- }
- } else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (err) {
- pr_err("%s: No usable DMA configuration, aborting.\n",
- pci_name(pdev));
- goto out;
- }
- pci_using_dac = 0;
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ pr_err("%s: No usable DMA configuration, aborting.\n",
+ pci_name(pdev));
+ goto out;
}
if (pci_enable_device(pdev))
@@ -1204,9 +1193,6 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_disable;
}
- if (pci_using_dac)
- dev->features |= NETIF_F_HIGHDMA;
-
err = pci_request_regions(pdev, "ioc3");
if (err)
goto out_free;
@@ -1242,8 +1228,8 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ioc3_stop(ip);
/* Allocate rx ring. 4kb = 512 entries, must be 4kb aligned */
- ip->rxr = dma_direct_alloc_pages(ip->dma_dev, RX_RING_SIZE,
- &ip->rxr_dma, GFP_ATOMIC, 0);
+ ip->rxr = dma_alloc_coherent(ip->dma_dev, RX_RING_SIZE, &ip->rxr_dma,
+ GFP_KERNEL);
if (!ip->rxr) {
pr_err("ioc3-eth: rx ring allocation failed\n");
err = -ENOMEM;
@@ -1251,14 +1237,16 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* Allocate tx rings. 16kb = 128 bufs, must be 16kb aligned */
- ip->txr = dma_direct_alloc_pages(ip->dma_dev, TX_RING_SIZE,
- &ip->txr_dma,
- GFP_KERNEL | __GFP_ZERO, 0);
- if (!ip->txr) {
+ ip->tx_ring = dma_alloc_coherent(ip->dma_dev, TX_RING_SIZE + SZ_16K - 1,
+ &ip->txr_dma, GFP_KERNEL);
+ if (!ip->tx_ring) {
pr_err("ioc3-eth: tx ring allocation failed\n");
err = -ENOMEM;
goto out_stop;
}
+ /* Align TX ring */
+ ip->txr = PTR_ALIGN(ip->tx_ring, SZ_16K);
+ ip->txr_dma = ALIGN(ip->txr_dma, SZ_16K);
ioc3_init(dev);
@@ -1288,7 +1276,7 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->netdev_ops = &ioc3_netdev_ops;
dev->ethtool_ops = &ioc3_ethtool_ops;
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
- dev->features = NETIF_F_IP_CSUM;
+ dev->features = NETIF_F_IP_CSUM | NETIF_F_HIGHDMA;
sw_physid1 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID1);
sw_physid2 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID2);
@@ -1313,11 +1301,11 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
out_stop:
del_timer_sync(&ip->ioc3_timer);
if (ip->rxr)
- dma_direct_free_pages(ip->dma_dev, RX_RING_SIZE, ip->rxr,
- ip->rxr_dma, 0);
- if (ip->txr)
- dma_direct_free_pages(ip->dma_dev, TX_RING_SIZE, ip->txr,
- ip->txr_dma, 0);
+ dma_free_coherent(ip->dma_dev, RX_RING_SIZE, ip->rxr,
+ ip->rxr_dma);
+ if (ip->tx_ring)
+ dma_free_coherent(ip->dma_dev, TX_RING_SIZE, ip->tx_ring,
+ ip->txr_dma);
out_res:
pci_release_regions(pdev);
out_free:
@@ -1335,10 +1323,8 @@ static void ioc3_remove_one(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
struct ioc3_private *ip = netdev_priv(dev);
- dma_direct_free_pages(ip->dma_dev, RX_RING_SIZE, ip->rxr,
- ip->rxr_dma, 0);
- dma_direct_free_pages(ip->dma_dev, TX_RING_SIZE, ip->txr,
- ip->txr_dma, 0);
+ dma_free_coherent(ip->dma_dev, RX_RING_SIZE, ip->rxr, ip->rxr_dma);
+ dma_free_coherent(ip->dma_dev, TX_RING_SIZE, ip->tx_ring, ip->txr_dma);
unregister_netdev(dev);
del_timer_sync(&ip->ioc3_timer);
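The ioc3 TX ring conversion above cannot ask dma_alloc_coherent() for a specific 16 KiB alignment, so it over-allocates by SZ_16K - 1 and aligns the CPU and DMA addresses separately, keeping the raw pointer for the free path. A sketch of that over-allocate-and-align pattern under assumed my_ring names, retaining both unaligned values for dma_free_coherent():

#include <linux/dma-mapping.h>
#include <linux/kernel.h>	/* ALIGN(), PTR_ALIGN() */
#include <linux/sizes.h>

#define MY_RING_BYTES	SZ_16K
#define MY_RING_ALIGN	SZ_16K

struct my_ring {
	void *raw;		/* as returned by dma_alloc_coherent() */
	dma_addr_t raw_dma;
	void *desc;		/* aligned view handed to the hardware */
	dma_addr_t desc_dma;
};

static int my_ring_alloc(struct device *dev, struct my_ring *r)
{
	r->raw = dma_alloc_coherent(dev, MY_RING_BYTES + MY_RING_ALIGN - 1,
				    &r->raw_dma, GFP_KERNEL);
	if (!r->raw)
		return -ENOMEM;

	/* CPU and DMA views start at the same offset, so aligning them
	 * independently keeps both pointing at the same descriptor.
	 */
	r->desc = PTR_ALIGN(r->raw, MY_RING_ALIGN);
	r->desc_dma = ALIGN(r->raw_dma, MY_RING_ALIGN);
	return 0;
}

static void my_ring_free(struct device *dev, struct my_ring *r)
{
	/* Free with the original, unaligned pair. */
	dma_free_coherent(dev, MY_RING_BYTES + MY_RING_ALIGN - 1,
			  r->raw, r->raw_dma);
}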
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index 55db7fbd43cc..869a498e3b5e 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -252,7 +252,6 @@
#define NETSEC_XDP_CONSUMED BIT(0)
#define NETSEC_XDP_TX BIT(1)
#define NETSEC_XDP_REDIR BIT(2)
-#define NETSEC_XDP_RX_OK (NETSEC_XDP_PASS | NETSEC_XDP_TX | NETSEC_XDP_REDIR)
enum ring_id {
NETSEC_RING_TX = 0,
@@ -282,7 +281,6 @@ struct netsec_desc_ring {
void *vaddr;
u16 head, tail;
u16 xdp_xmit; /* netsec_xdp_xmit packets */
- bool is_xdp;
struct page_pool *page_pool;
struct xdp_rxq_info xdp_rxq;
spinlock_t lock; /* XDP tx queue locking */
@@ -634,8 +632,7 @@ static bool netsec_clean_tx_dring(struct netsec_priv *priv)
unsigned int bytes;
int cnt = 0;
- if (dring->is_xdp)
- spin_lock(&dring->lock);
+ spin_lock(&dring->lock);
bytes = 0;
entry = dring->vaddr + DESC_SZ * tail;
@@ -663,6 +660,7 @@ static bool netsec_clean_tx_dring(struct netsec_priv *priv)
bytes += desc->skb->len;
dev_kfree_skb(desc->skb);
} else {
+ bytes += desc->xdpf->len;
xdp_return_frame(desc->xdpf);
}
next:
@@ -682,8 +680,8 @@ next:
entry = dring->vaddr + DESC_SZ * tail;
cnt++;
}
- if (dring->is_xdp)
- spin_unlock(&dring->lock);
+
+ spin_unlock(&dring->lock);
if (!cnt)
return false;
@@ -799,9 +797,6 @@ static void netsec_set_tx_de(struct netsec_priv *priv,
de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
de->buf_len_info = (tx_ctrl->tcp_seg_len << 16) | desc->len;
de->attr = attr;
- /* under spin_lock if using XDP */
- if (!dring->is_xdp)
- dma_wmb();
dring->desc[idx] = *desc;
if (desc->buf_type == TYPE_NETSEC_SKB)
@@ -852,8 +847,8 @@ static u32 netsec_xdp_queue_one(struct netsec_priv *priv,
enum dma_data_direction dma_dir =
page_pool_get_dma_dir(rx_ring->page_pool);
- dma_handle = page_pool_get_dma_addr(page) +
- NETSEC_RXBUF_HEADROOM;
+ dma_handle = page_pool_get_dma_addr(page) + xdpf->headroom +
+ sizeof(*xdpf);
dma_sync_single_for_device(priv->dev, dma_handle, xdpf->len,
dma_dir);
tx_desc.buf_type = TYPE_NETSEC_XDP_TX;
@@ -863,6 +858,7 @@ static u32 netsec_xdp_queue_one(struct netsec_priv *priv,
tx_desc.addr = xdpf->data;
tx_desc.len = xdpf->len;
+ netdev_sent_queue(priv->ndev, xdpf->len);
netsec_set_tx_de(priv, tx_ring, &tx_ctrl, &tx_desc, xdpf);
return NETSEC_XDP_TX;
@@ -1035,7 +1031,7 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
next:
if ((skb && napi_gro_receive(&priv->napi, skb) != GRO_DROP) ||
- xdp_result & NETSEC_XDP_RX_OK) {
+ xdp_result) {
ndev->stats.rx_packets++;
ndev->stats.rx_bytes += xdp.data_end - xdp.data;
}
@@ -1123,12 +1119,10 @@ static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
u16 tso_seg_len = 0;
int filled;
- if (dring->is_xdp)
- spin_lock_bh(&dring->lock);
+ spin_lock_bh(&dring->lock);
filled = netsec_desc_used(dring);
if (netsec_check_stop_tx(priv, filled)) {
- if (dring->is_xdp)
- spin_unlock_bh(&dring->lock);
+ spin_unlock_bh(&dring->lock);
net_warn_ratelimited("%s %s Tx queue full\n",
dev_name(priv->dev), ndev->name);
return NETDEV_TX_BUSY;
@@ -1161,8 +1155,7 @@ static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
tx_desc.dma_addr = dma_map_single(priv->dev, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
if (dma_mapping_error(priv->dev, tx_desc.dma_addr)) {
- if (dring->is_xdp)
- spin_unlock_bh(&dring->lock);
+ spin_unlock_bh(&dring->lock);
netif_err(priv, drv, priv->ndev,
"%s: DMA mapping failed\n", __func__);
ndev->stats.tx_dropped++;
@@ -1177,8 +1170,7 @@ static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
netdev_sent_queue(priv->ndev, skb->len);
netsec_set_tx_de(priv, dring, &tx_ctrl, &tx_desc, skb);
- if (dring->is_xdp)
- spin_unlock_bh(&dring->lock);
+ spin_unlock_bh(&dring->lock);
netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, 1); /* submit another tx */
return NETDEV_TX_OK;
@@ -1262,7 +1254,6 @@ err:
static void netsec_setup_tx_dring(struct netsec_priv *priv)
{
struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
- struct bpf_prog *xdp_prog = READ_ONCE(priv->xdp_prog);
int i;
for (i = 0; i < DESC_NUM; i++) {
@@ -1275,12 +1266,6 @@ static void netsec_setup_tx_dring(struct netsec_priv *priv)
*/
de->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD;
}
-
- if (xdp_prog)
- dring->is_xdp = true;
- else
- dring->is_xdp = false;
-
}
static int netsec_setup_rx_dring(struct netsec_priv *priv)
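Alongside the unconditional ring locking, the netsec hunks add the missing byte count for XDP frames in the TX clean path; BQL only stays balanced if completion bytes match what netdev_sent_queue() reported at submit time. A hedged sketch of a clean loop covering both buffer types (my_* names are assumptions):

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/xdp.h>

struct my_tx_desc {
	struct sk_buff *skb;		/* exactly one of these is set */
	struct xdp_frame *xdpf;
};

static void my_clean_tx(struct net_device *ndev, struct my_tx_desc *desc,
			unsigned int n)
{
	unsigned int i, pkts = 0, bytes = 0;

	for (i = 0; i < n; i++, desc++) {
		if (desc->skb) {
			bytes += desc->skb->len;	/* skb completion */
			dev_kfree_skb(desc->skb);
		} else {
			bytes += desc->xdpf->len;	/* the netsec fix */
			xdp_return_frame(desc->xdpf);
		}
		pkts++;
	}

	/* Must balance the netdev_sent_queue() calls made at submit. */
	netdev_completed_queue(ndev, pkts, bytes);
}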
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index 6e984d5a729f..f7e927ad67fa 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -1565,10 +1565,10 @@ static int ave_probe(struct platform_device *pdev)
return -EINVAL;
np = dev->of_node;
- phy_mode = of_get_phy_mode(np);
- if ((int)phy_mode < 0) {
+ ret = of_get_phy_mode(np, &phy_mode);
+ if (ret) {
dev_err(dev, "phy-mode not found\n");
- return -EINVAL;
+ return ret;
}
irq = platform_get_irq(pdev, 0);
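Most of the remaining hunks in this series track one API change: of_get_phy_mode() now returns 0 or a negative errno and stores the mode through a phy_interface_t pointer, rather than encoding the mode in a signed int. A sketch of the new convention with an optional-property fallback; treating -ENODEV as "property absent" mirrors the sti and sunxi callers below and is an assumption here:

#include <linux/of_net.h>
#include <linux/phy.h>

static int my_parse_phy_mode(struct device_node *np,
			     phy_interface_t *mode)
{
	int err = of_get_phy_mode(np, mode);

	/* Assumed fallback: treat an absent property as a default,
	 * as the sti/sunxi hunks below do via -ENODEV.
	 */
	if (err == -ENODEV) {
		*mode = PHY_INTERFACE_MODE_RGMII;
		return 0;
	}
	return err;
}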
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
index 527f93320a5a..d0d2d0fc5f0a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
@@ -61,9 +61,10 @@ static void anarion_gmac_exit(struct platform_device *pdev, void *priv)
static struct anarion_gmac *anarion_config_dt(struct platform_device *pdev)
{
- int phy_mode;
- void __iomem *ctl_block;
struct anarion_gmac *gmac;
+ phy_interface_t phy_mode;
+ void __iomem *ctl_block;
+ int err;
ctl_block = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(ctl_block)) {
@@ -78,7 +79,10 @@ static struct anarion_gmac *anarion_config_dt(struct platform_device *pdev)
gmac->ctl_block = (uintptr_t)ctl_block;
- phy_mode = of_get_phy_mode(pdev->dev.of_node);
+ err = of_get_phy_mode(pdev->dev.of_node, &phy_mode);
+ if (err)
+ return ERR_PTR(err);
+
switch (phy_mode) {
case PHY_INTERFACE_MODE_RGMII: /* Fall through */
case PHY_INTERFACE_MODE_RGMII_ID: /* Fall through */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index 0d21082ceb93..6ae13dc19510 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -189,9 +189,10 @@ static int ipq806x_gmac_set_speed(struct ipq806x_gmac *gmac, unsigned int speed)
static int ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac)
{
struct device *dev = &gmac->pdev->dev;
+ int ret;
- gmac->phy_mode = of_get_phy_mode(dev->of_node);
- if ((int)gmac->phy_mode < 0) {
+ ret = of_get_phy_mode(dev->of_node, &gmac->phy_mode);
+ if (ret) {
dev_err(dev, "missing phy mode property\n");
return -EINVAL;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
index 79f2ee37afed..bdb80421acac 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
@@ -54,7 +54,7 @@ struct mediatek_dwmac_plat_data {
struct device_node *np;
struct regmap *peri_regmap;
struct device *dev;
- int phy_mode;
+ phy_interface_t phy_mode;
bool rmii_rxc;
};
@@ -130,6 +130,31 @@ static void mt2712_delay_ps2stage(struct mediatek_dwmac_plat_data *plat)
}
}
+static void mt2712_delay_stage2ps(struct mediatek_dwmac_plat_data *plat)
+{
+ struct mac_delay_struct *mac_delay = &plat->mac_delay;
+
+ switch (plat->phy_mode) {
+ case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_RMII:
+ /* 550ps per stage for MII/RMII */
+ mac_delay->tx_delay *= 550;
+ mac_delay->rx_delay *= 550;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ /* 170ps per stage for RGMII */
+ mac_delay->tx_delay *= 170;
+ mac_delay->rx_delay *= 170;
+ break;
+ default:
+ dev_err(plat->dev, "phy interface not supported\n");
+ break;
+ }
+}
+
static int mt2712_set_delay(struct mediatek_dwmac_plat_data *plat)
{
struct mac_delay_struct *mac_delay = &plat->mac_delay;
@@ -199,6 +224,8 @@ static int mt2712_set_delay(struct mediatek_dwmac_plat_data *plat)
regmap_write(plat->peri_regmap, PERI_ETH_DLY, delay_val);
regmap_write(plat->peri_regmap, PERI_ETH_DLY_FINE, fine_val);
+ mt2712_delay_stage2ps(plat);
+
return 0;
}
@@ -216,6 +243,7 @@ static int mediatek_dwmac_config_dt(struct mediatek_dwmac_plat_data *plat)
{
struct mac_delay_struct *mac_delay = &plat->mac_delay;
u32 tx_delay_ps, rx_delay_ps;
+ int err;
plat->peri_regmap = syscon_regmap_lookup_by_phandle(plat->np, "mediatek,pericfg");
if (IS_ERR(plat->peri_regmap)) {
@@ -223,10 +251,10 @@ static int mediatek_dwmac_config_dt(struct mediatek_dwmac_plat_data *plat)
return PTR_ERR(plat->peri_regmap);
}
- plat->phy_mode = of_get_phy_mode(plat->np);
- if (plat->phy_mode < 0) {
+ err = of_get_phy_mode(plat->np, &plat->phy_mode);
+ if (err) {
dev_err(plat->dev, "not find phy-mode\n");
- return -EINVAL;
+ return err;
}
if (!of_property_read_u32(plat->np, "mediatek,tx-delay-ps", &tx_delay_ps)) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index 306da8f6b7d5..bd6c01004913 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -338,10 +338,9 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
}
dwmac->dev = &pdev->dev;
- dwmac->phy_mode = of_get_phy_mode(pdev->dev.of_node);
- if ((int)dwmac->phy_mode < 0) {
+ ret = of_get_phy_mode(pdev->dev.of_node, &dwmac->phy_mode);
+ if (ret) {
dev_err(&pdev->dev, "missing phy-mode property\n");
- ret = -EINVAL;
goto err_remove_config_dt;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index e2e469c37a4d..dc50ba13a746 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -37,7 +37,7 @@ struct rk_gmac_ops {
struct rk_priv_data {
struct platform_device *pdev;
- int phy_iface;
+ phy_interface_t phy_iface;
struct regulator *regulator;
bool suspended;
const struct rk_gmac_ops *ops;
@@ -1224,7 +1224,7 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
if (!bsp_priv)
return ERR_PTR(-ENOMEM);
- bsp_priv->phy_iface = of_get_phy_mode(dev->of_node);
+ of_get_phy_mode(dev->of_node, &bsp_priv->phy_iface);
bsp_priv->ops = ops;
bsp_priv->regulator = devm_regulator_get_optional(dev, "phy");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
index e9fd661f7995..e1b63df6f96f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -116,7 +116,7 @@
#define ETH_PHY_SEL_MII 0x0
struct sti_dwmac {
- int interface; /* MII interface */
+ phy_interface_t interface; /* MII interface */
bool ext_phyclk; /* Clock from external PHY */
u32 tx_retime_src; /* TXCLK Retiming*/
struct clk *clk; /* PHY clock */
@@ -269,7 +269,12 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
return err;
}
- dwmac->interface = of_get_phy_mode(np);
+ err = of_get_phy_mode(np, &dwmac->interface);
+ if (err && err != -ENODEV) {
+ dev_err(dev, "Can't get phy-mode\n");
+ return err;
+ }
+
dwmac->regmap = regmap;
dwmac->gmac_en = of_property_read_bool(np, "st,gmac_en");
dwmac->ext_phyclk = of_property_read_bool(np, "st,ext-phyclk");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
index 4ef041bdf6a1..595af2ec89fb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
@@ -175,7 +175,7 @@ static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat)
{
struct stm32_dwmac *dwmac = plat_dat->bsp_priv;
u32 reg = dwmac->mode_reg;
- int val, ret;
+ int val;
switch (plat_dat->interface) {
case PHY_INTERFACE_MODE_MII:
@@ -211,8 +211,8 @@ static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat)
}
/* Need to update PMCCLRR (clear register) */
- ret = regmap_write(dwmac->regmap, reg + SYSCFG_PMCCLRR_OFFSET,
- dwmac->ops->syscfg_eth_mask);
+ regmap_write(dwmac->regmap, reg + SYSCFG_PMCCLRR_OFFSET,
+ dwmac->ops->syscfg_eth_mask);
/* Update PMCSETR (set register) */
return regmap_update_bits(dwmac->regmap, reg,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index f97a4096f8fc..eefb06d918c8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -651,7 +651,8 @@ static void sun8i_dwmac_set_filter(struct mac_device_info *hw,
}
}
} else {
- netdev_info(dev, "Too many address, switching to promiscuous\n");
+ if (!(readl(ioaddr + EMAC_RX_FRM_FLT) & EMAC_FRM_FLT_RXALL))
+ netdev_info(dev, "Too many addresses, switching to promiscuous\n");
v = EMAC_FRM_FLT_RXALL;
}
@@ -1104,6 +1105,7 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
struct stmmac_resources stmmac_res;
struct sunxi_priv_data *gmac;
struct device *dev = &pdev->dev;
+ phy_interface_t interface;
int ret;
struct stmmac_priv *priv;
struct net_device *ndev;
@@ -1177,10 +1179,10 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
return ret;
}
- ret = of_get_phy_mode(dev->of_node);
- if (ret < 0)
+ ret = of_get_phy_mode(dev->of_node, &interface);
+ if (ret)
return -EINVAL;
- plat_dat->interface = ret;
+ plat_dat->interface = interface;
/* platform data specifying hardware features and callbacks.
* hardware features were copied from Allwinner drivers.
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
index a299da3971b4..26353ef616b8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -18,7 +18,7 @@
#include "stmmac_platform.h"
struct sunxi_priv_data {
- int interface;
+ phy_interface_t interface;
int clk_enabled;
struct clk *tx_clk;
struct regulator *regulator;
@@ -118,7 +118,11 @@ static int sun7i_gmac_probe(struct platform_device *pdev)
goto err_remove_config_dt;
}
- gmac->interface = of_get_phy_mode(dev->of_node);
+ ret = of_get_phy_mode(dev->of_node, &gmac->interface);
+ if (ret && ret != -ENODEV) {
+ dev_err(dev, "Can't get phy-mode\n");
+ goto err_remove_config_dt;
+ }
gmac->tx_clk = devm_clk_get(dev, "allwinner_gmac_tx");
if (IS_ERR(gmac->tx_clk)) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 3d69da112625..d0356fbd1e43 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -130,7 +130,6 @@ static void dwmac1000_set_mchash(void __iomem *ioaddr, u32 *mcfilterbits,
writel(mcfilterbits[0], ioaddr + GMAC_HASH_LOW);
writel(mcfilterbits[1], ioaddr + GMAC_HASH_HIGH);
return;
- break;
case 7:
numhashregs = 4;
break;
@@ -140,7 +139,6 @@ static void dwmac1000_set_mchash(void __iomem *ioaddr, u32 *mcfilterbits,
default:
pr_debug("STMMAC: err in setting multicast filter\n");
return;
- break;
}
for (regs = 0; regs < numhashregs; regs++)
writel(mcfilterbits[regs],
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index 89a3420eba42..07e97f45755d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -43,6 +43,10 @@
#define GMAC_ARP_ADDR 0x00000210
#define GMAC_ADDR_HIGH(reg) (0x300 + reg * 8)
#define GMAC_ADDR_LOW(reg) (0x304 + reg * 8)
+#define GMAC_L3L4_CTRL(reg) (0x900 + (reg) * 0x30)
+#define GMAC_L4_ADDR(reg) (0x904 + (reg) * 0x30)
+#define GMAC_L3_ADDR0(reg) (0x910 + (reg) * 0x30)
+#define GMAC_L3_ADDR1(reg) (0x914 + (reg) * 0x30)
/* RX Queues Routing */
#define GMAC_RXQCTRL_AVCPQ_MASK GENMASK(2, 0)
@@ -67,6 +71,7 @@
#define GMAC_PACKET_FILTER_PCF BIT(7)
#define GMAC_PACKET_FILTER_HPF BIT(10)
#define GMAC_PACKET_FILTER_VTFE BIT(16)
+#define GMAC_PACKET_FILTER_IPFE BIT(20)
#define GMAC_MAX_PERFECT_ADDRESSES 128
@@ -202,9 +207,11 @@ enum power_event {
#define GMAC_HW_FEAT_MIISEL BIT(0)
/* MAC HW features1 bitmap */
+#define GMAC_HW_FEAT_L3L4FNUM GENMASK(30, 27)
#define GMAC_HW_HASH_TB_SZ GENMASK(25, 24)
#define GMAC_HW_FEAT_AVSEL BIT(20)
#define GMAC_HW_TSOEN BIT(18)
+#define GMAC_HW_ADDR64 GENMASK(15, 14)
#define GMAC_HW_TXFIFOSIZE GENMASK(10, 6)
#define GMAC_HW_RXFIFOSIZE GENMASK(4, 0)
@@ -227,6 +234,21 @@ enum power_event {
#define GMAC_HI_DCS_SHIFT 16
#define GMAC_HI_REG_AE BIT(31)
+/* L3/L4 Filters regs */
+#define GMAC_L4DPIM0 BIT(21)
+#define GMAC_L4DPM0 BIT(20)
+#define GMAC_L4SPIM0 BIT(19)
+#define GMAC_L4SPM0 BIT(18)
+#define GMAC_L4PEN0 BIT(16)
+#define GMAC_L3DAIM0 BIT(5)
+#define GMAC_L3DAM0 BIT(4)
+#define GMAC_L3SAIM0 BIT(3)
+#define GMAC_L3SAM0 BIT(2)
+#define GMAC_L3PEN0 BIT(0)
+#define GMAC_L4DP0 GENMASK(31, 16)
+#define GMAC_L4DP0_SHIFT 16
+#define GMAC_L4SP0 GENMASK(15, 0)
+
/* MTL registers */
#define MTL_OPERATION_MODE 0x00000c00
#define MTL_FRPE BIT(15)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 9b4b5f69fc02..bec929daf703 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -401,8 +401,11 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
int numhashregs = (hw->multicast_filter_bins >> 5);
int mcbitslog2 = hw->mcast_bits_log2;
unsigned int value;
+ u32 mc_filter[8];
int i;
+ memset(mc_filter, 0, sizeof(mc_filter));
+
value = readl(ioaddr + GMAC_PACKET_FILTER);
value &= ~GMAC_PACKET_FILTER_HMC;
value &= ~GMAC_PACKET_FILTER_HPF;
@@ -416,16 +419,13 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
/* Pass all multi */
value |= GMAC_PACKET_FILTER_PM;
/* Set all the bits of the HASH tab */
- for (i = 0; i < numhashregs; i++)
- writel(0xffffffff, ioaddr + GMAC_HASH_TAB(i));
+ memset(mc_filter, 0xff, sizeof(mc_filter));
} else if (!netdev_mc_empty(dev)) {
struct netdev_hw_addr *ha;
- u32 mc_filter[8];
/* Hash filter for multicast */
value |= GMAC_PACKET_FILTER_HMC;
- memset(mc_filter, 0, sizeof(mc_filter));
netdev_for_each_mc_addr(ha, dev) {
/* The upper n bits of the calculated CRC are used to
* index the contents of the hash table. The number of
@@ -440,14 +440,15 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
*/
mc_filter[bit_nr >> 5] |= (1 << (bit_nr & 0x1f));
}
- for (i = 0; i < numhashregs; i++)
- writel(mc_filter[i], ioaddr + GMAC_HASH_TAB(i));
}
+ for (i = 0; i < numhashregs; i++)
+ writel(mc_filter[i], ioaddr + GMAC_HASH_TAB(i));
+
value |= GMAC_PACKET_FILTER_HPF;
/* Handle multiple unicast addresses */
- if (netdev_uc_count(dev) > GMAC_MAX_PERFECT_ADDRESSES) {
+ if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
/* Switch to promiscuous mode if more than 128 addrs
* are required
*/
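The set_filter rework above hoists mc_filter out of the multicast branch so the hash-table write loop always flushes all numhashregs registers, clearing stale bins. For reference, a hedged sketch of the bin computation those hunks rely on: CRC-32 the address, bit-reverse, keep the top mcbitslog2 bits, then set one bit across the 32-bit hash registers:

#include <linux/bitrev.h>
#include <linux/crc32.h>
#include <linux/if_ether.h>

static void my_hash_set(u32 *mc_filter, const u8 *addr, int mcbitslog2)
{
	/* Top mcbitslog2 bits of the bit-reversed CRC pick the bin. */
	u32 bit_nr = bitrev32(~crc32_le(~0, addr, ETH_ALEN)) >>
		     (32 - mcbitslog2);

	/* bit_nr >> 5 selects the 32-bit register, the low 5 bits the bit. */
	mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 0x1f);
}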
@@ -732,7 +733,7 @@ static void dwmac4_set_mac_loopback(void __iomem *ioaddr, bool enable)
}
static void dwmac4_update_vlan_hash(struct mac_device_info *hw, u32 hash,
- bool is_double)
+ u16 perfect_match, bool is_double)
{
void __iomem *ioaddr = hw->pcsr;
@@ -747,6 +748,16 @@ static void dwmac4_update_vlan_hash(struct mac_device_info *hw, u32 hash,
}
writel(value, ioaddr + GMAC_VLAN_TAG);
+ } else if (perfect_match) {
+ u32 value = GMAC_VLAN_ETV;
+
+ if (is_double) {
+ value |= GMAC_VLAN_EDVLP;
+ value |= GMAC_VLAN_ESVL;
+ value |= GMAC_VLAN_DOVLTC;
+ }
+
+ writel(value | perfect_match, ioaddr + GMAC_VLAN_TAG);
} else {
u32 value = readl(ioaddr + GMAC_VLAN_TAG);
@@ -798,6 +809,106 @@ static void dwmac4_set_arp_offload(struct mac_device_info *hw, bool en,
writel(value, ioaddr + GMAC_CONFIG);
}
+static int dwmac4_config_l3_filter(struct mac_device_info *hw, u32 filter_no,
+ bool en, bool ipv6, bool sa, bool inv,
+ u32 match)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + GMAC_PACKET_FILTER);
+ value |= GMAC_PACKET_FILTER_IPFE;
+ writel(value, ioaddr + GMAC_PACKET_FILTER);
+
+ value = readl(ioaddr + GMAC_L3L4_CTRL(filter_no));
+
+ /* For IPv6, the SA and DA filters cannot both be active */
+ if (ipv6) {
+ value |= GMAC_L3PEN0;
+ value &= ~(GMAC_L3SAM0 | GMAC_L3SAIM0);
+ value &= ~(GMAC_L3DAM0 | GMAC_L3DAIM0);
+ if (sa) {
+ value |= GMAC_L3SAM0;
+ if (inv)
+ value |= GMAC_L3SAIM0;
+ } else {
+ value |= GMAC_L3DAM0;
+ if (inv)
+ value |= GMAC_L3DAIM0;
+ }
+ } else {
+ value &= ~GMAC_L3PEN0;
+ if (sa) {
+ value |= GMAC_L3SAM0;
+ if (inv)
+ value |= GMAC_L3SAIM0;
+ } else {
+ value |= GMAC_L3DAM0;
+ if (inv)
+ value |= GMAC_L3DAIM0;
+ }
+ }
+
+ writel(value, ioaddr + GMAC_L3L4_CTRL(filter_no));
+
+ if (sa) {
+ writel(match, ioaddr + GMAC_L3_ADDR0(filter_no));
+ } else {
+ writel(match, ioaddr + GMAC_L3_ADDR1(filter_no));
+ }
+
+ if (!en)
+ writel(0, ioaddr + GMAC_L3L4_CTRL(filter_no));
+
+ return 0;
+}
+
+static int dwmac4_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
+ bool en, bool udp, bool sa, bool inv,
+ u32 match)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + GMAC_PACKET_FILTER);
+ value |= GMAC_PACKET_FILTER_IPFE;
+ writel(value, ioaddr + GMAC_PACKET_FILTER);
+
+ value = readl(ioaddr + GMAC_L3L4_CTRL(filter_no));
+ if (udp) {
+ value |= GMAC_L4PEN0;
+ } else {
+ value &= ~GMAC_L4PEN0;
+ }
+
+ value &= ~(GMAC_L4SPM0 | GMAC_L4SPIM0);
+ value &= ~(GMAC_L4DPM0 | GMAC_L4DPIM0);
+ if (sa) {
+ value |= GMAC_L4SPM0;
+ if (inv)
+ value |= GMAC_L4SPIM0;
+ } else {
+ value |= GMAC_L4DPM0;
+ if (inv)
+ value |= GMAC_L4DPIM0;
+ }
+
+ writel(value, ioaddr + GMAC_L3L4_CTRL(filter_no));
+
+ if (sa) {
+ value = match & GMAC_L4SP0;
+ } else {
+ value = (match << GMAC_L4DP0_SHIFT) & GMAC_L4DP0;
+ }
+
+ writel(value, ioaddr + GMAC_L4_ADDR(filter_no));
+
+ if (!en)
+ writel(0, ioaddr + GMAC_L3L4_CTRL(filter_no));
+
+ return 0;
+}
+
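The two helpers program one L3L4 slot each: set IPFE in the packet filter, pick the source/destination and inversion bits, write the match value, and clear the control register again when disabling. A hedged in-file usage sketch (the helper is static, and the port value is an arbitrary example):

/* Drop-in illustration: match TCP destination port 8080 on slot 0.
 * en=true, udp=false (TCP), sa=false (destination), inv=false.
 */
static int my_enable_dport_filter(struct mac_device_info *hw)
{
	return dwmac4_config_l4_filter(hw, 0, true, false, false, false,
				       8080);
}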
const struct stmmac_ops dwmac4_ops = {
.core_init = dwmac4_core_init,
.set_mac = stmmac_set_mac,
@@ -827,11 +938,14 @@ const struct stmmac_ops dwmac4_ops = {
.pcs_get_adv_lp = dwmac4_get_adv_lp,
.debug = dwmac4_debug,
.set_filter = dwmac4_set_filter,
+ .flex_pps_config = dwmac5_flex_pps_config,
.set_mac_loopback = dwmac4_set_mac_loopback,
.update_vlan_hash = dwmac4_update_vlan_hash,
.sarc_configure = dwmac4_sarc_configure,
.enable_vlan = dwmac4_enable_vlan,
.set_arp_offload = dwmac4_set_arp_offload,
+ .config_l3_filter = dwmac4_config_l3_filter,
+ .config_l4_filter = dwmac4_config_l4_filter,
};
const struct stmmac_ops dwmac410_ops = {
@@ -868,6 +982,8 @@ const struct stmmac_ops dwmac410_ops = {
.sarc_configure = dwmac4_sarc_configure,
.enable_vlan = dwmac4_enable_vlan,
.set_arp_offload = dwmac4_set_arp_offload,
+ .config_l3_filter = dwmac4_config_l3_filter,
+ .config_l4_filter = dwmac4_config_l4_filter,
};
const struct stmmac_ops dwmac510_ops = {
@@ -909,6 +1025,8 @@ const struct stmmac_ops dwmac510_ops = {
.sarc_configure = dwmac4_sarc_configure,
.enable_vlan = dwmac4_enable_vlan,
.set_arp_offload = dwmac4_set_arp_offload,
+ .config_l3_filter = dwmac4_config_l3_filter,
+ .config_l4_filter = dwmac4_config_l4_filter,
};
int dwmac4_setup(struct stmmac_priv *priv)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 15eb1abba91d..707ab5eba8da 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -431,8 +431,8 @@ static void dwmac4_get_addr(struct dma_desc *p, unsigned int *addr)
static void dwmac4_set_addr(struct dma_desc *p, dma_addr_t addr)
{
- p->des0 = cpu_to_le32(addr);
- p->des1 = 0;
+ p->des0 = cpu_to_le32(lower_32_bits(addr));
+ p->des1 = cpu_to_le32(upper_32_bits(addr));
}
static void dwmac4_clear(struct dma_desc *p)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
index 68c157979b94..b24c89572745 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -79,6 +79,10 @@ static void dwmac4_dma_init_rx_chan(void __iomem *ioaddr,
value = value | (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);
writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
+ if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) && likely(dma_cfg->eame))
+ writel(upper_32_bits(dma_rx_phy),
+ ioaddr + DMA_CHAN_RX_BASE_ADDR_HI(chan));
+
writel(lower_32_bits(dma_rx_phy), ioaddr + DMA_CHAN_RX_BASE_ADDR(chan));
}
@@ -97,6 +101,10 @@ static void dwmac4_dma_init_tx_chan(void __iomem *ioaddr,
writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
+ if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) && likely(dma_cfg->eame))
+ writel(upper_32_bits(dma_tx_phy),
+ ioaddr + DMA_CHAN_TX_BASE_ADDR_HI(chan));
+
writel(lower_32_bits(dma_tx_phy), ioaddr + DMA_CHAN_TX_BASE_ADDR(chan));
}
@@ -132,6 +140,9 @@ static void dwmac4_dma_init(void __iomem *ioaddr,
if (dma_cfg->aal)
value |= DMA_SYS_BUS_AAL;
+ if (dma_cfg->eame)
+ value |= DMA_SYS_BUS_EAME;
+
writel(value, ioaddr + DMA_SYS_BUS_MODE);
}
@@ -353,9 +364,27 @@ static void dwmac4_get_hw_feature(void __iomem *ioaddr,
/* MAC HW feature1 */
hw_cap = readl(ioaddr + GMAC_HW_FEATURE1);
+ dma_cap->l3l4fnum = (hw_cap & GMAC_HW_FEAT_L3L4FNUM) >> 27;
dma_cap->hash_tb_sz = (hw_cap & GMAC_HW_HASH_TB_SZ) >> 24;
dma_cap->av = (hw_cap & GMAC_HW_FEAT_AVSEL) >> 20;
dma_cap->tsoen = (hw_cap & GMAC_HW_TSOEN) >> 18;
+
+ dma_cap->addr64 = (hw_cap & GMAC_HW_ADDR64) >> 14;
+ switch (dma_cap->addr64) {
+ case 0:
+ dma_cap->addr64 = 32;
+ break;
+ case 1:
+ dma_cap->addr64 = 40;
+ break;
+ case 2:
+ dma_cap->addr64 = 48;
+ break;
+ default:
+ dma_cap->addr64 = 32;
+ break;
+ }
+
/* RX and TX FIFO sizes are encoded as log2(n / 128). Undo that by
* shifting and store the sizes in bytes.
*/
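With enhanced addressing mode (EAME) set in the system bus mode register, descriptor rings can live above 4 GiB, and each ring base is written as a HI/LO register pair. A minimal sketch of that split write, with my_write_ring_base as an assumed name:

#include <linux/io.h>
#include <linux/kernel.h>	/* upper_32_bits()/lower_32_bits() */

static void my_write_ring_base(void __iomem *lo, void __iomem *hi,
			       dma_addr_t base, bool eame)
{
	/* The _HI register is only meaningful with EAME set. */
	if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) && eame)
		writel(upper_32_bits(base), hi);
	writel(lower_32_bits(base), lo);
}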
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
index b66da0237d2a..5299fa1001a3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
@@ -65,6 +65,7 @@
#define DMA_SYS_BUS_MB BIT(14)
#define DMA_AXI_1KBBE BIT(13)
#define DMA_SYS_BUS_AAL BIT(12)
+#define DMA_SYS_BUS_EAME BIT(11)
#define DMA_AXI_BLEN256 BIT(7)
#define DMA_AXI_BLEN128 BIT(6)
#define DMA_AXI_BLEN64 BIT(5)
@@ -91,7 +92,9 @@
#define DMA_CHAN_CONTROL(x) DMA_CHANX_BASE_ADDR(x)
#define DMA_CHAN_TX_CONTROL(x) (DMA_CHANX_BASE_ADDR(x) + 0x4)
#define DMA_CHAN_RX_CONTROL(x) (DMA_CHANX_BASE_ADDR(x) + 0x8)
+#define DMA_CHAN_TX_BASE_ADDR_HI(x) (DMA_CHANX_BASE_ADDR(x) + 0x10)
#define DMA_CHAN_TX_BASE_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x14)
+#define DMA_CHAN_RX_BASE_ADDR_HI(x) (DMA_CHANX_BASE_ADDR(x) + 0x18)
#define DMA_CHAN_RX_BASE_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x1c)
#define DMA_CHAN_TX_END_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x20)
#define DMA_CHAN_RX_END_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x28)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
index 3f4f3132e16b..e436fa160c7d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
@@ -515,6 +515,7 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
if (!enable) {
val |= PPSCMDx(index, 0x5);
+ val |= PPSEN0;
writel(val, ioaddr + MAC_PPS_CONTROL);
return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
index 5923ca62d793..99037386080a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -84,7 +84,7 @@
#define XGMAC_TSIE BIT(12)
#define XGMAC_LPIIE BIT(5)
#define XGMAC_PMTIE BIT(4)
-#define XGMAC_INT_DEFAULT_EN (XGMAC_LPIIE | XGMAC_PMTIE | XGMAC_TSIE)
+#define XGMAC_INT_DEFAULT_EN (XGMAC_LPIIE | XGMAC_PMTIE)
#define XGMAC_Qx_TX_FLOW_CTRL(x) (0x00000070 + (x) * 4)
#define XGMAC_PT GENMASK(31, 16)
#define XGMAC_PT_SHIFT 16
@@ -122,6 +122,7 @@
#define XGMAC_HWFEAT_GMIISEL BIT(1)
#define XGMAC_HW_FEATURE1 0x00000120
#define XGMAC_HWFEAT_L3L4FNUM GENMASK(30, 27)
+#define XGMAC_HWFEAT_HASHTBLSZ GENMASK(25, 24)
#define XGMAC_HWFEAT_RSSEN BIT(20)
#define XGMAC_HWFEAT_TSOEN BIT(18)
#define XGMAC_HWFEAT_SPHEN BIT(17)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index 2b277b2c586b..5cda360d5d07 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -472,7 +472,7 @@ static void dwxgmac2_set_filter(struct mac_device_info *hw,
dwxgmac2_set_mchash(ioaddr, mc_filter, mcbitslog2);
/* Handle multiple unicast addresses */
- if (netdev_uc_count(dev) > XGMAC_ADDR_MAX) {
+ if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
value |= XGMAC_FILTER_PR;
} else {
struct netdev_hw_addr *ha;
@@ -523,8 +523,8 @@ static int dwxgmac2_rss_configure(struct mac_device_info *hw,
struct stmmac_rss *cfg, u32 num_rxq)
{
void __iomem *ioaddr = hw->pcsr;
+ u32 value, *key;
int i, ret;
- u32 value;
value = readl(ioaddr + XGMAC_RSS_CTRL);
if (!cfg || !cfg->enable) {
@@ -533,8 +533,9 @@ static int dwxgmac2_rss_configure(struct mac_device_info *hw,
return 0;
}
- for (i = 0; i < (sizeof(cfg->key) / sizeof(u32)); i++) {
- ret = dwxgmac2_rss_write_reg(ioaddr, true, i, cfg->key[i]);
+ key = (u32 *)cfg->key;
+ for (i = 0; i < (ARRAY_SIZE(cfg->key) / sizeof(u32)); i++) {
+ ret = dwxgmac2_rss_write_reg(ioaddr, true, i, key[i]);
if (ret)
return ret;
}
@@ -554,7 +555,7 @@ static int dwxgmac2_rss_configure(struct mac_device_info *hw,
}
static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
- bool is_double)
+ u16 perfect_match, bool is_double)
{
void __iomem *ioaddr = hw->pcsr;
@@ -575,6 +576,21 @@ static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
}
writel(value, ioaddr + XGMAC_VLAN_TAG);
+ } else if (perfect_match) {
+ u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
+
+ value |= XGMAC_FILTER_VTFE;
+
+ writel(value, ioaddr + XGMAC_PACKET_FILTER);
+
+ value = XGMAC_VLAN_ETV;
+ if (is_double) {
+ value |= XGMAC_VLAN_EDVLP;
+ value |= XGMAC_VLAN_ESVL;
+ value |= XGMAC_VLAN_DOVLTC;
+ }
+
+ writel(value | perfect_match, ioaddr + XGMAC_VLAN_TAG);
} else {
u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index 53c4a40d8386..7cc331996cd8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -27,7 +27,10 @@ static void dwxgmac2_dma_init(void __iomem *ioaddr,
if (dma_cfg->aal)
value |= XGMAC_AAL;
- writel(value | XGMAC_EAME, ioaddr + XGMAC_DMA_SYSBUS_MODE);
+ if (dma_cfg->eame)
+ value |= XGMAC_EAME;
+
+ writel(value, ioaddr + XGMAC_DMA_SYSBUS_MODE);
}
static void dwxgmac2_dma_init_chan(void __iomem *ioaddr,
@@ -380,6 +383,7 @@ static void dwxgmac2_get_hw_feature(void __iomem *ioaddr,
/* MAC HW feature 1 */
hw_cap = readl(ioaddr + XGMAC_HW_FEATURE1);
dma_cap->l3l4fnum = (hw_cap & XGMAC_HWFEAT_L3L4FNUM) >> 27;
+ dma_cap->hash_tb_sz = (hw_cap & XGMAC_HWFEAT_HASHTBLSZ) >> 24;
dma_cap->rssen = (hw_cap & XGMAC_HWFEAT_RSSEN) >> 20;
dma_cap->tsoen = (hw_cap & XGMAC_HWFEAT_TSOEN) >> 18;
dma_cap->sphen = (hw_cap & XGMAC_HWFEAT_SPHEN) >> 17;
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index ddb851d99618..1303d1e9a18f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -357,7 +357,7 @@ struct stmmac_ops {
struct stmmac_rss *cfg, u32 num_rxq);
/* VLAN */
void (*update_vlan_hash)(struct mac_device_info *hw, u32 hash,
- bool is_double);
+ u16 perfect_match, bool is_double);
void (*enable_vlan)(struct mac_device_info *hw, u32 type);
/* TX Timestamp */
int (*get_mac_tx_timestamp)(struct mac_device_info *hw, u64 *ts);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index d3232738fb25..654a2b7595b8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -629,6 +629,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
ptp_v2 = PTP_TCR_TSVER2ENA;
snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
+ ts_event_en = PTP_TCR_TSEVNTENA;
ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
ptp_over_ethernet = PTP_TCR_TSIPENA;
@@ -2609,7 +2610,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
}
if (priv->hw->pcs)
- stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);
+ stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
/* set TX and RX rings length */
stmmac_set_rings_length(priv);
@@ -2994,6 +2995,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
} else {
stmmac_set_desc_addr(priv, first, des);
tmp_pay_len = pay_len;
+ des += proto_hdr_len;
}
stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
@@ -4206,15 +4208,25 @@ static u32 stmmac_vid_crc32_le(__le16 vid_le)
static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
{
u32 crc, hash = 0;
- u16 vid;
+ int count = 0;
+ u16 vid = 0;
for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
__le16 vid_le = cpu_to_le16(vid);
crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
hash |= (1 << crc);
+ count++;
+ }
+
+ if (!priv->dma_cap.vlhash) {
+ if (count > 2) /* VID = 0 always passes filter */
+ return -EOPNOTSUPP;
+
+ vid = cpu_to_le16(vid);
+ hash = 0;
}
- return stmmac_update_vlan_hash(priv, priv->hw, hash, is_double);
+ return stmmac_update_vlan_hash(priv, priv->hw, hash, vid, is_double);
}
static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
@@ -4223,8 +4235,6 @@ static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid
bool is_double = false;
int ret;
- if (!priv->dma_cap.vlhash)
- return -EOPNOTSUPP;
if (be16_to_cpu(proto) == ETH_P_8021AD)
is_double = true;
@@ -4243,8 +4253,6 @@ static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vi
struct stmmac_priv *priv = netdev_priv(ndev);
bool is_double = false;
- if (!priv->dma_cap.vlhash)
- return -EOPNOTSUPP;
if (be16_to_cpu(proto) == ETH_P_8021AD)
is_double = true;
@@ -4514,6 +4522,13 @@ int stmmac_dvr_probe(struct device *device,
if (!ret) {
dev_info(priv->device, "Using %d bits DMA width\n",
priv->dma_cap.addr64);
+
+ /*
+ * If more than 32 bits can be addressed, make sure to
+ * enable enhanced addressing mode.
+ */
+ if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
+ priv->plat->dma_cfg->eame = true;
} else {
ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
if (ret) {
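Probe ties it together: negotiate the DMA mask from the advertised dma_cap.addr64, fall back to 32 bits, and enable EAME only when a wider mask is in effect. A hedged sketch of that negotiation (the hw_bits > 32 guard is a conservative reading of the hunk above, which keys only on CONFIG_ARCH_DMA_ADDR_T_64BIT):

#include <linux/dma-mapping.h>

static int my_setup_dma(struct device *dev, int hw_bits, bool *eame)
{
	/* Try the width the hardware advertises first. */
	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(hw_bits))) {
		*eame = IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
			hw_bits > 32;
		return 0;
	}

	/* Fall back to 32 bits; no enhanced addressing needed then. */
	*eame = false;
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}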
@@ -4715,11 +4730,9 @@ int stmmac_suspend(struct device *dev)
if (!ndev || !netif_running(ndev))
return 0;
- mutex_lock(&priv->lock);
+ phylink_mac_change(priv->phylink, false);
- rtnl_lock();
- phylink_stop(priv->phylink);
- rtnl_unlock();
+ mutex_lock(&priv->lock);
netif_device_detach(ndev);
stmmac_stop_all_queues(priv);
@@ -4734,11 +4747,19 @@ int stmmac_suspend(struct device *dev)
stmmac_pmt(priv, priv->hw, priv->wolopts);
priv->irq_wake = 1;
} else {
+ mutex_unlock(&priv->lock);
+ rtnl_lock();
+ phylink_stop(priv->phylink);
+ rtnl_unlock();
+ mutex_lock(&priv->lock);
+
stmmac_mac_set(priv, priv->ioaddr, false);
pinctrl_pm_select_sleep_state(priv->device);
/* Disable clock in case of PWM is off */
- clk_disable(priv->plat->pclk);
- clk_disable(priv->plat->stmmac_clk);
+ if (priv->plat->clk_ptp_ref)
+ clk_disable_unprepare(priv->plat->clk_ptp_ref);
+ clk_disable_unprepare(priv->plat->pclk);
+ clk_disable_unprepare(priv->plat->stmmac_clk);
}
mutex_unlock(&priv->lock);
@@ -4801,8 +4822,10 @@ int stmmac_resume(struct device *dev)
} else {
pinctrl_pm_select_default_state(priv->device);
/* enable the clk previously disabled */
- clk_enable(priv->plat->stmmac_clk);
- clk_enable(priv->plat->pclk);
+ clk_prepare_enable(priv->plat->stmmac_clk);
+ clk_prepare_enable(priv->plat->pclk);
+ if (priv->plat->clk_ptp_ref)
+ clk_prepare_enable(priv->plat->clk_ptp_ref);
/* reset the phy so that it's ready */
if (priv->mii)
stmmac_mdio_reset(priv->mii);
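The suspend/resume hunks fix an unbalanced clock API usage: clocks brought up with clk_prepare_enable() must be released with clk_disable_unprepare(), and the optional PTP reference clock is now gated too. A sketch of the pairing, assuming my_clks_* helper names:

#include <linux/clk.h>

static void my_clks_off(struct clk *pclk, struct clk *ptp_ref)
{
	if (ptp_ref)
		clk_disable_unprepare(ptp_ref);
	clk_disable_unprepare(pclk);
}

static int my_clks_on(struct clk *pclk, struct clk *ptp_ref)
{
	int ret = clk_prepare_enable(pclk);

	if (!ret && ptp_ref)
		ret = clk_prepare_enable(ptp_ref);
	return ret;
}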
@@ -4824,12 +4847,16 @@ int stmmac_resume(struct device *dev)
stmmac_start_all_queues(priv);
- rtnl_lock();
- phylink_start(priv->phylink);
- rtnl_unlock();
-
mutex_unlock(&priv->lock);
+ if (!device_may_wakeup(priv->device)) {
+ rtnl_lock();
+ phylink_start(priv->phylink);
+ rtnl_unlock();
+ }
+
+ phylink_mac_change(priv->phylink, true);
+
return 0;
}
EXPORT_SYMBOL_GPL(stmmac_resume);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 170c3a052b14..bedaff0c13bd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -412,9 +412,9 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
*mac = NULL;
}
- plat->phy_interface = of_get_phy_mode(np);
- if (plat->phy_interface < 0)
- return ERR_PTR(plat->phy_interface);
+ rc = of_get_phy_mode(np, &plat->phy_interface);
+ if (rc)
+ return ERR_PTR(rc);
plat->interface = stmmac_of_get_mac_mode(np);
if (plat->interface < 0)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index 173493db038c..df638b18b72c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -164,7 +164,7 @@ static int stmmac_enable(struct ptp_clock_info *ptp,
/* structure describing a PTP hardware clock */
static struct ptp_clock_info stmmac_ptp_clock_ops = {
.owner = THIS_MODULE,
- .name = "stmmac_ptp_clock",
+ .name = "stmmac ptp",
.max_adj = 62500000,
.n_alarm = 0,
.n_ext_ts = 0,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
index 5f66f6161629..0b5db52149bc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
@@ -487,8 +487,8 @@ static int stmmac_filter_check(struct stmmac_priv *priv)
static int stmmac_test_hfilt(struct stmmac_priv *priv)
{
- unsigned char gd_addr[ETH_ALEN] = {0x01, 0x00, 0xcc, 0xcc, 0xdd, 0xdd};
- unsigned char bd_addr[ETH_ALEN] = {0x09, 0x00, 0xaa, 0xaa, 0xbb, 0xbb};
+ unsigned char gd_addr[ETH_ALEN] = {0x01, 0xee, 0xdd, 0xcc, 0xbb, 0xaa};
+ unsigned char bd_addr[ETH_ALEN] = {0x01, 0x01, 0x02, 0x03, 0x04, 0x05};
struct stmmac_packet_attrs attr = { };
int ret;
@@ -496,6 +496,9 @@ static int stmmac_test_hfilt(struct stmmac_priv *priv)
if (ret)
return ret;
+ if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins)
+ return -EOPNOTSUPP;
+
ret = dev_mc_add(priv->dev, gd_addr);
if (ret)
return ret;
@@ -573,6 +576,8 @@ static int stmmac_test_mcfilt(struct stmmac_priv *priv)
if (stmmac_filter_check(priv))
return -EOPNOTSUPP;
+ if (!priv->hw->multicast_filter_bins)
+ return -EOPNOTSUPP;
/* Remove all MC addresses */
__dev_mc_unsync(priv->dev, NULL);
@@ -611,6 +616,8 @@ static int stmmac_test_ucfilt(struct stmmac_priv *priv)
if (stmmac_filter_check(priv))
return -EOPNOTSUPP;
+ if (!priv->hw->multicast_filter_bins)
+ return -EOPNOTSUPP;
/* Remove all UC addresses */
__dev_uc_unsync(priv->dev, NULL);
@@ -816,16 +823,13 @@ out:
return 0;
}
-static int stmmac_test_vlanfilt(struct stmmac_priv *priv)
+static int __stmmac_test_vlanfilt(struct stmmac_priv *priv)
{
struct stmmac_packet_attrs attr = { };
struct stmmac_test_priv *tpriv;
struct sk_buff *skb = NULL;
int ret = 0, i;
- if (!priv->dma_cap.vlhash)
- return -EOPNOTSUPP;
-
tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
if (!tpriv)
return -ENOMEM;
@@ -891,16 +895,32 @@ cleanup:
return ret;
}
-static int stmmac_test_dvlanfilt(struct stmmac_priv *priv)
+static int stmmac_test_vlanfilt(struct stmmac_priv *priv)
+{
+ if (!priv->dma_cap.vlhash)
+ return -EOPNOTSUPP;
+
+ return __stmmac_test_vlanfilt(priv);
+}
+
+static int stmmac_test_vlanfilt_perfect(struct stmmac_priv *priv)
+{
+ int ret, prev_cap = priv->dma_cap.vlhash;
+
+ priv->dma_cap.vlhash = 0;
+ ret = __stmmac_test_vlanfilt(priv);
+ priv->dma_cap.vlhash = prev_cap;
+
+ return ret;
+}
+
+static int __stmmac_test_dvlanfilt(struct stmmac_priv *priv)
{
struct stmmac_packet_attrs attr = { };
struct stmmac_test_priv *tpriv;
struct sk_buff *skb = NULL;
int ret = 0, i;
- if (!priv->dma_cap.vlhash)
- return -EOPNOTSUPP;
-
tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
if (!tpriv)
return -ENOMEM;
@@ -967,6 +987,25 @@ cleanup:
return ret;
}
+static int stmmac_test_dvlanfilt(struct stmmac_priv *priv)
+{
+ if (!priv->dma_cap.vlhash)
+ return -EOPNOTSUPP;
+
+ return __stmmac_test_dvlanfilt(priv);
+}
+
+static int stmmac_test_dvlanfilt_perfect(struct stmmac_priv *priv)
+{
+ int ret, prev_cap = priv->dma_cap.vlhash;
+
+ priv->dma_cap.vlhash = 0;
+ ret = __stmmac_test_dvlanfilt(priv);
+ priv->dma_cap.vlhash = prev_cap;
+
+ return ret;
+}
+
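The new *_perfect selftests reuse one test body by temporarily clearing dma_cap.vlhash, forcing the driver down the perfect-match fallback added in stmmac_vlan_update(). The trick generalizes to any capability-gated fallback; a hedged sketch:

static int my_test_without_vlhash(struct stmmac_priv *priv,
				  int (*test)(struct stmmac_priv *))
{
	int ret, prev = priv->dma_cap.vlhash;

	priv->dma_cap.vlhash = 0;	/* force the perfect-match path */
	ret = test(priv);
	priv->dma_cap.vlhash = prev;	/* restore the real capability */

	return ret;
}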
#ifdef CONFIG_NET_CLS_ACT
static int stmmac_test_rxp(struct stmmac_priv *priv)
{
@@ -1564,10 +1603,6 @@ static int __stmmac_test_jumbo(struct stmmac_priv *priv, u16 queue)
struct stmmac_packet_attrs attr = { };
int size = priv->dma_buf_sz;
- /* Only XGMAC has SW support for multiple RX descs in same packet */
- if (priv->plat->has_xgmac)
- size = priv->dev->max_mtu;
-
attr.dst = priv->dev->dev_addr;
attr.max_size = size - ETH_FCS_LEN;
attr.queue_mapping = queue;
@@ -1645,119 +1680,127 @@ static const struct stmmac_test {
int (*fn)(struct stmmac_priv *priv);
} stmmac_selftests[] = {
{
- .name = "MAC Loopback ",
+ .name = "MAC Loopback ",
.lb = STMMAC_LOOPBACK_MAC,
.fn = stmmac_test_mac_loopback,
}, {
- .name = "PHY Loopback ",
+ .name = "PHY Loopback ",
.lb = STMMAC_LOOPBACK_NONE, /* Test will handle it */
.fn = stmmac_test_phy_loopback,
}, {
- .name = "MMC Counters ",
+ .name = "MMC Counters ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_mmc,
}, {
- .name = "EEE ",
+ .name = "EEE ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_eee,
}, {
- .name = "Hash Filter MC ",
+ .name = "Hash Filter MC ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_hfilt,
}, {
- .name = "Perfect Filter UC ",
+ .name = "Perfect Filter UC ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_pfilt,
}, {
- .name = "MC Filter ",
+ .name = "MC Filter ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_mcfilt,
}, {
- .name = "UC Filter ",
+ .name = "UC Filter ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_ucfilt,
}, {
- .name = "Flow Control ",
+ .name = "Flow Control ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_flowctrl,
}, {
- .name = "RSS ",
+ .name = "RSS ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_rss,
}, {
- .name = "VLAN Filtering ",
+ .name = "VLAN Filtering ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_vlanfilt,
}, {
- .name = "Double VLAN Filtering",
+ .name = "VLAN Filtering (perf) ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_vlanfilt_perfect,
+ }, {
+ .name = "Double VLAN Filter ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_dvlanfilt,
}, {
- .name = "Flexible RX Parser ",
+ .name = "Double VLAN Filter (perf) ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_dvlanfilt_perfect,
+ }, {
+ .name = "Flexible RX Parser ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_rxp,
}, {
- .name = "SA Insertion (desc) ",
+ .name = "SA Insertion (desc) ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_desc_sai,
}, {
- .name = "SA Replacement (desc)",
+ .name = "SA Replacement (desc) ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_desc_sar,
}, {
- .name = "SA Insertion (reg) ",
+ .name = "SA Insertion (reg) ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_reg_sai,
}, {
- .name = "SA Replacement (reg)",
+ .name = "SA Replacement (reg) ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_reg_sar,
}, {
- .name = "VLAN TX Insertion ",
+ .name = "VLAN TX Insertion ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_vlanoff,
}, {
- .name = "SVLAN TX Insertion ",
+ .name = "SVLAN TX Insertion ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_svlanoff,
}, {
- .name = "L3 DA Filtering ",
+ .name = "L3 DA Filtering ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_l3filt_da,
}, {
- .name = "L3 SA Filtering ",
+ .name = "L3 SA Filtering ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_l3filt_sa,
}, {
- .name = "L4 DA TCP Filtering ",
+ .name = "L4 DA TCP Filtering ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_l4filt_da_tcp,
}, {
- .name = "L4 SA TCP Filtering ",
+ .name = "L4 SA TCP Filtering ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_l4filt_sa_tcp,
}, {
- .name = "L4 DA UDP Filtering ",
+ .name = "L4 DA UDP Filtering ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_l4filt_da_udp,
}, {
- .name = "L4 SA UDP Filtering ",
+ .name = "L4 SA UDP Filtering ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_l4filt_sa_udp,
}, {
- .name = "ARP Offload ",
+ .name = "ARP Offload ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_arpoffload,
}, {
- .name = "Jumbo Frame ",
+ .name = "Jumbo Frame ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_jumbo,
}, {
- .name = "Multichannel Jumbo ",
+ .name = "Multichannel Jumbo ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_mjumbo,
}, {
- .name = "Split Header ",
+ .name = "Split Header ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_sph,
},
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index e231098061b6..f9a9a9d82233 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -510,7 +510,7 @@ static struct stmmac_flow_entry *tc_find_flow(struct stmmac_priv *priv,
return NULL;
}
-struct {
+static struct {
int (*fn)(struct stmmac_priv *priv, struct flow_cls_offload *cls,
struct stmmac_flow_entry *entry);
} tc_flow_parsers[] = {
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index f298d714efd6..329671e66fe4 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -2619,11 +2619,10 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
i);
goto no_phy_slave;
}
- slave_data->phy_if = of_get_phy_mode(slave_node);
- if (slave_data->phy_if < 0) {
+ ret = of_get_phy_mode(slave_node, &slave_data->phy_if);
+ if (ret) {
dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
i);
- ret = slave_data->phy_if;
goto err_node_put;
}
diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h
index 362c5a986869..8bfa761fa552 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.h
+++ b/drivers/net/ethernet/ti/cpsw_priv.h
@@ -275,7 +275,7 @@ struct cpsw_slave_data {
struct device_node *slave_node;
struct device_node *phy_node;
char phy_id[MII_BUS_ID_SIZE];
- int phy_if;
+ phy_interface_t phy_if;
u8 mac_addr[ETH_ALEN];
u16 dual_emac_res_vlan; /* Reserved VLAN for DualEMAC */
struct phy *ifphy;
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index a65edd2770e6..37ba708ac781 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -722,7 +722,7 @@ static void cpdma_chan_set_descs(struct cpdma_ctlr *ctlr,
* cpdma_chan_split_pool - Splits ctrl pool between all channels.
* Has to be called under ctlr lock
*/
-int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
+static int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
{
int tx_per_ch_desc = 0, rx_per_ch_desc = 0;
int free_rx_num = 0, free_tx_num = 0;
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 2c1fac33136c..86a3f42a3dcc 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -2291,6 +2291,7 @@ static int gbe_slave_open(struct gbe_intf *gbe_intf)
struct gbe_slave *slave = gbe_intf->slave;
phy_interface_t phy_mode;
bool has_phy = false;
+ int err;
void (*hndlr)(struct net_device *) = gbe_adjust_link;
@@ -2320,11 +2321,11 @@ static int gbe_slave_open(struct gbe_intf *gbe_intf)
slave->phy_port_t = PORT_MII;
} else if (slave->link_interface == RGMII_LINK_MAC_PHY) {
has_phy = true;
- phy_mode = of_get_phy_mode(slave->node);
+ err = of_get_phy_mode(slave->node, &phy_mode);
/* if phy-mode is not present, default to
* PHY_INTERFACE_MODE_RGMII
*/
- if (phy_mode < 0)
+ if (err)
phy_mode = PHY_INTERFACE_MODE_RGMII;
if (!phy_interface_mode_is_rgmii(phy_mode)) {
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 676006f32f91..867726d696e2 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1761,11 +1761,9 @@ static int axienet_probe(struct platform_device *pdev)
goto free_netdev;
}
} else {
- lp->phy_mode = of_get_phy_mode(pdev->dev.of_node);
- if ((int)lp->phy_mode < 0) {
- ret = -EINVAL;
+ ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
+ if (ret)
goto free_netdev;
- }
}
/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
index bbbc1dcb6ab5..b517c1af9de0 100644
--- a/drivers/net/fjes/fjes_main.c
+++ b/drivers/net/fjes/fjes_main.c
@@ -1237,8 +1237,17 @@ static int fjes_probe(struct platform_device *plat_dev)
adapter->open_guard = false;
adapter->txrx_wq = alloc_workqueue(DRV_NAME "/txrx", WQ_MEM_RECLAIM, 0);
+ if (unlikely(!adapter->txrx_wq)) {
+ err = -ENOMEM;
+ goto err_free_netdev;
+ }
+
adapter->control_wq = alloc_workqueue(DRV_NAME "/control",
WQ_MEM_RECLAIM, 0);
+ if (unlikely(!adapter->control_wq)) {
+ err = -ENOMEM;
+ goto err_free_txrx_wq;
+ }
INIT_WORK(&adapter->tx_stall_task, fjes_tx_stall_task);
INIT_WORK(&adapter->raise_intr_rxdata_task,
@@ -1255,7 +1264,7 @@ static int fjes_probe(struct platform_device *plat_dev)
hw->hw_res.irq = platform_get_irq(plat_dev, 0);
err = fjes_hw_init(&adapter->hw);
if (err)
- goto err_free_netdev;
+ goto err_free_control_wq;
/* setup MAC address (02:00:00:00:00:[epid])*/
netdev->dev_addr[0] = 2;
@@ -1277,6 +1286,10 @@ static int fjes_probe(struct platform_device *plat_dev)
err_hw_exit:
fjes_hw_exit(&adapter->hw);
+err_free_control_wq:
+ destroy_workqueue(adapter->control_wq);
+err_free_txrx_wq:
+ destroy_workqueue(adapter->txrx_wq);
err_free_netdev:
free_netdev(netdev);
err_out:
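The fjes fix adds the NULL checks alloc_workqueue() always needed, plus a matching unwind order: each later failure releases everything allocated before it. A compact sketch of the same pattern (my_probe_wqs is an assumed name):

#include <linux/errno.h>
#include <linux/workqueue.h>

static int my_probe_wqs(struct workqueue_struct **txrx,
			struct workqueue_struct **ctrl)
{
	*txrx = alloc_workqueue("my/txrx", WQ_MEM_RECLAIM, 0);
	if (!*txrx)
		return -ENOMEM;

	*ctrl = alloc_workqueue("my/control", WQ_MEM_RECLAIM, 0);
	if (!*ctrl) {
		destroy_workqueue(*txrx);	/* undo in reverse order */
		return -ENOMEM;
	}

	return 0;
}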
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index fbec711ff514..fbea6f232819 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -107,27 +107,6 @@ struct bpqdev {
static LIST_HEAD(bpq_devices);
-/*
- * bpqether network devices are paired with ethernet devices below them, so
- * form a special "super class" of normal ethernet devices; split their locks
- * off into a separate class since they always nest.
- */
-static struct lock_class_key bpq_netdev_xmit_lock_key;
-static struct lock_class_key bpq_netdev_addr_lock_key;
-
-static void bpq_set_lockdep_class_one(struct net_device *dev,
- struct netdev_queue *txq,
- void *_unused)
-{
- lockdep_set_class(&txq->_xmit_lock, &bpq_netdev_xmit_lock_key);
-}
-
-static void bpq_set_lockdep_class(struct net_device *dev)
-{
- lockdep_set_class(&dev->addr_list_lock, &bpq_netdev_addr_lock_key);
- netdev_for_each_tx_queue(dev, bpq_set_lockdep_class_one, NULL);
-}
-
/* ------------------------------------------------------------------------ */
@@ -498,7 +477,6 @@ static int bpq_new_device(struct net_device *edev)
err = register_netdevice(ndev);
if (err)
goto error;
- bpq_set_lockdep_class(ndev);
/* List protected by RTNL */
list_add_rcu(&bpq->bpq_list, &bpq_devices);
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 670ef682f268..4209d1cf57f6 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -853,6 +853,7 @@ struct multi_recv_comp {
struct nvsc_rsc {
const struct ndis_pkt_8021q_info *vlan;
const struct ndis_tcp_ip_checksum_info *csum_info;
+ const u32 *hash_info;
u8 is_last; /* last RNDIS msg in a vmtransfer_page */
u32 cnt; /* #fragments in an RSC packet */
u32 pktlen; /* Full packet length */
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 39dddcd8b3cb..5fa5c49e481b 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -285,9 +285,9 @@ static inline u32 netvsc_get_hash(
else if (flow.basic.n_proto == htons(ETH_P_IPV6))
hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd);
else
- hash = 0;
+ return 0;
- skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
+ __skb_set_sw_hash(skb, hash, false);
}
return hash;
@@ -766,6 +766,7 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
const struct ndis_pkt_8021q_info *vlan = nvchan->rsc.vlan;
const struct ndis_tcp_ip_checksum_info *csum_info =
nvchan->rsc.csum_info;
+ const u32 *hash_info = nvchan->rsc.hash_info;
struct sk_buff *skb;
int i;
@@ -795,14 +796,16 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
skb->protocol == htons(ETH_P_IP))
netvsc_comp_ipcsum(skb);
- /* Do L4 checksum offload if enabled and present.
- */
+ /* Do L4 checksum offload if enabled and present. */
if (csum_info && (net->features & NETIF_F_RXCSUM)) {
if (csum_info->receive.tcp_checksum_succeeded ||
csum_info->receive.udp_checksum_succeeded)
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
+ if (hash_info)
+ skb_set_hash(skb, *hash_info, PKT_HASH_TYPE_L4);
+
if (vlan) {
u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT) |
(vlan->cfi ? VLAN_CFI_MASK : 0);
@@ -982,7 +985,7 @@ static int netvsc_attach(struct net_device *ndev,
if (netif_running(ndev)) {
ret = rndis_filter_open(nvdev);
if (ret)
- return ret;
+ goto err;
rdev = nvdev->extension;
if (!rdev->link_state)
@@ -990,6 +993,13 @@ static int netvsc_attach(struct net_device *ndev,
}
return 0;
+
+err:
+ netif_device_detach(ndev);
+
+ rndis_filter_device_remove(hdev, nvdev);
+
+ return ret;
}
static int netvsc_set_channels(struct net_device *net,
@@ -1807,8 +1817,10 @@ static int netvsc_set_features(struct net_device *ndev,
ret = rndis_filter_set_offload_params(ndev, nvdev, &offloads);
- if (ret)
+ if (ret) {
features ^= NETIF_F_LRO;
+ ndev->features = features;
+ }
syncvf:
if (!vf_netdev)
@@ -2335,8 +2347,6 @@ static int netvsc_probe(struct hv_device *dev,
NETIF_F_HW_VLAN_CTAG_RX;
net->vlan_features = net->features;
- netdev_lockdep_set_classes(net);
-
/* MTU range: 68 - 1500 or 65521 */
net->min_mtu = NETVSC_MTU_MIN;
if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
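
Two distinct hash fixes appear in the netvsc hunks above: returning 0 early avoids recording a zero value as a valid software hash, and the locally computed hash is now stored with __skb_set_sw_hash() so the stack knows it is software-derived, while the receive path prefers the device-supplied hash via skb_set_hash(). A minimal sketch of the distinction, assuming a valid skb:

    #include <linux/skbuff.h>

    static void hash_examples(struct sk_buff *skb, u32 hw_hash, u32 sw_hash)
    {
        /* Device-provided hash: recorded as a trusted L4 hash. */
        skb_set_hash(skb, hw_hash, PKT_HASH_TYPE_L4);

        /* Locally computed hash: flagged as software so the stack may
         * recompute it; 'false' means it is not an L4 hash.
         */
        __skb_set_sw_hash(skb, sw_hash, false);
    }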
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index abaf8156d19d..c06178380ac8 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -358,6 +358,7 @@ static inline
void rsc_add_data(struct netvsc_channel *nvchan,
const struct ndis_pkt_8021q_info *vlan,
const struct ndis_tcp_ip_checksum_info *csum_info,
+ const u32 *hash_info,
void *data, u32 len)
{
u32 cnt = nvchan->rsc.cnt;
@@ -368,6 +369,7 @@ void rsc_add_data(struct netvsc_channel *nvchan,
nvchan->rsc.vlan = vlan;
nvchan->rsc.csum_info = csum_info;
nvchan->rsc.pktlen = len;
+ nvchan->rsc.hash_info = hash_info;
}
nvchan->rsc.data[cnt] = data;
@@ -385,6 +387,7 @@ static int rndis_filter_receive_data(struct net_device *ndev,
const struct ndis_tcp_ip_checksum_info *csum_info;
const struct ndis_pkt_8021q_info *vlan;
const struct rndis_pktinfo_id *pktinfo_id;
+ const u32 *hash_info;
u32 data_offset;
void *data;
bool rsc_more = false;
@@ -411,6 +414,8 @@ static int rndis_filter_receive_data(struct net_device *ndev,
csum_info = rndis_get_ppi(rndis_pkt, TCPIP_CHKSUM_PKTINFO, 0);
+ hash_info = rndis_get_ppi(rndis_pkt, NBL_HASH_VALUE, 0);
+
pktinfo_id = rndis_get_ppi(rndis_pkt, RNDIS_PKTINFO_ID, 1);
data = (void *)msg + data_offset;
@@ -441,7 +446,8 @@ static int rndis_filter_receive_data(struct net_device *ndev,
* rndis_pkt->data_len tell us the real data length, we only copy
* the data packet to the stack, without the rndis trailer padding
*/
- rsc_add_data(nvchan, vlan, csum_info, data, rndis_pkt->data_len);
+ rsc_add_data(nvchan, vlan, csum_info, hash_info,
+ data, rndis_pkt->data_len);
if (rsc_more)
return NVSP_STAT_SUCCESS;
diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c
index ceddb424f887..0dd0ba915ab9 100644
--- a/drivers/net/ieee802154/atusb.c
+++ b/drivers/net/ieee802154/atusb.c
@@ -1137,10 +1137,11 @@ static void atusb_disconnect(struct usb_interface *interface)
ieee802154_unregister_hw(atusb->hw);
+ usb_put_dev(atusb->usb_dev);
+
ieee802154_free_hw(atusb->hw);
usb_set_intfdata(interface, NULL);
- usb_put_dev(atusb->usb_dev);
pr_debug("%s done\n", __func__);
}
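
The atusb reorder matters because the usb_dev pointer lives inside memory that ieee802154_free_hw() releases; the reference must be dropped while that memory is still valid. A generic sketch of the rule, with illustrative types of my own:

    #include <linux/kref.h>
    #include <linux/slab.h>

    struct child { struct kref ref; };
    struct parent { struct child *c; };

    static void child_release(struct kref *ref)
    {
        kfree(container_of(ref, struct child, ref));
    }

    static void parent_teardown(struct parent *p)
    {
        /* Drop the reference while 'p' is still valid ... */
        kref_put(&p->c->ref, child_release);
        /* ... then free the container; touching p->c after this point
         * would be the use-after-free the reorder avoids.
         */
        kfree(p);
    }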
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index 11402dc347db..430c93786153 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -3145,12 +3145,12 @@ static int ca8210_probe(struct spi_device *spi_device)
goto error;
}
+ priv->spi->dev.platform_data = pdata;
ret = ca8210_get_platform_data(priv->spi, pdata);
if (ret) {
dev_crit(&spi_device->dev, "ca8210_get_platform_data failed\n");
goto error;
}
- priv->spi->dev.platform_data = pdata;
ret = ca8210_dev_com_init(priv);
if (ret) {
diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c
index 17f2300e63ee..8dc04e2590b1 100644
--- a/drivers/net/ieee802154/mcr20a.c
+++ b/drivers/net/ieee802154/mcr20a.c
@@ -800,7 +800,7 @@ mcr20a_handle_rx_read_buf_complete(void *context)
if (!skb)
return;
- memcpy(skb_put(skb, len), lp->rx_buf, len);
+ __skb_put_data(skb, lp->rx_buf, len);
ieee802154_rx_irqsafe(lp->hw, skb, lp->rx_lqi[0]);
print_hex_dump_debug("mcr20a rx: ", DUMP_PREFIX_OFFSET, 16, 1,
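
The mcr20a change swaps an open-coded memcpy into the tail of an skb for the dedicated helper. A sketch of the equivalence:

    #include <linux/skbuff.h>

    static void fill_skb(struct sk_buff *skb, const void *buf, unsigned int len)
    {
        /* Idiomatic helper form ... */
        __skb_put_data(skb, buf, len);
        /* ... equivalent to the open-coded version it replaces:
         * memcpy(__skb_put(skb, len), buf, len);
         */
    }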
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 887bbba4631e..a70662261a5a 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -108,8 +108,8 @@ static void ipvlan_port_destroy(struct net_device *dev)
#define IPVLAN_FEATURES \
(NETIF_F_SG | NETIF_F_CSUM_MASK | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
- NETIF_F_GSO | NETIF_F_TSO | NETIF_F_GSO_ROBUST | \
- NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \
+ NETIF_F_GSO | NETIF_F_ALL_TSO | NETIF_F_GSO_ROBUST | \
+ NETIF_F_GRO | NETIF_F_RXCSUM | \
NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)
#define IPVLAN_STATE_MASK \
@@ -131,8 +131,6 @@ static int ipvlan_init(struct net_device *dev)
dev->gso_max_segs = phy_dev->gso_max_segs;
dev->hard_header_len = phy_dev->hard_header_len;
- netdev_lockdep_set_classes(dev);
-
ipvlan->pcpu_stats = netdev_alloc_pcpu_stats(struct ipvl_pcpu_stats);
if (!ipvlan->pcpu_stats)
return -ENOMEM;
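
On the IPVLAN_FEATURES hunk above: NETIF_F_ALL_TSO bundles the individual TSO bits, so the rewrite both shortens the mask and, as I understand it, picks up NETIF_F_TSO_MANGLEID. The core definition is, paraphrased (verify against your tree's netdev_features.h):

    #define NETIF_F_ALL_TSO  (NETIF_F_TSO | NETIF_F_TSO6 | \
                              NETIF_F_TSO_ECN | NETIF_F_TSO_MANGLEID)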
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index cb7637364b40..afd8b2a08245 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -267,7 +267,6 @@ struct macsec_dev {
struct pcpu_secy_stats __percpu *stats;
struct list_head secys;
struct gro_cells gro_cells;
- unsigned int nest_level;
};
/**
@@ -2750,7 +2749,6 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
#define MACSEC_FEATURES \
(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
-static struct lock_class_key macsec_netdev_addr_lock_key;
static int macsec_dev_init(struct net_device *dev)
{
@@ -2958,11 +2956,6 @@ static int macsec_get_iflink(const struct net_device *dev)
return macsec_priv(dev)->real_dev->ifindex;
}
-static int macsec_get_nest_level(struct net_device *dev)
-{
- return macsec_priv(dev)->nest_level;
-}
-
static const struct net_device_ops macsec_netdev_ops = {
.ndo_init = macsec_dev_init,
.ndo_uninit = macsec_dev_uninit,
@@ -2976,7 +2969,6 @@ static const struct net_device_ops macsec_netdev_ops = {
.ndo_start_xmit = macsec_start_xmit,
.ndo_get_stats64 = macsec_get_stats64,
.ndo_get_iflink = macsec_get_iflink,
- .ndo_get_lock_subclass = macsec_get_nest_level,
};
static const struct device_type macsec_type = {
@@ -3001,12 +2993,10 @@ static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
static void macsec_free_netdev(struct net_device *dev)
{
struct macsec_dev *macsec = macsec_priv(dev);
- struct net_device *real_dev = macsec->real_dev;
free_percpu(macsec->stats);
free_percpu(macsec->secy.tx_sc.stats);
- dev_put(real_dev);
}
static void macsec_setup(struct net_device *dev)
@@ -3261,14 +3251,6 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
if (err < 0)
return err;
- dev_hold(real_dev);
-
- macsec->nest_level = dev_get_nest_level(real_dev) + 1;
- netdev_lockdep_set_classes(dev);
- lockdep_set_class_and_subclass(&dev->addr_list_lock,
- &macsec_netdev_addr_lock_key,
- macsec_get_nest_level(dev));
-
err = netdev_upper_dev_link(real_dev, dev, extack);
if (err < 0)
goto unregister;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 940192c057b6..34fc59bd1e20 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -852,8 +852,6 @@ static int macvlan_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
* "super class" of normal network devices; split their locks off into a
* separate class since they always nest.
*/
-static struct lock_class_key macvlan_netdev_addr_lock_key;
-
#define ALWAYS_ON_OFFLOADS \
(NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE | \
NETIF_F_GSO_ROBUST | NETIF_F_GSO_ENCAP_ALL)
@@ -869,19 +867,6 @@ static struct lock_class_key macvlan_netdev_addr_lock_key;
#define MACVLAN_STATE_MASK \
((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
-static int macvlan_get_nest_level(struct net_device *dev)
-{
- return ((struct macvlan_dev *)netdev_priv(dev))->nest_level;
-}
-
-static void macvlan_set_lockdep_class(struct net_device *dev)
-{
- netdev_lockdep_set_classes(dev);
- lockdep_set_class_and_subclass(&dev->addr_list_lock,
- &macvlan_netdev_addr_lock_key,
- macvlan_get_nest_level(dev));
-}
-
static int macvlan_init(struct net_device *dev)
{
struct macvlan_dev *vlan = netdev_priv(dev);
@@ -900,8 +885,6 @@ static int macvlan_init(struct net_device *dev)
dev->gso_max_segs = lowerdev->gso_max_segs;
dev->hard_header_len = lowerdev->hard_header_len;
- macvlan_set_lockdep_class(dev);
-
vlan->pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
if (!vlan->pcpu_stats)
return -ENOMEM;
@@ -1161,7 +1144,6 @@ static const struct net_device_ops macvlan_netdev_ops = {
.ndo_fdb_add = macvlan_fdb_add,
.ndo_fdb_del = macvlan_fdb_del,
.ndo_fdb_dump = ndo_dflt_fdb_dump,
- .ndo_get_lock_subclass = macvlan_get_nest_level,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = macvlan_dev_poll_controller,
.ndo_netpoll_setup = macvlan_dev_netpoll_setup,
@@ -1445,7 +1427,6 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
vlan->dev = dev;
vlan->port = port;
vlan->set_features = MACVLAN_FEATURES;
- vlan->nest_level = dev_get_nest_level(lowerdev) + 1;
vlan->mode = MACVLAN_MODE_VEPA;
if (data && data[IFLA_MACVLAN_MODE])
diff --git a/drivers/net/netdevsim/Makefile b/drivers/net/netdevsim/Makefile
index 09f1315d2f2a..f4d8f62f28c2 100644
--- a/drivers/net/netdevsim/Makefile
+++ b/drivers/net/netdevsim/Makefile
@@ -3,7 +3,7 @@
obj-$(CONFIG_NETDEVSIM) += netdevsim.o
netdevsim-objs := \
- netdev.o dev.o fib.o bus.o
+ netdev.o dev.o fib.o bus.o health.o
ifeq ($(CONFIG_BPF_SYSCALL),y)
netdevsim-objs += \
diff --git a/drivers/net/netdevsim/bus.c b/drivers/net/netdevsim/bus.c
index 1a0ff3d7747b..6aeed0c600f8 100644
--- a/drivers/net/netdevsim/bus.c
+++ b/drivers/net/netdevsim/bus.c
@@ -283,6 +283,7 @@ nsim_bus_dev_new(unsigned int id, unsigned int port_count)
nsim_bus_dev->dev.bus = &nsim_bus;
nsim_bus_dev->dev.type = &nsim_bus_dev_type;
nsim_bus_dev->port_count = port_count;
+ nsim_bus_dev->initial_net = current->nsproxy->net_ns;
err = device_register(&nsim_bus_dev->dev);
if (err)
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index 56576d4f34a5..e59a8826f36d 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -90,6 +90,10 @@ static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev)
&nsim_dev->test1);
debugfs_create_file("take_snapshot", 0200, nsim_dev->ddir, nsim_dev,
&nsim_dev_take_snapshot_fops);
+ debugfs_create_bool("dont_allow_reload", 0600, nsim_dev->ddir,
+ &nsim_dev->dont_allow_reload);
+ debugfs_create_bool("fail_reload", 0600, nsim_dev->ddir,
+ &nsim_dev->fail_reload);
return 0;
}
@@ -123,39 +127,6 @@ static void nsim_dev_port_debugfs_exit(struct nsim_dev_port *nsim_dev_port)
debugfs_remove_recursive(nsim_dev_port->ddir);
}
-static struct net *nsim_devlink_net(struct devlink *devlink)
-{
- return &init_net;
-}
-
-static u64 nsim_dev_ipv4_fib_resource_occ_get(void *priv)
-{
- struct net *net = priv;
-
- return nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB, false);
-}
-
-static u64 nsim_dev_ipv4_fib_rules_res_occ_get(void *priv)
-{
- struct net *net = priv;
-
- return nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB_RULES, false);
-}
-
-static u64 nsim_dev_ipv6_fib_resource_occ_get(void *priv)
-{
- struct net *net = priv;
-
- return nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB, false);
-}
-
-static u64 nsim_dev_ipv6_fib_rules_res_occ_get(void *priv)
-{
- struct net *net = priv;
-
- return nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB_RULES, false);
-}
-
static int nsim_dev_resources_register(struct devlink *devlink)
{
struct devlink_resource_size_params params = {
@@ -163,9 +134,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
.size_granularity = 1,
.unit = DEVLINK_RESOURCE_UNIT_ENTRY
};
- struct net *net = nsim_devlink_net(devlink);
int err;
- u64 n;
/* Resources for IPv4 */
err = devlink_resource_register(devlink, "IPv4", (u64)-1,
@@ -177,8 +146,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
goto out;
}
- n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB, true);
- err = devlink_resource_register(devlink, "fib", n,
+ err = devlink_resource_register(devlink, "fib", (u64)-1,
NSIM_RESOURCE_IPV4_FIB,
NSIM_RESOURCE_IPV4, &params);
if (err) {
@@ -186,8 +154,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
return err;
}
- n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB_RULES, true);
- err = devlink_resource_register(devlink, "fib-rules", n,
+ err = devlink_resource_register(devlink, "fib-rules", (u64)-1,
NSIM_RESOURCE_IPV4_FIB_RULES,
NSIM_RESOURCE_IPV4, &params);
if (err) {
@@ -205,8 +172,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
goto out;
}
- n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB, true);
- err = devlink_resource_register(devlink, "fib", n,
+ err = devlink_resource_register(devlink, "fib", (u64)-1,
NSIM_RESOURCE_IPV6_FIB,
NSIM_RESOURCE_IPV6, &params);
if (err) {
@@ -214,8 +180,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
return err;
}
- n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB_RULES, true);
- err = devlink_resource_register(devlink, "fib-rules", n,
+ err = devlink_resource_register(devlink, "fib-rules", (u64)-1,
NSIM_RESOURCE_IPV6_FIB_RULES,
NSIM_RESOURCE_IPV6, &params);
if (err) {
@@ -223,22 +188,6 @@ static int nsim_dev_resources_register(struct devlink *devlink)
return err;
}
- devlink_resource_occ_get_register(devlink,
- NSIM_RESOURCE_IPV4_FIB,
- nsim_dev_ipv4_fib_resource_occ_get,
- net);
- devlink_resource_occ_get_register(devlink,
- NSIM_RESOURCE_IPV4_FIB_RULES,
- nsim_dev_ipv4_fib_rules_res_occ_get,
- net);
- devlink_resource_occ_get_register(devlink,
- NSIM_RESOURCE_IPV6_FIB,
- nsim_dev_ipv6_fib_resource_occ_get,
- net);
- devlink_resource_occ_get_register(devlink,
- NSIM_RESOURCE_IPV6_FIB_RULES,
- nsim_dev_ipv6_fib_rules_res_occ_get,
- net);
out:
return err;
}
@@ -524,36 +473,48 @@ static void nsim_dev_traps_exit(struct devlink *devlink)
kfree(nsim_dev->trap_data);
}
-static int nsim_dev_reload_down(struct devlink *devlink,
+static int nsim_dev_reload_create(struct nsim_dev *nsim_dev,
+ struct netlink_ext_ack *extack);
+static void nsim_dev_reload_destroy(struct nsim_dev *nsim_dev);
+
+static int nsim_dev_reload_down(struct devlink *devlink, bool netns_change,
struct netlink_ext_ack *extack)
{
+ struct nsim_dev *nsim_dev = devlink_priv(devlink);
+
+ if (nsim_dev->dont_allow_reload) {
+		/* For testing purposes, the user set the debugfs
+		 * dont_allow_reload value to true, so forbid the reload.

+ */
+ NL_SET_ERR_MSG_MOD(extack, "User forbid the reload for testing purposes");
+ return -EOPNOTSUPP;
+ }
+
+ nsim_dev_reload_destroy(nsim_dev);
return 0;
}
static int nsim_dev_reload_up(struct devlink *devlink,
struct netlink_ext_ack *extack)
{
- enum nsim_resource_id res_ids[] = {
- NSIM_RESOURCE_IPV4_FIB, NSIM_RESOURCE_IPV4_FIB_RULES,
- NSIM_RESOURCE_IPV6_FIB, NSIM_RESOURCE_IPV6_FIB_RULES
- };
- struct net *net = nsim_devlink_net(devlink);
- int i;
-
- for (i = 0; i < ARRAY_SIZE(res_ids); ++i) {
- int err;
- u64 val;
+ struct nsim_dev *nsim_dev = devlink_priv(devlink);
- err = devlink_resource_size_get(devlink, res_ids[i], &val);
- if (!err) {
- err = nsim_fib_set_max(net, res_ids[i], val, extack);
- if (err)
- return err;
- }
+ if (nsim_dev->fail_reload) {
+ /* For testing purposes, user set debugfs fail_reload
+ * value to true. Fail right away.
+ */
+ NL_SET_ERR_MSG_MOD(extack, "User setup the reload to fail for testing purposes");
+ return -EINVAL;
}
- nsim_devlink_param_load_driverinit_values(devlink);
- return 0;
+ return nsim_dev_reload_create(nsim_dev, extack);
+}
+
+static int nsim_dev_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ return devlink_info_driver_name_put(req, DRV_NAME);
}
#define NSIM_DEV_FLASH_SIZE 500000
@@ -649,6 +610,7 @@ nsim_dev_devlink_trap_action_set(struct devlink *devlink,
static const struct devlink_ops nsim_dev_devlink_ops = {
.reload_down = nsim_dev_reload_down,
.reload_up = nsim_dev_reload_up,
+ .info_get = nsim_dev_info_get,
.flash_update = nsim_dev_flash_update,
.trap_init = nsim_dev_devlink_trap_init,
.trap_action_set = nsim_dev_devlink_trap_action_set,
@@ -657,8 +619,141 @@ static const struct devlink_ops nsim_dev_devlink_ops = {
#define NSIM_DEV_MAX_MACS_DEFAULT 32
#define NSIM_DEV_TEST1_DEFAULT true
-static struct nsim_dev *
-nsim_dev_create(struct nsim_bus_dev *nsim_bus_dev, unsigned int port_count)
+static int __nsim_dev_port_add(struct nsim_dev *nsim_dev,
+ unsigned int port_index)
+{
+ struct nsim_dev_port *nsim_dev_port;
+ struct devlink_port *devlink_port;
+ int err;
+
+ nsim_dev_port = kzalloc(sizeof(*nsim_dev_port), GFP_KERNEL);
+ if (!nsim_dev_port)
+ return -ENOMEM;
+ nsim_dev_port->port_index = port_index;
+
+ devlink_port = &nsim_dev_port->devlink_port;
+ devlink_port_attrs_set(devlink_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
+ port_index + 1, 0, 0,
+ nsim_dev->switch_id.id,
+ nsim_dev->switch_id.id_len);
+ err = devlink_port_register(priv_to_devlink(nsim_dev), devlink_port,
+ port_index);
+ if (err)
+ goto err_port_free;
+
+ err = nsim_dev_port_debugfs_init(nsim_dev, nsim_dev_port);
+ if (err)
+ goto err_dl_port_unregister;
+
+ nsim_dev_port->ns = nsim_create(nsim_dev, nsim_dev_port);
+ if (IS_ERR(nsim_dev_port->ns)) {
+ err = PTR_ERR(nsim_dev_port->ns);
+ goto err_port_debugfs_exit;
+ }
+
+ devlink_port_type_eth_set(devlink_port, nsim_dev_port->ns->netdev);
+ list_add(&nsim_dev_port->list, &nsim_dev->port_list);
+
+ return 0;
+
+err_port_debugfs_exit:
+ nsim_dev_port_debugfs_exit(nsim_dev_port);
+err_dl_port_unregister:
+ devlink_port_unregister(devlink_port);
+err_port_free:
+ kfree(nsim_dev_port);
+ return err;
+}
+
+static void __nsim_dev_port_del(struct nsim_dev_port *nsim_dev_port)
+{
+ struct devlink_port *devlink_port = &nsim_dev_port->devlink_port;
+
+ list_del(&nsim_dev_port->list);
+ devlink_port_type_clear(devlink_port);
+ nsim_destroy(nsim_dev_port->ns);
+ nsim_dev_port_debugfs_exit(nsim_dev_port);
+ devlink_port_unregister(devlink_port);
+ kfree(nsim_dev_port);
+}
+
+static void nsim_dev_port_del_all(struct nsim_dev *nsim_dev)
+{
+ struct nsim_dev_port *nsim_dev_port, *tmp;
+
+ mutex_lock(&nsim_dev->port_list_lock);
+ list_for_each_entry_safe(nsim_dev_port, tmp,
+ &nsim_dev->port_list, list)
+ __nsim_dev_port_del(nsim_dev_port);
+ mutex_unlock(&nsim_dev->port_list_lock);
+}
+
+static int nsim_dev_port_add_all(struct nsim_dev *nsim_dev,
+ unsigned int port_count)
+{
+ int i, err;
+
+ for (i = 0; i < port_count; i++) {
+ err = __nsim_dev_port_add(nsim_dev, i);
+ if (err)
+ goto err_port_del_all;
+ }
+ return 0;
+
+err_port_del_all:
+ nsim_dev_port_del_all(nsim_dev);
+ return err;
+}
+
+static int nsim_dev_reload_create(struct nsim_dev *nsim_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct nsim_bus_dev *nsim_bus_dev = nsim_dev->nsim_bus_dev;
+ struct devlink *devlink;
+ int err;
+
+ devlink = priv_to_devlink(nsim_dev);
+ nsim_dev = devlink_priv(devlink);
+ INIT_LIST_HEAD(&nsim_dev->port_list);
+ mutex_init(&nsim_dev->port_list_lock);
+ nsim_dev->fw_update_status = true;
+
+ nsim_dev->fib_data = nsim_fib_create(devlink, extack);
+ if (IS_ERR(nsim_dev->fib_data))
+ return PTR_ERR(nsim_dev->fib_data);
+
+ nsim_devlink_param_load_driverinit_values(devlink);
+
+ err = nsim_dev_dummy_region_init(nsim_dev, devlink);
+ if (err)
+ goto err_fib_destroy;
+
+ err = nsim_dev_traps_init(devlink);
+ if (err)
+ goto err_dummy_region_exit;
+
+ err = nsim_dev_health_init(nsim_dev, devlink);
+ if (err)
+ goto err_traps_exit;
+
+ err = nsim_dev_port_add_all(nsim_dev, nsim_bus_dev->port_count);
+ if (err)
+ goto err_health_exit;
+
+ return 0;
+
+err_health_exit:
+ nsim_dev_health_exit(nsim_dev);
+err_traps_exit:
+ nsim_dev_traps_exit(devlink);
+err_dummy_region_exit:
+ nsim_dev_dummy_region_exit(nsim_dev);
+err_fib_destroy:
+ nsim_fib_destroy(devlink, nsim_dev->fib_data);
+ return err;
+}
+
+static struct nsim_dev *nsim_dev_create(struct nsim_bus_dev *nsim_bus_dev)
{
struct nsim_dev *nsim_dev;
struct devlink *devlink;
@@ -667,6 +762,7 @@ nsim_dev_create(struct nsim_bus_dev *nsim_bus_dev, unsigned int port_count)
devlink = devlink_alloc(&nsim_dev_devlink_ops, sizeof(*nsim_dev));
if (!devlink)
return ERR_PTR(-ENOMEM);
+ devlink_net_set(devlink, nsim_bus_dev->initial_net);
nsim_dev = devlink_priv(devlink);
nsim_dev->nsim_bus_dev = nsim_bus_dev;
nsim_dev->switch_id.id_len = sizeof(nsim_dev->switch_id.id);
@@ -681,9 +777,15 @@ nsim_dev_create(struct nsim_bus_dev *nsim_bus_dev, unsigned int port_count)
if (err)
goto err_devlink_free;
+ nsim_dev->fib_data = nsim_fib_create(devlink, NULL);
+ if (IS_ERR(nsim_dev->fib_data)) {
+ err = PTR_ERR(nsim_dev->fib_data);
+ goto err_resources_unregister;
+ }
+
err = devlink_register(devlink, &nsim_bus_dev->dev);
if (err)
- goto err_resources_unregister;
+ goto err_fib_destroy;
err = devlink_params_register(devlink, nsim_devlink_params,
ARRAY_SIZE(nsim_devlink_params));
@@ -703,13 +805,25 @@ nsim_dev_create(struct nsim_bus_dev *nsim_bus_dev, unsigned int port_count)
if (err)
goto err_traps_exit;
- err = nsim_bpf_dev_init(nsim_dev);
+ err = nsim_dev_health_init(nsim_dev, devlink);
if (err)
goto err_debugfs_exit;
+ err = nsim_bpf_dev_init(nsim_dev);
+ if (err)
+ goto err_health_exit;
+
+ err = nsim_dev_port_add_all(nsim_dev, nsim_bus_dev->port_count);
+ if (err)
+ goto err_bpf_dev_exit;
+
devlink_params_publish(devlink);
return nsim_dev;
+err_bpf_dev_exit:
+ nsim_bpf_dev_exit(nsim_dev);
+err_health_exit:
+ nsim_dev_health_exit(nsim_dev);
err_debugfs_exit:
nsim_dev_debugfs_exit(nsim_dev);
err_traps_exit:
@@ -721,6 +835,8 @@ err_params_unregister:
ARRAY_SIZE(nsim_devlink_params));
err_dl_unregister:
devlink_unregister(devlink);
+err_fib_destroy:
+ nsim_fib_destroy(devlink, nsim_dev->fib_data);
err_resources_unregister:
devlink_resources_unregister(devlink, NULL);
err_devlink_free:
@@ -728,87 +844,33 @@ err_devlink_free:
return ERR_PTR(err);
}
-static void nsim_dev_destroy(struct nsim_dev *nsim_dev)
+static void nsim_dev_reload_destroy(struct nsim_dev *nsim_dev)
{
struct devlink *devlink = priv_to_devlink(nsim_dev);
- nsim_bpf_dev_exit(nsim_dev);
- nsim_dev_debugfs_exit(nsim_dev);
+ if (devlink_is_reload_failed(devlink))
+ return;
+ nsim_dev_port_del_all(nsim_dev);
+ nsim_dev_health_exit(nsim_dev);
nsim_dev_traps_exit(devlink);
nsim_dev_dummy_region_exit(nsim_dev);
- devlink_params_unregister(devlink, nsim_devlink_params,
- ARRAY_SIZE(nsim_devlink_params));
- devlink_unregister(devlink);
- devlink_resources_unregister(devlink, NULL);
mutex_destroy(&nsim_dev->port_list_lock);
- devlink_free(devlink);
-}
-
-static int __nsim_dev_port_add(struct nsim_dev *nsim_dev,
- unsigned int port_index)
-{
- struct nsim_dev_port *nsim_dev_port;
- struct devlink_port *devlink_port;
- int err;
-
- nsim_dev_port = kzalloc(sizeof(*nsim_dev_port), GFP_KERNEL);
- if (!nsim_dev_port)
- return -ENOMEM;
- nsim_dev_port->port_index = port_index;
-
- devlink_port = &nsim_dev_port->devlink_port;
- devlink_port_attrs_set(devlink_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
- port_index + 1, 0, 0,
- nsim_dev->switch_id.id,
- nsim_dev->switch_id.id_len);
- err = devlink_port_register(priv_to_devlink(nsim_dev), devlink_port,
- port_index);
- if (err)
- goto err_port_free;
-
- err = nsim_dev_port_debugfs_init(nsim_dev, nsim_dev_port);
- if (err)
- goto err_dl_port_unregister;
-
- nsim_dev_port->ns = nsim_create(nsim_dev, nsim_dev_port);
- if (IS_ERR(nsim_dev_port->ns)) {
- err = PTR_ERR(nsim_dev_port->ns);
- goto err_port_debugfs_exit;
- }
-
- devlink_port_type_eth_set(devlink_port, nsim_dev_port->ns->netdev);
- list_add(&nsim_dev_port->list, &nsim_dev->port_list);
-
- return 0;
-
-err_port_debugfs_exit:
- nsim_dev_port_debugfs_exit(nsim_dev_port);
-err_dl_port_unregister:
- devlink_port_unregister(devlink_port);
-err_port_free:
- kfree(nsim_dev_port);
- return err;
+ nsim_fib_destroy(devlink, nsim_dev->fib_data);
}
-static void __nsim_dev_port_del(struct nsim_dev_port *nsim_dev_port)
+static void nsim_dev_destroy(struct nsim_dev *nsim_dev)
{
- struct devlink_port *devlink_port = &nsim_dev_port->devlink_port;
-
- list_del(&nsim_dev_port->list);
- devlink_port_type_clear(devlink_port);
- nsim_destroy(nsim_dev_port->ns);
- nsim_dev_port_debugfs_exit(nsim_dev_port);
- devlink_port_unregister(devlink_port);
- kfree(nsim_dev_port);
-}
+ struct devlink *devlink = priv_to_devlink(nsim_dev);
-static void nsim_dev_port_del_all(struct nsim_dev *nsim_dev)
-{
- struct nsim_dev_port *nsim_dev_port, *tmp;
+ nsim_dev_reload_destroy(nsim_dev);
- list_for_each_entry_safe(nsim_dev_port, tmp,
- &nsim_dev->port_list, list)
- __nsim_dev_port_del(nsim_dev_port);
+ nsim_bpf_dev_exit(nsim_dev);
+ nsim_dev_debugfs_exit(nsim_dev);
+ devlink_params_unregister(devlink, nsim_devlink_params,
+ ARRAY_SIZE(nsim_devlink_params));
+ devlink_unregister(devlink);
+ devlink_resources_unregister(devlink, NULL);
+ devlink_free(devlink);
}
int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev)
@@ -817,19 +879,22 @@ int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev)
int i;
int err;
- nsim_dev = nsim_dev_create(nsim_bus_dev, nsim_bus_dev->port_count);
+ nsim_dev = nsim_dev_create(nsim_bus_dev);
if (IS_ERR(nsim_dev))
return PTR_ERR(nsim_dev);
dev_set_drvdata(&nsim_bus_dev->dev, nsim_dev);
+ mutex_lock(&nsim_dev->port_list_lock);
for (i = 0; i < nsim_bus_dev->port_count; i++) {
err = __nsim_dev_port_add(nsim_dev, i);
if (err)
goto err_port_del_all;
}
+ mutex_unlock(&nsim_dev->port_list_lock);
return 0;
err_port_del_all:
+ mutex_unlock(&nsim_dev->port_list_lock);
nsim_dev_port_del_all(nsim_dev);
nsim_dev_destroy(nsim_dev);
return err;
@@ -839,7 +904,6 @@ void nsim_dev_remove(struct nsim_bus_dev *nsim_bus_dev)
{
struct nsim_dev *nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev);
- nsim_dev_port_del_all(nsim_dev);
nsim_dev_destroy(nsim_dev);
}
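
The netdevsim dev.c rework makes devlink reload a strict subset of probe/remove: reload_down tears down only the reload-scoped state (ports, health, traps, dummy region, fib) while devlink registration itself persists, and reload_up recreates that state; the new debugfs booleans let selftests force either leg to fail. A condensed sketch of the symmetric pairing, where my_runtime_state_create/destroy are hypothetical stand-ins for the driver's helpers:

    static int my_reload_down(struct devlink *devlink, bool netns_change,
                              struct netlink_ext_ack *extack)
    {
        /* Destroy exactly the state that reload_up will recreate. */
        my_runtime_state_destroy(devlink_priv(devlink));  /* hypothetical */
        return 0;
    }

    static int my_reload_up(struct devlink *devlink,
                            struct netlink_ext_ack *extack)
    {
        return my_runtime_state_create(devlink_priv(devlink), extack);
    }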
diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c
index f61d094746c0..13540dee7364 100644
--- a/drivers/net/netdevsim/fib.c
+++ b/drivers/net/netdevsim/fib.c
@@ -18,7 +18,7 @@
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/fib_rules.h>
-#include <net/netns/generic.h>
+#include <net/net_namespace.h>
#include "netdevsim.h"
@@ -33,15 +33,14 @@ struct nsim_per_fib_data {
};
struct nsim_fib_data {
+ struct notifier_block fib_nb;
struct nsim_per_fib_data ipv4;
struct nsim_per_fib_data ipv6;
};
-static unsigned int nsim_fib_net_id;
-
-u64 nsim_fib_get_val(struct net *net, enum nsim_resource_id res_id, bool max)
+u64 nsim_fib_get_val(struct nsim_fib_data *fib_data,
+ enum nsim_resource_id res_id, bool max)
{
- struct nsim_fib_data *fib_data = net_generic(net, nsim_fib_net_id);
struct nsim_fib_entry *entry;
switch (res_id) {
@@ -64,12 +63,10 @@ u64 nsim_fib_get_val(struct net *net, enum nsim_resource_id res_id, bool max)
return max ? entry->max : entry->num;
}
-int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val,
- struct netlink_ext_ack *extack)
+static void nsim_fib_set_max(struct nsim_fib_data *fib_data,
+ enum nsim_resource_id res_id, u64 val)
{
- struct nsim_fib_data *fib_data = net_generic(net, nsim_fib_net_id);
struct nsim_fib_entry *entry;
- int err = 0;
switch (res_id) {
case NSIM_RESOURCE_IPV4_FIB:
@@ -85,20 +82,10 @@ int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val,
entry = &fib_data->ipv6.rules;
break;
default:
- return 0;
- }
-
- /* not allowing a new max to be less than curren occupancy
- * --> no means of evicting entries
- */
- if (val < entry->num) {
- NL_SET_ERR_MSG_MOD(extack, "New size is less than current occupancy");
- err = -EINVAL;
- } else {
- entry->max = val;
+ WARN_ON(1);
+ return;
}
-
- return err;
+ entry->max = val;
}
static int nsim_fib_rule_account(struct nsim_fib_entry *entry, bool add,
@@ -120,9 +107,9 @@ static int nsim_fib_rule_account(struct nsim_fib_entry *entry, bool add,
return err;
}
-static int nsim_fib_rule_event(struct fib_notifier_info *info, bool add)
+static int nsim_fib_rule_event(struct nsim_fib_data *data,
+ struct fib_notifier_info *info, bool add)
{
- struct nsim_fib_data *data = net_generic(info->net, nsim_fib_net_id);
struct netlink_ext_ack *extack = info->extack;
int err = 0;
@@ -157,9 +144,9 @@ static int nsim_fib_account(struct nsim_fib_entry *entry, bool add,
return err;
}
-static int nsim_fib_event(struct fib_notifier_info *info, bool add)
+static int nsim_fib_event(struct nsim_fib_data *data,
+ struct fib_notifier_info *info, bool add)
{
- struct nsim_fib_data *data = net_generic(info->net, nsim_fib_net_id);
struct netlink_ext_ack *extack = info->extack;
int err = 0;
@@ -178,18 +165,22 @@ static int nsim_fib_event(struct fib_notifier_info *info, bool add)
static int nsim_fib_event_nb(struct notifier_block *nb, unsigned long event,
void *ptr)
{
+ struct nsim_fib_data *data = container_of(nb, struct nsim_fib_data,
+ fib_nb);
struct fib_notifier_info *info = ptr;
int err = 0;
switch (event) {
case FIB_EVENT_RULE_ADD: /* fall through */
case FIB_EVENT_RULE_DEL:
- err = nsim_fib_rule_event(info, event == FIB_EVENT_RULE_ADD);
+ err = nsim_fib_rule_event(data, info,
+ event == FIB_EVENT_RULE_ADD);
break;
case FIB_EVENT_ENTRY_ADD: /* fall through */
case FIB_EVENT_ENTRY_DEL:
- err = nsim_fib_event(info, event == FIB_EVENT_ENTRY_ADD);
+ err = nsim_fib_event(data, info,
+ event == FIB_EVENT_ENTRY_ADD);
break;
}
@@ -199,68 +190,116 @@ static int nsim_fib_event_nb(struct notifier_block *nb, unsigned long event,
/* inconsistent dump, trying again */
static void nsim_fib_dump_inconsistent(struct notifier_block *nb)
{
- struct nsim_fib_data *data;
- struct net *net;
+ struct nsim_fib_data *data = container_of(nb, struct nsim_fib_data,
+ fib_nb);
- rcu_read_lock();
- for_each_net_rcu(net) {
- data = net_generic(net, nsim_fib_net_id);
+ data->ipv4.fib.num = 0ULL;
+ data->ipv4.rules.num = 0ULL;
+ data->ipv6.fib.num = 0ULL;
+ data->ipv6.rules.num = 0ULL;
+}
- data->ipv4.fib.num = 0ULL;
- data->ipv4.rules.num = 0ULL;
+static u64 nsim_fib_ipv4_resource_occ_get(void *priv)
+{
+ struct nsim_fib_data *data = priv;
- data->ipv6.fib.num = 0ULL;
- data->ipv6.rules.num = 0ULL;
- }
- rcu_read_unlock();
+ return nsim_fib_get_val(data, NSIM_RESOURCE_IPV4_FIB, false);
}
-static struct notifier_block nsim_fib_nb = {
- .notifier_call = nsim_fib_event_nb,
-};
-
-/* Initialize per network namespace state */
-static int __net_init nsim_fib_netns_init(struct net *net)
+static u64 nsim_fib_ipv4_rules_res_occ_get(void *priv)
{
- struct nsim_fib_data *data = net_generic(net, nsim_fib_net_id);
+ struct nsim_fib_data *data = priv;
- data->ipv4.fib.max = (u64)-1;
- data->ipv4.rules.max = (u64)-1;
+ return nsim_fib_get_val(data, NSIM_RESOURCE_IPV4_FIB_RULES, false);
+}
- data->ipv6.fib.max = (u64)-1;
- data->ipv6.rules.max = (u64)-1;
+static u64 nsim_fib_ipv6_resource_occ_get(void *priv)
+{
+ struct nsim_fib_data *data = priv;
- return 0;
+ return nsim_fib_get_val(data, NSIM_RESOURCE_IPV6_FIB, false);
}
-static struct pernet_operations nsim_fib_net_ops = {
- .init = nsim_fib_netns_init,
- .id = &nsim_fib_net_id,
- .size = sizeof(struct nsim_fib_data),
-};
+static u64 nsim_fib_ipv6_rules_res_occ_get(void *priv)
+{
+ struct nsim_fib_data *data = priv;
+
+ return nsim_fib_get_val(data, NSIM_RESOURCE_IPV6_FIB_RULES, false);
+}
-void nsim_fib_exit(void)
+static void nsim_fib_set_max_all(struct nsim_fib_data *data,
+ struct devlink *devlink)
{
- unregister_pernet_subsys(&nsim_fib_net_ops);
- unregister_fib_notifier(&nsim_fib_nb);
+ enum nsim_resource_id res_ids[] = {
+ NSIM_RESOURCE_IPV4_FIB, NSIM_RESOURCE_IPV4_FIB_RULES,
+ NSIM_RESOURCE_IPV6_FIB, NSIM_RESOURCE_IPV6_FIB_RULES
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(res_ids); i++) {
+ int err;
+ u64 val;
+
+ err = devlink_resource_size_get(devlink, res_ids[i], &val);
+ if (err)
+			val = (u64)-1;
+ nsim_fib_set_max(data, res_ids[i], val);
+ }
}
-int nsim_fib_init(void)
+struct nsim_fib_data *nsim_fib_create(struct devlink *devlink,
+ struct netlink_ext_ack *extack)
{
+ struct nsim_fib_data *data;
int err;
- err = register_pernet_subsys(&nsim_fib_net_ops);
- if (err < 0) {
- pr_err("Failed to register pernet subsystem\n");
- goto err_out;
- }
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return ERR_PTR(-ENOMEM);
+
+ nsim_fib_set_max_all(data, devlink);
- err = register_fib_notifier(&nsim_fib_nb, nsim_fib_dump_inconsistent);
- if (err < 0) {
+ data->fib_nb.notifier_call = nsim_fib_event_nb;
+ err = register_fib_notifier(devlink_net(devlink), &data->fib_nb,
+ nsim_fib_dump_inconsistent, extack);
+ if (err) {
pr_err("Failed to register fib notifier\n");
goto err_out;
}
+ devlink_resource_occ_get_register(devlink,
+ NSIM_RESOURCE_IPV4_FIB,
+ nsim_fib_ipv4_resource_occ_get,
+ data);
+ devlink_resource_occ_get_register(devlink,
+ NSIM_RESOURCE_IPV4_FIB_RULES,
+ nsim_fib_ipv4_rules_res_occ_get,
+ data);
+ devlink_resource_occ_get_register(devlink,
+ NSIM_RESOURCE_IPV6_FIB,
+ nsim_fib_ipv6_resource_occ_get,
+ data);
+ devlink_resource_occ_get_register(devlink,
+ NSIM_RESOURCE_IPV6_FIB_RULES,
+ nsim_fib_ipv6_rules_res_occ_get,
+ data);
+ return data;
+
err_out:
- return err;
+ kfree(data);
+ return ERR_PTR(err);
+}
+
+void nsim_fib_destroy(struct devlink *devlink, struct nsim_fib_data *data)
+{
+ devlink_resource_occ_get_unregister(devlink,
+ NSIM_RESOURCE_IPV6_FIB_RULES);
+ devlink_resource_occ_get_unregister(devlink,
+ NSIM_RESOURCE_IPV6_FIB);
+ devlink_resource_occ_get_unregister(devlink,
+ NSIM_RESOURCE_IPV4_FIB_RULES);
+ devlink_resource_occ_get_unregister(devlink,
+ NSIM_RESOURCE_IPV4_FIB);
+ unregister_fib_notifier(devlink_net(devlink), &data->fib_nb);
+ kfree(data);
}
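
The fib.c conversion from pernet state to per-devlink state hinges on embedding the notifier_block in the private structure and recovering the owner with container_of() inside the callback, instead of a global net_generic() lookup. A minimal sketch of that pattern, with an illustrative struct of my own:

    #include <linux/kernel.h>
    #include <linux/notifier.h>

    struct my_data {                  /* illustrative */
        struct notifier_block nb;
        unsigned long events;
    };

    static int my_event_cb(struct notifier_block *nb, unsigned long event,
                           void *ptr)
    {
        /* Recover the owning object from the embedded member. */
        struct my_data *data = container_of(nb, struct my_data, nb);

        data->events++;
        return NOTIFY_DONE;
    }

Because each registered notifier_block lives inside its own private structure, multiple simulated devices can register independently without any shared global state.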
diff --git a/drivers/net/netdevsim/health.c b/drivers/net/netdevsim/health.c
new file mode 100644
index 000000000000..2716235a0336
--- /dev/null
+++ b/drivers/net/netdevsim/health.c
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Mellanox Technologies. All rights reserved */
+
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "netdevsim.h"
+
+static int
+nsim_dev_empty_reporter_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
+{
+ return 0;
+}
+
+static int
+nsim_dev_empty_reporter_diagnose(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
+{
+ return 0;
+}
+
+static const
+struct devlink_health_reporter_ops nsim_dev_empty_reporter_ops = {
+ .name = "empty",
+ .dump = nsim_dev_empty_reporter_dump,
+ .diagnose = nsim_dev_empty_reporter_diagnose,
+};
+
+struct nsim_dev_dummy_reporter_ctx {
+ char *break_msg;
+};
+
+static int
+nsim_dev_dummy_reporter_recover(struct devlink_health_reporter *reporter,
+ void *priv_ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct nsim_dev_health *health = devlink_health_reporter_priv(reporter);
+ struct nsim_dev_dummy_reporter_ctx *ctx = priv_ctx;
+
+ if (health->fail_recover) {
+		/* For testing purposes, the user set the debugfs
+		 * fail_recover value to true, so fail right away.
+ */
+ NL_SET_ERR_MSG_MOD(extack, "User setup the recover to fail for testing purposes");
+ return -EINVAL;
+ }
+ if (ctx) {
+ kfree(health->recovered_break_msg);
+ health->recovered_break_msg = kstrdup(ctx->break_msg,
+ GFP_KERNEL);
+ if (!health->recovered_break_msg)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int nsim_dev_dummy_fmsg_put(struct devlink_fmsg *fmsg, u32 binary_len)
+{
+ char *binary;
+ int err;
+ int i;
+
+ err = devlink_fmsg_bool_pair_put(fmsg, "test_bool", true);
+ if (err)
+ return err;
+ err = devlink_fmsg_u8_pair_put(fmsg, "test_u8", 1);
+ if (err)
+ return err;
+ err = devlink_fmsg_u32_pair_put(fmsg, "test_u32", 3);
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "test_u64", 4);
+ if (err)
+ return err;
+ err = devlink_fmsg_string_pair_put(fmsg, "test_string", "somestring");
+ if (err)
+ return err;
+
+ err = devlink_fmsg_arr_pair_nest_start(fmsg, "test_binary");
+ if (err)
+ return err;
+ binary = kmalloc(binary_len, GFP_KERNEL);
+ if (!binary)
+ return -ENOMEM;
+ get_random_bytes(binary, binary_len);
+ err = devlink_fmsg_binary_put(fmsg, binary, binary_len);
+ kfree(binary);
+ if (err)
+ return err;
+ err = devlink_fmsg_arr_pair_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_pair_nest_start(fmsg, "test_nest");
+ if (err)
+ return err;
+ err = devlink_fmsg_obj_nest_start(fmsg);
+ if (err)
+ return err;
+ err = devlink_fmsg_bool_pair_put(fmsg, "nested_test_bool", false);
+ if (err)
+ return err;
+ err = devlink_fmsg_u8_pair_put(fmsg, "nested_test_u8", false);
+ if (err)
+ return err;
+ err = devlink_fmsg_obj_nest_end(fmsg);
+ if (err)
+ return err;
+ err = devlink_fmsg_pair_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_arr_pair_nest_start(fmsg, "test_bool_array");
+ if (err)
+ return err;
+ for (i = 0; i < 10; i++) {
+ err = devlink_fmsg_bool_put(fmsg, true);
+ if (err)
+ return err;
+ }
+ err = devlink_fmsg_arr_pair_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_arr_pair_nest_start(fmsg, "test_u8_array");
+ if (err)
+ return err;
+ for (i = 0; i < 10; i++) {
+ err = devlink_fmsg_u8_put(fmsg, i);
+ if (err)
+ return err;
+ }
+ err = devlink_fmsg_arr_pair_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_arr_pair_nest_start(fmsg, "test_u32_array");
+ if (err)
+ return err;
+ for (i = 0; i < 10; i++) {
+ err = devlink_fmsg_u32_put(fmsg, i);
+ if (err)
+ return err;
+ }
+ err = devlink_fmsg_arr_pair_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_arr_pair_nest_start(fmsg, "test_u64_array");
+ if (err)
+ return err;
+ for (i = 0; i < 10; i++) {
+ err = devlink_fmsg_u64_put(fmsg, i);
+ if (err)
+ return err;
+ }
+ err = devlink_fmsg_arr_pair_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_arr_pair_nest_start(fmsg, "test_array_of_objects");
+ if (err)
+ return err;
+ for (i = 0; i < 10; i++) {
+ err = devlink_fmsg_obj_nest_start(fmsg);
+ if (err)
+ return err;
+ err = devlink_fmsg_bool_pair_put(fmsg,
+ "in_array_nested_test_bool",
+ false);
+ if (err)
+ return err;
+ err = devlink_fmsg_u8_pair_put(fmsg,
+ "in_array_nested_test_u8",
+ i);
+ if (err)
+ return err;
+ err = devlink_fmsg_obj_nest_end(fmsg);
+ if (err)
+ return err;
+ }
+ return devlink_fmsg_arr_pair_nest_end(fmsg);
+}
+
+static int
+nsim_dev_dummy_reporter_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct nsim_dev_health *health = devlink_health_reporter_priv(reporter);
+ struct nsim_dev_dummy_reporter_ctx *ctx = priv_ctx;
+ int err;
+
+ if (ctx) {
+ err = devlink_fmsg_string_pair_put(fmsg, "break_message",
+ ctx->break_msg);
+ if (err)
+ return err;
+ }
+ return nsim_dev_dummy_fmsg_put(fmsg, health->binary_len);
+}
+
+static int
+nsim_dev_dummy_reporter_diagnose(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
+{
+ struct nsim_dev_health *health = devlink_health_reporter_priv(reporter);
+ int err;
+
+ if (health->recovered_break_msg) {
+ err = devlink_fmsg_string_pair_put(fmsg,
+ "recovered_break_message",
+ health->recovered_break_msg);
+ if (err)
+ return err;
+ }
+ return nsim_dev_dummy_fmsg_put(fmsg, health->binary_len);
+}
+
+static const
+struct devlink_health_reporter_ops nsim_dev_dummy_reporter_ops = {
+ .name = "dummy",
+ .recover = nsim_dev_dummy_reporter_recover,
+ .dump = nsim_dev_dummy_reporter_dump,
+ .diagnose = nsim_dev_dummy_reporter_diagnose,
+};
+
+static ssize_t nsim_dev_health_break_write(struct file *file,
+ const char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct nsim_dev_health *health = file->private_data;
+ struct nsim_dev_dummy_reporter_ctx ctx;
+ char *break_msg;
+ int err;
+
+ break_msg = kmalloc(count + 1, GFP_KERNEL);
+ if (!break_msg)
+ return -ENOMEM;
+
+ if (copy_from_user(break_msg, data, count)) {
+ err = -EFAULT;
+ goto out;
+ }
+ break_msg[count] = '\0';
+	if (count > 0 && break_msg[count - 1] == '\n')
+ break_msg[count - 1] = '\0';
+
+ ctx.break_msg = break_msg;
+ err = devlink_health_report(health->dummy_reporter, break_msg, &ctx);
+ if (err)
+ goto out;
+
+out:
+ kfree(break_msg);
+ return err ?: count;
+}
+
+static const struct file_operations nsim_dev_health_break_fops = {
+ .open = simple_open,
+ .write = nsim_dev_health_break_write,
+ .llseek = generic_file_llseek,
+};
+
+int nsim_dev_health_init(struct nsim_dev *nsim_dev, struct devlink *devlink)
+{
+ struct nsim_dev_health *health = &nsim_dev->health;
+ int err;
+
+ health->empty_reporter =
+ devlink_health_reporter_create(devlink,
+ &nsim_dev_empty_reporter_ops,
+ 0, false, health);
+ if (IS_ERR(health->empty_reporter))
+ return PTR_ERR(health->empty_reporter);
+
+ health->dummy_reporter =
+ devlink_health_reporter_create(devlink,
+ &nsim_dev_dummy_reporter_ops,
+ 0, false, health);
+ if (IS_ERR(health->dummy_reporter)) {
+ err = PTR_ERR(health->dummy_reporter);
+ goto err_empty_reporter_destroy;
+ }
+
+ health->ddir = debugfs_create_dir("health", nsim_dev->ddir);
+ if (IS_ERR_OR_NULL(health->ddir)) {
+ err = PTR_ERR_OR_ZERO(health->ddir) ?: -EINVAL;
+ goto err_dummy_reporter_destroy;
+ }
+
+ health->recovered_break_msg = NULL;
+ debugfs_create_file("break_health", 0200, health->ddir, health,
+ &nsim_dev_health_break_fops);
+ health->binary_len = 16;
+ debugfs_create_u32("binary_len", 0600, health->ddir,
+ &health->binary_len);
+ health->fail_recover = false;
+ debugfs_create_bool("fail_recover", 0600, health->ddir,
+ &health->fail_recover);
+ return 0;
+
+err_dummy_reporter_destroy:
+ devlink_health_reporter_destroy(health->dummy_reporter);
+err_empty_reporter_destroy:
+ devlink_health_reporter_destroy(health->empty_reporter);
+ return err;
+}
+
+void nsim_dev_health_exit(struct nsim_dev *nsim_dev)
+{
+ struct nsim_dev_health *health = &nsim_dev->health;
+
+ debugfs_remove_recursive(health->ddir);
+ kfree(health->recovered_break_msg);
+ devlink_health_reporter_destroy(health->dummy_reporter);
+ devlink_health_reporter_destroy(health->empty_reporter);
+}
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index 55f57f76d01b..2908e0a0d6e1 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -290,6 +290,7 @@ nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port)
if (!dev)
return ERR_PTR(-ENOMEM);
+ dev_net_set(dev, nsim_dev_net(nsim_dev));
ns = netdev_priv(dev);
ns->netdev = dev;
ns->nsim_dev = nsim_dev;
@@ -357,18 +358,12 @@ static int __init nsim_module_init(void)
if (err)
goto err_dev_exit;
- err = nsim_fib_init();
- if (err)
- goto err_bus_exit;
-
err = rtnl_link_register(&nsim_link_ops);
if (err)
- goto err_fib_exit;
+ goto err_bus_exit;
return 0;
-err_fib_exit:
- nsim_fib_exit();
err_bus_exit:
nsim_bus_exit();
err_dev_exit:
@@ -379,7 +374,6 @@ err_dev_exit:
static void __exit nsim_module_exit(void)
{
rtnl_link_unregister(&nsim_link_ops);
- nsim_fib_exit();
nsim_bus_exit();
nsim_dev_exit();
}
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index 66bf13765ad0..94df795ef4d3 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -134,6 +134,18 @@ enum nsim_resource_id {
NSIM_RESOURCE_IPV6_FIB_RULES,
};
+struct nsim_dev_health {
+ struct devlink_health_reporter *empty_reporter;
+ struct devlink_health_reporter *dummy_reporter;
+ struct dentry *ddir;
+ char *recovered_break_msg;
+ u32 binary_len;
+ bool fail_recover;
+};
+
+int nsim_dev_health_init(struct nsim_dev *nsim_dev, struct devlink *devlink);
+void nsim_dev_health_exit(struct nsim_dev *nsim_dev);
+
struct nsim_dev_port {
struct list_head list;
struct devlink_port devlink_port;
@@ -161,9 +173,17 @@ struct nsim_dev {
bool fw_update_status;
u32 max_macs;
bool test1;
+ bool dont_allow_reload;
+ bool fail_reload;
struct devlink_region *dummy_region;
+ struct nsim_dev_health health;
};
+static inline struct net *nsim_dev_net(struct nsim_dev *nsim_dev)
+{
+ return devlink_net(priv_to_devlink(nsim_dev));
+}
+
int nsim_dev_init(void);
void nsim_dev_exit(void);
int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev);
@@ -173,11 +193,11 @@ int nsim_dev_port_add(struct nsim_bus_dev *nsim_bus_dev,
int nsim_dev_port_del(struct nsim_bus_dev *nsim_bus_dev,
unsigned int port_index);
-int nsim_fib_init(void);
-void nsim_fib_exit(void);
-u64 nsim_fib_get_val(struct net *net, enum nsim_resource_id res_id, bool max);
-int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val,
- struct netlink_ext_ack *extack);
+struct nsim_fib_data *nsim_fib_create(struct devlink *devlink,
+ struct netlink_ext_ack *extack);
+void nsim_fib_destroy(struct devlink *devlink, struct nsim_fib_data *fib_data);
+u64 nsim_fib_get_val(struct nsim_fib_data *fib_data,
+ enum nsim_resource_id res_id, bool max);
#if IS_ENABLED(CONFIG_XFRM_OFFLOAD)
void nsim_ipsec_init(struct netdevsim *ns);
@@ -215,6 +235,9 @@ struct nsim_bus_dev {
struct device dev;
struct list_head list;
unsigned int port_count;
+	struct net *initial_net; /* Carries the net namespace pointer
+				  * during probe time only.
+				  */
unsigned int num_vfs;
struct nsim_vf_config *vfconfigs;
};
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 2aa7b2e60046..8e30db28fd7d 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -15,6 +15,15 @@
#include <linux/of_gpio.h>
#include <linux/gpio/consumer.h>
+#define AT803X_SPECIFIC_STATUS 0x11
+#define AT803X_SS_SPEED_MASK (3 << 14)
+#define AT803X_SS_SPEED_1000 (2 << 14)
+#define AT803X_SS_SPEED_100 (1 << 14)
+#define AT803X_SS_SPEED_10 (0 << 14)
+#define AT803X_SS_DUPLEX BIT(13)
+#define AT803X_SS_SPEED_DUPLEX_RESOLVED BIT(11)
+#define AT803X_SS_MDIX BIT(6)
+
#define AT803X_INTR_ENABLE 0x12
#define AT803X_INTR_ENABLE_AUTONEG_ERR BIT(15)
#define AT803X_INTR_ENABLE_SPEED_CHANGED BIT(14)
@@ -53,6 +62,7 @@
#define AT803X_DEBUG_REG_5 0x05
#define AT803X_DEBUG_TX_CLK_DLY_EN BIT(8)
+#define ATH9331_PHY_ID 0x004dd041
#define ATH8030_PHY_ID 0x004dd076
#define ATH8031_PHY_ID 0x004dd074
#define ATH8035_PHY_ID 0x004dd072
@@ -62,10 +72,6 @@ MODULE_DESCRIPTION("Atheros 803x PHY driver");
MODULE_AUTHOR("Matus Ujhelyi");
MODULE_LICENSE("GPL");
-struct at803x_priv {
- bool phy_reset:1;
-};
-
struct at803x_context {
u16 bmcr;
u16 advertise;
@@ -231,20 +237,6 @@ static int at803x_resume(struct phy_device *phydev)
return phy_modify(phydev, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE, 0);
}
-static int at803x_probe(struct phy_device *phydev)
-{
- struct device *dev = &phydev->mdio.dev;
- struct at803x_priv *priv;
-
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- phydev->priv = priv;
-
- return 0;
-}
-
static int at803x_config_init(struct phy_device *phydev)
{
int ret;
@@ -357,19 +349,77 @@ static int at803x_aneg_done(struct phy_device *phydev)
return aneg_done;
}
+static int at803x_read_status(struct phy_device *phydev)
+{
+ int ss, err, old_link = phydev->link;
+
+ /* Update the link, but return if there was an error */
+ err = genphy_update_link(phydev);
+ if (err)
+ return err;
+
+ /* why bother the PHY if nothing can have changed */
+ if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link)
+ return 0;
+
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->duplex = DUPLEX_UNKNOWN;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+
+ err = genphy_read_lpa(phydev);
+ if (err < 0)
+ return err;
+
+ /* Read the AT8035 PHY-Specific Status register, which indicates the
+ * speed and duplex that the PHY is actually using, irrespective of
+ * whether we are in autoneg mode or not.
+ */
+ ss = phy_read(phydev, AT803X_SPECIFIC_STATUS);
+ if (ss < 0)
+ return ss;
+
+ if (ss & AT803X_SS_SPEED_DUPLEX_RESOLVED) {
+ switch (ss & AT803X_SS_SPEED_MASK) {
+ case AT803X_SS_SPEED_10:
+ phydev->speed = SPEED_10;
+ break;
+ case AT803X_SS_SPEED_100:
+ phydev->speed = SPEED_100;
+ break;
+ case AT803X_SS_SPEED_1000:
+ phydev->speed = SPEED_1000;
+ break;
+ }
+ if (ss & AT803X_SS_DUPLEX)
+ phydev->duplex = DUPLEX_FULL;
+ else
+ phydev->duplex = DUPLEX_HALF;
+ if (ss & AT803X_SS_MDIX)
+ phydev->mdix = ETH_TP_MDI_X;
+ else
+ phydev->mdix = ETH_TP_MDI;
+ }
+
+ if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete)
+ phy_resolve_aneg_pause(phydev);
+
+ return 0;
+}
+
static struct phy_driver at803x_driver[] = {
{
/* ATHEROS 8035 */
.phy_id = ATH8035_PHY_ID,
.name = "Atheros 8035 ethernet",
.phy_id_mask = AT803X_PHY_ID_MASK,
- .probe = at803x_probe,
.config_init = at803x_config_init,
.set_wol = at803x_set_wol,
.get_wol = at803x_get_wol,
.suspend = at803x_suspend,
.resume = at803x_resume,
/* PHY_GBIT_FEATURES */
+ .read_status = at803x_read_status,
.ack_interrupt = at803x_ack_interrupt,
.config_intr = at803x_config_intr,
}, {
@@ -377,7 +427,6 @@ static struct phy_driver at803x_driver[] = {
.phy_id = ATH8030_PHY_ID,
.name = "Atheros 8030 ethernet",
.phy_id_mask = AT803X_PHY_ID_MASK,
- .probe = at803x_probe,
.config_init = at803x_config_init,
.link_change_notify = at803x_link_change_notify,
.set_wol = at803x_set_wol,
@@ -392,16 +441,26 @@ static struct phy_driver at803x_driver[] = {
.phy_id = ATH8031_PHY_ID,
.name = "Atheros 8031 ethernet",
.phy_id_mask = AT803X_PHY_ID_MASK,
- .probe = at803x_probe,
.config_init = at803x_config_init,
.set_wol = at803x_set_wol,
.get_wol = at803x_get_wol,
.suspend = at803x_suspend,
.resume = at803x_resume,
/* PHY_GBIT_FEATURES */
+ .read_status = at803x_read_status,
.aneg_done = at803x_aneg_done,
.ack_interrupt = &at803x_ack_interrupt,
.config_intr = &at803x_config_intr,
+}, {
+ /* ATHEROS AR9331 */
+ PHY_ID_MATCH_EXACT(ATH9331_PHY_ID),
+ .name = "Atheros AR9331 built-in PHY",
+ .config_init = at803x_config_init,
+ .suspend = at803x_suspend,
+ .resume = at803x_resume,
+ /* PHY_BASIC_FEATURES */
+ .ack_interrupt = &at803x_ack_interrupt,
+ .config_intr = &at803x_config_intr,
} };
module_phy_driver(at803x_driver);
@@ -410,6 +469,7 @@ static struct mdio_device_id __maybe_unused atheros_tbl[] = {
{ ATH8030_PHY_ID, AT803X_PHY_ID_MASK },
{ ATH8031_PHY_ID, AT803X_PHY_ID_MASK },
{ ATH8035_PHY_ID, AT803X_PHY_ID_MASK },
+ { PHY_ID_MATCH_EXACT(ATH9331_PHY_ID) },
{ }
};
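
The new at803x_read_status() reads the vendor "specific status" register and decodes speed/duplex with a plain mask-and-switch instead of inferring them from BMCR/LPA. A minimal sketch of that decode style, reusing the AT803X_SS_* masks defined in the patch above (the helper itself is illustrative):

    static void decode_ss(int ss, int *speed, int *full_duplex)
    {
        if (!(ss & AT803X_SS_SPEED_DUPLEX_RESOLVED))
            return;                        /* link not resolved yet */

        switch (ss & AT803X_SS_SPEED_MASK) {
        case AT803X_SS_SPEED_10:
            *speed = 10;
            break;
        case AT803X_SS_SPEED_100:
            *speed = 100;
            break;
        case AT803X_SS_SPEED_1000:
            *speed = 1000;
            break;
        }
        *full_duplex = !!(ss & AT803X_SS_DUPLEX);
    }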
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index 8fc33867e524..af8eabe7a6d4 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -572,6 +572,7 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
.name = _name, \
/* PHY_BASIC_FEATURES */ \
.flags = PHY_IS_INTERNAL, \
+ .soft_reset = genphy_soft_reset, \
.config_init = bcm7xxx_config_init, \
.suspend = bcm7xxx_suspend, \
.resume = bcm7xxx_config_init, \
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 937d0059e8ac..7d68b28bb893 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -26,18 +26,13 @@ MODULE_DESCRIPTION("Broadcom PHY driver");
MODULE_AUTHOR("Maciej W. Rozycki");
MODULE_LICENSE("GPL");
+static int bcm54xx_config_clock_delay(struct phy_device *phydev);
+
static int bcm54210e_config_init(struct phy_device *phydev)
{
int val;
- val = bcm54xx_auxctl_read(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC);
- val &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN;
- val |= MII_BCM54XX_AUXCTL_MISC_WREN;
- bcm54xx_auxctl_write(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC, val);
-
- val = bcm_phy_read_shadow(phydev, BCM54810_SHD_CLK_CTL);
- val &= ~BCM54810_SHD_CLK_CTL_GTXCLK_EN;
- bcm_phy_write_shadow(phydev, BCM54810_SHD_CLK_CTL, val);
+ bcm54xx_config_clock_delay(phydev);
if (phydev->dev_flags & PHY_BRCM_EN_MASTER_MODE) {
val = phy_read(phydev, MII_CTRL1000);
@@ -52,26 +47,7 @@ static int bcm54612e_config_init(struct phy_device *phydev)
{
int reg;
- /* Clear TX internal delay unless requested. */
- if ((phydev->interface != PHY_INTERFACE_MODE_RGMII_ID) &&
- (phydev->interface != PHY_INTERFACE_MODE_RGMII_TXID)) {
- /* Disable TXD to GTXCLK clock delay (default set) */
- /* Bit 9 is the only field in shadow register 00011 */
- bcm_phy_write_shadow(phydev, 0x03, 0);
- }
-
- /* Clear RX internal delay unless requested. */
- if ((phydev->interface != PHY_INTERFACE_MODE_RGMII_ID) &&
- (phydev->interface != PHY_INTERFACE_MODE_RGMII_RXID)) {
- reg = bcm54xx_auxctl_read(phydev,
- MII_BCM54XX_AUXCTL_SHDWSEL_MISC);
- /* Disable RXD to RXC delay (default set) */
- reg &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN;
- /* Clear shadow selector field */
- reg &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MASK;
- bcm54xx_auxctl_write(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC,
- MII_BCM54XX_AUXCTL_MISC_WREN | reg);
- }
+ bcm54xx_config_clock_delay(phydev);
/* Enable CLK125 MUX on LED4 if ref clock is enabled. */
if (!(phydev->dev_flags & PHY_BRCM_RX_REFCLK_UNUSED)) {
@@ -383,9 +359,9 @@ static int bcm5482_config_init(struct phy_device *phydev)
/*
* Select 1000BASE-X register set (primary SerDes)
*/
- reg = bcm_phy_read_shadow(phydev, BCM5482_SHD_MODE);
- bcm_phy_write_shadow(phydev, BCM5482_SHD_MODE,
- reg | BCM5482_SHD_MODE_1000BX);
+ reg = bcm_phy_read_shadow(phydev, BCM54XX_SHD_MODE);
+ bcm_phy_write_shadow(phydev, BCM54XX_SHD_MODE,
+ reg | BCM54XX_SHD_MODE_1000BX);
/*
* LED1=ACTIVITYLED, LED3=LINKSPD[2]
@@ -451,12 +427,47 @@ static int bcm5481_config_aneg(struct phy_device *phydev)
return ret;
}
+static int bcm54616s_probe(struct phy_device *phydev)
+{
+ int val, intf_sel;
+
+ val = bcm_phy_read_shadow(phydev, BCM54XX_SHD_MODE);
+ if (val < 0)
+ return val;
+
+ /* The PHY is strapped in RGMII-fiber mode when INTERF_SEL[1:0]
+ * is 01b, and the link between PHY and its link partner can be
+ * either 1000Base-X or 100Base-FX.
+ * RGMII-1000Base-X is properly supported, but RGMII-100Base-FX
+ * support is still missing as of now.
+ */
+ intf_sel = (val & BCM54XX_SHD_INTF_SEL_MASK) >> 1;
+ if (intf_sel == 1) {
+ val = bcm_phy_read_shadow(phydev, BCM54616S_SHD_100FX_CTRL);
+ if (val < 0)
+ return val;
+
+ /* Bit 0 of the SerDes 100-FX Control register, when set
+ * to 1, sets the MII/RGMII -> 100BASE-FX configuration.
+ * When this bit is set to 0, it sets the GMII/RGMII ->
+ * 1000BASE-X configuration.
+ */
+ if (!(val & BCM54616S_100FX_MODE))
+ phydev->dev_flags |= PHY_BCM_FLAGS_MODE_1000BX;
+ }
+
+ return 0;
+}
+
static int bcm54616s_config_aneg(struct phy_device *phydev)
{
int ret;
	/* Aneg first. */
- ret = genphy_config_aneg(phydev);
+ if (phydev->dev_flags & PHY_BCM_FLAGS_MODE_1000BX)
+ ret = genphy_c37_config_aneg(phydev);
+ else
+ ret = genphy_config_aneg(phydev);
/* Then we can set up the delay. */
bcm54xx_config_clock_delay(phydev);
@@ -464,6 +475,18 @@ static int bcm54616s_config_aneg(struct phy_device *phydev)
return ret;
}
+static int bcm54616s_read_status(struct phy_device *phydev)
+{
+ int err;
+
+ if (phydev->dev_flags & PHY_BCM_FLAGS_MODE_1000BX)
+ err = genphy_c37_read_status(phydev);
+ else
+ err = genphy_read_status(phydev);
+
+ return err;
+}
+
static int brcm_phy_setbits(struct phy_device *phydev, int reg, int set)
{
int val;
@@ -655,6 +678,8 @@ static struct phy_driver broadcom_drivers[] = {
.config_aneg = bcm54616s_config_aneg,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .read_status = bcm54616s_read_status,
+ .probe = bcm54616s_probe,
}, {
.phy_id = PHY_ID_BCM5464,
.phy_id_mask = 0xfffffff0,
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 37fceaf9fa10..0b95e7a2e273 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -12,6 +12,8 @@
#include <linux/of.h>
#include <linux/phy.h>
#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
#include <dt-bindings/net/ti-dp83867.h>
@@ -21,8 +23,9 @@
#define MII_DP83867_PHYCTRL 0x10
#define MII_DP83867_MICR 0x12
#define MII_DP83867_ISR 0x13
-#define DP83867_CTRL 0x1f
+#define DP83867_CFG2 0x14
#define DP83867_CFG3 0x1e
+#define DP83867_CTRL 0x1f
/* Extended Registers */
#define DP83867_CFG4 0x0031
@@ -36,6 +39,13 @@
#define DP83867_STRAP_STS1 0x006E
#define DP83867_STRAP_STS2 0x006f
#define DP83867_RGMIIDCTL 0x0086
+#define DP83867_RXFCFG 0x0134
+#define DP83867_RXFPMD1 0x0136
+#define DP83867_RXFPMD2 0x0137
+#define DP83867_RXFPMD3 0x0138
+#define DP83867_RXFSOP1 0x0139
+#define DP83867_RXFSOP2 0x013A
+#define DP83867_RXFSOP3 0x013B
#define DP83867_IO_MUX_CFG 0x0170
#define DP83867_SGMIICTL 0x00D3
#define DP83867_10M_SGMII_CFG 0x016F
@@ -65,6 +75,13 @@
/* SGMIICTL bits */
#define DP83867_SGMII_TYPE BIT(14)
+/* RXFCFG bits */
+#define DP83867_WOL_MAGIC_EN BIT(0)
+#define DP83867_WOL_BCAST_EN BIT(2)
+#define DP83867_WOL_UCAST_EN BIT(4)
+#define DP83867_WOL_SEC_EN BIT(5)
+#define DP83867_WOL_ENH_MAC BIT(7)
+
/* STRAP_STS1 bits */
#define DP83867_STRAP_STS1_RESERVED BIT(11)
@@ -95,6 +112,10 @@
#define DP83867_IO_MUX_CFG_CLK_O_SEL_MASK (0x1f << 8)
#define DP83867_IO_MUX_CFG_CLK_O_SEL_SHIFT 8
+/* CFG3 bits */
+#define DP83867_CFG3_INT_OE BIT(7)
+#define DP83867_CFG3_ROBUST_AUTO_MDIX BIT(9)
+
/* CFG4 bits */
#define DP83867_CFG4_PORT_MIRROR_EN BIT(0)
@@ -126,6 +147,115 @@ static int dp83867_ack_interrupt(struct phy_device *phydev)
return 0;
}
+static int dp83867_set_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ struct net_device *ndev = phydev->attached_dev;
+ u16 val_rxcfg, val_micr;
+ u8 *mac;
+
+ val_rxcfg = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_RXFCFG);
+ val_micr = phy_read(phydev, MII_DP83867_MICR);
+
+ if (wol->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_UCAST |
+ WAKE_BCAST)) {
+ val_rxcfg |= DP83867_WOL_ENH_MAC;
+ val_micr |= MII_DP83867_MICR_WOL_INT_EN;
+
+ if (wol->wolopts & WAKE_MAGIC) {
+ mac = (u8 *)ndev->dev_addr;
+
+ if (!is_valid_ether_addr(mac))
+ return -EINVAL;
+
+ phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFPMD1,
+ (mac[1] << 8 | mac[0]));
+ phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFPMD2,
+ (mac[3] << 8 | mac[2]));
+ phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFPMD3,
+ (mac[5] << 8 | mac[4]));
+
+ val_rxcfg |= DP83867_WOL_MAGIC_EN;
+ } else {
+ val_rxcfg &= ~DP83867_WOL_MAGIC_EN;
+ }
+
+ if (wol->wolopts & WAKE_MAGICSECURE) {
+ phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFSOP1,
+ (wol->sopass[1] << 8) | wol->sopass[0]);
+			phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFSOP2,
+				      (wol->sopass[3] << 8) | wol->sopass[2]);
+			phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFSOP3,
+				      (wol->sopass[5] << 8) | wol->sopass[4]);
+
+ val_rxcfg |= DP83867_WOL_SEC_EN;
+ } else {
+ val_rxcfg &= ~DP83867_WOL_SEC_EN;
+ }
+
+ if (wol->wolopts & WAKE_UCAST)
+ val_rxcfg |= DP83867_WOL_UCAST_EN;
+ else
+ val_rxcfg &= ~DP83867_WOL_UCAST_EN;
+
+ if (wol->wolopts & WAKE_BCAST)
+ val_rxcfg |= DP83867_WOL_BCAST_EN;
+ else
+ val_rxcfg &= ~DP83867_WOL_BCAST_EN;
+ } else {
+ val_rxcfg &= ~DP83867_WOL_ENH_MAC;
+ val_micr &= ~MII_DP83867_MICR_WOL_INT_EN;
+ }
+
+ phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFCFG, val_rxcfg);
+ phy_write(phydev, MII_DP83867_MICR, val_micr);
+
+ return 0;
+}
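
For context, phylib routes ethtool WoL requests to these hooks through
phy_ethtool_set_wol()/phy_ethtool_get_wol(). A minimal sketch (not part
of this patch; the function name is illustrative) of a MAC driver
forwarding the request so that it lands in dp83867_set_wol():

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

/* Illustrative only: hand the ethtool request to the attached PHY;
 * phylib then calls the PHY driver's .set_wol hook.
 */
static int example_ethtool_set_wol(struct net_device *ndev,
				   struct ethtool_wolinfo *wol)
{
	if (!ndev->phydev)
		return -ENODEV;

	return phy_ethtool_set_wol(ndev->phydev, wol);
}

From userspace, "ethtool -s eth0 wol gs sopass 00:11:22:33:44:55" would
then exercise the WAKE_MAGIC and WAKE_MAGICSECURE paths above.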
+
+static void dp83867_get_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ u16 value, sopass_val;
+
+ wol->supported = (WAKE_UCAST | WAKE_BCAST | WAKE_MAGIC |
+ WAKE_MAGICSECURE);
+ wol->wolopts = 0;
+
+ value = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_RXFCFG);
+
+ if (value & DP83867_WOL_UCAST_EN)
+ wol->wolopts |= WAKE_UCAST;
+
+ if (value & DP83867_WOL_BCAST_EN)
+ wol->wolopts |= WAKE_BCAST;
+
+ if (value & DP83867_WOL_MAGIC_EN)
+ wol->wolopts |= WAKE_MAGIC;
+
+ if (value & DP83867_WOL_SEC_EN) {
+ sopass_val = phy_read_mmd(phydev, DP83867_DEVADDR,
+ DP83867_RXFSOP1);
+ wol->sopass[0] = (sopass_val & 0xff);
+ wol->sopass[1] = (sopass_val >> 8);
+
+ sopass_val = phy_read_mmd(phydev, DP83867_DEVADDR,
+ DP83867_RXFSOP2);
+ wol->sopass[2] = (sopass_val & 0xff);
+ wol->sopass[3] = (sopass_val >> 8);
+
+ sopass_val = phy_read_mmd(phydev, DP83867_DEVADDR,
+ DP83867_RXFSOP3);
+ wol->sopass[4] = (sopass_val & 0xff);
+ wol->sopass[5] = (sopass_val >> 8);
+
+ wol->wolopts |= WAKE_MAGICSECURE;
+ }
+
+ if (!(value & DP83867_WOL_ENH_MAC))
+ wol->wolopts = 0;
+}
+
static int dp83867_config_intr(struct phy_device *phydev)
{
int micr_status;
@@ -295,7 +425,7 @@ static int dp83867_probe(struct phy_device *phydev)
phydev->priv = dp83867;
- return 0;
+ return dp83867_of_init(phydev);
}
static int dp83867_config_init(struct phy_device *phydev)
@@ -304,10 +434,6 @@ static int dp83867_config_init(struct phy_device *phydev)
int ret, val, bs;
u16 delay;
- ret = dp83867_of_init(phydev);
- if (ret)
- return ret;
-
/* RX_DV/RX_CTRL strapped in mode 1 or mode 2 workaround */
if (dp83867->rxctrl_strap_quirk)
phy_clear_bits_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4,
@@ -410,12 +536,13 @@ static int dp83867_config_init(struct phy_device *phydev)
phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_SGMIICTL, val);
}
+ val = phy_read(phydev, DP83867_CFG3);
/* Enable Interrupt output INT_OE in CFG3 register */
- if (phy_interrupt_is_valid(phydev)) {
- val = phy_read(phydev, DP83867_CFG3);
- val |= BIT(7);
- phy_write(phydev, DP83867_CFG3, val);
- }
+ if (phy_interrupt_is_valid(phydev))
+ val |= DP83867_CFG3_INT_OE;
+
+ val |= DP83867_CFG3_ROBUST_AUTO_MDIX;
+ phy_write(phydev, DP83867_CFG3, val);
if (dp83867->port_mirroring != DP83867_PORT_MIRROING_KEEP)
dp83867_config_port_mirroring(phydev);
@@ -463,6 +590,9 @@ static struct phy_driver dp83867_driver[] = {
.config_init = dp83867_config_init,
.soft_reset = dp83867_phy_reset,
+ .get_wol = dp83867_get_wol,
+ .set_wol = dp83867_set_wol,
+
/* IRQ related */
.ack_interrupt = dp83867_ack_interrupt,
.config_intr = dp83867_config_intr,
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index a7796134e3be..b1fbd1937328 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -53,16 +53,22 @@
#define MII_M1011_PHY_SCR 0x10
#define MII_M1011_PHY_SCR_DOWNSHIFT_EN BIT(11)
-#define MII_M1011_PHY_SCR_DOWNSHIFT_SHIFT 12
-#define MII_M1011_PHY_SRC_DOWNSHIFT_MASK 0x7800
+#define MII_M1011_PHY_SCR_DOWNSHIFT_MASK GENMASK(14, 12)
+#define MII_M1011_PHY_SCR_DOWNSHIFT_MAX 8
#define MII_M1011_PHY_SCR_MDI (0x0 << 5)
#define MII_M1011_PHY_SCR_MDI_X (0x1 << 5)
#define MII_M1011_PHY_SCR_AUTO_CROSS (0x3 << 5)
+#define MII_M1011_PHY_SSR 0x11
+#define MII_M1011_PHY_SSR_DOWNSHIFT BIT(5)
+
#define MII_M1111_PHY_LED_CONTROL 0x18
#define MII_M1111_PHY_LED_DIRECT 0x4100
#define MII_M1111_PHY_LED_COMBINE 0x411c
#define MII_M1111_PHY_EXT_CR 0x14
+#define MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK GENMASK(11, 9)
+#define MII_M1111_PHY_EXT_CR_DOWNSHIFT_MAX 8
+#define MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN BIT(8)
#define MII_M1111_RGMII_RX_DELAY BIT(7)
#define MII_M1111_RGMII_TX_DELAY BIT(1)
#define MII_M1111_PHY_EXT_SR 0x1b
@@ -273,23 +279,6 @@ static int marvell_set_polarity(struct phy_device *phydev, int polarity)
return val != reg;
}
-static int marvell_set_downshift(struct phy_device *phydev, bool enable,
- u8 retries)
-{
- int reg;
-
- reg = phy_read(phydev, MII_M1011_PHY_SCR);
- if (reg < 0)
- return reg;
-
- reg &= MII_M1011_PHY_SRC_DOWNSHIFT_MASK;
- reg |= ((retries - 1) << MII_M1011_PHY_SCR_DOWNSHIFT_SHIFT);
- if (enable)
- reg |= MII_M1011_PHY_SCR_DOWNSHIFT_EN;
-
- return phy_write(phydev, MII_M1011_PHY_SCR, reg);
-}
-
static int marvell_config_aneg(struct phy_device *phydev)
{
int changed = 0;
@@ -658,41 +647,6 @@ static int marvell_config_init(struct phy_device *phydev)
return marvell_of_reg_init(phydev);
}
-static int m88e1116r_config_init(struct phy_device *phydev)
-{
- int err;
-
- err = genphy_soft_reset(phydev);
- if (err < 0)
- return err;
-
- msleep(500);
-
- err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
- if (err < 0)
- return err;
-
- err = marvell_set_polarity(phydev, phydev->mdix_ctrl);
- if (err < 0)
- return err;
-
- err = marvell_set_downshift(phydev, true, 8);
- if (err < 0)
- return err;
-
- if (phy_interface_is_rgmii(phydev)) {
- err = m88e1121_config_aneg_rgmii_delays(phydev);
- if (err < 0)
- return err;
- }
-
- err = genphy_soft_reset(phydev);
- if (err < 0)
- return err;
-
- return marvell_config_init(phydev);
-}
-
static int m88e3016_config_init(struct phy_device *phydev)
{
int ret;
@@ -833,6 +787,172 @@ static int m88e1111_config_init(struct phy_device *phydev)
return genphy_soft_reset(phydev);
}
+static int m88e1111_get_downshift(struct phy_device *phydev, u8 *data)
+{
+ int val, cnt, enable;
+
+ val = phy_read(phydev, MII_M1111_PHY_EXT_CR);
+ if (val < 0)
+ return val;
+
+ enable = FIELD_GET(MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN, val);
+ cnt = FIELD_GET(MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK, val) + 1;
+
+ *data = enable ? cnt : DOWNSHIFT_DEV_DISABLE;
+
+ return 0;
+}
+
+static int m88e1111_set_downshift(struct phy_device *phydev, u8 cnt)
+{
+ int val;
+
+ if (cnt > MII_M1111_PHY_EXT_CR_DOWNSHIFT_MAX)
+ return -E2BIG;
+
+ if (!cnt)
+ return phy_clear_bits(phydev, MII_M1111_PHY_EXT_CR,
+ MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN);
+
+ val = MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN;
+ val |= FIELD_PREP(MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK, cnt - 1);
+
+ return phy_modify(phydev, MII_M1111_PHY_EXT_CR,
+ MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN |
+ MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK,
+ val);
+}
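
The bias-by-one encoding above is easy to get wrong, so here is a small
self-contained illustration (assuming only <linux/bitfield.h> and the
mask added earlier in this patch):

#include <linux/bitfield.h>

/* A requested count of 8 is stored as field value 7 in EXT_CR bits
 * 11:9; decoding adds the 1 back. Round-trips for cnt in 1..8.
 */
static u8 downshift_encoding_example(u8 cnt)
{
	u16 reg = FIELD_PREP(MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK, cnt - 1);

	return FIELD_GET(MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK, reg) + 1;
}

From userspace the tunable is reached with
"ethtool --set-phy-tunable eth0 downshift on count 8" and read back
with "ethtool --get-phy-tunable eth0 downshift".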
+
+static int m88e1111_get_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return m88e1111_get_downshift(phydev, data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int m88e1111_set_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, const void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return m88e1111_set_downshift(phydev, *(const u8 *)data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int m88e1011_get_downshift(struct phy_device *phydev, u8 *data)
+{
+ int val, cnt, enable;
+
+ val = phy_read(phydev, MII_M1011_PHY_SCR);
+ if (val < 0)
+ return val;
+
+ enable = FIELD_GET(MII_M1011_PHY_SCR_DOWNSHIFT_EN, val);
+ cnt = FIELD_GET(MII_M1011_PHY_SCR_DOWNSHIFT_MASK, val) + 1;
+
+ *data = enable ? cnt : DOWNSHIFT_DEV_DISABLE;
+
+ return 0;
+}
+
+static int m88e1011_set_downshift(struct phy_device *phydev, u8 cnt)
+{
+ int val;
+
+ if (cnt > MII_M1011_PHY_SCR_DOWNSHIFT_MAX)
+ return -E2BIG;
+
+ if (!cnt)
+ return phy_clear_bits(phydev, MII_M1011_PHY_SCR,
+ MII_M1011_PHY_SCR_DOWNSHIFT_EN);
+
+ val = MII_M1011_PHY_SCR_DOWNSHIFT_EN;
+ val |= FIELD_PREP(MII_M1011_PHY_SCR_DOWNSHIFT_MASK, cnt - 1);
+
+ return phy_modify(phydev, MII_M1011_PHY_SCR,
+ MII_M1011_PHY_SCR_DOWNSHIFT_EN |
+ MII_M1011_PHY_SCR_DOWNSHIFT_MASK,
+ val);
+}
+
+static int m88e1011_get_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return m88e1011_get_downshift(phydev, data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int m88e1011_set_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, const void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return m88e1011_set_downshift(phydev, *(const u8 *)data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void m88e1011_link_change_notify(struct phy_device *phydev)
+{
+ int status;
+
+ if (phydev->state != PHY_RUNNING)
+ return;
+
+	/* we may currently be on the fiber page */
+ status = phy_read_paged(phydev, MII_MARVELL_COPPER_PAGE,
+ MII_M1011_PHY_SSR);
+
+ if (status > 0 && status & MII_M1011_PHY_SSR_DOWNSHIFT)
+ phydev_warn(phydev, "Downshift occurred! Cabling may be defective.\n");
+}
+
+static int m88e1116r_config_init(struct phy_device *phydev)
+{
+ int err;
+
+ err = genphy_soft_reset(phydev);
+ if (err < 0)
+ return err;
+
+ msleep(500);
+
+ err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
+ if (err < 0)
+ return err;
+
+ err = marvell_set_polarity(phydev, phydev->mdix_ctrl);
+ if (err < 0)
+ return err;
+
+ err = m88e1011_set_downshift(phydev, 8);
+ if (err < 0)
+ return err;
+
+ if (phy_interface_is_rgmii(phydev)) {
+ err = m88e1121_config_aneg_rgmii_delays(phydev);
+ if (err < 0)
+ return err;
+ }
+
+ err = genphy_soft_reset(phydev);
+ if (err < 0)
+ return err;
+
+ return marvell_config_init(phydev);
+}
+
static int m88e1318_config_init(struct phy_device *phydev)
{
if (phy_interrupt_is_valid(phydev)) {
@@ -1117,6 +1237,8 @@ static int m88e1540_get_tunable(struct phy_device *phydev,
switch (tuna->id) {
case ETHTOOL_PHY_FAST_LINK_DOWN:
return m88e1540_get_fld(phydev, data);
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return m88e1011_get_downshift(phydev, data);
default:
return -EOPNOTSUPP;
}
@@ -1128,6 +1250,8 @@ static int m88e1540_set_tunable(struct phy_device *phydev,
switch (tuna->id) {
case ETHTOOL_PHY_FAST_LINK_DOWN:
return m88e1540_set_fld(phydev, data);
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return m88e1011_set_downshift(phydev, *(const u8 *)data);
default:
return -EOPNOTSUPP;
}
@@ -2163,6 +2287,9 @@ static struct phy_driver marvell_drivers[] = {
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
+ .get_tunable = m88e1011_get_tunable,
+ .set_tunable = m88e1011_set_tunable,
+ .link_change_notify = m88e1011_link_change_notify,
},
{
.phy_id = MARVELL_PHY_ID_88E1111,
@@ -2182,6 +2309,9 @@ static struct phy_driver marvell_drivers[] = {
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
+ .get_tunable = m88e1111_get_tunable,
+ .set_tunable = m88e1111_set_tunable,
+ .link_change_notify = m88e1011_link_change_notify,
},
{
.phy_id = MARVELL_PHY_ID_88E1118,
@@ -2220,6 +2350,9 @@ static struct phy_driver marvell_drivers[] = {
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
+ .get_tunable = m88e1011_get_tunable,
+ .set_tunable = m88e1011_set_tunable,
+ .link_change_notify = m88e1011_link_change_notify,
},
{
.phy_id = MARVELL_PHY_ID_88E1318S,
@@ -2261,6 +2394,9 @@ static struct phy_driver marvell_drivers[] = {
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
+ .get_tunable = m88e1111_get_tunable,
+ .set_tunable = m88e1111_set_tunable,
+ .link_change_notify = m88e1011_link_change_notify,
},
{
.phy_id = MARVELL_PHY_ID_88E1149R,
@@ -2314,6 +2450,9 @@ static struct phy_driver marvell_drivers[] = {
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
+ .get_tunable = m88e1011_get_tunable,
+ .set_tunable = m88e1011_set_tunable,
+ .link_change_notify = m88e1011_link_change_notify,
},
{
.phy_id = MARVELL_PHY_ID_88E1510,
@@ -2337,6 +2476,9 @@ static struct phy_driver marvell_drivers[] = {
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
.set_loopback = genphy_loopback,
+ .get_tunable = m88e1011_get_tunable,
+ .set_tunable = m88e1011_set_tunable,
+ .link_change_notify = m88e1011_link_change_notify,
},
{
.phy_id = MARVELL_PHY_ID_88E1540,
@@ -2359,6 +2501,7 @@ static struct phy_driver marvell_drivers[] = {
.get_stats = marvell_get_stats,
.get_tunable = m88e1540_get_tunable,
.set_tunable = m88e1540_set_tunable,
+ .link_change_notify = m88e1011_link_change_notify,
},
{
.phy_id = MARVELL_PHY_ID_88E1545,
@@ -2379,6 +2522,9 @@ static struct phy_driver marvell_drivers[] = {
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
+ .get_tunable = m88e1540_get_tunable,
+ .set_tunable = m88e1540_set_tunable,
+ .link_change_notify = m88e1011_link_change_notify,
},
{
.phy_id = MARVELL_PHY_ID_88E3016,
@@ -2421,6 +2567,7 @@ static struct phy_driver marvell_drivers[] = {
.get_stats = marvell_get_stats,
.get_tunable = m88e1540_get_tunable,
.set_tunable = m88e1540_set_tunable,
+ .link_change_notify = m88e1011_link_change_notify,
},
};
diff --git a/drivers/net/phy/mdio_device.c b/drivers/net/phy/mdio_device.c
index e282600bd83e..c1d345c3cab3 100644
--- a/drivers/net/phy/mdio_device.c
+++ b/drivers/net/phy/mdio_device.c
@@ -121,7 +121,7 @@ void mdio_device_reset(struct mdio_device *mdiodev, int value)
return;
if (mdiodev->reset_gpio)
- gpiod_set_value(mdiodev->reset_gpio, value);
+ gpiod_set_value_cansleep(mdiodev->reset_gpio, value);
if (mdiodev->reset_ctrl) {
if (value)
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 2fea5541c35a..63dedec0433d 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -341,6 +341,35 @@ static int ksz8041_config_aneg(struct phy_device *phydev)
return genphy_config_aneg(phydev);
}
+static int ksz8051_ksz8795_match_phy_device(struct phy_device *phydev,
+ const u32 ksz_phy_id)
+{
+ int ret;
+
+ if ((phydev->phy_id & MICREL_PHY_ID_MASK) != ksz_phy_id)
+ return 0;
+
+ ret = phy_read(phydev, MII_BMSR);
+ if (ret < 0)
+ return ret;
+
+	/* The KSZ8051 PHY and the KSZ8794/KSZ8795/KSZ8765 switch share
+	 * the exact same PHY ID. However, they can be told apart by the
+	 * presence of the extended capability registers: the KSZ8051 PHY
+	 * has them while the switch does not.
+	 */
+ ret &= BMSR_ERCAP;
+ if (ksz_phy_id == PHY_ID_KSZ8051)
+ return ret;
+ else
+ return !ret;
+}
+
+static int ksz8051_match_phy_device(struct phy_device *phydev)
+{
+ return ksz8051_ksz8795_match_phy_device(phydev, PHY_ID_KSZ8051);
+}
+
static int ksz8081_config_init(struct phy_device *phydev)
{
/* KSZPHY_OMSO_FACTORY_TEST is set at de-assertion of the reset line
@@ -364,6 +393,11 @@ static int ksz8061_config_init(struct phy_device *phydev)
return kszphy_config_init(phydev);
}
+static int ksz8795_match_phy_device(struct phy_device *phydev)
+{
+ return ksz8051_ksz8795_match_phy_device(phydev, PHY_ID_KSZ87XX);
+}
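
These wrappers work because phylib consults a driver's
.match_phy_device hook before falling back to the ID/mask comparison,
which is why the KSZ8051 and KSZ87XX entries below can drop their
.phy_id fields. A simplified sketch of that rule (see phy_bus_match()
in phy_device.c; clause-45 handling elided):

static int phy_match_rule_sketch(struct phy_device *phydev,
				 struct phy_driver *phydrv)
{
	/* a driver-supplied matcher takes precedence */
	if (phydrv->match_phy_device)
		return phydrv->match_phy_device(phydev);

	return (phydrv->phy_id & phydrv->phy_id_mask) ==
	       (phydev->phy_id & phydrv->phy_id_mask);
}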
+
static int ksz9021_load_values_from_of(struct phy_device *phydev,
const struct device_node *of_node,
u16 reg,
@@ -1017,8 +1051,6 @@ static struct phy_driver ksphy_driver[] = {
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
- .phy_id = PHY_ID_KSZ8051,
- .phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8051",
/* PHY_BASIC_FEATURES */
.driver_data = &ksz8051_type,
@@ -1029,6 +1061,7 @@ static struct phy_driver ksphy_driver[] = {
.get_sset_count = kszphy_get_sset_count,
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
+ .match_phy_device = ksz8051_match_phy_device,
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
@@ -1141,13 +1174,12 @@ static struct phy_driver ksphy_driver[] = {
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
- .phy_id = PHY_ID_KSZ8795,
- .phy_id_mask = MICREL_PHY_ID_MASK,
- .name = "Micrel KSZ8795",
+ .name = "Micrel KSZ87XX Switch",
/* PHY_BASIC_FEATURES */
.config_init = kszphy_config_init,
.config_aneg = ksz8873mll_config_aneg,
.read_status = ksz8873mll_read_status,
+ .match_phy_device = ksz8795_match_phy_device,
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c
index 7ada1fd9ca71..805cda3465d7 100644
--- a/drivers/net/phy/mscc.c
+++ b/drivers/net/phy/mscc.c
@@ -895,7 +895,7 @@ static void vsc85xx_tr_write(struct phy_device *phydev, u16 addr, u32 val)
static int vsc8531_pre_init_seq_set(struct phy_device *phydev)
{
int rc;
- const struct reg_val init_seq[] = {
+ static const struct reg_val init_seq[] = {
{0x0f90, 0x00688980},
{0x0696, 0x00000003},
{0x07fa, 0x0050100f},
@@ -939,7 +939,7 @@ out_unlock:
static int vsc85xx_eee_init_seq_set(struct phy_device *phydev)
{
- const struct reg_val init_eee[] = {
+ static const struct reg_val init_eee[] = {
{0x0f82, 0x0012b00a},
{0x1686, 0x00000004},
{0x168c, 0x00d2c46f},
@@ -1224,7 +1224,7 @@ out:
/* bus->mdio_lock should be locked when using this function */
static int vsc8574_config_pre_init(struct phy_device *phydev)
{
- const struct reg_val pre_init1[] = {
+ static const struct reg_val pre_init1[] = {
{0x0fae, 0x000401bd},
{0x0fac, 0x000f000f},
{0x17a0, 0x00a0f147},
@@ -1272,7 +1272,7 @@ static int vsc8574_config_pre_init(struct phy_device *phydev)
{0x0fee, 0x0004a6a1},
{0x0ffe, 0x00b01807},
};
- const struct reg_val pre_init2[] = {
+ static const struct reg_val pre_init2[] = {
{0x0486, 0x0008a518},
{0x0488, 0x006dc696},
{0x048a, 0x00000912},
@@ -1427,7 +1427,7 @@ out:
/* bus->mdio_lock should be locked when using this function */
static int vsc8584_config_pre_init(struct phy_device *phydev)
{
- const struct reg_val pre_init1[] = {
+ static const struct reg_val pre_init1[] = {
{0x07fa, 0x0050100f},
{0x1688, 0x00049f81},
{0x0f90, 0x00688980},
@@ -1451,7 +1451,7 @@ static int vsc8584_config_pre_init(struct phy_device *phydev)
{0x16b2, 0x00007000},
{0x16b4, 0x00000814},
};
- const struct reg_val pre_init2[] = {
+ static const struct reg_val pre_init2[] = {
{0x0486, 0x0008a518},
{0x0488, 0x006dc696},
{0x048a, 0x00000912},
@@ -1786,7 +1786,7 @@ static int vsc8514_config_pre_init(struct phy_device *phydev)
* values to handle hardware performance of PHY. They
* are set at Power-On state and remain until PHY Reset.
*/
- const struct reg_val pre_init1[] = {
+ static const struct reg_val pre_init1[] = {
{0x0f90, 0x00688980},
{0x0786, 0x00000003},
{0x07fa, 0x0050100f},
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index 7935593debb1..a1caeee12236 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -323,6 +323,8 @@ int genphy_c45_read_pma(struct phy_device *phydev)
{
int val;
+ linkmode_zero(phydev->lp_advertising);
+
val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1);
if (val < 0)
return val;
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index 369903d9b6ec..5458ed1b87a8 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -8,7 +8,7 @@
const char *phy_speed_to_str(int speed)
{
- BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 69,
+ BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 74,
"Enum ethtool_link_mode_bit_indices and phylib are out of sync. "
"If a speed or mode has been added please update phy_speed_to_str "
"and the PHY settings array.\n");
@@ -42,6 +42,8 @@ const char *phy_speed_to_str(int speed)
return "100Gbps";
case SPEED_200000:
return "200Gbps";
+ case SPEED_400000:
+ return "400Gbps";
case SPEED_UNKNOWN:
return "Unknown";
default:
@@ -70,6 +72,12 @@ EXPORT_SYMBOL_GPL(phy_duplex_to_str);
.bit = ETHTOOL_LINK_MODE_ ## b ## _BIT}
static const struct phy_setting settings[] = {
+ /* 400G */
+ PHY_SETTING( 400000, FULL, 400000baseCR8_Full ),
+ PHY_SETTING( 400000, FULL, 400000baseKR8_Full ),
+ PHY_SETTING( 400000, FULL, 400000baseLR8_ER8_FR8_Full ),
+ PHY_SETTING( 400000, FULL, 400000baseDR8_Full ),
+ PHY_SETTING( 400000, FULL, 400000baseSR8_Full ),
/* 200G */
PHY_SETTING( 200000, FULL, 200000baseCR4_Full ),
PHY_SETTING( 200000, FULL, 200000baseKR4_Full ),
@@ -283,6 +291,18 @@ void of_set_phy_eee_broken(struct phy_device *phydev)
phydev->eee_broken_modes = broken;
}
+void phy_resolve_aneg_pause(struct phy_device *phydev)
+{
+ if (phydev->duplex == DUPLEX_FULL) {
+ phydev->pause = linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ phydev->lp_advertising);
+ phydev->asym_pause = linkmode_test_bit(
+ ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydev->lp_advertising);
+ }
+}
+EXPORT_SYMBOL_GPL(phy_resolve_aneg_pause);
+
/**
* phy_resolve_aneg_linkmode - resolve the advertisements into phy settings
* @phydev: The phy_device struct
@@ -305,13 +325,7 @@ void phy_resolve_aneg_linkmode(struct phy_device *phydev)
break;
}
- if (phydev->duplex == DUPLEX_FULL) {
- phydev->pause = linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
- phydev->lp_advertising);
- phydev->asym_pause = linkmode_test_bit(
- ETHTOOL_LINK_MODE_Asym_Pause_BIT,
- phydev->lp_advertising);
- }
+ phy_resolve_aneg_pause(phydev);
}
EXPORT_SYMBOL_GPL(phy_resolve_aneg_linkmode);
@@ -683,11 +697,17 @@ EXPORT_SYMBOL_GPL(phy_modify_mmd);
static int __phy_read_page(struct phy_device *phydev)
{
+ if (WARN_ONCE(!phydev->drv->read_page, "read_page callback not available, PHY driver not loaded?\n"))
+ return -EOPNOTSUPP;
+
return phydev->drv->read_page(phydev);
}
static int __phy_write_page(struct phy_device *phydev, int page)
{
+ if (WARN_ONCE(!phydev->drv->write_page, "write_page callback not available, PHY driver not loaded?\n"))
+ return -EOPNOTSUPP;
+
return phydev->drv->write_page(phydev, page);
}
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 7c92afd36bbe..105d389b58e7 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -457,6 +457,11 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
val);
change_autoneg = true;
break;
+ case MII_CTRL1000:
+ mii_ctrl1000_mod_linkmode_adv_t(phydev->advertising,
+ val);
+ change_autoneg = true;
+ break;
default:
/* do nothing */
break;
@@ -567,9 +572,6 @@ int phy_start_aneg(struct phy_device *phydev)
if (AUTONEG_DISABLE == phydev->autoneg)
phy_sanitize_settings(phydev);
- /* Invalidate LP advertising flags */
- linkmode_zero(phydev->lp_advertising);
-
err = phy_config_aneg(phydev);
if (err < 0)
goto out_unlock;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index d347ddcac45b..fa71998fea51 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1270,7 +1270,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
phydev_err(phydev, "error creating 'phy_standalone' sysfs entry\n");
}
- phydev->dev_flags = flags;
+ phydev->dev_flags |= flags;
phydev->interface = interface;
@@ -1608,6 +1608,40 @@ static int genphy_config_advert(struct phy_device *phydev)
}
/**
+ * genphy_c37_config_advert - sanitize and advertise auto-negotiation parameters
+ * @phydev: target phy_device struct
+ *
+ * Description: Writes MII_ADVERTISE with the appropriate values,
+ * after sanitizing the values to make sure we only advertise
+ * what is supported. Returns < 0 on error, 0 if the PHY's advertisement
+ * hasn't changed, and > 0 if it has changed. This function is intended
+ * for Clause 37 1000Base-X mode.
+ */
+static int genphy_c37_config_advert(struct phy_device *phydev)
+{
+ u16 adv = 0;
+
+ /* Only allow advertising what this PHY supports */
+ linkmode_and(phydev->advertising, phydev->advertising,
+ phydev->supported);
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ phydev->advertising))
+ adv |= ADVERTISE_1000XFULL;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ phydev->advertising))
+ adv |= ADVERTISE_1000XPAUSE;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydev->advertising))
+ adv |= ADVERTISE_1000XPSE_ASYM;
+
+ return phy_modify_changed(phydev, MII_ADVERTISE,
+ ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
+ ADVERTISE_1000XHALF | ADVERTISE_1000XPSE_ASYM,
+ adv);
+}
+
+/**
* genphy_config_eee_advert - disable unwanted eee mode advertisement
* @phydev: target phy_device struct
*
@@ -1716,6 +1750,54 @@ int __genphy_config_aneg(struct phy_device *phydev, bool changed)
EXPORT_SYMBOL(__genphy_config_aneg);
/**
+ * genphy_c37_config_aneg - restart auto-negotiation or write BMCR
+ * @phydev: target phy_device struct
+ *
+ * Description: If auto-negotiation is enabled, we configure the
+ * advertising, and then restart auto-negotiation. If it is not
+ * enabled, then we write the BMCR. This function is intended
+ * for use with Clause 37 1000Base-X mode.
+ */
+int genphy_c37_config_aneg(struct phy_device *phydev)
+{
+ int err, changed;
+
+ if (phydev->autoneg != AUTONEG_ENABLE)
+ return genphy_setup_forced(phydev);
+
+ err = phy_modify(phydev, MII_BMCR, BMCR_SPEED1000 | BMCR_SPEED100,
+ BMCR_SPEED1000);
+ if (err)
+ return err;
+
+ changed = genphy_c37_config_advert(phydev);
+ if (changed < 0) /* error */
+ return changed;
+
+ if (!changed) {
+ /* Advertisement hasn't changed, but maybe aneg was never on to
+ * begin with? Or maybe phy was isolated?
+ */
+ int ctl = phy_read(phydev, MII_BMCR);
+
+ if (ctl < 0)
+ return ctl;
+
+ if (!(ctl & BMCR_ANENABLE) || (ctl & BMCR_ISOLATE))
+ changed = 1; /* do restart aneg */
+ }
+
+ /* Only restart aneg if we are advertising something different
+ * than we were before.
+ */
+ if (changed > 0)
+ return genphy_restart_aneg(phydev);
+
+ return 0;
+}
+EXPORT_SYMBOL(genphy_c37_config_aneg);
+
+/**
* genphy_aneg_done - return auto-negotiation status
* @phydev: target phy_device struct
*
@@ -1783,34 +1865,18 @@ done:
}
EXPORT_SYMBOL(genphy_update_link);
-/**
- * genphy_read_status - check the link status and update current link state
- * @phydev: target phy_device struct
- *
- * Description: Check the link, then figure out the current state
- * by comparing what we advertise with what the link partner
- * advertises. Start by checking the gigabit possibilities,
- * then move on to 10/100.
- */
-int genphy_read_status(struct phy_device *phydev)
+int genphy_read_lpa(struct phy_device *phydev)
{
- int lpa, lpagb, err, old_link = phydev->link;
+ int lpa, lpagb;
- /* Update the link, but return if there was an error */
- err = genphy_update_link(phydev);
- if (err)
- return err;
-
- /* why bother the PHY if nothing can have changed */
- if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link)
- return 0;
-
- phydev->speed = SPEED_UNKNOWN;
- phydev->duplex = DUPLEX_UNKNOWN;
- phydev->pause = 0;
- phydev->asym_pause = 0;
+ if (phydev->autoneg == AUTONEG_ENABLE) {
+ if (!phydev->autoneg_complete) {
+ mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising,
+ 0);
+ mii_lpa_mod_linkmode_lpa_t(phydev->lp_advertising, 0);
+ return 0;
+ }
- if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete) {
if (phydev->is_gigabit_capable) {
lpagb = phy_read(phydev, MII_STAT1000);
if (lpagb < 0)
@@ -1838,6 +1904,46 @@ int genphy_read_status(struct phy_device *phydev)
return lpa;
mii_lpa_mod_linkmode_lpa_t(phydev->lp_advertising, lpa);
+ } else {
+ linkmode_zero(phydev->lp_advertising);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(genphy_read_lpa);
+
+/**
+ * genphy_read_status - check the link status and update current link state
+ * @phydev: target phy_device struct
+ *
+ * Description: Check the link, then figure out the current state
+ * by comparing what we advertise with what the link partner
+ * advertises. Start by checking the gigabit possibilities,
+ * then move on to 10/100.
+ */
+int genphy_read_status(struct phy_device *phydev)
+{
+ int err, old_link = phydev->link;
+
+ /* Update the link, but return if there was an error */
+ err = genphy_update_link(phydev);
+ if (err)
+ return err;
+
+ /* why bother the PHY if nothing can have changed */
+ if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link)
+ return 0;
+
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->duplex = DUPLEX_UNKNOWN;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+
+ err = genphy_read_lpa(phydev);
+ if (err < 0)
+ return err;
+
+ if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete) {
phy_resolve_aneg_linkmode(phydev);
} else if (phydev->autoneg == AUTONEG_DISABLE) {
int bmcr = phy_read(phydev, MII_BMCR);
@@ -1863,6 +1969,63 @@ int genphy_read_status(struct phy_device *phydev)
EXPORT_SYMBOL(genphy_read_status);
/**
+ * genphy_c37_read_status - check the link status and update current link state
+ * @phydev: target phy_device struct
+ *
+ * Description: Check the link, then figure out the current state
+ * by comparing what we advertise with what the link partner
+ * advertises. This function is for Clause 37 1000Base-X mode.
+ */
+int genphy_c37_read_status(struct phy_device *phydev)
+{
+ int lpa, err, old_link = phydev->link;
+
+ /* Update the link, but return if there was an error */
+ err = genphy_update_link(phydev);
+ if (err)
+ return err;
+
+ /* why bother the PHY if nothing can have changed */
+ if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link)
+ return 0;
+
+ phydev->duplex = DUPLEX_UNKNOWN;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+
+ if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete) {
+ lpa = phy_read(phydev, MII_LPA);
+ if (lpa < 0)
+ return lpa;
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ phydev->lp_advertising, lpa & LPA_LPACK);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ phydev->lp_advertising, lpa & LPA_1000XFULL);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ phydev->lp_advertising, lpa & LPA_1000XPAUSE);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydev->lp_advertising,
+ lpa & LPA_1000XPAUSE_ASYM);
+
+ phy_resolve_aneg_linkmode(phydev);
+ } else if (phydev->autoneg == AUTONEG_DISABLE) {
+ int bmcr = phy_read(phydev, MII_BMCR);
+
+ if (bmcr < 0)
+ return bmcr;
+
+ if (bmcr & BMCR_FULLDPLX)
+ phydev->duplex = DUPLEX_FULL;
+ else
+ phydev->duplex = DUPLEX_HALF;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(genphy_c37_read_status);
+
+/**
* genphy_soft_reset - software reset the PHY via BMCR_RESET bit
* @phydev: target phy_device struct
*
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index a5a57ca94c1a..f16d9e92a81a 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -87,8 +87,24 @@ struct phylink {
phylink_printk(KERN_WARNING, pl, fmt, ##__VA_ARGS__)
#define phylink_info(pl, fmt, ...) \
phylink_printk(KERN_INFO, pl, fmt, ##__VA_ARGS__)
+#if defined(CONFIG_DYNAMIC_DEBUG)
#define phylink_dbg(pl, fmt, ...) \
+do { \
+ if ((pl)->config->type == PHYLINK_NETDEV) \
+ netdev_dbg((pl)->netdev, fmt, ##__VA_ARGS__); \
+ else if ((pl)->config->type == PHYLINK_DEV) \
+ dev_dbg((pl)->dev, fmt, ##__VA_ARGS__); \
+} while (0)
+#elif defined(DEBUG)
+#define phylink_dbg(pl, fmt, ...) \
phylink_printk(KERN_DEBUG, pl, fmt, ##__VA_ARGS__)
+#else
+#define phylink_dbg(pl, fmt, ...) \
+({ \
+ if (0) \
+ phylink_printk(KERN_DEBUG, pl, fmt, ##__VA_ARGS__); \
+})
+#endif
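
The "if (0)" form in the fallback is deliberate: the call is still
type-checked at compile time (so format strings stay validated even
with debugging off) while the optimizer discards it entirely. A minimal
standalone illustration of the same idiom:

/* The branch is dead code and generates nothing, but the printk()
 * arguments are still checked against the format string.
 */
#define dbg_noop(fmt, ...)					\
({								\
	if (0)							\
		printk(KERN_DEBUG fmt, ##__VA_ARGS__);		\
})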
/**
* phylink_set_port_modes() - set the port type modes in the ethtool mask
@@ -117,9 +133,7 @@ static int phylink_is_empty_linkmode(const unsigned long *linkmode)
phylink_set(tmp, Pause);
phylink_set(tmp, Asym_Pause);
- bitmap_andnot(tmp, linkmode, tmp, __ETHTOOL_LINK_MODE_MASK_NBITS);
-
- return linkmode_empty(tmp);
+ return linkmode_subset(linkmode, tmp);
}
static const char *phylink_an_mode_str(unsigned int mode)
@@ -550,33 +564,24 @@ static const struct sfp_upstream_ops sfp_phylink_ops;
static int phylink_register_sfp(struct phylink *pl,
struct fwnode_handle *fwnode)
{
- struct fwnode_reference_args ref;
+ struct sfp_bus *bus;
int ret;
- if (!fwnode)
- return 0;
-
- ret = fwnode_property_get_reference_args(fwnode, "sfp", NULL,
- 0, 0, &ref);
- if (ret < 0) {
- if (ret == -ENOENT)
- return 0;
-
- phylink_err(pl, "unable to parse \"sfp\" node: %d\n",
- ret);
+ bus = sfp_register_upstream_node(fwnode, pl, &sfp_phylink_ops);
+ if (IS_ERR(bus)) {
+ ret = PTR_ERR(bus);
+ phylink_err(pl, "unable to attach SFP bus: %d\n", ret);
return ret;
}
- pl->sfp_bus = sfp_register_upstream(ref.fwnode, pl, &sfp_phylink_ops);
- if (!pl->sfp_bus)
- return -ENOMEM;
+ pl->sfp_bus = bus;
return 0;
}
/**
* phylink_create() - create a phylink instance
- * @ndev: a pointer to the &struct net_device
+ * @config: a pointer to the target &struct phylink_config
* @fwnode: a pointer to a &struct fwnode_handle describing the network
* interface
* @iface: the desired link mode defined by &typedef phy_interface_t
@@ -1728,8 +1733,7 @@ static int phylink_sfp_module_insert(void *upstream,
if (phy_interface_mode_is_8023z(iface) && pl->phydev)
return -EINVAL;
- changed = !bitmap_equal(pl->supported, support,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ changed = !linkmode_equal(pl->supported, support);
if (changed) {
linkmode_copy(pl->supported, support);
linkmode_copy(pl->link_config.advertising, config.advertising);
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index b23fc41896ef..d037aab6a71d 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -4,6 +4,7 @@
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/phylink.h>
+#include <linux/property.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
@@ -445,45 +446,63 @@ static void sfp_upstream_clear(struct sfp_bus *bus)
}
/**
- * sfp_register_upstream() - Register the neighbouring device
- * @fwnode: firmware node for the SFP bus
+ * sfp_register_upstream_node() - parse and register the neighbouring device
+ * @fwnode: firmware node for the parent device (MAC or PHY)
* @upstream: the upstream private data
* @ops: the upstream's &struct sfp_upstream_ops
*
- * Register the upstream device (eg, PHY) with the SFP bus. MAC drivers
- * should use phylink, which will call this function for them. Returns
- * a pointer to the allocated &struct sfp_bus.
+ * Parse the parent device's firmware node for a SFP bus, and register the
+ * SFP bus using sfp_register_upstream().
*
- * On error, returns %NULL.
+ * Returns: on success, a pointer to the sfp_bus structure, or %NULL
+ * if no SFP is specified. On failure, an error pointer value: one of
+ * the errors detailed for fwnode_property_get_reference_args(),
+ * %-ENOMEM if we failed to allocate the bus, or an error from the
+ * upstream's connect_phy() method.
*/
-struct sfp_bus *sfp_register_upstream(struct fwnode_handle *fwnode,
- void *upstream,
- const struct sfp_upstream_ops *ops)
+struct sfp_bus *sfp_register_upstream_node(struct fwnode_handle *fwnode,
+ void *upstream,
+ const struct sfp_upstream_ops *ops)
{
- struct sfp_bus *bus = sfp_bus_get(fwnode);
- int ret = 0;
+ struct fwnode_reference_args ref;
+ struct sfp_bus *bus;
+ int ret;
- if (bus) {
- rtnl_lock();
- bus->upstream_ops = ops;
- bus->upstream = upstream;
+ ret = fwnode_property_get_reference_args(fwnode, "sfp", NULL,
+ 0, 0, &ref);
+ if (ret == -ENOENT)
+ return NULL;
+ else if (ret < 0)
+ return ERR_PTR(ret);
- if (bus->sfp) {
- ret = sfp_register_bus(bus);
- if (ret)
- sfp_upstream_clear(bus);
- }
- rtnl_unlock();
+ bus = sfp_bus_get(ref.fwnode);
+ fwnode_handle_put(ref.fwnode);
+ if (!bus)
+ return ERR_PTR(-ENOMEM);
+
+ rtnl_lock();
+ bus->upstream_ops = ops;
+ bus->upstream = upstream;
+
+ if (bus->sfp) {
+ ret = sfp_register_bus(bus);
+ if (ret)
+ sfp_upstream_clear(bus);
+ } else {
+ ret = 0;
}
+ rtnl_unlock();
if (ret) {
sfp_bus_put(bus);
- bus = NULL;
+ bus = ERR_PTR(ret);
}
return bus;
}
-EXPORT_SYMBOL_GPL(sfp_register_upstream);
+EXPORT_SYMBOL_GPL(sfp_register_upstream_node);
/**
* sfp_unregister_upstream() - Unregister sfp bus
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index dc3d92d340c4..b73298250793 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -327,6 +327,7 @@ static struct phy_driver smsc_phy_driver[] = {
.name = "SMSC LAN8740",
/* PHY_BASIC_FEATURES */
+ .flags = PHY_RST_AFTER_CLK_EN,
.probe = smsc_phy_probe,
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 9a1b006904a7..61824bbb5588 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1324,8 +1324,6 @@ static int ppp_dev_init(struct net_device *dev)
{
struct ppp *ppp;
- netdev_lockdep_set_classes(dev);
-
ppp = netdev_priv(dev);
/* Let the netdevice take a reference on the ppp file. This ensures
* that ppp_destroy_interface() won't run before the device gets
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 734de7de03f7..e1fabb3e3246 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -238,7 +238,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
skb_dst_drop(skb);
skb_dst_set(skb, &rt->dst);
- nf_reset(skb);
+ nf_reset_ct(skb);
skb->ip_summed = CHECKSUM_NONE;
ip_select_ident(net, skb, NULL);
@@ -358,7 +358,7 @@ static int pptp_rcv(struct sk_buff *skb)
po = lookup_chan(htons(header->call_id), iph->saddr);
if (po) {
skb_dst_drop(skb);
- nf_reset(skb);
+ nf_reset_ct(skb);
return sk_receive_skb(sk_pppox(po), skb, 0);
}
drop:
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index e8089def5a46..ca70a1d840eb 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1615,7 +1615,6 @@ static int team_init(struct net_device *dev)
int err;
team->dev = dev;
- mutex_init(&team->lock);
team_set_no_mode(team);
team->pcpu_stats = netdev_alloc_pcpu_stats(struct team_pcpu_stats);
@@ -1642,7 +1641,8 @@ static int team_init(struct net_device *dev)
goto err_options_register;
netif_carrier_off(dev);
- netdev_lockdep_set_classes(dev);
+ lockdep_register_key(&team->team_lock_key);
+ __mutex_init(&team->lock, "team->team_lock_key", &team->team_lock_key);
return 0;
@@ -1673,6 +1673,7 @@ static void team_uninit(struct net_device *dev)
team_queue_override_fini(team);
mutex_unlock(&team->lock);
netdev_change_features(dev);
+ lockdep_unregister_key(&team->team_lock_key);
}
static void team_destructor(struct net_device *dev)
@@ -1976,8 +1977,15 @@ static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
err = team_port_del(team, port_dev);
mutex_unlock(&team->lock);
- if (!err)
- netdev_change_features(dev);
+ if (err)
+ return err;
+
+ if (netif_is_team_master(port_dev)) {
+ lockdep_unregister_key(&team->team_lock_key);
+ lockdep_register_key(&team->team_lock_key);
+ lockdep_set_class(&team->lock, &team->team_lock_key);
+ }
+ netdev_change_features(dev);
return err;
}
@@ -2066,7 +2074,8 @@ static int team_ethtool_get_link_ksettings(struct net_device *dev,
cmd->base.duplex = DUPLEX_UNKNOWN;
cmd->base.port = PORT_OTHER;
- list_for_each_entry(port, &team->port_list, list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(port, &team->port_list, list) {
if (team_port_txable(port)) {
if (port->state.speed != SPEED_UNKNOWN)
speed += port->state.speed;
@@ -2075,6 +2084,8 @@ static int team_ethtool_get_link_ksettings(struct net_device *dev,
cmd->base.duplex = port->state.duplex;
}
}
+ rcu_read_unlock();
+
cmd->base.speed = speed ? : SPEED_UNKNOWN;
return 0;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index aab0be40d443..dab6cccfeb52 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -526,8 +526,8 @@ static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
e = tun_flow_find(head, rxhash);
if (likely(e)) {
/* TODO: keep queueing to old queue until it's empty? */
- if (e->queue_index != queue_index)
- e->queue_index = queue_index;
+ if (READ_ONCE(e->queue_index) != queue_index)
+ WRITE_ONCE(e->queue_index, queue_index);
if (e->updated != jiffies)
e->updated = jiffies;
sock_rps_record_flow_hash(e->rps_rxhash);
@@ -1104,7 +1104,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
*/
skb_orphan(skb);
- nf_reset(skb);
+ nf_reset_ct(skb);
if (ptr_ring_produce(&tfile->tx_ring, skb))
goto drop;
@@ -2290,7 +2290,13 @@ static void tun_free_netdev(struct net_device *dev)
struct tun_struct *tun = netdev_priv(dev);
BUG_ON(!(list_empty(&tun->disabled)));
+
free_percpu(tun->pcpu_stats);
+ /* We clear pcpu_stats so that tun_set_iff() can tell if
+ * tun_free_netdev() has been called from register_netdevice().
+ */
+ tun->pcpu_stats = NULL;
+
tun_flow_uninit(tun);
security_tun_dev_free_security(tun->security);
__tun_set_ebpf(tun, &tun->steering_prog, NULL);
@@ -2782,9 +2788,6 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
if (!dev)
return -ENOMEM;
- err = dev_get_valid_name(net, dev, name);
- if (err < 0)
- goto err_free_dev;
dev_net_set(dev, net);
dev->rtnl_link_ops = &tun_link_ops;
@@ -2859,8 +2862,12 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
err_detach:
tun_detach_all(dev);
- /* register_netdevice() already called tun_free_netdev() */
- goto err_free_dev;
+ /* We are here because register_netdevice() has failed.
+ * If register_netdevice() already called tun_free_netdev()
+ * while dealing with the error, tun->pcpu_stats has been cleared.
+ */
+ if (!tun->pcpu_stats)
+ goto err_free_dev;
err_free_flow:
tun_flow_uninit(tun);
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index daa54486ab09..c5a6e75c24e3 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -1214,6 +1214,32 @@ static int ax88179_led_setting(struct usbnet *dev)
return 0;
}
+static void ax88179_get_mac_addr(struct usbnet *dev)
+{
+ u8 mac[ETH_ALEN];
+
+ /* Maybe the boot loader passed the MAC address via device tree */
+ if (!eth_platform_get_mac_address(&dev->udev->dev, mac)) {
+ netif_dbg(dev, ifup, dev->net,
+			  "MAC address read from device tree\n");
+ } else {
+ ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN,
+ ETH_ALEN, mac);
+ netif_dbg(dev, ifup, dev->net,
+			  "MAC address read from ASIX chip\n");
+ }
+
+ if (is_valid_ether_addr(mac)) {
+ memcpy(dev->net->dev_addr, mac, ETH_ALEN);
+ } else {
+ netdev_info(dev->net, "invalid MAC address, using random\n");
+ eth_hw_addr_random(dev->net);
+ }
+
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN, ETH_ALEN,
+ dev->net->dev_addr);
+}
+
static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
{
u8 buf[5];
@@ -1240,8 +1266,8 @@ static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, tmp);
msleep(100);
- ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN,
- ETH_ALEN, dev->net->dev_addr);
+	/* Read MAC address from DTB or ASIX chip */
+ ax88179_get_mac_addr(dev);
memcpy(dev->net->perm_addr, dev->net->dev_addr, ETH_ALEN);
/* RX bulk configuration */
@@ -1541,8 +1567,8 @@ static int ax88179_reset(struct usbnet *dev)
/* Ethernet PHY Auto Detach*/
ax88179_auto_detach(dev, 0);
- ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN, ETH_ALEN,
- dev->net->dev_addr);
+	/* Read MAC address from DTB or ASIX chip */
+ ax88179_get_mac_addr(dev);
/* RX bulk configuration */
memcpy(tmp, &AX88179_BULKIN_SIZE[0], 5);
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 32f53de5b1fe..0cdb2ce47645 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -766,6 +766,13 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
+/* ThinkPad Thunderbolt 3 Dock Gen 2 (based on Realtek RTL8153) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3082, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/* Lenovo Thinkpad USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
{
USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x7205, USB_CLASS_COMM,
@@ -787,6 +794,13 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
+/* ThinkPad USB-C Dock Gen 2 (based on Realtek RTL8153) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0xa387, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/* NVIDIA Tegra USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
{
USB_DEVICE_AND_INTERFACE_INFO(NVIDIA_VENDOR_ID, 0x09ff, USB_CLASS_COMM,
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index ce78714f536f..74849da031fa 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -186,7 +186,7 @@ struct hso_tiocmget {
int intr_completed;
struct usb_endpoint_descriptor *endp;
struct urb *urb;
- struct hso_serial_state_notification serial_state_notification;
+ struct hso_serial_state_notification *serial_state_notification;
u16 prev_UART_state_bitmap;
struct uart_icount icount;
};
@@ -1432,7 +1432,7 @@ static int tiocmget_submit_urb(struct hso_serial *serial,
usb_rcvintpipe(usb,
tiocmget->endp->
bEndpointAddress & 0x7F),
- &tiocmget->serial_state_notification,
+ tiocmget->serial_state_notification,
sizeof(struct hso_serial_state_notification),
tiocmget_intr_callback, serial,
tiocmget->endp->bInterval);
@@ -1479,7 +1479,7 @@ static void tiocmget_intr_callback(struct urb *urb)
/* wIndex should be the USB interface number of the port to which the
* notification applies, which should always be the Modem port.
*/
- serial_state_notification = &tiocmget->serial_state_notification;
+ serial_state_notification = tiocmget->serial_state_notification;
if (serial_state_notification->bmRequestType != BM_REQUEST_TYPE ||
serial_state_notification->bNotification != B_NOTIFICATION ||
le16_to_cpu(serial_state_notification->wValue) != W_VALUE ||
@@ -2565,6 +2565,8 @@ static void hso_free_tiomget(struct hso_serial *serial)
usb_free_urb(tiocmget->urb);
tiocmget->urb = NULL;
serial->tiocmget = NULL;
+ kfree(tiocmget->serial_state_notification);
+ tiocmget->serial_state_notification = NULL;
kfree(tiocmget);
}
}
@@ -2615,19 +2617,26 @@ static struct hso_device *hso_create_bulk_serial_device(
num_urbs = 2;
serial->tiocmget = kzalloc(sizeof(struct hso_tiocmget),
GFP_KERNEL);
+	if (serial->tiocmget)
+		serial->tiocmget->serial_state_notification
+			= kzalloc(sizeof(struct hso_serial_state_notification),
+				  GFP_KERNEL);
/* it isn't going to break our heart if serial->tiocmget
	 * allocation fails, don't bother checking this.
*/
- if (serial->tiocmget) {
+ if (serial->tiocmget && serial->tiocmget->serial_state_notification) {
tiocmget = serial->tiocmget;
+ tiocmget->endp = hso_get_ep(interface,
+ USB_ENDPOINT_XFER_INT,
+ USB_DIR_IN);
+ if (!tiocmget->endp) {
+ dev_err(&interface->dev, "Failed to find INT IN ep\n");
+ goto exit;
+ }
+
tiocmget->urb = usb_alloc_urb(0, GFP_KERNEL);
if (tiocmget->urb) {
mutex_init(&tiocmget->mutex);
init_waitqueue_head(&tiocmget->waitq);
- tiocmget->endp = hso_get_ep(
- interface,
- USB_ENDPOINT_XFER_INT,
- USB_DIR_IN);
} else
hso_free_tiomget(serial);
}
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 58f5a219fb65..cf1f3f0a4b9b 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -1264,8 +1264,11 @@ static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
- if (dev->domain_data.phyirq > 0)
+ if (dev->domain_data.phyirq > 0) {
+ local_irq_disable();
generic_handle_irq(dev->domain_data.phyirq);
+ local_irq_enable();
+ }
} else
netdev_warn(dev->net,
"unexpected interrupt: 0x%08x\n", intdata);
@@ -3782,10 +3785,14 @@ static int lan78xx_probe(struct usb_interface *intf,
/* driver requires remote-wakeup capability during autosuspend. */
intf->needs_remote_wakeup = 1;
+ ret = lan78xx_phy_init(dev);
+ if (ret < 0)
+ goto out4;
+
ret = register_netdev(netdev);
if (ret != 0) {
netif_err(dev, probe, netdev, "couldn't register the device\n");
- goto out4;
+ goto out5;
}
usb_set_intfdata(intf, dev);
@@ -3798,14 +3805,10 @@ static int lan78xx_probe(struct usb_interface *intf,
pm_runtime_set_autosuspend_delay(&udev->dev,
DEFAULT_AUTOSUSPEND_DELAY);
- ret = lan78xx_phy_init(dev);
- if (ret < 0)
- goto out5;
-
return 0;
out5:
- unregister_netdev(netdev);
+ phy_disconnect(netdev->phydev);
out4:
usb_free_urb(dev->urb_intr);
out3:
@@ -3992,9 +3995,6 @@ static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
u32 buf;
int ret;
- int event;
-
- event = message.event;
if (!dev->suspend_count++) {
spin_lock_irq(&dev->txq.lock);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index b6dc5d714b5e..596428ec71df 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1327,6 +1327,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
{QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */
{QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
{QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
@@ -1350,6 +1351,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1e2d, 0x0082, 4)}, /* Cinterion PHxx,PXxx (2 RmNet) */
{QMI_FIXED_INTF(0x1e2d, 0x0082, 5)}, /* Cinterion PHxx,PXxx (2 RmNet) */
{QMI_FIXED_INTF(0x1e2d, 0x0083, 4)}, /* Cinterion PHxx,PXxx (1 RmNet + USB Audio)*/
+ {QMI_QUIRK_SET_DTR(0x1e2d, 0x00b0, 4)}, /* Cinterion CLS8 */
{QMI_FIXED_INTF(0x413c, 0x81a2, 8)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
{QMI_FIXED_INTF(0x413c, 0x81a3, 8)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
{QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 08726090570e..ac079395c8d4 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -24,9 +24,11 @@
#include <linux/suspend.h>
#include <linux/atomic.h>
#include <linux/acpi.h>
+#include <linux/firmware.h>
+#include <crypto/hash.h>
/* Information for net-next */
-#define NETNEXT_VERSION "10"
+#define NETNEXT_VERSION "11"
/* Information for net */
#define NET_VERSION "10"
@@ -54,8 +56,11 @@
#define PLA_BDC_CR 0xd1a0
#define PLA_TEREDO_TIMER 0xd2cc
#define PLA_REALWOW_TIMER 0xd2e8
+#define PLA_UPHY_TIMER 0xd388
#define PLA_SUSPEND_FLAG 0xd38a
#define PLA_INDICATE_FALG 0xd38c
+#define PLA_MACDBG_PRE 0xd38c /* RTL_VER_04 only */
+#define PLA_MACDBG_POST 0xd38e /* RTL_VER_04 only */
#define PLA_EXTRA_STATUS 0xd398
#define PLA_EFUSE_DATA 0xdd00
#define PLA_EFUSE_CMD 0xdd02
@@ -110,7 +115,12 @@
#define USB_CONNECT_TIMER 0xcbf8
#define USB_MSC_TIMER 0xcbfc
#define USB_BURST_SIZE 0xcfc0
+#define USB_FW_FIX_EN0 0xcfca
+#define USB_FW_FIX_EN1 0xcfcc
#define USB_LPM_CONFIG 0xcfd8
+#define USB_CSTMR 0xcfef /* RTL8153A */
+#define USB_FW_CTRL 0xd334 /* RTL8153B */
+#define USB_FC_TIMER 0xd340
#define USB_USB_CTRL 0xd406
#define USB_PHY_CTRL 0xd408
#define USB_TX_AGG 0xd40a
@@ -126,6 +136,7 @@
#define USB_LPM_CTRL 0xd41a
#define USB_BMU_RESET 0xd4b0
#define USB_U1U2_TIMER 0xd4da
+#define USB_FW_TASK 0xd4e8 /* RTL8153B */
#define USB_UPS_CTRL 0xd800
#define USB_POWER_CUT 0xd80a
#define USB_MISC_0 0xd81a
@@ -133,18 +144,19 @@
#define USB_AFE_CTRL2 0xd824
#define USB_UPS_CFG 0xd842
#define USB_UPS_FLAGS 0xd848
+#define USB_WDT1_CTRL 0xe404
#define USB_WDT11_CTRL 0xe43c
-#define USB_BP_BA 0xfc26
-#define USB_BP_0 0xfc28
-#define USB_BP_1 0xfc2a
-#define USB_BP_2 0xfc2c
-#define USB_BP_3 0xfc2e
-#define USB_BP_4 0xfc30
-#define USB_BP_5 0xfc32
-#define USB_BP_6 0xfc34
-#define USB_BP_7 0xfc36
-#define USB_BP_EN 0xfc38
-#define USB_BP_8 0xfc38
+#define USB_BP_BA PLA_BP_BA
+#define USB_BP_0 PLA_BP_0
+#define USB_BP_1 PLA_BP_1
+#define USB_BP_2 PLA_BP_2
+#define USB_BP_3 PLA_BP_3
+#define USB_BP_4 PLA_BP_4
+#define USB_BP_5 PLA_BP_5
+#define USB_BP_6 PLA_BP_6
+#define USB_BP_7 PLA_BP_7
+#define USB_BP_EN PLA_BP_EN /* RTL8153A */
+#define USB_BP_8 0xfc38 /* RTL8153B */
#define USB_BP_9 0xfc3a
#define USB_BP_10 0xfc3c
#define USB_BP_11 0xfc3e
@@ -175,6 +187,7 @@
#define OCP_PHY_STATE 0xa708 /* nway state for 8153 */
#define OCP_PHY_PATCH_STAT 0xb800
#define OCP_PHY_PATCH_CMD 0xb820
+#define OCP_PHY_LOCK 0xb82e
#define OCP_ADC_IOFFSET 0xbcfc
#define OCP_ADC_CFG 0xbc06
#define OCP_SYSCLK_CFG 0xc416
@@ -185,6 +198,7 @@
#define SRAM_10M_AMP1 0x8080
#define SRAM_10M_AMP2 0x8082
#define SRAM_IMPEDANCE 0x8084
+#define SRAM_PHY_LOCK 0xb82e
/* PLA_RCR */
#define RCR_AAP 0x00000001
@@ -346,7 +360,12 @@
/* PLA_INDICATE_FALG */
#define UPCOMING_RUNTIME_D3 BIT(0)
+/* PLA_MACDBG_PRE and PLA_MACDBG_POST */
+#define DEBUG_OE BIT(0)
+#define DEBUG_LTSSM 0x0082
+
/* PLA_EXTRA_STATUS */
+#define U3P3_CHECK_EN BIT(7) /* RTL_VER_05 only */
#define LINK_CHANGE_FLAG BIT(8)
/* USB_USB2PHY */
@@ -368,6 +387,12 @@
#define STAT_SPEED_HIGH 0x0000
#define STAT_SPEED_FULL 0x0002
+/* USB_FW_FIX_EN0 */
+#define FW_FIX_SUSPEND BIT(14)
+
+/* USB_FW_FIX_EN1 */
+#define FW_IP_RESET_EN BIT(9)
+
/* USB_LPM_CONFIG */
#define LPM_U1U2_EN BIT(0)
@@ -392,12 +417,24 @@
#define OWN_UPDATE BIT(0)
#define OWN_CLEAR BIT(1)
+/* USB_FW_TASK */
+#define FC_PATCH_TASK BIT(1)
+
/* USB_UPS_CTRL */
#define POWER_CUT 0x0100
/* USB_PM_CTRL_STATUS */
#define RESUME_INDICATE 0x0001
+/* USB_CSTMR */
+#define FORCE_SUPER BIT(0)
+
+/* USB_FW_CTRL */
+#define FLOW_CTRL_PATCH_OPT BIT(1)
+
+/* USB_FC_TIMER */
+#define CTRL_TIMER_EN BIT(15)
+
/* USB_USB_CTRL */
#define RX_AGG_DISABLE 0x0010
#define RX_ZERO_EN 0x0080
@@ -419,6 +456,9 @@
#define COALESCE_HIGH 250000U
#define COALESCE_SLOW 524280U
+/* USB_WDT1_CTRL */
+#define WTD1_EN BIT(0)
+
/* USB_WDT11_CTRL */
#define TIMER11_EN 0x0001
@@ -539,6 +579,9 @@ enum spd_duplex {
/* OCP_PHY_PATCH_CMD */
#define PATCH_REQUEST BIT(4)
+/* OCP_PHY_LOCK */
+#define PATCH_LOCK BIT(0)
+
/* OCP_ADC_CFG */
#define CKADSEL_L 0x0100
#define ADC_EN 0x0080
@@ -563,6 +606,9 @@ enum spd_duplex {
/* SRAM_IMPEDANCE */
#define RX_DRIVING_MASK 0x6000
+/* SRAM_PHY_LOCK */
+#define PHY_PATCH_LOCK 0x0001
+
/* MAC PASSTHRU */
#define AD_MASK 0xfee0
#define BND_MASK 0x0004
@@ -570,6 +616,8 @@ enum spd_duplex {
#define EFUSE 0xcfdb
#define PASS_THRU_MASK 0x1
+#define BP4_SUPER_ONLY 0x1578 /* RTL_VER_04 only */
+
enum rtl_register_content {
_1000bps = 0x10,
_100bps = 0x08,
@@ -622,6 +670,7 @@ enum rtl8152_flags {
SCHEDULE_TASKLET,
GREEN_ETHERNET,
DELL_TB_RX_AGG_BUG,
+ LENOVO_MACPASSTHRU,
};
/* Define these values to match your device */
@@ -736,16 +785,16 @@ struct r8152 {
struct tasklet_struct tx_tl;
struct rtl_ops {
- void (*init)(struct r8152 *);
- int (*enable)(struct r8152 *);
- void (*disable)(struct r8152 *);
- void (*up)(struct r8152 *);
- void (*down)(struct r8152 *);
- void (*unload)(struct r8152 *);
- int (*eee_get)(struct r8152 *, struct ethtool_eee *);
- int (*eee_set)(struct r8152 *, struct ethtool_eee *);
- bool (*in_nway)(struct r8152 *);
- void (*hw_phy_cfg)(struct r8152 *);
+ void (*init)(struct r8152 *tp);
+ int (*enable)(struct r8152 *tp);
+ void (*disable)(struct r8152 *tp);
+ void (*up)(struct r8152 *tp);
+ void (*down)(struct r8152 *tp);
+ void (*unload)(struct r8152 *tp);
+ int (*eee_get)(struct r8152 *tp, struct ethtool_eee *eee);
+ int (*eee_set)(struct r8152 *tp, struct ethtool_eee *eee);
+ bool (*in_nway)(struct r8152 *tp);
+ void (*hw_phy_cfg)(struct r8152 *tp);
void (*autosuspend_en)(struct r8152 *tp, bool enable);
} rtl_ops;
@@ -766,6 +815,19 @@ struct r8152 {
u32 ctap_short_off:1;
} ups_info;
+#define RTL_VER_SIZE 32
+
+ struct rtl_fw {
+ const char *fw_name;
+ const struct firmware *fw;
+
+ char version[RTL_VER_SIZE];
+ int (*pre_fw)(struct r8152 *tp);
+ int (*post_fw)(struct r8152 *tp);
+
+ bool retry;
+ } rtl_fw;
+
atomic_t rx_count;
bool eee_en;
@@ -788,6 +850,131 @@ struct r8152 {
u8 autoneg;
};
+/**
+ * struct fw_block - block type and total length
+ * @type: type of the current block, such as RTL_FW_END, RTL_FW_PLA,
+ * RTL_FW_USB and so on.
+ * @length: total length of the current block.
+ */
+struct fw_block {
+ __le32 type;
+ __le32 length;
+} __packed;
+
+/**
+ * struct fw_header - header of the firmware file
+ * @checksum: SHA-256 checksum calculated over the whole file except this
+ * checksum field. That is, the digest covers the version field through
+ * the end of the file.
+ * @version: version of this firmware.
+ * @blocks: the first firmware block of the file
+ */
+struct fw_header {
+ u8 checksum[32];
+ char version[RTL_VER_SIZE];
+ struct fw_block blocks[0];
+} __packed;
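For illustration, a minimal userspace sketch of walking this container format. It assumes the whole file is in memory, uses glibc's le32toh() in place of the kernel's __le32_to_cpu(), hard-codes the 64-byte header (32-byte checksum plus 32-byte version), and mirrors struct fw_block with plain integer fields; like the driver's parser in rtl8152_check_firmware() further down, it advances by the block length rounded up to 8 bytes.

#include <endian.h>
#include <stdint.h>
#include <stddef.h>

struct fw_block_u { uint32_t type, length; };	/* both little-endian */

/* Walk every block after the header; 0 on a clean RTL_FW_END, -1 on a
 * truncated or malformed file.
 */
static int walk_blocks(const uint8_t *data, size_t size)
{
	size_t i = 64;				/* sizeof(struct fw_header) */

	while (i + sizeof(struct fw_block_u) <= size) {
		const struct fw_block_u *blk = (const void *)(data + i);
		uint32_t len = le32toh(blk->length);

		if (le32toh(blk->type) == 0)	/* RTL_FW_END */
			return 0;
		if (len < sizeof(*blk))		/* would loop forever */
			return -1;
		i += (len + 7) & ~(size_t)7;	/* blocks are 8-byte aligned */
	}
	return -1;
}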
+
+/**
+ * struct fw_mac - a firmware block used by RTL_FW_PLA and RTL_FW_USB.
+ * The layout of the firmware block is:
+ * <struct fw_mac> + <info> + <firmware data>.
+ * @fw_offset: offset of the firmware binary data. The start address of
+ * the data would be the address of struct fw_mac + @fw_offset.
+ * @fw_reg: the register to load the firmware. Depends on chip.
+ * @bp_ba_addr: the register to write break point base address. Depends on
+ * chip.
+ * @bp_ba_value: break point base address. Depends on chip.
+ * @bp_en_addr: the register to write break point enabled mask. Depends
+ * on chip.
+ * @bp_en_value: break point enabled mask. Depends on the firmware.
+ * @bp_start: the start register of break points. Depends on chip.
+ * @bp_num: the number of break points to be set for this firmware.
+ * Depends on the firmware.
+ * @bp: break points. Depends on firmware.
+ * @fw_ver_reg: the register to store the fw version.
+ * @fw_ver_data: the firmware version of the current type.
+ * @info: additional information for debugging; it is followed by the
+ * binary data of the firmware.
+ */
+struct fw_mac {
+ struct fw_block blk_hdr;
+ __le16 fw_offset;
+ __le16 fw_reg;
+ __le16 bp_ba_addr;
+ __le16 bp_ba_value;
+ __le16 bp_en_addr;
+ __le16 bp_en_value;
+ __le16 bp_start;
+ __le16 bp_num;
+ __le16 bp[16]; /* any value determined by firmware */
+ __le32 reserved;
+ __le16 fw_ver_reg;
+ u8 fw_ver_data;
+ char info[0];
+} __packed;
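The @fw_offset arithmetic is easy to misread, so here is a hedged sketch of how the payload bounds fall out of the header fields, mirroring the checks rtl8152_is_fw_mac_ok() performs below. It assumes a userspace mirror of struct fw_mac with plain uint16_t/uint32_t fields, with le16toh()/le32toh() standing in for the kernel helpers.

#include <endian.h>
#include <stdint.h>

/* Illustrative helper, not driver code: return the firmware payload of a
 * fw_mac block that has already been validated (fw_offset >= sizeof(*mac),
 * blk_hdr.length >= fw_offset, remainder a non-zero multiple of 4).
 */
static const uint8_t *fw_mac_payload(const struct fw_mac *mac, uint32_t *len)
{
	uint16_t off = le16toh(mac->fw_offset);

	*len = le32toh(mac->blk_hdr.length) - off;
	return (const uint8_t *)mac + off;
}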
+
+/**
+ * struct fw_phy_patch_key - a firmware block used by RTL_FW_PHY_START.
+ * This is used to set patch key when loading the firmware of PHY.
+ * @key_reg: the register to write the patch key.
+ * @key_data: patch key.
+ */
+struct fw_phy_patch_key {
+ struct fw_block blk_hdr;
+ __le16 key_reg;
+ __le16 key_data;
+ __le32 reserved;
+} __packed;
+
+/**
+ * struct fw_phy_nc - a firmware block used by RTL_FW_PHY_NC.
+ * The layout of the firmware block is:
+ * <struct fw_phy_nc> + <info> + <firmware data>.
+ * @fw_offset: offset of the firmware binary data. The start address of
+ * the data would be the address of struct fw_phy_nc + @fw_offset.
+ * @fw_reg: the register to load the firmware. Depends on chip.
+ * @ba_reg: the register to write the base address. Depends on chip.
+ * @ba_data: base address. Depends on chip.
+ * @patch_en_addr: the register of enabling patch mode. Depends on chip.
+ * @patch_en_value: patch mode enabled mask. Depends on the firmware.
+ * @mode_reg: the register to switch the mode.
+ * @mode_pre: the mode to set before loading the firmware.
+ * @mode_post: the mode to set after the firmware has been loaded.
+ * @bp_start: the start register of break points. Depends on chip.
+ * @bp_num: the number of break points to be set for this firmware.
+ * Depends on the firmware.
+ * @bp: break points. Depends on firmware.
+ * @info: additional information for debugging; it is followed by the
+ * binary data of the firmware.
+ */
+struct fw_phy_nc {
+ struct fw_block blk_hdr;
+ __le16 fw_offset;
+ __le16 fw_reg;
+ __le16 ba_reg;
+ __le16 ba_data;
+ __le16 patch_en_addr;
+ __le16 patch_en_value;
+ __le16 mode_reg;
+ __le16 mode_pre;
+ __le16 mode_post;
+ __le16 reserved;
+ __le16 bp_start;
+ __le16 bp_num;
+ __le16 bp[4];
+ char info[0];
+} __packed;
+
+enum rtl_fw_type {
+ RTL_FW_END = 0,
+ RTL_FW_PLA,
+ RTL_FW_USB,
+ RTL_FW_PHY_START,
+ RTL_FW_PHY_STOP,
+ RTL_FW_PHY_NC,
+};
+
enum rtl_version {
RTL_VER_UNKNOWN = 0,
RTL_VER_01,
@@ -1222,38 +1409,52 @@ static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa)
int ret = -EINVAL;
u32 ocp_data;
unsigned char buf[6];
-
- /* test for -AD variant of RTL8153 */
- ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
- if ((ocp_data & AD_MASK) == 0x1000) {
- /* test for MAC address pass-through bit */
- ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, EFUSE);
- if ((ocp_data & PASS_THRU_MASK) != 1) {
- netif_dbg(tp, probe, tp->netdev,
- "No efuse for RTL8153-AD MAC pass through\n");
- return -ENODEV;
- }
+ char *mac_obj_name;
+ acpi_object_type mac_obj_type;
+ int mac_strlen;
+
+ if (test_bit(LENOVO_MACPASSTHRU, &tp->flags)) {
+ mac_obj_name = "\\MACA";
+ mac_obj_type = ACPI_TYPE_STRING;
+ mac_strlen = 0x16;
} else {
- /* test for RTL8153-BND and RTL8153-BD */
- ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_1);
- if ((ocp_data & BND_MASK) == 0 && (ocp_data & BD_MASK) == 0) {
- netif_dbg(tp, probe, tp->netdev,
- "Invalid variant for MAC pass through\n");
- return -ENODEV;
+ /* test for -AD variant of RTL8153 */
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
+ if ((ocp_data & AD_MASK) == 0x1000) {
+ /* test for MAC address pass-through bit */
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, EFUSE);
+ if ((ocp_data & PASS_THRU_MASK) != 1) {
+ netif_dbg(tp, probe, tp->netdev,
+ "No efuse for RTL8153-AD MAC pass through\n");
+ return -ENODEV;
+ }
+ } else {
+ /* test for RTL8153-BND and RTL8153-BD */
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_1);
+ if ((ocp_data & BND_MASK) == 0 && (ocp_data & BD_MASK) == 0) {
+ netif_dbg(tp, probe, tp->netdev,
+ "Invalid variant for MAC pass through\n");
+ return -ENODEV;
+ }
}
+
+ mac_obj_name = "\\_SB.AMAC";
+ mac_obj_type = ACPI_TYPE_BUFFER;
+ mac_strlen = 0x17;
}
/* returns _AUXMAC_#AABBCCDDEEFF# */
- status = acpi_evaluate_object(NULL, "\\_SB.AMAC", NULL, &buffer);
+ status = acpi_evaluate_object(NULL, mac_obj_name, NULL, &buffer);
obj = (union acpi_object *)buffer.pointer;
if (!ACPI_SUCCESS(status))
return -ENODEV;
- if (obj->type != ACPI_TYPE_BUFFER || obj->string.length != 0x17) {
+ if (obj->type != mac_obj_type || obj->string.length != mac_strlen) {
netif_warn(tp, probe, tp->netdev,
"Invalid buffer for pass-thru MAC addr: (%d, %d)\n",
obj->type, obj->string.length);
goto amacout;
}
+
if (strncmp(obj->string.pointer, "_AUXMAC_#", 9) != 0 ||
strncmp(obj->string.pointer + 0x15, "#", 1) != 0) {
netif_warn(tp, probe, tp->netdev,
@@ -1688,7 +1889,7 @@ static struct tx_agg *r8152_get_tx_agg(struct r8152 *tp)
}
/* r8152_csum_workaround()
- * The hw limites the value the transport offset. When the offset is out of the
+ * The hw limits the value of the transport offset. When the offset is out of
* range, calculate the checksum by sw.
*/
static void r8152_csum_workaround(struct r8152 *tp, struct sk_buff *skb,
@@ -2178,6 +2379,7 @@ static void tx_bottom(struct r8152 *tp)
int res;
do {
+ struct net_device *netdev = tp->netdev;
struct tx_agg *agg;
if (skb_queue_empty(&tp->tx_queue))
@@ -2188,24 +2390,23 @@ static void tx_bottom(struct r8152 *tp)
break;
res = r8152_tx_agg_fill(tp, agg);
- if (res) {
- struct net_device *netdev = tp->netdev;
+ if (!res)
+ continue;
- if (res == -ENODEV) {
- rtl_set_unplug(tp);
- netif_device_detach(netdev);
- } else {
- struct net_device_stats *stats = &netdev->stats;
- unsigned long flags;
+ if (res == -ENODEV) {
+ rtl_set_unplug(tp);
+ netif_device_detach(netdev);
+ } else {
+ struct net_device_stats *stats = &netdev->stats;
+ unsigned long flags;
- netif_warn(tp, tx_err, netdev,
- "failed tx_urb %d\n", res);
- stats->tx_dropped += agg->skb_num;
+ netif_warn(tp, tx_err, netdev,
+ "failed tx_urb %d\n", res);
+ stats->tx_dropped += agg->skb_num;
- spin_lock_irqsave(&tp->tx_lock, flags);
- list_add_tail(&agg->list, &tp->tx_free);
- spin_unlock_irqrestore(&tp->tx_lock, flags);
- }
+ spin_lock_irqsave(&tp->tx_lock, flags);
+ list_add_tail(&agg->list, &tp->tx_free);
+ spin_unlock_irqrestore(&tp->tx_lock, flags);
}
} while (res == 0);
}
@@ -3226,6 +3427,688 @@ static void rtl_reset_bmu(struct r8152 *tp)
ocp_write_byte(tp, MCU_TYPE_USB, USB_BMU_RESET, ocp_data);
}
+/* Clear the break points to stop the firmware before loading a new one */
+static void rtl_clear_bp(struct r8152 *tp, u16 type)
+{
+ switch (tp->version) {
+ case RTL_VER_01:
+ case RTL_VER_02:
+ case RTL_VER_07:
+ break;
+ case RTL_VER_03:
+ case RTL_VER_04:
+ case RTL_VER_05:
+ case RTL_VER_06:
+ ocp_write_byte(tp, type, PLA_BP_EN, 0);
+ break;
+ case RTL_VER_08:
+ case RTL_VER_09:
+ default:
+ if (type == MCU_TYPE_USB) {
+ ocp_write_byte(tp, MCU_TYPE_USB, USB_BP2_EN, 0);
+
+ ocp_write_word(tp, MCU_TYPE_USB, USB_BP_8, 0);
+ ocp_write_word(tp, MCU_TYPE_USB, USB_BP_9, 0);
+ ocp_write_word(tp, MCU_TYPE_USB, USB_BP_10, 0);
+ ocp_write_word(tp, MCU_TYPE_USB, USB_BP_11, 0);
+ ocp_write_word(tp, MCU_TYPE_USB, USB_BP_12, 0);
+ ocp_write_word(tp, MCU_TYPE_USB, USB_BP_13, 0);
+ ocp_write_word(tp, MCU_TYPE_USB, USB_BP_14, 0);
+ ocp_write_word(tp, MCU_TYPE_USB, USB_BP_15, 0);
+ } else {
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_BP_EN, 0);
+ }
+ break;
+ }
+
+ ocp_write_word(tp, type, PLA_BP_0, 0);
+ ocp_write_word(tp, type, PLA_BP_1, 0);
+ ocp_write_word(tp, type, PLA_BP_2, 0);
+ ocp_write_word(tp, type, PLA_BP_3, 0);
+ ocp_write_word(tp, type, PLA_BP_4, 0);
+ ocp_write_word(tp, type, PLA_BP_5, 0);
+ ocp_write_word(tp, type, PLA_BP_6, 0);
+ ocp_write_word(tp, type, PLA_BP_7, 0);
+
+ /* wait 3 ms to make sure the firmware is stopped */
+ usleep_range(3000, 6000);
+ ocp_write_word(tp, type, PLA_BP_BA, 0);
+}
+
+static int r8153_patch_request(struct r8152 *tp, bool request)
+{
+ u16 data;
+ int i;
+
+ data = ocp_reg_read(tp, OCP_PHY_PATCH_CMD);
+ if (request)
+ data |= PATCH_REQUEST;
+ else
+ data &= ~PATCH_REQUEST;
+ ocp_reg_write(tp, OCP_PHY_PATCH_CMD, data);
+
+ for (i = 0; request && i < 5000; i++) {
+ usleep_range(1000, 2000);
+ if (ocp_reg_read(tp, OCP_PHY_PATCH_STAT) & PATCH_READY)
+ break;
+ }
+
+ if (request && !(ocp_reg_read(tp, OCP_PHY_PATCH_STAT) & PATCH_READY)) {
+ netif_err(tp, drv, tp->netdev, "patch request fail\n");
+ r8153_patch_request(tp, false);
+ return -ETIME;
+ } else {
+ return 0;
+ }
+}
+
+static int r8153_pre_ram_code(struct r8152 *tp, u16 key_addr, u16 patch_key)
+{
+ if (r8153_patch_request(tp, true)) {
+ dev_err(&tp->intf->dev, "patch request fail\n");
+ return -ETIME;
+ }
+
+ sram_write(tp, key_addr, patch_key);
+ sram_write(tp, SRAM_PHY_LOCK, PHY_PATCH_LOCK);
+
+ return 0;
+}
+
+static int r8153_post_ram_code(struct r8152 *tp, u16 key_addr)
+{
+ u16 data;
+
+ sram_write(tp, 0x0000, 0x0000);
+
+ data = ocp_reg_read(tp, OCP_PHY_LOCK);
+ data &= ~PATCH_LOCK;
+ ocp_reg_write(tp, OCP_PHY_LOCK, data);
+
+ sram_write(tp, key_addr, 0x0000);
+
+ r8153_patch_request(tp, false);
+
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, tp->ocp_base);
+
+ return 0;
+}
+
+static bool rtl8152_is_fw_phy_nc_ok(struct r8152 *tp, struct fw_phy_nc *phy)
+{
+ u32 length;
+ u16 fw_offset, fw_reg, ba_reg, patch_en_addr, mode_reg, bp_start;
+ bool rc = false;
+
+ switch (tp->version) {
+ case RTL_VER_04:
+ case RTL_VER_05:
+ case RTL_VER_06:
+ fw_reg = 0xa014;
+ ba_reg = 0xa012;
+ patch_en_addr = 0xa01a;
+ mode_reg = 0xb820;
+ bp_start = 0xa000;
+ break;
+ default:
+ goto out;
+ }
+
+ fw_offset = __le16_to_cpu(phy->fw_offset);
+ if (fw_offset < sizeof(*phy)) {
+ dev_err(&tp->intf->dev, "fw_offset too small\n");
+ goto out;
+ }
+
+ length = __le32_to_cpu(phy->blk_hdr.length);
+ if (length < fw_offset) {
+ dev_err(&tp->intf->dev, "invalid fw_offset\n");
+ goto out;
+ }
+
+ length -= __le16_to_cpu(phy->fw_offset);
+ if (!length || (length & 1)) {
+ dev_err(&tp->intf->dev, "invalid block length\n");
+ goto out;
+ }
+
+ if (__le16_to_cpu(phy->fw_reg) != fw_reg) {
+ dev_err(&tp->intf->dev, "invalid register to load firmware\n");
+ goto out;
+ }
+
+ if (__le16_to_cpu(phy->ba_reg) != ba_reg) {
+ dev_err(&tp->intf->dev, "invalid base address register\n");
+ goto out;
+ }
+
+ if (__le16_to_cpu(phy->patch_en_addr) != patch_en_addr) {
+ dev_err(&tp->intf->dev,
+ "invalid patch mode enabled register\n");
+ goto out;
+ }
+
+ if (__le16_to_cpu(phy->mode_reg) != mode_reg) {
+ dev_err(&tp->intf->dev,
+ "invalid register to switch the mode\n");
+ goto out;
+ }
+
+ if (__le16_to_cpu(phy->bp_start) != bp_start) {
+ dev_err(&tp->intf->dev,
+ "invalid start register of break point\n");
+ goto out;
+ }
+
+ if (__le16_to_cpu(phy->bp_num) > 4) {
+ dev_err(&tp->intf->dev, "invalid break point number\n");
+ goto out;
+ }
+
+ rc = true;
+out:
+ return rc;
+}
+
+static bool rtl8152_is_fw_mac_ok(struct r8152 *tp, struct fw_mac *mac)
+{
+ u16 fw_reg, bp_ba_addr, bp_en_addr, bp_start, fw_offset;
+ bool rc = false;
+ u32 length, type;
+ int i, max_bp;
+
+ type = __le32_to_cpu(mac->blk_hdr.type);
+ if (type == RTL_FW_PLA) {
+ switch (tp->version) {
+ case RTL_VER_01:
+ case RTL_VER_02:
+ case RTL_VER_07:
+ fw_reg = 0xf800;
+ bp_ba_addr = PLA_BP_BA;
+ bp_en_addr = 0;
+ bp_start = PLA_BP_0;
+ max_bp = 8;
+ break;
+ case RTL_VER_03:
+ case RTL_VER_04:
+ case RTL_VER_05:
+ case RTL_VER_06:
+ case RTL_VER_08:
+ case RTL_VER_09:
+ fw_reg = 0xf800;
+ bp_ba_addr = PLA_BP_BA;
+ bp_en_addr = PLA_BP_EN;
+ bp_start = PLA_BP_0;
+ max_bp = 8;
+ break;
+ default:
+ goto out;
+ }
+ } else if (type == RTL_FW_USB) {
+ switch (tp->version) {
+ case RTL_VER_03:
+ case RTL_VER_04:
+ case RTL_VER_05:
+ case RTL_VER_06:
+ fw_reg = 0xf800;
+ bp_ba_addr = USB_BP_BA;
+ bp_en_addr = USB_BP_EN;
+ bp_start = USB_BP_0;
+ max_bp = 8;
+ break;
+ case RTL_VER_08:
+ case RTL_VER_09:
+ fw_reg = 0xe600;
+ bp_ba_addr = USB_BP_BA;
+ bp_en_addr = USB_BP2_EN;
+ bp_start = USB_BP_0;
+ max_bp = 16;
+ break;
+ case RTL_VER_01:
+ case RTL_VER_02:
+ case RTL_VER_07:
+ default:
+ goto out;
+ }
+ } else {
+ goto out;
+ }
+
+ fw_offset = __le16_to_cpu(mac->fw_offset);
+ if (fw_offset < sizeof(*mac)) {
+ dev_err(&tp->intf->dev, "fw_offset too small\n");
+ goto out;
+ }
+
+ length = __le32_to_cpu(mac->blk_hdr.length);
+ if (length < fw_offset) {
+ dev_err(&tp->intf->dev, "invalid fw_offset\n");
+ goto out;
+ }
+
+ length -= fw_offset;
+ if (length < 4 || (length & 3)) {
+ dev_err(&tp->intf->dev, "invalid block length\n");
+ goto out;
+ }
+
+ if (__le16_to_cpu(mac->fw_reg) != fw_reg) {
+ dev_err(&tp->intf->dev, "invalid register to load firmware\n");
+ goto out;
+ }
+
+ if (__le16_to_cpu(mac->bp_ba_addr) != bp_ba_addr) {
+ dev_err(&tp->intf->dev, "invalid base address register\n");
+ goto out;
+ }
+
+ if (__le16_to_cpu(mac->bp_en_addr) != bp_en_addr) {
+ dev_err(&tp->intf->dev, "invalid enabled mask register\n");
+ goto out;
+ }
+
+ if (__le16_to_cpu(mac->bp_start) != bp_start) {
+ dev_err(&tp->intf->dev,
+ "invalid start register of break point\n");
+ goto out;
+ }
+
+ if (__le16_to_cpu(mac->bp_num) > max_bp) {
+ dev_err(&tp->intf->dev, "invalid break point number\n");
+ goto out;
+ }
+
+ for (i = __le16_to_cpu(mac->bp_num); i < max_bp; i++) {
+ if (mac->bp[i]) {
+ dev_err(&tp->intf->dev, "unused bp%u is not zero\n", i);
+ goto out;
+ }
+ }
+
+ rc = true;
+out:
+ return rc;
+}
+
+/* Verify the checksum for the firmware file. It is calculated from the version
+ * field to the end of the file. Compare the result with the checksum field to
+ * make sure the file is correct.
+ */
+static long rtl8152_fw_verify_checksum(struct r8152 *tp,
+ struct fw_header *fw_hdr, size_t size)
+{
+ unsigned char checksum[sizeof(fw_hdr->checksum)];
+ struct crypto_shash *alg;
+ struct shash_desc *sdesc;
+ size_t len;
+ long rc;
+
+ alg = crypto_alloc_shash("sha256", 0, 0);
+ if (IS_ERR(alg)) {
+ rc = PTR_ERR(alg);
+ goto out;
+ }
+
+ if (crypto_shash_digestsize(alg) != sizeof(fw_hdr->checksum)) {
+ rc = -EFAULT;
+ dev_err(&tp->intf->dev, "digestsize incorrect (%u)\n",
+ crypto_shash_digestsize(alg));
+ goto free_shash;
+ }
+
+ len = sizeof(*sdesc) + crypto_shash_descsize(alg);
+ sdesc = kmalloc(len, GFP_KERNEL);
+ if (!sdesc) {
+ rc = -ENOMEM;
+ goto free_shash;
+ }
+ sdesc->tfm = alg;
+
+ len = size - sizeof(fw_hdr->checksum);
+ rc = crypto_shash_digest(sdesc, fw_hdr->version, len, checksum);
+ kfree(sdesc);
+ if (rc)
+ goto free_shash;
+
+ if (memcmp(fw_hdr->checksum, checksum, sizeof(fw_hdr->checksum))) {
+ dev_err(&tp->intf->dev, "checksum fail\n");
+ rc = -EFAULT;
+ }
+
+free_shash:
+ crypto_free_shash(alg);
+out:
+ return rc;
+}
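The same check can be reproduced in userspace when inspecting a firmware image before shipping it. A sketch using OpenSSL's one-shot SHA256() (link with -lcrypto), under the assumption that the file starts with the 32-byte checksum field of struct fw_header:

#include <openssl/sha.h>
#include <stdint.h>
#include <string.h>

#define RTL_CHECKSUM_LEN 32

/* 0 if the stored checksum matches SHA-256 of everything after it, i.e.
 * the version field through the end of the file; -1 otherwise.
 */
static int verify_rtl_fw(const uint8_t *file, size_t size)
{
	uint8_t digest[SHA256_DIGEST_LENGTH];

	if (size <= RTL_CHECKSUM_LEN)
		return -1;
	SHA256(file + RTL_CHECKSUM_LEN, size - RTL_CHECKSUM_LEN, digest);
	return memcmp(file, digest, RTL_CHECKSUM_LEN) ? -1 : 0;
}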
+
+static long rtl8152_check_firmware(struct r8152 *tp, struct rtl_fw *rtl_fw)
+{
+ const struct firmware *fw = rtl_fw->fw;
+ struct fw_header *fw_hdr = (struct fw_header *)fw->data;
+ struct fw_mac *pla = NULL, *usb = NULL;
+ struct fw_phy_patch_key *start = NULL;
+ struct fw_phy_nc *phy_nc = NULL;
+ struct fw_block *stop = NULL;
+ long ret = -EFAULT;
+ int i;
+
+ if (fw->size < sizeof(*fw_hdr)) {
+ dev_err(&tp->intf->dev, "file too small\n");
+ goto fail;
+ }
+
+ ret = rtl8152_fw_verify_checksum(tp, fw_hdr, fw->size);
+ if (ret)
+ goto fail;
+
+ ret = -EFAULT;
+
+ for (i = sizeof(*fw_hdr); i < fw->size;) {
+ struct fw_block *block = (struct fw_block *)&fw->data[i];
+ u32 type;
+
+ if ((i + sizeof(*block)) > fw->size)
+ goto fail;
+
+ type = __le32_to_cpu(block->type);
+ switch (type) {
+ case RTL_FW_END:
+ if (__le32_to_cpu(block->length) != sizeof(*block))
+ goto fail;
+ goto fw_end;
+ case RTL_FW_PLA:
+ if (pla) {
+ dev_err(&tp->intf->dev,
+ "multiple PLA firmware encountered");
+ goto fail;
+ }
+
+ pla = (struct fw_mac *)block;
+ if (!rtl8152_is_fw_mac_ok(tp, pla)) {
+ dev_err(&tp->intf->dev,
+ "check PLA firmware failed\n");
+ goto fail;
+ }
+ break;
+ case RTL_FW_USB:
+ if (usb) {
+ dev_err(&tp->intf->dev,
+ "multiple USB firmware encountered");
+ goto fail;
+ }
+
+ usb = (struct fw_mac *)block;
+ if (!rtl8152_is_fw_mac_ok(tp, usb)) {
+ dev_err(&tp->intf->dev,
+ "check USB firmware failed\n");
+ goto fail;
+ }
+ break;
+ case RTL_FW_PHY_START:
+ if (start || phy_nc || stop) {
+ dev_err(&tp->intf->dev,
+ "check PHY_START fail\n");
+ goto fail;
+ }
+
+ if (__le32_to_cpu(block->length) != sizeof(*start)) {
+ dev_err(&tp->intf->dev,
+ "Invalid length for PHY_START\n");
+ goto fail;
+ }
+
+ start = (struct fw_phy_patch_key *)block;
+ break;
+ case RTL_FW_PHY_STOP:
+ if (stop || !start) {
+ dev_err(&tp->intf->dev,
+ "Check PHY_STOP fail\n");
+ goto fail;
+ }
+
+ if (__le32_to_cpu(block->length) != sizeof(*block)) {
+ dev_err(&tp->intf->dev,
+ "Invalid length for PHY_STOP\n");
+ goto fail;
+ }
+
+ stop = block;
+ break;
+ case RTL_FW_PHY_NC:
+ if (!start || stop) {
+ dev_err(&tp->intf->dev,
+ "check PHY_NC fail\n");
+ goto fail;
+ }
+
+ if (phy_nc) {
+ dev_err(&tp->intf->dev,
+ "multiple PHY NC encountered\n");
+ goto fail;
+ }
+
+ phy_nc = (struct fw_phy_nc *)block;
+ if (!rtl8152_is_fw_phy_nc_ok(tp, phy_nc)) {
+ dev_err(&tp->intf->dev,
+ "check PHY NC firmware failed\n");
+ goto fail;
+ }
+
+ break;
+ default:
+ dev_warn(&tp->intf->dev, "Unknown type %u is found\n",
+ type);
+ break;
+ }
+
+ /* next block */
+ i += ALIGN(__le32_to_cpu(block->length), 8);
+ }
+
+fw_end:
+ if ((phy_nc || start) && !stop) {
+ dev_err(&tp->intf->dev, "without PHY_STOP\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ return ret;
+}
+
+static void rtl8152_fw_phy_nc_apply(struct r8152 *tp, struct fw_phy_nc *phy)
+{
+ u16 mode_reg, bp_index;
+ u32 length, i, num;
+ __le16 *data;
+
+ mode_reg = __le16_to_cpu(phy->mode_reg);
+ sram_write(tp, mode_reg, __le16_to_cpu(phy->mode_pre));
+ sram_write(tp, __le16_to_cpu(phy->ba_reg),
+ __le16_to_cpu(phy->ba_data));
+
+ length = __le32_to_cpu(phy->blk_hdr.length);
+ length -= __le16_to_cpu(phy->fw_offset);
+ num = length / 2;
+ data = (__le16 *)((u8 *)phy + __le16_to_cpu(phy->fw_offset));
+
+ ocp_reg_write(tp, OCP_SRAM_ADDR, __le16_to_cpu(phy->fw_reg));
+ for (i = 0; i < num; i++)
+ ocp_reg_write(tp, OCP_SRAM_DATA, __le16_to_cpu(data[i]));
+
+ sram_write(tp, __le16_to_cpu(phy->patch_en_addr),
+ __le16_to_cpu(phy->patch_en_value));
+
+ bp_index = __le16_to_cpu(phy->bp_start);
+ num = __le16_to_cpu(phy->bp_num);
+ for (i = 0; i < num; i++) {
+ sram_write(tp, bp_index, __le16_to_cpu(phy->bp[i]));
+ bp_index += 2;
+ }
+
+ sram_write(tp, mode_reg, __le16_to_cpu(phy->mode_post));
+
+ dev_dbg(&tp->intf->dev, "successfully applied %s\n", phy->info);
+}
+
+static void rtl8152_fw_mac_apply(struct r8152 *tp, struct fw_mac *mac)
+{
+ u16 bp_en_addr, bp_index, type, bp_num, fw_ver_reg;
+ u32 length;
+ u8 *data;
+ int i;
+
+ switch (__le32_to_cpu(mac->blk_hdr.type)) {
+ case RTL_FW_PLA:
+ type = MCU_TYPE_PLA;
+ break;
+ case RTL_FW_USB:
+ type = MCU_TYPE_USB;
+ break;
+ default:
+ return;
+ }
+
+ rtl_clear_bp(tp, type);
+
+ /* Enable backup/restore of MACDBG. This is required after clearing PLA
+ * break points and before applying the PLA firmware.
+ */
+ if (tp->version == RTL_VER_04 && type == MCU_TYPE_PLA &&
+ !(ocp_read_word(tp, MCU_TYPE_PLA, PLA_MACDBG_POST) & DEBUG_OE)) {
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MACDBG_PRE, DEBUG_LTSSM);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MACDBG_POST, DEBUG_LTSSM);
+ }
+
+ length = __le32_to_cpu(mac->blk_hdr.length);
+ length -= __le16_to_cpu(mac->fw_offset);
+
+ data = (u8 *)mac;
+ data += __le16_to_cpu(mac->fw_offset);
+
+ generic_ocp_write(tp, __le16_to_cpu(mac->fw_reg), 0xff, length, data,
+ type);
+
+ ocp_write_word(tp, type, __le16_to_cpu(mac->bp_ba_addr),
+ __le16_to_cpu(mac->bp_ba_value));
+
+ bp_index = __le16_to_cpu(mac->bp_start);
+ bp_num = __le16_to_cpu(mac->bp_num);
+ for (i = 0; i < bp_num; i++) {
+ ocp_write_word(tp, type, bp_index, __le16_to_cpu(mac->bp[i]));
+ bp_index += 2;
+ }
+
+ bp_en_addr = __le16_to_cpu(mac->bp_en_addr);
+ if (bp_en_addr)
+ ocp_write_word(tp, type, bp_en_addr,
+ __le16_to_cpu(mac->bp_en_value));
+
+ fw_ver_reg = __le16_to_cpu(mac->fw_ver_reg);
+ if (fw_ver_reg)
+ ocp_write_byte(tp, MCU_TYPE_USB, fw_ver_reg,
+ mac->fw_ver_data);
+
+ dev_dbg(&tp->intf->dev, "successfully applied %s\n", mac->info);
+}
+
+static void rtl8152_apply_firmware(struct r8152 *tp)
+{
+ struct rtl_fw *rtl_fw = &tp->rtl_fw;
+ const struct firmware *fw;
+ struct fw_header *fw_hdr;
+ struct fw_phy_patch_key *key;
+ u16 key_addr = 0;
+ int i;
+
+ if (IS_ERR_OR_NULL(rtl_fw->fw))
+ return;
+
+ fw = rtl_fw->fw;
+ fw_hdr = (struct fw_header *)fw->data;
+
+ if (rtl_fw->pre_fw)
+ rtl_fw->pre_fw(tp);
+
+ for (i = offsetof(struct fw_header, blocks); i < fw->size;) {
+ struct fw_block *block = (struct fw_block *)&fw->data[i];
+
+ switch (__le32_to_cpu(block->type)) {
+ case RTL_FW_END:
+ goto post_fw;
+ case RTL_FW_PLA:
+ case RTL_FW_USB:
+ rtl8152_fw_mac_apply(tp, (struct fw_mac *)block);
+ break;
+ case RTL_FW_PHY_START:
+ key = (struct fw_phy_patch_key *)block;
+ key_addr = __le16_to_cpu(key->key_reg);
+ r8153_pre_ram_code(tp, key_addr,
+ __le16_to_cpu(key->key_data));
+ break;
+ case RTL_FW_PHY_STOP:
+ WARN_ON(!key_addr);
+ r8153_post_ram_code(tp, key_addr);
+ break;
+ case RTL_FW_PHY_NC:
+ rtl8152_fw_phy_nc_apply(tp, (struct fw_phy_nc *)block);
+ break;
+ default:
+ break;
+ }
+
+ i += ALIGN(__le32_to_cpu(block->length), 8);
+ }
+
+post_fw:
+ if (rtl_fw->post_fw)
+ rtl_fw->post_fw(tp);
+
+ strscpy(rtl_fw->version, fw_hdr->version, RTL_VER_SIZE);
+ dev_info(&tp->intf->dev, "load %s successfully\n", rtl_fw->version);
+}
+
+static void rtl8152_release_firmware(struct r8152 *tp)
+{
+ struct rtl_fw *rtl_fw = &tp->rtl_fw;
+
+ if (!IS_ERR_OR_NULL(rtl_fw->fw)) {
+ release_firmware(rtl_fw->fw);
+ rtl_fw->fw = NULL;
+ }
+}
+
+static int rtl8152_request_firmware(struct r8152 *tp)
+{
+ struct rtl_fw *rtl_fw = &tp->rtl_fw;
+ long rc;
+
+ if (rtl_fw->fw || !rtl_fw->fw_name) {
+ dev_info(&tp->intf->dev, "skip request firmware\n");
+ rc = 0;
+ goto result;
+ }
+
+ rc = request_firmware(&rtl_fw->fw, rtl_fw->fw_name, &tp->intf->dev);
+ if (rc < 0)
+ goto result;
+
+ rc = rtl8152_check_firmware(tp, rtl_fw);
+ if (rc < 0)
+ release_firmware(rtl_fw->fw);
+
+result:
+ if (rc) {
+ rtl_fw->fw = ERR_PTR(rc);
+
+ dev_warn(&tp->intf->dev,
+ "unable to load firmware patch %s (%ld)\n",
+ rtl_fw->fw_name, rc);
+ }
+
+ return rc;
+}
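Note the sentinel on failure: rtl_fw->fw is parked at ERR_PTR(rc) rather than left NULL, so rtl8152_apply_firmware()'s IS_ERR_OR_NULL() test skips both the never-requested and the failed case with one check, and no retry happens on later PHY reconfigurations. A minimal sketch of the idiom with the standard <linux/err.h> helpers:

#include <linux/err.h>
#include <linux/firmware.h>

/* Sketch: a failed lookup is remembered by encoding the errno in the
 * pointer itself, so every later consumer needs only one test.
 */
static void apply_if_loaded(const struct firmware *fw)
{
	if (IS_ERR_OR_NULL(fw))	/* NULL: never requested; IS_ERR: failed */
		return;
	/* ... consume fw->data / fw->size ... */
}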
+
static void r8152_aldps_en(struct r8152 *tp, bool enable)
{
if (enable) {
@@ -3370,6 +4253,7 @@ static void rtl8152_disable(struct r8152 *tp)
static void r8152b_hw_phy_cfg(struct r8152 *tp)
{
+ rtl8152_apply_firmware(tp);
rtl_eee_enable(tp, tp->eee_en);
r8152_aldps_en(tp, true);
r8152b_enable_fc(tp);
@@ -3377,11 +4261,23 @@ static void r8152b_hw_phy_cfg(struct r8152 *tp)
set_bit(PHY_RESET, &tp->flags);
}
-static void r8152b_exit_oob(struct r8152 *tp)
+static void wait_oob_link_list_ready(struct r8152 *tp)
{
u32 ocp_data;
int i;
+ for (i = 0; i < 1000; i++) {
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+ if (ocp_data & LINK_LIST_READY)
+ break;
+ usleep_range(1000, 2000);
+ }
+}
+
+static void r8152b_exit_oob(struct r8152 *tp)
+{
+ u32 ocp_data;
+
ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
ocp_data &= ~RCR_ACPT_ALL;
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
@@ -3399,23 +4295,13 @@ static void r8152b_exit_oob(struct r8152 *tp)
ocp_data &= ~MCU_BORW_EN;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
- for (i = 0; i < 1000; i++) {
- ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
- if (ocp_data & LINK_LIST_READY)
- break;
- usleep_range(1000, 2000);
- }
+ wait_oob_link_list_ready(tp);
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7);
ocp_data |= RE_INIT_LL;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
- for (i = 0; i < 1000; i++) {
- ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
- if (ocp_data & LINK_LIST_READY)
- break;
- usleep_range(1000, 2000);
- }
+ wait_oob_link_list_ready(tp);
rtl8152_nic_reset(tp);
@@ -3457,7 +4343,6 @@ static void r8152b_exit_oob(struct r8152 *tp)
static void r8152b_enter_oob(struct r8152 *tp)
{
u32 ocp_data;
- int i;
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
ocp_data &= ~NOW_IS_OOB;
@@ -3469,23 +4354,13 @@ static void r8152b_enter_oob(struct r8152 *tp)
rtl_disable(tp);
- for (i = 0; i < 1000; i++) {
- ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
- if (ocp_data & LINK_LIST_READY)
- break;
- usleep_range(1000, 2000);
- }
+ wait_oob_link_list_ready(tp);
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7);
ocp_data |= RE_INIT_LL;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
- for (i = 0; i < 1000; i++) {
- ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
- if (ocp_data & LINK_LIST_READY)
- break;
- usleep_range(1000, 2000);
- }
+ wait_oob_link_list_ready(tp);
ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8152_RMS);
@@ -3506,31 +4381,124 @@ static void r8152b_enter_oob(struct r8152 *tp)
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
}
-static int r8153_patch_request(struct r8152 *tp, bool request)
+static int r8153_pre_firmware_1(struct r8152 *tp)
{
- u16 data;
int i;
- data = ocp_reg_read(tp, OCP_PHY_PATCH_CMD);
- if (request)
- data |= PATCH_REQUEST;
- else
- data &= ~PATCH_REQUEST;
- ocp_reg_write(tp, OCP_PHY_PATCH_CMD, data);
+ /* Wait till the watchdog (WDT1) timer is ready; it takes at most 104 ms. */
+ for (i = 0; i < 104; i++) {
+ u32 ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_WDT1_CTRL);
- for (i = 0; request && i < 5000; i++) {
- usleep_range(1000, 2000);
- if (ocp_reg_read(tp, OCP_PHY_PATCH_STAT) & PATCH_READY)
+ if (!(ocp_data & WTD1_EN))
break;
+ usleep_range(1000, 2000);
}
- if (request && !(ocp_reg_read(tp, OCP_PHY_PATCH_STAT) & PATCH_READY)) {
- netif_err(tp, drv, tp->netdev, "patch request fail\n");
- r8153_patch_request(tp, false);
- return -ETIME;
- } else {
- return 0;
+ return 0;
+}
+
+static int r8153_post_firmware_1(struct r8152 *tp)
+{
+ /* set USB_BP_4 to support USB_SPEED_SUPER only */
+ if (ocp_read_byte(tp, MCU_TYPE_USB, USB_CSTMR) & FORCE_SUPER)
+ ocp_write_word(tp, MCU_TYPE_USB, USB_BP_4, BP4_SUPER_ONLY);
+
+ /* reset UPHY timer to 36 ms */
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_UPHY_TIMER, 36000 / 16);
+
+ return 0;
+}
+
+static int r8153_pre_firmware_2(struct r8152 *tp)
+{
+ u32 ocp_data;
+
+ r8153_pre_firmware_1(tp);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN0);
+ ocp_data &= ~FW_FIX_SUSPEND;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN0, ocp_data);
+
+ return 0;
+}
+
+static int r8153_post_firmware_2(struct r8152 *tp)
+{
+ u32 ocp_data;
+
+ /* enable bp0 if only USB_SPEED_SUPER is supported */
+ if (ocp_read_byte(tp, MCU_TYPE_USB, USB_CSTMR) & FORCE_SUPER) {
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_BP_EN);
+ ocp_data |= BIT(0);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_EN, ocp_data);
}
+
+ /* reset UPHY timer to 36 ms */
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_UPHY_TIMER, 36000 / 16);
+
+ /* enable U3P3 check, set the counter to 4 */
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, U3P3_CHECK_EN | 4);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN0);
+ ocp_data |= FW_FIX_SUSPEND;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN0, ocp_data);
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_USB2PHY);
+ ocp_data |= USB2PHY_L1 | USB2PHY_SUSPEND;
+ ocp_write_byte(tp, MCU_TYPE_USB, USB_USB2PHY, ocp_data);
+
+ return 0;
+}
+
+static int r8153_post_firmware_3(struct r8152 *tp)
+{
+ u32 ocp_data;
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_USB2PHY);
+ ocp_data |= USB2PHY_L1 | USB2PHY_SUSPEND;
+ ocp_write_byte(tp, MCU_TYPE_USB, USB_USB2PHY, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN1);
+ ocp_data |= FW_IP_RESET_EN;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN1, ocp_data);
+
+ return 0;
+}
+
+static int r8153b_pre_firmware_1(struct r8152 *tp)
+{
+ /* enable the flow-control timer and set it to 1 second. */
+ ocp_write_word(tp, MCU_TYPE_USB, USB_FC_TIMER,
+ CTRL_TIMER_EN | (1000 / 8));
+
+ return 0;
+}
+
+static int r8153b_post_firmware_1(struct r8152 *tp)
+{
+ u32 ocp_data;
+
+ /* enable bp0 for RTL8153-BND */
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_1);
+ if (ocp_data & BND_MASK) {
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_BP_EN);
+ ocp_data |= BIT(0);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_EN, ocp_data);
+ }
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_CTRL);
+ ocp_data |= FLOW_CTRL_PATCH_OPT;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_FW_CTRL, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK);
+ ocp_data |= FC_PATCH_TASK;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN1);
+ ocp_data |= FW_IP_RESET_EN;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN1, ocp_data);
+
+ return 0;
}
static void r8153_aldps_en(struct r8152 *tp, bool enable)
@@ -3567,6 +4535,8 @@ static void r8153_hw_phy_cfg(struct r8152 *tp)
/* disable EEE before updating the PHY parameters */
rtl_eee_enable(tp, false);
+ rtl8152_apply_firmware(tp);
+
if (tp->version == RTL_VER_03) {
data = ocp_reg_read(tp, OCP_EEE_CFG);
data &= ~CTAP_SHORT_EN;
@@ -3639,6 +4609,8 @@ static void r8153b_hw_phy_cfg(struct r8152 *tp)
/* disable EEE before updating the PHY parameters */
rtl_eee_enable(tp, false);
+ rtl8152_apply_firmware(tp);
+
r8153b_green_en(tp, test_bit(GREEN_ETHERNET, &tp->flags));
data = sram_read(tp, SRAM_GREEN_CFG);
@@ -3711,7 +4683,6 @@ static void r8153b_hw_phy_cfg(struct r8152 *tp)
static void r8153_first_init(struct r8152 *tp)
{
u32 ocp_data;
- int i;
r8153_mac_clk_spd(tp, false);
rxdy_gated_en(tp, true);
@@ -3732,23 +4703,13 @@ static void r8153_first_init(struct r8152 *tp)
ocp_data &= ~MCU_BORW_EN;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
- for (i = 0; i < 1000; i++) {
- ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
- if (ocp_data & LINK_LIST_READY)
- break;
- usleep_range(1000, 2000);
- }
+ wait_oob_link_list_ready(tp);
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7);
ocp_data |= RE_INIT_LL;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
- for (i = 0; i < 1000; i++) {
- ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
- if (ocp_data & LINK_LIST_READY)
- break;
- usleep_range(1000, 2000);
- }
+ wait_oob_link_list_ready(tp);
rtl_rx_vlan_en(tp, tp->netdev->features & NETIF_F_HW_VLAN_CTAG_RX);
@@ -3773,7 +4734,6 @@ static void r8153_first_init(struct r8152 *tp)
static void r8153_enter_oob(struct r8152 *tp)
{
u32 ocp_data;
- int i;
r8153_mac_clk_spd(tp, true);
@@ -3784,23 +4744,13 @@ static void r8153_enter_oob(struct r8152 *tp)
rtl_disable(tp);
rtl_reset_bmu(tp);
- for (i = 0; i < 1000; i++) {
- ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
- if (ocp_data & LINK_LIST_READY)
- break;
- usleep_range(1000, 2000);
- }
+ wait_oob_link_list_ready(tp);
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7);
ocp_data |= RE_INIT_LL;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
- for (i = 0; i < 1000; i++) {
- ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
- if (ocp_data & LINK_LIST_READY)
- break;
- usleep_range(1000, 2000);
- }
+ wait_oob_link_list_ready(tp);
ocp_data = tp->netdev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, ocp_data);
@@ -4187,11 +5137,22 @@ static void rtl_hw_phy_work_func_t(struct work_struct *work)
mutex_lock(&tp->control);
+ if (rtl8152_request_firmware(tp) == -ENODEV && tp->rtl_fw.retry) {
+ tp->rtl_fw.retry = false;
+ tp->rtl_fw.fw = NULL;
+
+ /* Delay execution in case request_firmware() is not ready yet. */
+ queue_delayed_work(system_long_wq, &tp->hw_phy_work, HZ * 10);
+ goto ignore_once;
+ }
+
tp->rtl_ops.hw_phy_cfg(tp);
rtl8152_set_speed(tp, tp->autoneg, tp->speed, tp->duplex,
tp->advertising);
+ignore_once:
mutex_unlock(&tp->control);
usb_autopm_put_interface(tp->intf);
@@ -4229,6 +5190,11 @@ static int rtl8152_open(struct net_device *netdev)
struct r8152 *tp = netdev_priv(netdev);
int res = 0;
+ if (work_busy(&tp->hw_phy_work.work) & WORK_BUSY_PENDING) {
+ cancel_delayed_work_sync(&tp->hw_phy_work);
+ rtl_hw_phy_work_func_t(&tp->hw_phy_work.work);
+ }
+
res = alloc_all_mem(tp);
if (res)
goto out;
@@ -4799,10 +5765,9 @@ static int rtl8152_reset_resume(struct usb_interface *intf)
struct r8152 *tp = usb_get_intfdata(intf);
clear_bit(SELECTIVE_SUSPEND, &tp->flags);
- mutex_lock(&tp->control);
tp->rtl_ops.init(tp);
queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0);
- mutex_unlock(&tp->control);
+ set_ethernet_addr(tp);
return rtl8152_resume(intf);
}
@@ -4876,6 +5841,9 @@ static void rtl8152_get_drvinfo(struct net_device *netdev,
strlcpy(info->driver, MODULENAME, sizeof(info->driver));
strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
usb_make_path(tp->udev, info->bus_info, sizeof(info->bus_info));
+ if (!IS_ERR_OR_NULL(tp->rtl_fw.fw))
+ strlcpy(info->fw_version, tp->rtl_fw.version,
+ sizeof(info->fw_version));
}
static
@@ -5500,6 +6468,47 @@ static int rtl_ops_init(struct r8152 *tp)
return ret;
}
+#define FIRMWARE_8153A_2 "rtl_nic/rtl8153a-2.fw"
+#define FIRMWARE_8153A_3 "rtl_nic/rtl8153a-3.fw"
+#define FIRMWARE_8153A_4 "rtl_nic/rtl8153a-4.fw"
+#define FIRMWARE_8153B_2 "rtl_nic/rtl8153b-2.fw"
+
+MODULE_FIRMWARE(FIRMWARE_8153A_2);
+MODULE_FIRMWARE(FIRMWARE_8153A_3);
+MODULE_FIRMWARE(FIRMWARE_8153A_4);
+MODULE_FIRMWARE(FIRMWARE_8153B_2);
+
+static int rtl_fw_init(struct r8152 *tp)
+{
+ struct rtl_fw *rtl_fw = &tp->rtl_fw;
+
+ switch (tp->version) {
+ case RTL_VER_04:
+ rtl_fw->fw_name = FIRMWARE_8153A_2;
+ rtl_fw->pre_fw = r8153_pre_firmware_1;
+ rtl_fw->post_fw = r8153_post_firmware_1;
+ break;
+ case RTL_VER_05:
+ rtl_fw->fw_name = FIRMWARE_8153A_3;
+ rtl_fw->pre_fw = r8153_pre_firmware_2;
+ rtl_fw->post_fw = r8153_post_firmware_2;
+ break;
+ case RTL_VER_06:
+ rtl_fw->fw_name = FIRMWARE_8153A_4;
+ rtl_fw->post_fw = r8153_post_firmware_3;
+ break;
+ case RTL_VER_09:
+ rtl_fw->fw_name = FIRMWARE_8153B_2;
+ rtl_fw->pre_fw = r8153b_pre_firmware_1;
+ rtl_fw->post_fw = r8153b_post_firmware_1;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
static u8 rtl_get_version(struct usb_interface *intf)
{
struct usb_device *udev = interface_to_usbdev(intf);
@@ -5607,6 +6616,8 @@ static int rtl8152_probe(struct usb_interface *intf,
if (ret)
goto out;
+ rtl_fw_init(tp);
+
mutex_init(&tp->control);
INIT_DELAYED_WORK(&tp->schedule, rtl_work_func_t);
INIT_DELAYED_WORK(&tp->hw_phy_work, rtl_hw_phy_work_func_t);
@@ -5633,8 +6644,13 @@ static int rtl8152_probe(struct usb_interface *intf,
netdev->hw_features &= ~NETIF_F_RXCSUM;
}
+ if (le16_to_cpu(udev->descriptor.idVendor) == VENDOR_ID_LENOVO &&
+ le16_to_cpu(udev->descriptor.idProduct) == 0x3082)
+ set_bit(LENOVO_MACPASSTHRU, &tp->flags);
+
if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 && udev->serial &&
- (!strcmp(udev->serial, "000001000000") || !strcmp(udev->serial, "000002000000"))) {
+ (!strcmp(udev->serial, "000001000000") ||
+ !strcmp(udev->serial, "000002000000"))) {
dev_info(&udev->dev, "Dell TB16 Dock, disable RX aggregation");
set_bit(DELL_TB_RX_AGG_BUG, &tp->flags);
}
@@ -5677,6 +6693,10 @@ static int rtl8152_probe(struct usb_interface *intf,
intf->needs_remote_wakeup = 1;
tp->rtl_ops.init(tp);
+#if IS_BUILTIN(CONFIG_USB_RTL8152)
+ /* Retry in case request_firmware() is not ready yet. */
+ tp->rtl_fw.retry = true;
+#endif
queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0);
set_ethernet_addr(tp);
@@ -5722,6 +6742,7 @@ static void rtl8152_disconnect(struct usb_interface *intf)
tasklet_kill(&tp->tx_tl);
cancel_delayed_work_sync(&tp->hw_phy_work);
tp->rtl_ops.unload(tp);
+ rtl8152_release_firmware(tp);
free_netdev(tp->netdev);
}
}
@@ -5753,9 +6774,11 @@ static const struct usb_device_id rtl8152_table[] = {
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3062)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3069)},
+ {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3082)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x720c)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7214)},
+ {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0xa387)},
{REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)},
{REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)},
{REALTEK_USB_DEVICE(VENDOR_ID_TPLINK, 0x0601)},
diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
index c5d4a0060124..681e0def6356 100644
--- a/drivers/net/usb/sr9800.c
+++ b/drivers/net/usb/sr9800.c
@@ -335,7 +335,7 @@ static void sr_set_multicast(struct net_device *net)
static int sr_mdio_read(struct net_device *net, int phy_id, int loc)
{
struct usbnet *dev = netdev_priv(net);
- __le16 res;
+ __le16 res = 0;
mutex_lock(&dev->phy_mutex);
sr_set_sw_mii(dev);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index ba98e0971b84..5a635f028bdc 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1585,7 +1585,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Don't wait up for transmitted skbs to be freed. */
if (!use_napi) {
skb_orphan(skb);
- nf_reset(skb);
+ nf_reset_ct(skb);
}
/* If running out of space, stop queue to avoid getting packets that we
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index a4b38a980c3c..b8228f50bc94 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -366,7 +366,7 @@ static int vrf_finish_output6(struct net *net, struct sock *sk,
struct neighbour *neigh;
int ret;
- nf_reset(skb);
+ nf_reset_ct(skb);
skb->protocol = htons(ETH_P_IPV6);
skb->dev = dev;
@@ -459,7 +459,7 @@ static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev,
/* reset skb device */
if (likely(err == 1))
- nf_reset(skb);
+ nf_reset_ct(skb);
else
skb = NULL;
@@ -560,7 +560,7 @@ static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *s
bool is_v6gw = false;
int ret = -EINVAL;
- nf_reset(skb);
+ nf_reset_ct(skb);
/* Be paranoid, rather than too clever. */
if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
@@ -670,7 +670,7 @@ static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev,
/* reset skb device */
if (likely(err == 1))
- nf_reset(skb);
+ nf_reset_ct(skb);
else
skb = NULL;
@@ -865,7 +865,6 @@ static int vrf_dev_init(struct net_device *dev)
/* similarly, oper state is irrelevant; set to up to avoid confusion */
dev->operstate = IF_OPER_UP;
- netdev_lockdep_set_classes(dev);
return 0;
out_rth:
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 3d9bcc957f7d..11f5776affb1 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -793,8 +793,7 @@ static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
}
-static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan,
- const u8 *mac, __u16 state,
+static struct vxlan_fdb *vxlan_fdb_alloc(const u8 *mac, __u16 state,
__be32 src_vni, __u16 ndm_flags)
{
struct vxlan_fdb *f;
@@ -835,7 +834,7 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
return -ENOSPC;
netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
- f = vxlan_fdb_alloc(vxlan, mac, state, src_vni, ndm_flags);
+ f = vxlan_fdb_alloc(mac, state, src_vni, ndm_flags);
if (!f)
return -ENOMEM;
@@ -2487,9 +2486,11 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
vni = tunnel_id_to_key32(info->key.tun_id);
ifindex = 0;
dst_cache = &info->dst_cache;
- if (info->options_len &&
- info->key.tun_flags & TUNNEL_VXLAN_OPT)
+ if (info->key.tun_flags & TUNNEL_VXLAN_OPT) {
+ if (info->options_len < sizeof(*md))
+ goto drop;
md = ip_tunnel_info_opts(info);
+ }
ttl = info->key.ttl;
tos = info->key.tos;
label = info->key.label;
@@ -3566,10 +3567,13 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
{
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct net_device *remote_dev = NULL;
struct vxlan_fdb *f = NULL;
bool unregister = false;
+ struct vxlan_rdst *dst;
int err;
+ dst = &vxlan->default_dst;
err = vxlan_dev_configure(net, dev, conf, false, extack);
if (err)
return err;
@@ -3577,14 +3581,14 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
dev->ethtool_ops = &vxlan_ethtool_ops;
/* create an fdb entry for a valid default destination */
- if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
+ if (!vxlan_addr_any(&dst->remote_ip)) {
err = vxlan_fdb_create(vxlan, all_zeros_mac,
- &vxlan->default_dst.remote_ip,
+ &dst->remote_ip,
NUD_REACHABLE | NUD_PERMANENT,
vxlan->cfg.dst_port,
- vxlan->default_dst.remote_vni,
- vxlan->default_dst.remote_vni,
- vxlan->default_dst.remote_ifindex,
+ dst->remote_vni,
+ dst->remote_vni,
+ dst->remote_ifindex,
NTF_SELF, &f);
if (err)
return err;
@@ -3595,26 +3599,41 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
goto errout;
unregister = true;
+ if (dst->remote_ifindex) {
+ remote_dev = __dev_get_by_index(net, dst->remote_ifindex);
+ if (!remote_dev)
+ goto errout;
+
+ err = netdev_upper_dev_link(remote_dev, dev, extack);
+ if (err)
+ goto errout;
+ }
+
err = rtnl_configure_link(dev, NULL);
if (err)
- goto errout;
+ goto unlink;
if (f) {
- vxlan_fdb_insert(vxlan, all_zeros_mac,
- vxlan->default_dst.remote_vni, f);
+ vxlan_fdb_insert(vxlan, all_zeros_mac, dst->remote_vni, f);
/* notify default fdb entry */
err = vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f),
RTM_NEWNEIGH, true, extack);
if (err) {
vxlan_fdb_destroy(vxlan, f, false, false);
+ if (remote_dev)
+ netdev_upper_dev_unlink(remote_dev, dev);
goto unregister;
}
}
list_add(&vxlan->next, &vn->vxlan_list);
+ if (remote_dev)
+ dst->remote_dev = remote_dev;
return 0;
-
+unlink:
+ if (remote_dev)
+ netdev_upper_dev_unlink(remote_dev, dev);
errout:
/* unregister_netdevice() destroys the default FDB entry with deletion
* notification. But the addition notification was not sent yet, so
@@ -3932,11 +3951,12 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
struct netlink_ext_ack *extack)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
- struct vxlan_rdst *dst = &vxlan->default_dst;
struct net_device *lowerdev;
struct vxlan_config conf;
+ struct vxlan_rdst *dst;
int err;
+ dst = &vxlan->default_dst;
err = vxlan_nl2conf(tb, data, dev, &conf, true, extack);
if (err)
return err;
@@ -3946,6 +3966,14 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
if (err)
return err;
+ if (dst->remote_dev == lowerdev)
+ lowerdev = NULL;
+
+ err = netdev_adjacent_change_prepare(dst->remote_dev, lowerdev, dev,
+ extack);
+ if (err)
+ return err;
+
/* handle default dst entry */
if (!vxlan_addr_equal(&conf.remote_ip, &dst->remote_ip)) {
u32 hash_index = fdb_head_index(vxlan, all_zeros_mac, conf.vni);
@@ -3962,6 +3990,8 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
NTF_SELF, true, extack);
if (err) {
spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+ netdev_adjacent_change_abort(dst->remote_dev,
+ lowerdev, dev);
return err;
}
}
@@ -3979,6 +4009,11 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
if (conf.age_interval != vxlan->cfg.age_interval)
mod_timer(&vxlan->age_timer, jiffies);
+ netdev_adjacent_change_commit(dst->remote_dev, lowerdev, dev);
+ if (lowerdev && lowerdev != dst->remote_dev) {
+ dst->remote_dev = lowerdev;
+ netdev_update_lockdep_key(lowerdev);
+ }
vxlan_config_apply(dev, &conf, lowerdev, vxlan->net, true);
return 0;
}
@@ -3991,6 +4026,8 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head)
list_del(&vxlan->next);
unregister_netdevice_queue(dev, head);
+ if (vxlan->default_dst.remote_dev)
+ netdev_upper_dev_unlink(vxlan->default_dst.remote_dev, dev);
}
static size_t vxlan_get_size(const struct net_device *dev)
diff --git a/drivers/net/wimax/i2400m/debugfs.c b/drivers/net/wimax/i2400m/debugfs.c
index 73f5892ce6c1..1c640b41ea4c 100644
--- a/drivers/net/wimax/i2400m/debugfs.c
+++ b/drivers/net/wimax/i2400m/debugfs.c
@@ -26,7 +26,7 @@ int debugfs_netdev_queue_stopped_get(void *data, u64 *val)
*val = netif_queue_stopped(i2400m->wimax_dev.net_dev);
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(fops_netdev_queue_stopped,
+DEFINE_DEBUGFS_ATTRIBUTE(fops_netdev_queue_stopped,
debugfs_netdev_queue_stopped_get,
NULL, "%llu\n");
@@ -154,7 +154,7 @@ int debugfs_i2400m_suspend_set(void *data, u64 val)
result = 0;
return result;
}
-DEFINE_SIMPLE_ATTRIBUTE(fops_i2400m_suspend,
+DEFINE_DEBUGFS_ATTRIBUTE(fops_i2400m_suspend,
NULL, debugfs_i2400m_suspend_set,
"%llu\n");
@@ -183,7 +183,7 @@ int debugfs_i2400m_reset_set(void *data, u64 val)
}
return result;
}
-DEFINE_SIMPLE_ATTRIBUTE(fops_i2400m_reset,
+DEFINE_DEBUGFS_ATTRIBUTE(fops_i2400m_reset,
NULL, debugfs_i2400m_reset_set,
"%llu\n");
diff --git a/drivers/net/wimax/i2400m/op-rfkill.c b/drivers/net/wimax/i2400m/op-rfkill.c
index 8efb493ceec2..5c79f052cad2 100644
--- a/drivers/net/wimax/i2400m/op-rfkill.c
+++ b/drivers/net/wimax/i2400m/op-rfkill.c
@@ -127,12 +127,12 @@ int i2400m_op_rfkill_sw_toggle(struct wimax_dev *wimax_dev,
"%d\n", result);
result = 0;
error_cmd:
- kfree(cmd);
kfree_skb(ack_skb);
error_msg_to_dev:
error_alloc:
d_fnend(4, dev, "(wimax_dev %p state %d) = %d\n",
wimax_dev, state, result);
+ kfree(cmd);
return result;
}
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index 6953f904232f..9659f9e1aaa6 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -511,7 +511,7 @@ error_alloc_netdev:
/*
- * Disconect a i2400m from the system.
+ * Disconnect a i2400m from the system.
*
* i2400m_stop() has been called before, so all the rx and tx contexts
* have been taken down already. Make sure the queue is stopped,
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index 2fe12b0de5b4..42f00a2a8c80 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -1037,7 +1037,7 @@ static void ar9003_hw_configpcipowersave(struct ath_hw *ah,
}
/*
- * Configire PCIE after Ini init. SERDES values now come from ini file
+ * Configure PCIE after Ini init. SERDES values now come from ini file
* This enables PCIe low power mode.
*/
array = power_off ? &ah->iniPcieSerdes :
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index a82ad739ab80..791f6633667c 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1674,7 +1674,7 @@ static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
case IEEE80211_AMPDU_TX_START:
ret = ath9k_htc_tx_aggr_oper(priv, vif, sta, action, tid);
if (!ret)
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
break;
case IEEE80211_AMPDU_TX_STOP_CONT:
case IEEE80211_AMPDU_TX_STOP_FLUSH:
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 34121fbf32e3..0548aa3702e3 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1921,7 +1921,7 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
ath9k_ps_wakeup(sc);
ret = ath_tx_aggr_start(sc, sta, tid, ssn);
if (!ret)
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
ath9k_ps_restore(sc);
break;
case IEEE80211_AMPDU_TX_STOP_FLUSH:
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 40a8054f8aa6..5914926a5c5b 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -1449,8 +1449,7 @@ static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
rcu_assign_pointer(sta_info->agg[tid], tid_info);
spin_unlock_bh(&ar->tx_ampdu_list_lock);
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- break;
+ return IEEE80211_AMPDU_TX_START_IMMEDIATE;
case IEEE80211_AMPDU_TX_STOP_CONT:
case IEEE80211_AMPDU_TX_STOP_FLUSH:
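This and the wcn36xx, brcmsmac, and iwlwifi hunks that follow are the same mechanical conversion: instead of calling ieee80211_start_tx_ba_cb_irqsafe() itself, the driver returns the new IEEE80211_AMPDU_TX_START_IMMEDIATE status and mac80211 invokes the callback on its behalf. A hedged sketch of the resulting handler shape, where foo_tx_aggr_start() is a stand-in for the driver-specific setup:

#include <net/mac80211.h>

static int foo_tx_aggr_start(void *priv, struct ieee80211_sta *sta, u16 tid);

/* Illustrative only: the IEEE80211_AMPDU_TX_START arm after this series. */
static int foo_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			    struct ieee80211_ampdu_params *params)
{
	int ret;

	switch (params->action) {
	case IEEE80211_AMPDU_TX_START:
		ret = foo_tx_aggr_start(hw->priv, params->sta, params->tid);
		if (!ret)
			/* mac80211 now runs the start_tx_ba callback */
			ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
		return ret;
	default:
		return -EOPNOTSUPP;
	}
}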
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index 79998a3ddb7a..a276dae30887 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -1084,6 +1084,7 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
enum ieee80211_ampdu_mlme_action action = params->action;
u16 tid = params->tid;
u16 *ssn = &params->ssn;
+ int ret = 0;
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac ampdu action action %d tid %d\n",
action, tid);
@@ -1106,7 +1107,7 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
sta_priv->ampdu_state[tid] = WCN36XX_AMPDU_START;
spin_unlock_bh(&sta_priv->ampdu_lock);
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
spin_lock_bh(&sta_priv->ampdu_lock);
@@ -1131,7 +1132,7 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
mutex_unlock(&wcn->conf_mutex);
- return 0;
+ return ret;
}
static const struct ieee80211_ops wcn36xx_ops = {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
index 6188275b17e5..8e8b685cfe09 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
@@ -850,8 +850,7 @@ brcms_ops_ampdu_action(struct ieee80211_hw *hw,
"START: tid %d is not agg\'able\n", tid);
return -EINVAL;
}
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- break;
+ return IEEE80211_AMPDU_TX_START_IMMEDIATE;
case IEEE80211_AMPDU_TX_STOP_CONT:
case IEEE80211_AMPDU_TX_STOP_FLUSH:
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index ffb705b18fb1..51fdd7ce30af 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -2265,7 +2265,7 @@ il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif,
if (tid_data->tfds_in_queue == 0) {
D_HT("HW queue is empty\n");
tid_data->agg.state = IL_AGG_ON;
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
} else {
D_HT("HW queue is NOT empty: %d packets in HW queue\n",
tid_data->tfds_in_queue);
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
index 3029e3f6de63..cd73fc5cfcbb 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
@@ -621,7 +621,7 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d\n",
tid_data->agg.ssn);
tid_data->agg.state = IWL_AGG_STARTING;
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
} else {
IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, "
"next_reclaimed = %d\n",
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index 7573af2d88ce..c2db758b9d54 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -162,12 +162,13 @@ int iwl_acpi_get_mcc(struct device *dev, char *mcc)
wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_WRDD_WIFI_DATA_SIZE,
&tbl_rev);
- if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
+ if (IS_ERR(wifi_pkg)) {
ret = PTR_ERR(wifi_pkg);
goto out_free;
}
- if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
+ if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER ||
+ tbl_rev != 0) {
ret = -EINVAL;
goto out_free;
}
@@ -224,12 +225,13 @@ int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk)
wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_ECKV_WIFI_DATA_SIZE,
&tbl_rev);
- if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
+ if (IS_ERR(wifi_pkg)) {
ret = PTR_ERR(wifi_pkg);
goto out_free;
}
- if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
+ if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER ||
+ tbl_rev != 0) {
ret = -EINVAL;
goto out_free;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
index 39c64850cb6f..c0750ced5ac2 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
@@ -520,7 +520,7 @@ struct iwl_scan_dwell {
} __packed;
/**
- * struct iwl_scan_config
+ * struct iwl_scan_config_v1
* @flags: enum scan_config_flags
* @tx_chains: valid_tx antenna - ANT_* definitions
* @rx_chains: valid_rx antenna - ANT_* definitions
@@ -552,7 +552,7 @@ struct iwl_scan_config_v1 {
#define SCAN_LB_LMAC_IDX 0
#define SCAN_HB_LMAC_IDX 1
-struct iwl_scan_config {
+struct iwl_scan_config_v2 {
__le32 flags;
__le32 tx_chains;
__le32 rx_chains;
@@ -564,6 +564,24 @@ struct iwl_scan_config {
u8 bcast_sta_id;
u8 channel_flags;
u8 channel_array[];
+} __packed; /* SCAN_CONFIG_DB_CMD_API_S_2 */
+
+/**
+ * struct iwl_scan_config
+ * @enable_cam_mode: whether to enable CAM mode.
+ * @enable_promiscouos_mode: whether to enable promiscuous mode
+ * @bcast_sta_id: the index of the station in the fw
+ * @reserved: reserved
+ * @tx_chains: valid_tx antenna - ANT_* definitions
+ * @rx_chains: valid_rx antenna - ANT_* definitions
+ */
+struct iwl_scan_config {
+ u8 enable_cam_mode;
+ u8 enable_promiscouos_mode;
+ u8 bcast_sta_id;
+ u8 reserved;
+ __le32 tx_chains;
+ __le32 rx_chains;
} __packed; /* SCAN_CONFIG_DB_CMD_API_S_3 */
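Because this layout is shared with firmware, its size is ABI; the 12-byte figure follows directly from the four u8 fields plus two __le32s above. A compile-time guard the driver could carry (sketch using the standard BUILD_BUG_ON):

#include <linux/build_bug.h>

static inline void iwl_scan_config_v3_size_check(void)
{
	/* 4 x u8 + 2 x __le32 = 12 bytes; SCAN_CONFIG_DB_CMD_API_S_3 */
	BUILD_BUG_ON(sizeof(struct iwl_scan_config) != 12);
}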
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index 007a722cf1c6..ed90dd104366 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -646,6 +646,7 @@ static struct scatterlist *alloc_sgtable(int size)
if (new_page)
__free_page(new_page);
}
+ kfree(table);
return NULL;
}
alloc_size = min_t(int, size, PAGE_SIZE);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index 6de20e76b63c..1554f5fdd483 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -287,6 +287,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
* STA_CONTEXT_DOT11AX_API_S
* @IWL_UCODE_TLV_CAPA_SAR_TABLE_VER: This ucode supports different sar
* version tables.
+ * @IWL_UCODE_TLV_API_REDUCED_SCAN_CONFIG: This ucode supports v3 of
+ * SCAN_CONFIG_DB_CMD_API_S.
*
* @NUM_IWL_UCODE_TLV_API: number of bits used
*/
@@ -320,6 +322,7 @@ enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_WOWLAN_TCP_SYN_WAKE = (__force iwl_ucode_tlv_api_t)53,
IWL_UCODE_TLV_API_FTM_RTT_ACCURACY = (__force iwl_ucode_tlv_api_t)54,
IWL_UCODE_TLV_API_SAR_TABLE_VER = (__force iwl_ucode_tlv_api_t)55,
+ IWL_UCODE_TLV_API_REDUCED_SCAN_CONFIG = (__force iwl_ucode_tlv_api_t)56,
IWL_UCODE_TLV_API_ADWELL_HB_DEF_N_AP = (__force iwl_ucode_tlv_api_t)57,
IWL_UCODE_TLV_API_SCAN_EXT_CHAN_VER = (__force iwl_ucode_tlv_api_t)58,
IWL_UCODE_TLV_API_BAND_IN_RX_DATA = (__force iwl_ucode_tlv_api_t)59,
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index cb4c5514a556..695bbaa86273 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -279,6 +279,7 @@
* Indicates MAC is entering a power-saving sleep power-down.
* Not a good time to access device-internal resources.
*/
+#define CSR_GP_CNTRL_REG_FLAG_INIT_DONE (0x00000004)
#define CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP (0x00000010)
#define CSR_GP_CNTRL_REG_FLAG_XTAL_ON (0x00000400)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
index 0c12df558240..05c1c77c88a0 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
@@ -148,7 +148,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
*
* Bits 3:0:
* Define the maximum number of pending read requests.
- * Maximum configration value allowed is 0xC
+ * Maximum configuration value allowed is 0xC
* Bits 9:8:
* Define the maximum transfer size. (64 / 128 / 256)
* Bit 10:
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.h b/drivers/net/wireless/intel/iwlwifi/iwl-io.h
index f8e4f0f5de0c..f09e368c7040 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.h
@@ -112,38 +112,38 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf);
*/
static inline u32 iwl_umac_prph(struct iwl_trans *trans, u32 ofs)
{
- return ofs + trans->cfg->trans.umac_prph_offset;
+ return ofs + trans->trans_cfg->umac_prph_offset;
}
static inline u32 iwl_read_umac_prph_no_grab(struct iwl_trans *trans, u32 ofs)
{
return iwl_read_prph_no_grab(trans, ofs +
- trans->cfg->trans.umac_prph_offset);
+ trans->trans_cfg->umac_prph_offset);
}
static inline u32 iwl_read_umac_prph(struct iwl_trans *trans, u32 ofs)
{
- return iwl_read_prph(trans, ofs + trans->cfg->trans.umac_prph_offset);
+ return iwl_read_prph(trans, ofs + trans->trans_cfg->umac_prph_offset);
}
static inline void iwl_write_umac_prph_no_grab(struct iwl_trans *trans, u32 ofs,
u32 val)
{
- iwl_write_prph_no_grab(trans, ofs + trans->cfg->trans.umac_prph_offset,
+ iwl_write_prph_no_grab(trans, ofs + trans->trans_cfg->umac_prph_offset,
val);
}
static inline void iwl_write_umac_prph(struct iwl_trans *trans, u32 ofs,
u32 val)
{
- iwl_write_prph(trans, ofs + trans->cfg->trans.umac_prph_offset, val);
+ iwl_write_prph(trans, ofs + trans->trans_cfg->umac_prph_offset, val);
}
static inline int iwl_poll_umac_prph_bit(struct iwl_trans *trans, u32 addr,
u32 bits, u32 mask, int timeout)
{
return iwl_poll_prph_bit(trans, addr +
- trans->cfg->trans.umac_prph_offset,
+ trans->trans_cfg->umac_prph_offset,
bits, mask, timeout);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index de5ae35d84ca..14c8ba23f3b9 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -456,6 +456,11 @@ enum {
#define PERSISTENCE_BIT BIT(12)
#define PREG_WFPM_ACCESS BIT(12)
+#define HPM_HIPM_GEN_CFG 0xA03458
+#define HPM_HIPM_GEN_CFG_CR_PG_EN BIT(0)
+#define HPM_HIPM_GEN_CFG_CR_SLP_EN BIT(1)
+#define HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE BIT(10)
+
#define UREG_DOORBELL_TO_ISR6 0xA05C04
#define UREG_DOORBELL_TO_ISR6_NMI_BIT BIT(0)
#define UREG_DOORBELL_TO_ISR6_SUSPEND BIT(18)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 32a5e4e5461f..d9eb2b286438 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -420,6 +420,9 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
};
int ret;
+ if (mvm->trans->cfg->tx_with_siso_diversity)
+ init_cfg.init_flags |= cpu_to_le32(BIT(IWL_INIT_PHY));
+
lockdep_assert_held(&mvm->mutex);
mvm->rfkill_safe_init_done = false;
@@ -694,12 +697,13 @@ static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm)
wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
ACPI_WRDS_WIFI_DATA_SIZE, &tbl_rev);
- if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
+ if (IS_ERR(wifi_pkg)) {
ret = PTR_ERR(wifi_pkg);
goto out_free;
}
- if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
+ if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER ||
+ tbl_rev != 0) {
ret = -EINVAL;
goto out_free;
}
@@ -731,13 +735,14 @@ static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
ACPI_EWRD_WIFI_DATA_SIZE, &tbl_rev);
- if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
+ if (IS_ERR(wifi_pkg)) {
ret = PTR_ERR(wifi_pkg);
goto out_free;
}
if ((wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) ||
- (wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER)) {
+ (wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER) ||
+ tbl_rev != 0) {
ret = -EINVAL;
goto out_free;
}
@@ -791,11 +796,16 @@ static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm)
wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
ACPI_WGDS_WIFI_DATA_SIZE, &tbl_rev);
- if (IS_ERR(wifi_pkg) || tbl_rev > 1) {
+ if (IS_ERR(wifi_pkg)) {
ret = PTR_ERR(wifi_pkg);
goto out_free;
}
+ if (tbl_rev != 0) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
mvm->geo_rev = tbl_rev;
for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) {
for (j = 0; j < ACPI_GEO_TABLE_SIZE; j++) {
@@ -889,15 +899,17 @@ static bool iwl_mvm_sar_geo_support(struct iwl_mvm *mvm)
* firmware versions. Unfortunately, we don't have a TLV API
* flag to rely on, so rely on the major version which is in
* the first byte of ucode_ver. This was implemented
- * initially on version 38 and then backported to29 and 17.
- * The intention was to have it in 36 as well, but not all
- * 8000 family got this feature enabled. The 8000 family is
- * the only one using version 36, so skip this version
- * entirely.
+ * initially on version 38 and then backported to 17. It was
+ * also backported to 29, but only for 7265D devices. The
+ * intention was to have it in 36 as well, but not all 8000
+ * family got this feature enabled. The 8000 family is the
+ * only one using version 36, so skip this version entirely.
*/
return IWL_UCODE_SERIAL(mvm->fw->ucode_ver) >= 38 ||
- IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 29 ||
- IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 17;
+ IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 17 ||
+ (IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 29 &&
+ ((mvm->trans->hw_rev & CSR_HW_REV_TYPE_MSK) ==
+ CSR_HW_REV_TYPE_7265D));
}
int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
@@ -1020,11 +1032,16 @@ static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm)
wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
ACPI_PPAG_WIFI_DATA_SIZE, &tbl_rev);
- if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
+ if (IS_ERR(wifi_pkg)) {
ret = PTR_ERR(wifi_pkg);
goto out_free;
}
+ if (tbl_rev != 0) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
enabled = &wifi_pkg->package.elements[1];
if (enabled->type != ACPI_TYPE_INTEGER ||
(enabled->integer.value != 0 && enabled->integer.value != 1)) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index e879c02ca4db..61c33b9335e3 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -4903,11 +4903,11 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
if (!iwl_mvm_has_new_rx_api(mvm))
return;
- notif->cookie = mvm->queue_sync_cookie;
-
- if (notif->sync)
+ if (notif->sync) {
+ notif->cookie = mvm->queue_sync_cookie;
atomic_set(&mvm->queue_sync_counter,
mvm->trans->num_rx_queues);
+ }
ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif,
size, !notif->sync);
@@ -4927,7 +4927,8 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
out:
atomic_set(&mvm->queue_sync_counter, 0);
- mvm->queue_sync_cookie++;
+ if (notif->sync)
+ mvm->queue_sync_cookie++;
}
static void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw)
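
The two hunks above make the cookie strictly a property of synchronous notifications: it is stamped into the message and incremented only when notif->sync is set, so asynchronous notifications no longer consume cookie values that the RX path will never echo back. For context, the synchronous path waits for every RX queue to report in, roughly like this (a sketch of the existing wait, not part of this hunk):

    if (notif->sync) {
            /* wait until every RX queue has processed the notification */
            ret = wait_event_timeout(mvm->rx_sync_waitq,
                                     atomic_read(&mvm->queue_sync_counter) == 0,
                                     HZ);
            WARN_ON_ONCE(!ret);
    }
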
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index a25712cce4ab..735436f5253f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1409,6 +1409,13 @@ static inline bool iwl_mvm_is_scan_ext_chan_supported(struct iwl_mvm *mvm)
IWL_UCODE_TLV_API_SCAN_EXT_CHAN_VER);
}
+
+static inline bool iwl_mvm_is_reduced_config_scan_supported(struct iwl_mvm *mvm)
+{
+ return fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_REDUCED_SCAN_CONFIG);
+}
+
static inline bool iwl_mvm_is_band_in_rx_supported(struct iwl_mvm *mvm)
{
return fw_has_api(&mvm->fw->ucode_capa,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 2f92f6516415..0de5cf1c519d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -1128,11 +1128,11 @@ static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config,
iwl_mvm_fill_channels(mvm, cfg->channel_array, max_channels);
}
-static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
- u32 flags, u8 channel_flags,
- u32 max_channels)
+static void iwl_mvm_fill_scan_config_v2(struct iwl_mvm *mvm, void *config,
+ u32 flags, u8 channel_flags,
+ u32 max_channels)
{
- struct iwl_scan_config *cfg = config;
+ struct iwl_scan_config_v2 *cfg = config;
cfg->flags = cpu_to_le32(flags);
cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
@@ -1176,7 +1176,7 @@ static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
iwl_mvm_fill_channels(mvm, cfg->channel_array, max_channels);
}
-int iwl_mvm_config_scan(struct iwl_mvm *mvm)
+static int iwl_mvm_legacy_config_scan(struct iwl_mvm *mvm)
{
void *cfg;
int ret, cmd_size;
@@ -1208,7 +1208,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
}
if (iwl_mvm_cdb_scan_api(mvm))
- cmd_size = sizeof(struct iwl_scan_config);
+ cmd_size = sizeof(struct iwl_scan_config_v2);
else
cmd_size = sizeof(struct iwl_scan_config_v1);
cmd_size += num_channels;
@@ -1245,8 +1245,8 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
flags |= (iwl_mvm_is_scan_fragmented(hb_type)) ?
SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED :
SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED;
- iwl_mvm_fill_scan_config(mvm, cfg, flags, channel_flags,
- num_channels);
+ iwl_mvm_fill_scan_config_v2(mvm, cfg, flags, channel_flags,
+ num_channels);
} else {
iwl_mvm_fill_scan_config_v1(mvm, cfg, flags, channel_flags,
num_channels);
@@ -1268,6 +1268,30 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
return ret;
}
+int iwl_mvm_config_scan(struct iwl_mvm *mvm)
+{
+ struct iwl_scan_config cfg;
+ struct iwl_host_cmd cmd = {
+ .id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0),
+ .len[0] = sizeof(cfg),
+ .data[0] = &cfg,
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ };
+
+ if (!iwl_mvm_is_reduced_config_scan_supported(mvm))
+ return iwl_mvm_legacy_config_scan(mvm);
+
+ memset(&cfg, 0, sizeof(cfg));
+
+ cfg.bcast_sta_id = mvm->aux_sta.sta_id;
+ cfg.tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
+ cfg.rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
+
+ IWL_DEBUG_SCAN(mvm, "Sending UMAC scan config\n");
+
+ return iwl_mvm_send_cmd(mvm, &cmd);
+}
+
static int iwl_mvm_scan_uid_by_status(struct iwl_mvm *mvm, int status)
{
int i;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 0bedba4c61f2..7b35f416404c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1482,6 +1482,13 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
mvm_sta->sta_id, i);
txq_id = iwl_mvm_tvqm_enable_txq(mvm, mvm_sta->sta_id,
i, wdg);
+ /*
+ * on failures, just set it to IWL_MVM_INVALID_QUEUE
+ * to try again later, we have no other good way of
+ * failing here
+ */
+ if (txq_id < 0)
+ txq_id = IWL_MVM_INVALID_QUEUE;
tid_data->txq_id = txq_id;
/*
@@ -1950,30 +1957,73 @@ void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
sta->sta_id = IWL_MVM_INVALID_STA;
}
-static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue,
+static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 queue,
u8 sta_id, u8 fifo)
{
unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
mvm->trans->trans_cfg->base_params->wd_timeout :
IWL_WATCHDOG_DISABLED;
+ struct iwl_trans_txq_scd_cfg cfg = {
+ .fifo = fifo,
+ .sta_id = sta_id,
+ .tid = IWL_MAX_TID_COUNT,
+ .aggregate = false,
+ .frame_limit = IWL_FRAME_LIMIT,
+ };
+
+ WARN_ON(iwl_mvm_has_new_tx_api(mvm));
+
+ iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
+}
+
+static int iwl_mvm_enable_aux_snif_queue_tvqm(struct iwl_mvm *mvm, u8 sta_id)
+{
+ unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
+ mvm->trans->trans_cfg->base_params->wd_timeout :
+ IWL_WATCHDOG_DISABLED;
+
+ WARN_ON(!iwl_mvm_has_new_tx_api(mvm));
+
+ return iwl_mvm_tvqm_enable_txq(mvm, sta_id, IWL_MAX_TID_COUNT,
+ wdg_timeout);
+}
+static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx,
+ int maccolor,
+ struct iwl_mvm_int_sta *sta,
+ u16 *queue, int fifo)
+{
+ int ret;
+
+ /* Map queue to fifo - needs to happen before adding station */
+ if (!iwl_mvm_has_new_tx_api(mvm))
+ iwl_mvm_enable_aux_snif_queue(mvm, *queue, sta->sta_id, fifo);
+
+ ret = iwl_mvm_add_int_sta_common(mvm, sta, NULL, macidx, maccolor);
+ if (ret) {
+ if (!iwl_mvm_has_new_tx_api(mvm))
+ iwl_mvm_disable_txq(mvm, NULL, *queue,
+ IWL_MAX_TID_COUNT, 0);
+ return ret;
+ }
+
+ /*
+ * For 22000 firmware and on, we cannot add a queue to a station unknown
+ * to the firmware, so enable the queue here - after the station was added
+ */
if (iwl_mvm_has_new_tx_api(mvm)) {
- int tvqm_queue =
- iwl_mvm_tvqm_enable_txq(mvm, sta_id,
- IWL_MAX_TID_COUNT,
- wdg_timeout);
- *queue = tvqm_queue;
- } else {
- struct iwl_trans_txq_scd_cfg cfg = {
- .fifo = fifo,
- .sta_id = sta_id,
- .tid = IWL_MAX_TID_COUNT,
- .aggregate = false,
- .frame_limit = IWL_FRAME_LIMIT,
- };
+ int txq;
+
+ txq = iwl_mvm_enable_aux_snif_queue_tvqm(mvm, sta->sta_id);
+ if (txq < 0) {
+ iwl_mvm_rm_sta_common(mvm, sta->sta_id);
+ return txq;
+ }
- iwl_mvm_enable_txq(mvm, NULL, *queue, 0, &cfg, wdg_timeout);
+ *queue = txq;
}
+
+ return 0;
}
int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
@@ -1989,59 +2039,26 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
if (ret)
return ret;
- /* Map Aux queue to fifo - needs to happen before adding Aux station */
- if (!iwl_mvm_has_new_tx_api(mvm))
- iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
- mvm->aux_sta.sta_id,
- IWL_MVM_TX_FIFO_MCAST);
-
- ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
- MAC_INDEX_AUX, 0);
+ ret = iwl_mvm_add_int_sta_with_queue(mvm, MAC_INDEX_AUX, 0,
+ &mvm->aux_sta, &mvm->aux_queue,
+ IWL_MVM_TX_FIFO_MCAST);
if (ret) {
iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
return ret;
}
- /*
- * For 22000 firmware and on we cannot add queue to a station unknown
- * to firmware so enable queue here - after the station was added
- */
- if (iwl_mvm_has_new_tx_api(mvm))
- iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
- mvm->aux_sta.sta_id,
- IWL_MVM_TX_FIFO_MCAST);
-
return 0;
}
int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- int ret;
lockdep_assert_held(&mvm->mutex);
- /* Map snif queue to fifo - must happen before adding snif station */
- if (!iwl_mvm_has_new_tx_api(mvm))
- iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
- mvm->snif_sta.sta_id,
- IWL_MVM_TX_FIFO_BE);
-
- ret = iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
- mvmvif->id, 0);
- if (ret)
- return ret;
-
- /*
- * For 22000 firmware and on we cannot add queue to a station unknown
- * to firmware so enable queue here - after the station was added
- */
- if (iwl_mvm_has_new_tx_api(mvm))
- iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
- mvm->snif_sta.sta_id,
+ return iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color,
+ &mvm->snif_sta, &mvm->snif_queue,
IWL_MVM_TX_FIFO_BE);
-
- return 0;
}
int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
@@ -2133,6 +2150,10 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
queue = iwl_mvm_tvqm_enable_txq(mvm, bsta->sta_id,
IWL_MAX_TID_COUNT,
wdg_timeout);
+ if (queue < 0) {
+ iwl_mvm_rm_sta_common(mvm, bsta->sta_id);
+ return queue;
+ }
if (vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_ADHOC)
@@ -2307,10 +2328,8 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
}
ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
mvmvif->id, mvmvif->color);
- if (ret) {
- iwl_mvm_dealloc_int_sta(mvm, msta);
- return ret;
- }
+ if (ret)
+ goto err;
/*
* Enable cab queue after the ADD_STA command is sent.
@@ -2323,6 +2342,10 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
int queue = iwl_mvm_tvqm_enable_txq(mvm, msta->sta_id,
0,
timeout);
+ if (queue < 0) {
+ ret = queue;
+ goto err;
+ }
mvmvif->cab_queue = queue;
} else if (!fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_STA_TYPE))
@@ -2330,6 +2353,9 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
timeout);
return 0;
+err:
+ iwl_mvm_dealloc_int_sta(mvm, msta);
+ return ret;
}
static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
@@ -2818,13 +2844,12 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (normalized_ssn == tid_data->next_reclaimed) {
tid_data->state = IWL_AGG_STARTING;
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
} else {
tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
+ ret = 0;
}
- ret = 0;
-
out:
spin_unlock_bh(&mvmsta->lock);
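
A recurring theme in this sta.c diff: iwl_mvm_tvqm_enable_txq() returns a negative errno on failure, but its result used to be stored straight into unsigned queue fields, silently swallowing errors. The hunks above add the missing checks (the realloc-after-restart path, aux/snif stations via the new iwl_mvm_add_int_sta_with_queue(), and the bcast and mcast stations) and unwind the just-added station when queue allocation fails. The defensive shape, sketched:

    int txq;        /* signed local: keep the errno visible */

    txq = iwl_mvm_tvqm_enable_txq(mvm, sta_id, tid, wdg_timeout);
    if (txq < 0) {
            iwl_mvm_rm_sta_common(mvm, sta_id);  /* undo the ADD_STA */
            return txq;
    }
    mvmvif->cab_queue = txq;    /* only now assign to the unsigned field */
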
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index d2274251921d..a4e09a5b1816 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -158,13 +158,9 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
/* allocate ucode sections in dram and set addresses */
ret = iwl_pcie_init_fw_sec(trans, fw, &prph_scratch->dram);
- if (ret) {
- dma_free_coherent(trans->dev,
- sizeof(*prph_scratch),
- prph_scratch,
- trans_pcie->prph_scratch_dma_addr);
- return ret;
- }
+ if (ret)
+ goto err_free_prph_scratch;
+
/* Allocate prph information
* currently we don't assign to the prph info anything, but it would get
@@ -172,16 +168,20 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
prph_info = dma_alloc_coherent(trans->dev, sizeof(*prph_info),
&trans_pcie->prph_info_dma_addr,
GFP_KERNEL);
- if (!prph_info)
- return -ENOMEM;
+ if (!prph_info) {
+ ret = -ENOMEM;
+ goto err_free_prph_scratch;
+ }
/* Allocate context info */
ctxt_info_gen3 = dma_alloc_coherent(trans->dev,
sizeof(*ctxt_info_gen3),
&trans_pcie->ctxt_info_dma_addr,
GFP_KERNEL);
- if (!ctxt_info_gen3)
- return -ENOMEM;
+ if (!ctxt_info_gen3) {
+ ret = -ENOMEM;
+ goto err_free_prph_info;
+ }
ctxt_info_gen3->prph_info_base_addr =
cpu_to_le64(trans_pcie->prph_info_dma_addr);
@@ -237,6 +237,20 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
iwl_set_bit(trans, CSR_GP_CNTRL, CSR_AUTO_FUNC_INIT);
return 0;
+
+err_free_prph_info:
+ dma_free_coherent(trans->dev,
+ sizeof(*prph_info),
+ prph_info,
+ trans_pcie->prph_info_dma_addr);
+
+err_free_prph_scratch:
+ dma_free_coherent(trans->dev,
+ sizeof(*prph_scratch),
+ prph_scratch,
+ trans_pcie->prph_scratch_dma_addr);
+ return ret;
+
}
void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans)
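
This hunk converts iwl_pcie_ctxt_info_gen3_init() to the usual goto-unwind style: previously, a failed prph_info or ctxt_info allocation returned -ENOMEM and leaked the DMA allocations made earlier. The pattern, with labels ordered so each error path frees exactly what was allocated before it (a generic sketch):

    b = dma_alloc_coherent(dev, sizeof(*b), &b_dma, GFP_KERNEL);
    if (!b) {
            ret = -ENOMEM;
            goto err_free_a;        /* only 'a' exists so far */
    }
    c = dma_alloc_coherent(dev, sizeof(*c), &c_dma, GFP_KERNEL);
    if (!c) {
            ret = -ENOMEM;
            goto err_free_b;
    }
    ...
    return 0;

    err_free_b:
            dma_free_coherent(dev, sizeof(*b), b, b_dma);
    err_free_a:
            dma_free_coherent(dev, sizeof(*a), a, a_dma);
            return ret;
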
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index e29c47744ef5..040cec17d3ad 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -513,31 +513,33 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)},
/* 9000 Series */
- {IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x00A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0230, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0234, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0238, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x023C, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0260, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0264, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x02A0, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x1551, iwl9560_killer_s_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x1552, iwl9560_killer_i_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+
{IWL_PCI_DEVICE(0x06F0, 0x0030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x0034, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x0038, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
@@ -571,20 +573,20 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2526, 0x0034, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0038, iwl9560_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x003C, iwl9560_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0060, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0064, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x00A0, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x00A4, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0060, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2526, 0x0064, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2526, 0x00A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2526, 0x00A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x0210, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0214, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0230, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0234, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0238, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x023C, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0260, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0260, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x0264, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x02A0, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x02A4, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x02A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2526, 0x02A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x1010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x1030, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x1210, iwl9260_2ac_cfg)},
@@ -601,7 +603,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2526, 0x401C, iwl9260_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x4034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x6010, iwl9260_2ac_160_cfg)},
@@ -616,33 +618,33 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)},
{IWL_PCI_DEVICE(0x271B, 0x0214, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x271C, 0x0214, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2720, 0x0060, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x0064, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x00A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x00A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x0230, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2720, 0x0234, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2720, 0x0238, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2720, 0x023C, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2720, 0x0260, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x0264, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x02A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x02A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x1010, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2720, 0x1030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x1210, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2720, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x1552, iwl9560_killer_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x2030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x2034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2720, 0x4034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x40A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x4234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x42A4, iwl9462_2ac_cfg_soc)},
+
+ {IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+
{IWL_PCI_DEVICE(0x30DC, 0x0030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x0034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x0038, iwl9560_2ac_160_cfg_soc)},
@@ -671,6 +673,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x30DC, 0x40A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x42A4, iwl9462_2ac_cfg_soc)},
+
{IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_160_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_160_cfg_shared_clk)},
@@ -726,62 +729,60 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x34F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0038, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x003C, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0060, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0064, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x00A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x00A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0230, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0238, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x023C, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0260, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0264, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x02A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x02A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x1010, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x3DF0, 0x1030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x1210, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x3DF0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x2030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x2034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x4030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x4034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x40A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x4234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x42A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x0030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x0034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x0038, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x003C, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x0060, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x0064, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x00A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x00A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x0230, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x0234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x0238, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x023C, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x0260, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x0264, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x02A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x02A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x1010, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x43F0, 0x1030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x1210, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x43F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x2030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x2034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x4030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x4034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x40A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x4234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x42A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+
+ {IWL_PCI_DEVICE(0x43F0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+
{IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_160_cfg_soc)},
@@ -821,34 +822,34 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x9DF0, 0x40A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x42A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0038, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x003C, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0060, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0064, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x00A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x00A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0230, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0238, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x023C, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0260, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0264, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x02A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x02A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x1010, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0xA0F0, 0x1030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x1210, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0xA0F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x2030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x2034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x4030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x4034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x40A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x4234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x42A4, iwl9462_2ac_cfg_soc)},
+
+ {IWL_PCI_DEVICE(0xA0F0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+
{IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x0034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_160_cfg_soc)},
@@ -1067,11 +1068,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
- ((cfg != &iwl_ax200_cfg_cc &&
- cfg != &killer1650x_2ax_cfg &&
- cfg != &killer1650w_2ax_cfg &&
- cfg != &iwl_ax201_cfg_quz_hr) ||
- iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) {
+ iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) {
u32 hw_status;
hw_status = iwl_read_prph(iwl_trans, UMAG_GEN_HW_STATUS);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index df8455f14e4d..ca3bb4d65b00 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -57,6 +57,24 @@
#include "internal.h"
#include "fw/dbg.h"
+static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans)
+{
+ iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
+ HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
+ udelay(20);
+ iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
+ HPM_HIPM_GEN_CFG_CR_PG_EN |
+ HPM_HIPM_GEN_CFG_CR_SLP_EN);
+ udelay(20);
+ iwl_clear_bits_prph(trans, HPM_HIPM_GEN_CFG,
+ HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
+
+ iwl_trans_sw_reset(trans);
+ iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+ return 0;
+}
+
/*
* Start up NIC's basic functionality after it has been reset
* (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
@@ -92,6 +110,13 @@ int iwl_pcie_gen2_apm_init(struct iwl_trans *trans)
iwl_pcie_apm_config(trans);
+ if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 &&
+ trans->cfg->integrated) {
+ ret = iwl_pcie_gen2_force_power_gating(trans);
+ if (ret)
+ return ret;
+ }
+
ret = iwl_finish_nic_init(trans, trans->trans_cfg);
if (ret)
return ret;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index cd1091704de0..6aa89d9ea72c 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -3293,11 +3293,17 @@ static struct iwl_trans_dump_data
ptr = cmdq->write_ptr;
for (i = 0; i < cmdq->n_window; i++) {
u8 idx = iwl_pcie_get_cmd_index(cmdq, ptr);
+ u8 tfdidx;
u32 caplen, cmdlen;
+ if (trans->trans_cfg->use_tfh)
+ tfdidx = idx;
+ else
+ tfdidx = ptr;
+
cmdlen = iwl_trans_pcie_get_cmdlen(trans,
- cmdq->tfds +
- tfd_size * ptr);
+ (u8 *)cmdq->tfds +
+ tfd_size * tfdidx);
caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
if (cmdlen) {
@@ -3471,6 +3477,15 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
spin_lock_init(&trans_pcie->reg_lock);
mutex_init(&trans_pcie->mutex);
init_waitqueue_head(&trans_pcie->ucode_write_waitq);
+
+ trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
+ WQ_HIGHPRI | WQ_UNBOUND, 1);
+ if (!trans_pcie->rba.alloc_wq) {
+ ret = -ENOMEM;
+ goto out_free_trans;
+ }
+ INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);
+
trans_pcie->tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
if (!trans_pcie->tso_hdr_page) {
ret = -ENOMEM;
@@ -3605,10 +3620,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans_pcie->inta_mask = CSR_INI_SET_MASK;
}
- trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
- WQ_HIGHPRI | WQ_UNBOUND, 1);
- INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);
-
#ifdef CONFIG_IWLWIFI_DEBUGFS
trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED;
mutex_init(&trans_pcie->fw_mon_data.mutex);
@@ -3622,6 +3633,8 @@ out_free_ict:
iwl_pcie_free_ict(trans);
out_no_pci:
free_percpu(trans_pcie->tso_hdr_page);
+ destroy_workqueue(trans_pcie->rba.alloc_wq);
+out_free_trans:
iwl_trans_free(trans);
return ERR_PTR(ret);
}
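
The trans.c change moves the rb_allocator workqueue setup to the top of iwl_trans_pcie_alloc() and, crucially, checks alloc_workqueue() for NULL, which the old code never did. The error ladder gains a matching destroy_workqueue(), so after this patch the unwind reads:

    out_free_ict:
            iwl_pcie_free_ict(trans);
    out_no_pci:
            free_percpu(trans_pcie->tso_hdr_page);
            destroy_workqueue(trans_pcie->rba.alloc_wq);
    out_free_trans:
            iwl_trans_free(trans);
            return ERR_PTR(ret);
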
diff --git a/drivers/net/wireless/intersil/hostap/hostap_hw.c b/drivers/net/wireless/intersil/hostap/hostap_hw.c
index 158a3d762e55..e323e9a5999f 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_hw.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_hw.c
@@ -3041,30 +3041,6 @@ static void prism2_clear_set_tim_queue(local_info_t *local)
}
}
-
-/*
- * HostAP uses two layers of net devices, where the inner
- * layer gets called all the time from the outer layer.
- * This is a natural nesting, which needs a split lock type.
- */
-static struct lock_class_key hostap_netdev_xmit_lock_key;
-static struct lock_class_key hostap_netdev_addr_lock_key;
-
-static void prism2_set_lockdep_class_one(struct net_device *dev,
- struct netdev_queue *txq,
- void *_unused)
-{
- lockdep_set_class(&txq->_xmit_lock,
- &hostap_netdev_xmit_lock_key);
-}
-
-static void prism2_set_lockdep_class(struct net_device *dev)
-{
- lockdep_set_class(&dev->addr_list_lock,
- &hostap_netdev_addr_lock_key);
- netdev_for_each_tx_queue(dev, prism2_set_lockdep_class_one, NULL);
-}
-
static struct net_device *
prism2_init_local_data(struct prism2_helper_functions *funcs, int card_idx,
struct device *sdev)
@@ -3223,7 +3199,6 @@ while (0)
if (ret >= 0)
ret = register_netdevice(dev);
- prism2_set_lockdep_class(dev);
rtnl_unlock();
if (ret < 0) {
printk(KERN_WARNING "%s: register netdevice failed!\n",
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 635956024e88..31ae6c4be3a7 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -148,23 +148,25 @@ static const char *hwsim_alpha2s[] = {
};
static const struct ieee80211_regdomain hwsim_world_regdom_custom_01 = {
- .n_reg_rules = 4,
+ .n_reg_rules = 5,
.alpha2 = "99",
.reg_rules = {
REG_RULE(2412-10, 2462+10, 40, 0, 20, 0),
REG_RULE(2484-10, 2484+10, 40, 0, 20, 0),
REG_RULE(5150-10, 5240+10, 40, 0, 30, 0),
REG_RULE(5745-10, 5825+10, 40, 0, 30, 0),
+ REG_RULE(5855-10, 5925+10, 40, 0, 33, 0),
}
};
static const struct ieee80211_regdomain hwsim_world_regdom_custom_02 = {
- .n_reg_rules = 2,
+ .n_reg_rules = 3,
.alpha2 = "99",
.reg_rules = {
REG_RULE(2412-10, 2462+10, 40, 0, 20, 0),
REG_RULE(5725-10, 5850+10, 40, 0, 30,
NL80211_RRF_NO_IR),
+ REG_RULE(5855-10, 5925+10, 40, 0, 33, 0),
}
};
@@ -354,6 +356,24 @@ static const struct ieee80211_channel hwsim_channels_5ghz[] = {
CHAN5G(5805), /* Channel 161 */
CHAN5G(5825), /* Channel 165 */
CHAN5G(5845), /* Channel 169 */
+
+ CHAN5G(5855), /* Channel 171 */
+ CHAN5G(5860), /* Channel 172 */
+ CHAN5G(5865), /* Channel 173 */
+ CHAN5G(5870), /* Channel 174 */
+
+ CHAN5G(5875), /* Channel 175 */
+ CHAN5G(5880), /* Channel 176 */
+ CHAN5G(5885), /* Channel 177 */
+ CHAN5G(5890), /* Channel 178 */
+ CHAN5G(5895), /* Channel 179 */
+ CHAN5G(5900), /* Channel 180 */
+ CHAN5G(5905), /* Channel 181 */
+
+ CHAN5G(5910), /* Channel 182 */
+ CHAN5G(5915), /* Channel 183 */
+ CHAN5G(5920), /* Channel 184 */
+ CHAN5G(5925), /* Channel 185 */
};
static const struct ieee80211_rate hwsim_rates[] = {
@@ -1261,8 +1281,8 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
skb_orphan(skb);
skb_dst_drop(skb);
skb->mark = 0;
- secpath_reset(skb);
- nf_reset(skb);
+ skb_ext_reset(skb);
+ nf_reset_ct(skb);
/*
* Get absolute mactime here so all HWs RX at the "same time", and
@@ -1550,7 +1570,8 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
if (vif->type != NL80211_IFTYPE_AP &&
vif->type != NL80211_IFTYPE_MESH_POINT &&
- vif->type != NL80211_IFTYPE_ADHOC)
+ vif->type != NL80211_IFTYPE_ADHOC &&
+ vif->type != NL80211_IFTYPE_OCB)
return;
skb = ieee80211_beacon_get(hw, vif);
@@ -1604,6 +1625,8 @@ mac80211_hwsim_beacon(struct hrtimer *timer)
}
static const char * const hwsim_chanwidths[] = {
+ [NL80211_CHAN_WIDTH_5] = "ht5",
+ [NL80211_CHAN_WIDTH_10] = "ht10",
[NL80211_CHAN_WIDTH_20_NOHT] = "noht",
[NL80211_CHAN_WIDTH_20] = "ht20",
[NL80211_CHAN_WIDTH_40] = "ht40",
@@ -1979,8 +2002,7 @@ static int mac80211_hwsim_ampdu_action(struct ieee80211_hw *hw,
switch (action) {
case IEEE80211_AMPDU_TX_START:
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- break;
+ return IEEE80211_AMPDU_TX_START_IMMEDIATE;
case IEEE80211_AMPDU_TX_STOP_CONT:
case IEEE80211_AMPDU_TX_STOP_FLUSH:
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
@@ -2723,7 +2745,8 @@ static void mac80211_hwsim_he_capab(struct ieee80211_supported_band *sband)
BIT(NL80211_IFTYPE_P2P_CLIENT) | \
BIT(NL80211_IFTYPE_P2P_GO) | \
BIT(NL80211_IFTYPE_ADHOC) | \
- BIT(NL80211_IFTYPE_MESH_POINT))
+ BIT(NL80211_IFTYPE_MESH_POINT) | \
+ BIT(NL80211_IFTYPE_OCB))
static int mac80211_hwsim_new_radio(struct genl_info *info,
struct hwsim_new_radio_params *param)
@@ -2847,6 +2870,8 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
} else {
data->if_combination.num_different_channels = 1;
data->if_combination.radar_detect_widths =
+ BIT(NL80211_CHAN_WIDTH_5) |
+ BIT(NL80211_CHAN_WIDTH_10) |
BIT(NL80211_CHAN_WIDTH_20_NOHT) |
BIT(NL80211_CHAN_WIDTH_20) |
BIT(NL80211_CHAN_WIDTH_40) |
@@ -4026,7 +4051,7 @@ static int __init init_mac80211_hwsim(void)
err = dev_alloc_name(hwsim_mon, hwsim_mon->name);
if (err < 0) {
rtnl_unlock();
- goto out_free_radios;
+ goto out_free_mon;
}
err = register_netdevice(hwsim_mon);
diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
index c4db6417748f..d55f229abeea 100644
--- a/drivers/net/wireless/marvell/mwl8k.c
+++ b/drivers/net/wireless/marvell/mwl8k.c
@@ -5520,7 +5520,7 @@ mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
rc = -EBUSY;
break;
}
- ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid);
+ rc = IEEE80211_AMPDU_TX_START_IMMEDIATE;
break;
case IEEE80211_AMPDU_TX_STOP_CONT:
case IEEE80211_AMPDU_TX_STOP_FLUSH:
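
The mwl8k hunk above is one of many in this series (dvm/tx.c, mvm/sta.c and mac80211_hwsim above; mt7603, mt7615, mt76x02, mt7601u, rt2800 and rtlwifi below) converting the IEEE80211_AMPDU_TX_START case of ampdu_action from calling ieee80211_start_tx_ba_cb_irqsafe() directly to returning IEEE80211_AMPDU_TX_START_IMMEDIATE, which tells mac80211 to issue that callback itself. Drivers that must defer (like the IWL_EMPTYING_HW_QUEUE_ADDBA path in sta.c) still return 0 and call the callback later. A minimal sketch of the converted handler shape (the surrounding driver state handling is illustrative, not from any one driver):

    static int drv_ampdu_action(struct ieee80211_hw *hw,
                                struct ieee80211_vif *vif,
                                struct ieee80211_ampdu_params *params)
    {
            switch (params->action) {
            case IEEE80211_AMPDU_TX_START:
                    /* old: ieee80211_start_tx_ba_cb_irqsafe(vif,
                     *          params->sta->addr, params->tid); return 0;
                     * new: let mac80211 start the session immediately
                     */
                    return IEEE80211_AMPDU_TX_START_IMMEDIATE;
            default:
                    return -EOPNOTSUPP;
            }
    }
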
diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile
index 4d03596e891f..d7a1ddc9e407 100644
--- a/drivers/net/wireless/mediatek/mt76/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/Makefile
@@ -8,6 +8,8 @@ mt76-y := \
mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o \
tx.o agg-rx.o mcu.o
+mt76-$(CONFIG_PCI) += pci.o
+
mt76-usb-y := usb.o usb_trace.o
CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index c747eb24581c..8f69d00bd940 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -53,8 +53,10 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
u32 ctrl;
int i, idx = -1;
- if (txwi)
+ if (txwi) {
q->entry[q->head].txwi = DMA_DUMMY_DATA;
+ q->entry[q->head].skip_buf0 = true;
+ }
for (i = 0; i < nbufs; i += 2, buf += 2) {
u32 buf0 = buf[0].addr, buf1 = 0;
@@ -97,7 +99,7 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
__le32 __ctrl = READ_ONCE(q->desc[idx].ctrl);
u32 ctrl = le32_to_cpu(__ctrl);
- if (!e->txwi || !e->skb) {
+ if (!e->skip_buf0) {
__le32 addr = READ_ONCE(q->desc[idx].buf0);
u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 570c159515a0..8aec7ccf2d79 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -93,8 +93,9 @@ struct mt76_queue_entry {
struct urb *urb;
};
enum mt76_txq_id qid;
- bool schedule;
- bool done;
+ bool skip_buf0:1;
+ bool schedule:1;
+ bool done:1;
};
struct mt76_queue_regs {
@@ -578,6 +579,7 @@ bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
#define mt76_poll_msec(dev, ...) __mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)
void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);
+void mt76_pci_disable_aspm(struct pci_dev *pdev);
static inline u16 mt76_chip(struct mt76_dev *dev)
{
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
index 25d5b1608bc9..4b3217b43a04 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
@@ -582,8 +582,7 @@ mt7603_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
break;
case IEEE80211_AMPDU_TX_START:
mtxq->agg_ssn = IEEE80211_SN_TO_SEQ(ssn);
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- break;
+ return IEEE80211_AMPDU_TX_START_IMMEDIATE;
case IEEE80211_AMPDU_TX_STOP_CONT:
mtxq->aggr = false;
mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, -1);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
index 87c748715b5d..b6d78212306a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
@@ -477,8 +477,7 @@ mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
break;
case IEEE80211_AMPDU_TX_START:
mtxq->agg_ssn = IEEE80211_SN_TO_SEQ(ssn);
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- break;
+ return IEEE80211_AMPDU_TX_START_IMMEDIATE;
case IEEE80211_AMPDU_TX_STOP_CONT:
mtxq->aggr = false;
mt7615_mcu_set_tx_ba(dev, params, 0);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
index aec73a0295e8..414b22399d93 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
@@ -393,8 +393,7 @@ int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
break;
case IEEE80211_AMPDU_TX_START:
mtxq->agg_ssn = IEEE80211_SN_TO_SEQ(ssn);
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- break;
+ return IEEE80211_AMPDU_TX_START_IMMEDIATE;
case IEEE80211_AMPDU_TX_STOP_CONT:
mtxq->aggr = false;
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
index 73c3104f8858..cf611d1b817c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
@@ -81,6 +81,8 @@ mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* RG_SSUSB_CDR_BR_PE1D = 0x3 */
mt76_rmw_field(dev, 0x15c58, 0x3 << 6, 0x3);
+ mt76_pci_disable_aspm(pdev);
+
return 0;
error:
diff --git a/drivers/net/wireless/mediatek/mt76/pci.c b/drivers/net/wireless/mediatek/mt76/pci.c
new file mode 100644
index 000000000000..04c5a692bc85
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/pci.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2019 Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include <linux/pci.h>
+
+void mt76_pci_disable_aspm(struct pci_dev *pdev)
+{
+ struct pci_dev *parent = pdev->bus->self;
+ u16 aspm_conf, parent_aspm_conf = 0;
+
+ pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &aspm_conf);
+ aspm_conf &= PCI_EXP_LNKCTL_ASPMC;
+ if (parent) {
+ pcie_capability_read_word(parent, PCI_EXP_LNKCTL,
+ &parent_aspm_conf);
+ parent_aspm_conf &= PCI_EXP_LNKCTL_ASPMC;
+ }
+
+ if (!aspm_conf && (!parent || !parent_aspm_conf)) {
+ /* aspm already disabled */
+ return;
+ }
+
+ dev_info(&pdev->dev, "disabling ASPM %s %s\n",
+ (aspm_conf & PCI_EXP_LNKCTL_ASPM_L0S) ? "L0s" : "",
+ (aspm_conf & PCI_EXP_LNKCTL_ASPM_L1) ? "L1" : "");
+
+ if (IS_ENABLED(CONFIG_PCIEASPM)) {
+ int err;
+
+ err = pci_disable_link_state(pdev, aspm_conf);
+ if (!err)
+ return;
+ }
+
+ /* Both device and parent should have the same ASPM setting.
+ * Disable ASPM in the downstream component first and then in
+ * the upstream one.
+ */
+ pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_conf);
+ if (parent)
+ pcie_capability_clear_word(parent, PCI_EXP_LNKCTL,
+ aspm_conf);
+}
+EXPORT_SYMBOL_GPL(mt76_pci_disable_aspm);
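The fallback path above clears the ASPM bits by hand when CONFIG_PCIEASPM is off or pci_disable_link_state() fails. For reference, pcie_capability_clear_word() amounts to the read-modify-write below (aspm_clear_by_hand is an illustrative expansion, not code from the patch):

	#include <linux/pci.h>

	static void aspm_clear_by_hand(struct pci_dev *pdev, u16 aspm_conf)
	{
		u16 lnkctl;

		pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &lnkctl);
		lnkctl &= ~aspm_conf;	/* drop the L0s/L1 enable bits */
		pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, lnkctl);
	}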
diff --git a/drivers/net/wireless/mediatek/mt7601u/main.c b/drivers/net/wireless/mediatek/mt7601u/main.c
index 72e608cc53af..671d8897ae76 100644
--- a/drivers/net/wireless/mediatek/mt7601u/main.c
+++ b/drivers/net/wireless/mediatek/mt7601u/main.c
@@ -372,8 +372,7 @@ mt76_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
break;
case IEEE80211_AMPDU_TX_START:
msta->agg_ssn[tid] = ssn << 4;
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- break;
+ return IEEE80211_AMPDU_TX_START_IMMEDIATE;
case IEEE80211_AMPDU_TX_STOP_CONT:
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index f1cdcd61c54a..25466454b73e 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -10476,7 +10476,7 @@ int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
* when the hw reorders frames due to aggregation.
*/
if (sta_priv->wcid > WCID_END)
- return 1;
+ return -ENOSPC;
switch (action) {
case IEEE80211_AMPDU_RX_START:
@@ -10489,7 +10489,7 @@ int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
*/
break;
case IEEE80211_AMPDU_TX_START:
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
break;
case IEEE80211_AMPDU_TX_STOP_CONT:
case IEEE80211_AMPDU_TX_STOP_FLUSH:
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
index 2b216edd0c7d..a90a518b40d3 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
@@ -23,7 +23,6 @@
#include <linux/leds.h>
#include <linux/mutex.h>
#include <linux/etherdevice.h>
-#include <linux/input-polldev.h>
#include <linux/kfifo.h>
#include <linux/hrtimer.h>
#include <linux/average.h>
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
index 4d4e3888ef20..f2395309ec00 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
@@ -555,7 +555,7 @@ static ssize_t rt2x00debug_write_restart_hw(struct file *file,
{
struct rt2x00debug_intf *intf = file->private_data;
struct rt2x00_dev *rt2x00dev = intf->rt2x00dev;
- static unsigned long last_reset;
+ static unsigned long last_reset = INITIAL_JIFFIES;
if (!rt2x00_has_cap_restart_hw(rt2x00dev))
return -EOPNOTSUPP;
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index ac746c322554..c75192c4447f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -1776,8 +1776,7 @@ int rtl_tx_agg_start(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
tid_data->agg.agg_state = RTL_AGG_START;
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- return 0;
+ return IEEE80211_AMPDU_TX_START_IMMEDIATE;
}
int rtl_tx_agg_stop(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index 6087ec7a90a6..f88d26535978 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -822,7 +822,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
hdr = rtl_get_hdr(skb);
fc = rtl_get_fc(skb);
- if (!stats.crc && !stats.hwerror) {
+ if (!stats.crc && !stats.hwerror && (skb->len > FCS_LEN)) {
memcpy(IEEE80211_SKB_RXCB(skb), &rx_status,
sizeof(rx_status));
@@ -859,6 +859,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
_rtl_pci_rx_to_mac80211(hw, skb, rx_status);
}
} else {
+ /* drop packets with errors or those too short */
dev_kfree_skb_any(skb);
}
new_trx_end:
diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c
index cbf5f186f48b..e5e1ec5a41dc 100644
--- a/drivers/net/wireless/realtek/rtlwifi/ps.c
+++ b/drivers/net/wireless/realtek/rtlwifi/ps.c
@@ -750,6 +750,9 @@ static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data,
return;
} else {
noa_num = (noa_len - 2) / 13;
+ if (noa_num > P2P_MAX_NOA_NUM)
+ noa_num = P2P_MAX_NOA_NUM;
+
}
noa_index = ie[3];
if (rtlpriv->psc.p2p_ps_info.p2p_ps_mode ==
@@ -844,6 +847,9 @@ static void rtl_p2p_action_ie(struct ieee80211_hw *hw, void *data,
return;
} else {
noa_num = (noa_len - 2) / 13;
+ if (noa_num > P2P_MAX_NOA_NUM)
+ noa_num = P2P_MAX_NOA_NUM;
+
}
noa_index = ie[3];
if (rtlpriv->psc.p2p_ps_info.p2p_ps_mode ==
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
index 6d6e8994460d..81313e0ca834 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
@@ -1352,9 +1352,9 @@ static void _rtl92s_phy_set_rfhalt(struct ieee80211_hw *hw)
/* SW/HW radio off or halt adapter!! For example S3/S4 */
} else {
/* LED function disable. Power range is about 8mA now. */
- /* if write 0xF1 disconnet_pci power
+ /* if write 0xF1 disconnect_pci power
* ifconfig wlan0 down power are both high 35:70 */
- /* if write oxF9 disconnet_pci power
+ * if write 0xF9 disconnect_pci power
* ifconfig wlan0 down power are both low 12:45*/
rtl_write_byte(rtlpriv, 0x03, 0xF9);
}
diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
index 2247bd61e716..34a1c3b53cd4 100644
--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
@@ -572,8 +572,7 @@ static int rtw_ops_ampdu_action(struct ieee80211_hw *hw,
switch (params->action) {
case IEEE80211_AMPDU_TX_START:
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- break;
+ return IEEE80211_AMPDU_TX_START_IMMEDIATE;
case IEEE80211_AMPDU_TX_STOP_CONT:
case IEEE80211_AMPDU_TX_STOP_FLUSH:
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index ce5e92d82efc..440088293aff 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -1140,8 +1140,7 @@ static int rsi_mac80211_ampdu_action(struct ieee80211_hw *hw,
else if ((vif->type == NL80211_IFTYPE_AP) ||
(vif->type == NL80211_IFTYPE_P2P_GO))
rsta->seq_start[tid] = seq_no;
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- status = 0;
+ status = IEEE80211_AMPDU_TX_START_IMMEDIATE;
break;
case IEEE80211_AMPDU_TX_STOP_CONT:
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index d4c09e54fd63..18c4d998ce4b 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -186,7 +186,7 @@ static void wl12xx_spi_init(struct device *child)
spi_sync(to_spi_device(glue->dev), &m);
- /* Restore chip select configration to normal */
+ /* Restore chip select configuration to normal */
spi->mode ^= SPI_CS_HIGH;
kfree(cmd);
}
diff --git a/drivers/net/wireless/virt_wifi.c b/drivers/net/wireless/virt_wifi.c
index be92e1220284..7997cc6de334 100644
--- a/drivers/net/wireless/virt_wifi.c
+++ b/drivers/net/wireless/virt_wifi.c
@@ -548,6 +548,7 @@ static int virt_wifi_newlink(struct net *src_net, struct net_device *dev,
priv->is_connected = false;
priv->is_up = false;
INIT_DELAYED_WORK(&priv->connect, virt_wifi_connect_complete);
+ __module_get(THIS_MODULE);
return 0;
unregister_netdev:
@@ -578,6 +579,7 @@ static void virt_wifi_dellink(struct net_device *dev,
netdev_upper_dev_unlink(priv->lowerdev, dev);
unregister_netdevice_queue(dev, head);
+ module_put(THIS_MODULE);
/* Deleting the wiphy is handled in the module destructor. */
}
@@ -590,6 +592,42 @@ static struct rtnl_link_ops virt_wifi_link_ops = {
.priv_size = sizeof(struct virt_wifi_netdev_priv),
};
+static bool netif_is_virt_wifi_dev(const struct net_device *dev)
+{
+ return rcu_access_pointer(dev->rx_handler) == virt_wifi_rx_handler;
+}
+
+static int virt_wifi_event(struct notifier_block *this, unsigned long event,
+ void *ptr)
+{
+ struct net_device *lower_dev = netdev_notifier_info_to_dev(ptr);
+ struct virt_wifi_netdev_priv *priv;
+ struct net_device *upper_dev;
+ LIST_HEAD(list_kill);
+
+ if (!netif_is_virt_wifi_dev(lower_dev))
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_UNREGISTER:
+ priv = rtnl_dereference(lower_dev->rx_handler_data);
+ if (!priv)
+ return NOTIFY_DONE;
+
+ upper_dev = priv->upperdev;
+
+ upper_dev->rtnl_link_ops->dellink(upper_dev, &list_kill);
+ unregister_netdevice_many(&list_kill);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block virt_wifi_notifier = {
+ .notifier_call = virt_wifi_event,
+};
+
/* Acquires and releases the rtnl lock. */
static int __init virt_wifi_init_module(void)
{
@@ -598,14 +636,25 @@ static int __init virt_wifi_init_module(void)
/* Guaranteed to be locally-administered and not multicast. */
eth_random_addr(fake_router_bssid);
+ err = register_netdevice_notifier(&virt_wifi_notifier);
+ if (err)
+ return err;
+
+ err = -ENOMEM;
common_wiphy = virt_wifi_make_wiphy();
if (!common_wiphy)
- return -ENOMEM;
+ goto notifier;
err = rtnl_link_register(&virt_wifi_link_ops);
if (err)
- virt_wifi_destroy_wiphy(common_wiphy);
+ goto destroy_wiphy;
+ return 0;
+
+destroy_wiphy:
+ virt_wifi_destroy_wiphy(common_wiphy);
+notifier:
+ unregister_netdevice_notifier(&virt_wifi_notifier);
return err;
}
@@ -615,6 +664,7 @@ static void __exit virt_wifi_cleanup_module(void)
/* Will delete any devices that depend on the wiphy. */
rtnl_link_unregister(&virt_wifi_link_ops);
virt_wifi_destroy_wiphy(common_wiphy);
+ unregister_netdevice_notifier(&virt_wifi_notifier);
}
module_init(virt_wifi_init_module);
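The notifier added above follows the standard netdevice-notifier pattern. A self-contained sketch of the same mechanism, assuming nothing from the patch (all demo_* names are hypothetical):

	#include <linux/module.h>
	#include <linux/netdevice.h>

	static int demo_event(struct notifier_block *nb, unsigned long event,
			      void *ptr)
	{
		struct net_device *dev = netdev_notifier_info_to_dev(ptr);

		if (event == NETDEV_UNREGISTER)
			pr_info("demo: %s is being unregistered\n", dev->name);
		return NOTIFY_DONE;
	}

	static struct notifier_block demo_nb = { .notifier_call = demo_event };

	static int __init demo_init(void)
	{
		return register_netdevice_notifier(&demo_nb);
	}

	static void __exit demo_exit(void)
	{
		unregister_netdevice_notifier(&demo_nb);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");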
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 240f762b3749..68dd7bb07ca6 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -626,6 +626,38 @@ err:
return err;
}
+static void xenvif_disconnect_queue(struct xenvif_queue *queue)
+{
+ if (queue->tx_irq) {
+ unbind_from_irqhandler(queue->tx_irq, queue);
+ if (queue->tx_irq == queue->rx_irq)
+ queue->rx_irq = 0;
+ queue->tx_irq = 0;
+ }
+
+ if (queue->rx_irq) {
+ unbind_from_irqhandler(queue->rx_irq, queue);
+ queue->rx_irq = 0;
+ }
+
+ if (queue->task) {
+ kthread_stop(queue->task);
+ queue->task = NULL;
+ }
+
+ if (queue->dealloc_task) {
+ kthread_stop(queue->dealloc_task);
+ queue->dealloc_task = NULL;
+ }
+
+ if (queue->napi.poll) {
+ netif_napi_del(&queue->napi);
+ queue->napi.poll = NULL;
+ }
+
+ xenvif_unmap_frontend_data_rings(queue);
+}
+
int xenvif_connect_data(struct xenvif_queue *queue,
unsigned long tx_ring_ref,
unsigned long rx_ring_ref,
@@ -651,13 +683,27 @@ int xenvif_connect_data(struct xenvif_queue *queue,
netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
XENVIF_NAPI_WEIGHT);
+ queue->stalled = true;
+
+ task = kthread_run(xenvif_kthread_guest_rx, queue,
+ "%s-guest-rx", queue->name);
+ if (IS_ERR(task))
+ goto kthread_err;
+ queue->task = task;
+
+ task = kthread_run(xenvif_dealloc_kthread, queue,
+ "%s-dealloc", queue->name);
+ if (IS_ERR(task))
+ goto kthread_err;
+ queue->dealloc_task = task;
+
if (tx_evtchn == rx_evtchn) {
/* feature-split-event-channels == 0 */
err = bind_interdomain_evtchn_to_irqhandler(
queue->vif->domid, tx_evtchn, xenvif_interrupt, 0,
queue->name, queue);
if (err < 0)
- goto err_unmap;
+ goto err;
queue->tx_irq = queue->rx_irq = err;
disable_irq(queue->tx_irq);
} else {
@@ -668,7 +714,7 @@ int xenvif_connect_data(struct xenvif_queue *queue,
queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
queue->tx_irq_name, queue);
if (err < 0)
- goto err_unmap;
+ goto err;
queue->tx_irq = err;
disable_irq(queue->tx_irq);
@@ -678,48 +724,18 @@ int xenvif_connect_data(struct xenvif_queue *queue,
queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
queue->rx_irq_name, queue);
if (err < 0)
- goto err_tx_unbind;
+ goto err;
queue->rx_irq = err;
disable_irq(queue->rx_irq);
}
- queue->stalled = true;
-
- task = kthread_create(xenvif_kthread_guest_rx,
- (void *)queue, "%s-guest-rx", queue->name);
- if (IS_ERR(task)) {
- pr_warn("Could not allocate kthread for %s\n", queue->name);
- err = PTR_ERR(task);
- goto err_rx_unbind;
- }
- queue->task = task;
- get_task_struct(task);
-
- task = kthread_create(xenvif_dealloc_kthread,
- (void *)queue, "%s-dealloc", queue->name);
- if (IS_ERR(task)) {
- pr_warn("Could not allocate kthread for %s\n", queue->name);
- err = PTR_ERR(task);
- goto err_rx_unbind;
- }
- queue->dealloc_task = task;
-
- wake_up_process(queue->task);
- wake_up_process(queue->dealloc_task);
-
return 0;
-err_rx_unbind:
- unbind_from_irqhandler(queue->rx_irq, queue);
- queue->rx_irq = 0;
-err_tx_unbind:
- unbind_from_irqhandler(queue->tx_irq, queue);
- queue->tx_irq = 0;
-err_unmap:
- xenvif_unmap_frontend_data_rings(queue);
- netif_napi_del(&queue->napi);
+kthread_err:
+ pr_warn("Could not allocate kthread for %s\n", queue->name);
+ err = PTR_ERR(task);
err:
- module_put(THIS_MODULE);
+ xenvif_disconnect_queue(queue);
return err;
}
@@ -747,30 +763,7 @@ void xenvif_disconnect_data(struct xenvif *vif)
for (queue_index = 0; queue_index < num_queues; ++queue_index) {
queue = &vif->queues[queue_index];
- netif_napi_del(&queue->napi);
-
- if (queue->task) {
- kthread_stop(queue->task);
- put_task_struct(queue->task);
- queue->task = NULL;
- }
-
- if (queue->dealloc_task) {
- kthread_stop(queue->dealloc_task);
- queue->dealloc_task = NULL;
- }
-
- if (queue->tx_irq) {
- if (queue->tx_irq == queue->rx_irq)
- unbind_from_irqhandler(queue->tx_irq, queue);
- else {
- unbind_from_irqhandler(queue->tx_irq, queue);
- unbind_from_irqhandler(queue->rx_irq, queue);
- }
- queue->tx_irq = 0;
- }
-
- xenvif_unmap_frontend_data_rings(queue);
+ xenvif_disconnect_queue(queue);
}
xenvif_mcast_addr_list_free(vif);
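The conversion above replaces kthread_create() plus wake_up_process() with kthread_run(), which is equivalent; a sketch of the equivalence (run_like_kthread_run is a hypothetical helper):

	#include <linux/err.h>
	#include <linux/kthread.h>

	static struct task_struct *run_like_kthread_run(int (*fn)(void *),
							void *data,
							const char *name)
	{
		/* kthread_run() is a macro that does exactly this: create
		 * the thread, then wake it on success.
		 */
		struct task_struct *t = kthread_create(fn, data, "%s", name);

		if (!IS_ERR(t))
			wake_up_process(t);
		return t;
	}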
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index e14ec75b61d6..482c6c8b0fb7 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -887,9 +887,9 @@ static int xennet_set_skb_gso(struct sk_buff *skb,
return 0;
}
-static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
- struct sk_buff *skb,
- struct sk_buff_head *list)
+static int xennet_fill_frags(struct netfront_queue *queue,
+ struct sk_buff *skb,
+ struct sk_buff_head *list)
{
RING_IDX cons = queue->rx.rsp_cons;
struct sk_buff *nskb;
@@ -908,7 +908,7 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
queue->rx.rsp_cons = ++cons + skb_queue_len(list);
kfree_skb(nskb);
- return ~0U;
+ return -ENOENT;
}
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
@@ -919,7 +919,9 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
kfree_skb(nskb);
}
- return cons;
+ queue->rx.rsp_cons = cons;
+
+ return 0;
}
static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
@@ -1045,8 +1047,7 @@ err:
skb->data_len = rx->status;
skb->len += rx->status;
- i = xennet_fill_frags(queue, skb, &tmpq);
- if (unlikely(i == ~0U))
+ if (unlikely(xennet_fill_frags(queue, skb, &tmpq)))
goto err;
if (rx->flags & XEN_NETRXF_csum_blank)
@@ -1056,7 +1057,7 @@ err:
__skb_queue_tail(&rxq, skb);
- queue->rx.rsp_cons = ++i;
+ i = ++queue->rx.rsp_cons;
work_done++;
}
diff --git a/drivers/nfc/nfcmrvl/i2c.c b/drivers/nfc/nfcmrvl/i2c.c
index 0f22379887ca..18cd96284b77 100644
--- a/drivers/nfc/nfcmrvl/i2c.c
+++ b/drivers/nfc/nfcmrvl/i2c.c
@@ -278,7 +278,6 @@ static struct i2c_driver nfcmrvl_i2c_driver = {
.remove = nfcmrvl_i2c_remove,
.driver = {
.name = "nfcmrvl_i2c",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(of_nfcmrvl_i2c_match),
},
};
diff --git a/drivers/nfc/pn533/Kconfig b/drivers/nfc/pn533/Kconfig
index f6d6b345ba0d..7fe1bbe26568 100644
--- a/drivers/nfc/pn533/Kconfig
+++ b/drivers/nfc/pn533/Kconfig
@@ -26,3 +26,14 @@ config NFC_PN533_I2C
If you choose to build a module, it'll be called pn533_i2c.
Say N if unsure.
+
+config NFC_PN532_UART
+ tristate "NFC PN532 device support (UART)"
+ depends on SERIAL_DEV_BUS
+ select NFC_PN533
+ ---help---
+ This module adds support for the NXP pn532 UART interface.
+ Select this if your platform is using the UART bus.
+
+ If you choose to build a module, it'll be called pn532_uart.
+ Say N if unsure.
diff --git a/drivers/nfc/pn533/Makefile b/drivers/nfc/pn533/Makefile
index 43c25b4f9466..b9648337576f 100644
--- a/drivers/nfc/pn533/Makefile
+++ b/drivers/nfc/pn533/Makefile
@@ -4,7 +4,9 @@
#
pn533_usb-objs = usb.o
pn533_i2c-objs = i2c.o
+pn532_uart-objs = uart.o
obj-$(CONFIG_NFC_PN533) += pn533.o
obj-$(CONFIG_NFC_PN533_USB) += pn533_usb.o
obj-$(CONFIG_NFC_PN533_I2C) += pn533_i2c.o
+obj-$(CONFIG_NFC_PN532_UART) += pn532_uart.o
diff --git a/drivers/nfc/pn533/i2c.c b/drivers/nfc/pn533/i2c.c
index 1832cd921ea7..7507176cca0a 100644
--- a/drivers/nfc/pn533/i2c.c
+++ b/drivers/nfc/pn533/i2c.c
@@ -193,12 +193,10 @@ static int pn533_i2c_probe(struct i2c_client *client,
phy->i2c_dev = client;
i2c_set_clientdata(client, phy);
- priv = pn533_register_device(PN533_DEVICE_PN532,
- PN533_NO_TYPE_B_PROTOCOLS,
- PN533_PROTO_REQ_ACK_RESP,
- phy, &i2c_phy_ops, NULL,
- &phy->i2c_dev->dev,
- &client->dev);
+ priv = pn53x_common_init(PN533_DEVICE_PN532,
+ PN533_PROTO_REQ_ACK_RESP,
+ phy, &i2c_phy_ops, NULL,
+ &phy->i2c_dev->dev);
if (IS_ERR(priv)) {
r = PTR_ERR(priv);
@@ -206,6 +204,9 @@ static int pn533_i2c_probe(struct i2c_client *client,
}
phy->priv = priv;
+ r = pn532_i2c_nfc_alloc(priv, PN533_NO_TYPE_B_PROTOCOLS, &client->dev);
+ if (r)
+ goto nfc_alloc_err;
r = request_threaded_irq(client->irq, NULL, pn533_i2c_irq_thread_fn,
IRQF_TRIGGER_FALLING |
@@ -220,13 +221,20 @@ static int pn533_i2c_probe(struct i2c_client *client,
if (r)
goto fn_setup_err;
- return 0;
+ r = nfc_register_device(priv->nfc_dev);
+ if (r)
+ goto fn_setup_err;
+
+ return r;
fn_setup_err:
free_irq(client->irq, phy);
irq_rqst_err:
- pn533_unregister_device(phy->priv);
+ nfc_free_device(priv->nfc_dev);
+
+nfc_alloc_err:
+ pn53x_common_clean(phy->priv);
return r;
}
@@ -239,12 +247,18 @@ static int pn533_i2c_remove(struct i2c_client *client)
free_irq(client->irq, phy);
- pn533_unregister_device(phy->priv);
+ pn53x_unregister_nfc(phy->priv);
+ pn53x_common_clean(phy->priv);
return 0;
}
static const struct of_device_id of_pn533_i2c_match[] = {
+ { .compatible = "nxp,pn532", },
+ /*
+ * NOTE: The use of the compatibles with the trailing "...-i2c" is
+ * deprecated and will be removed.
+ */
{ .compatible = "nxp,pn533-i2c", },
{ .compatible = "nxp,pn532-i2c", },
{},
diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c
index a172a32aa9d9..aa766e7ece70 100644
--- a/drivers/nfc/pn533/pn533.c
+++ b/drivers/nfc/pn533/pn533.c
@@ -185,6 +185,32 @@ struct pn533_cmd_jump_dep_response {
u8 gt[];
} __packed;
+struct pn532_autopoll_resp {
+ u8 type;
+ u8 ln;
+ u8 tg;
+ u8 tgdata[];
+};
+
+/* PN532_CMD_IN_AUTOPOLL */
+#define PN532_AUTOPOLL_POLLNR_INFINITE 0xff
+#define PN532_AUTOPOLL_PERIOD 0x03 /* in units of 150 ms */
+
+#define PN532_AUTOPOLL_TYPE_GENERIC_106 0x00
+#define PN532_AUTOPOLL_TYPE_GENERIC_212 0x01
+#define PN532_AUTOPOLL_TYPE_GENERIC_424 0x02
+#define PN532_AUTOPOLL_TYPE_JEWEL 0x04
+#define PN532_AUTOPOLL_TYPE_MIFARE 0x10
+#define PN532_AUTOPOLL_TYPE_FELICA212 0x11
+#define PN532_AUTOPOLL_TYPE_FELICA424 0x12
+#define PN532_AUTOPOLL_TYPE_ISOA 0x20
+#define PN532_AUTOPOLL_TYPE_ISOB 0x23
+#define PN532_AUTOPOLL_TYPE_DEP_PASSIVE_106 0x40
+#define PN532_AUTOPOLL_TYPE_DEP_PASSIVE_212 0x41
+#define PN532_AUTOPOLL_TYPE_DEP_PASSIVE_424 0x42
+#define PN532_AUTOPOLL_TYPE_DEP_ACTIVE_106 0x80
+#define PN532_AUTOPOLL_TYPE_DEP_ACTIVE_212 0x81
+#define PN532_AUTOPOLL_TYPE_DEP_ACTIVE_424 0x82
/* PN533_TG_INIT_AS_TARGET */
#define PN533_INIT_TARGET_PASSIVE 0x1
@@ -1389,6 +1415,101 @@ static int pn533_poll_dep(struct nfc_dev *nfc_dev)
return rc;
}
+static int pn533_autopoll_complete(struct pn533 *dev, void *arg,
+ struct sk_buff *resp)
+{
+ struct pn532_autopoll_resp *apr;
+ struct nfc_target nfc_tgt;
+ u8 nbtg;
+ int rc;
+
+ if (IS_ERR(resp)) {
+ rc = PTR_ERR(resp);
+
+ nfc_err(dev->dev, "%s autopoll complete error %d\n",
+ __func__, rc);
+
+ if (rc == -ENOENT) {
+ if (dev->poll_mod_count != 0)
+ return rc;
+ goto stop_poll;
+ } else if (rc < 0) {
+ nfc_err(dev->dev,
+ "Error %d when running autopoll\n", rc);
+ goto stop_poll;
+ }
+ }
+
+ nbtg = resp->data[0];
+ if ((nbtg > 2) || (nbtg <= 0))
+ return -EAGAIN;
+
+ apr = (struct pn532_autopoll_resp *)&resp->data[1];
+ while (nbtg--) {
+ memset(&nfc_tgt, 0, sizeof(struct nfc_target));
+ switch (apr->type) {
+ case PN532_AUTOPOLL_TYPE_ISOA:
+ dev_dbg(dev->dev, "ISOA\n");
+ rc = pn533_target_found_type_a(&nfc_tgt, apr->tgdata,
+ apr->ln - 1);
+ break;
+ case PN532_AUTOPOLL_TYPE_FELICA212:
+ case PN532_AUTOPOLL_TYPE_FELICA424:
+ dev_dbg(dev->dev, "FELICA\n");
+ rc = pn533_target_found_felica(&nfc_tgt, apr->tgdata,
+ apr->ln - 1);
+ break;
+ case PN532_AUTOPOLL_TYPE_JEWEL:
+ dev_dbg(dev->dev, "JEWEL\n");
+ rc = pn533_target_found_jewel(&nfc_tgt, apr->tgdata,
+ apr->ln - 1);
+ break;
+ case PN532_AUTOPOLL_TYPE_ISOB:
+ dev_dbg(dev->dev, "ISOB\n");
+ rc = pn533_target_found_type_b(&nfc_tgt, apr->tgdata,
+ apr->ln - 1);
+ break;
+ case PN532_AUTOPOLL_TYPE_MIFARE:
+ dev_dbg(dev->dev, "Mifare\n");
+ rc = pn533_target_found_type_a(&nfc_tgt, apr->tgdata,
+ apr->ln - 1);
+ break;
+ default:
+ nfc_err(dev->dev,
+ "Unknown current poll modulation\n");
+ rc = -EPROTO;
+ }
+
+ if (rc)
+ goto done;
+
+ if (!(nfc_tgt.supported_protocols & dev->poll_protocols)) {
+ nfc_err(dev->dev,
+ "The Tg found doesn't have the desired protocol\n");
+ rc = -EAGAIN;
+ goto done;
+ }
+
+ dev->tgt_available_prots = nfc_tgt.supported_protocols;
+ apr = (struct pn532_autopoll_resp *)
+ (apr->tgdata + (apr->ln - 1));
+ }
+
+ pn533_poll_reset_mod_list(dev);
+ nfc_targets_found(dev->nfc_dev, &nfc_tgt, 1);
+
+done:
+ dev_kfree_skb(resp);
+ return rc;
+
+stop_poll:
+ nfc_err(dev->dev, "autopoll operation has been stopped\n");
+
+ pn533_poll_reset_mod_list(dev);
+ dev->poll_protocols = 0;
+ return rc;
+}
+
static int pn533_poll_complete(struct pn533 *dev, void *arg,
struct sk_buff *resp)
{
@@ -1532,6 +1653,7 @@ static int pn533_start_poll(struct nfc_dev *nfc_dev,
{
struct pn533 *dev = nfc_get_drvdata(nfc_dev);
struct pn533_poll_modulations *cur_mod;
+ struct sk_buff *skb;
u8 rand_mod;
int rc;
@@ -1557,9 +1679,73 @@ static int pn533_start_poll(struct nfc_dev *nfc_dev,
tm_protocols = 0;
}
- pn533_poll_create_mod_list(dev, im_protocols, tm_protocols);
dev->poll_protocols = im_protocols;
dev->listen_protocols = tm_protocols;
+ if (dev->device_type == PN533_DEVICE_PN532_AUTOPOLL) {
+ skb = pn533_alloc_skb(dev, 4 + 6);
+ if (!skb)
+ return -ENOMEM;
+
+ *((u8 *)skb_put(skb, sizeof(u8))) =
+ PN532_AUTOPOLL_POLLNR_INFINITE;
+ *((u8 *)skb_put(skb, sizeof(u8))) = PN532_AUTOPOLL_PERIOD;
+
+ if ((im_protocols & NFC_PROTO_MIFARE_MASK) &&
+ (im_protocols & NFC_PROTO_ISO14443_MASK) &&
+ (im_protocols & NFC_PROTO_NFC_DEP_MASK))
+ *((u8 *)skb_put(skb, sizeof(u8))) =
+ PN532_AUTOPOLL_TYPE_GENERIC_106;
+ else {
+ if (im_protocols & NFC_PROTO_MIFARE_MASK)
+ *((u8 *)skb_put(skb, sizeof(u8))) =
+ PN532_AUTOPOLL_TYPE_MIFARE;
+
+ if (im_protocols & NFC_PROTO_ISO14443_MASK)
+ *((u8 *)skb_put(skb, sizeof(u8))) =
+ PN532_AUTOPOLL_TYPE_ISOA;
+
+ if (im_protocols & NFC_PROTO_NFC_DEP_MASK) {
+ *((u8 *)skb_put(skb, sizeof(u8))) =
+ PN532_AUTOPOLL_TYPE_DEP_PASSIVE_106;
+ *((u8 *)skb_put(skb, sizeof(u8))) =
+ PN532_AUTOPOLL_TYPE_DEP_PASSIVE_212;
+ *((u8 *)skb_put(skb, sizeof(u8))) =
+ PN532_AUTOPOLL_TYPE_DEP_PASSIVE_424;
+ }
+ }
+
+ if (im_protocols & NFC_PROTO_FELICA_MASK ||
+ im_protocols & NFC_PROTO_NFC_DEP_MASK) {
+ *((u8 *)skb_put(skb, sizeof(u8))) =
+ PN532_AUTOPOLL_TYPE_FELICA212;
+ *((u8 *)skb_put(skb, sizeof(u8))) =
+ PN532_AUTOPOLL_TYPE_FELICA424;
+ }
+
+ if (im_protocols & NFC_PROTO_JEWEL_MASK)
+ *((u8 *)skb_put(skb, sizeof(u8))) =
+ PN532_AUTOPOLL_TYPE_JEWEL;
+
+ if (im_protocols & NFC_PROTO_ISO14443_B_MASK)
+ *((u8 *)skb_put(skb, sizeof(u8))) =
+ PN532_AUTOPOLL_TYPE_ISOB;
+
+ if (tm_protocols)
+ *((u8 *)skb_put(skb, sizeof(u8))) =
+ PN532_AUTOPOLL_TYPE_DEP_ACTIVE_106;
+
+ rc = pn533_send_cmd_async(dev, PN533_CMD_IN_AUTOPOLL, skb,
+ pn533_autopoll_complete, NULL);
+
+ if (rc < 0)
+ dev_kfree_skb(skb);
+ else
+ dev->poll_mod_count++;
+
+ return rc;
+ }
+
+ pn533_poll_create_mod_list(dev, im_protocols, tm_protocols);
/* Do not always start polling from the same modulation */
get_random_bytes(&rand_mod, sizeof(rand_mod));
@@ -2458,7 +2644,11 @@ static int pn533_dev_up(struct nfc_dev *nfc_dev)
{
struct pn533 *dev = nfc_get_drvdata(nfc_dev);
- if (dev->device_type == PN533_DEVICE_PN532) {
+ if (dev->phy_ops->dev_up)
+ dev->phy_ops->dev_up(dev);
+
+ if ((dev->device_type == PN533_DEVICE_PN532) ||
+ (dev->device_type == PN533_DEVICE_PN532_AUTOPOLL)) {
int rc = pn532_sam_configuration(nfc_dev);
if (rc)
@@ -2470,7 +2660,14 @@ static int pn533_dev_up(struct nfc_dev *nfc_dev)
static int pn533_dev_down(struct nfc_dev *nfc_dev)
{
- return pn533_rf_field(nfc_dev, 0);
+ struct pn533 *dev = nfc_get_drvdata(nfc_dev);
+ int ret;
+
+ ret = pn533_rf_field(nfc_dev, 0);
+ if (dev->phy_ops->dev_down && !ret)
+ dev->phy_ops->dev_down(dev);
+
+ return ret;
}
static struct nfc_ops pn533_nfc_ops = {
@@ -2498,6 +2695,7 @@ static int pn533_setup(struct pn533 *dev)
case PN533_DEVICE_PASORI:
case PN533_DEVICE_ACR122U:
case PN533_DEVICE_PN532:
+ case PN533_DEVICE_PN532_AUTOPOLL:
max_retries.mx_rty_atr = 0x2;
max_retries.mx_rty_psl = 0x1;
max_retries.mx_rty_passive_act =
@@ -2534,6 +2732,7 @@ static int pn533_setup(struct pn533 *dev)
switch (dev->device_type) {
case PN533_DEVICE_STD:
case PN533_DEVICE_PN532:
+ case PN533_DEVICE_PN532_AUTOPOLL:
break;
case PN533_DEVICE_PASORI:
@@ -2580,14 +2779,12 @@ int pn533_finalize_setup(struct pn533 *dev)
}
EXPORT_SYMBOL_GPL(pn533_finalize_setup);
-struct pn533 *pn533_register_device(u32 device_type,
- u32 protocols,
+struct pn533 *pn53x_common_init(u32 device_type,
enum pn533_protocol_type protocol_type,
void *phy,
struct pn533_phy_ops *phy_ops,
struct pn533_frame_ops *fops,
- struct device *dev,
- struct device *parent)
+ struct device *dev)
{
struct pn533 *priv;
int rc = -ENOMEM;
@@ -2628,43 +2825,18 @@ struct pn533 *pn533_register_device(u32 device_type,
skb_queue_head_init(&priv->fragment_skb);
INIT_LIST_HEAD(&priv->cmd_queue);
-
- priv->nfc_dev = nfc_allocate_device(&pn533_nfc_ops, protocols,
- priv->ops->tx_header_len +
- PN533_CMD_DATAEXCH_HEAD_LEN,
- priv->ops->tx_tail_len);
- if (!priv->nfc_dev) {
- rc = -ENOMEM;
- goto destroy_wq;
- }
-
- nfc_set_parent_dev(priv->nfc_dev, parent);
- nfc_set_drvdata(priv->nfc_dev, priv);
-
- rc = nfc_register_device(priv->nfc_dev);
- if (rc)
- goto free_nfc_dev;
-
return priv;
-free_nfc_dev:
- nfc_free_device(priv->nfc_dev);
-
-destroy_wq:
- destroy_workqueue(priv->wq);
error:
kfree(priv);
return ERR_PTR(rc);
}
-EXPORT_SYMBOL_GPL(pn533_register_device);
+EXPORT_SYMBOL_GPL(pn53x_common_init);
-void pn533_unregister_device(struct pn533 *priv)
+void pn53x_common_clean(struct pn533 *priv)
{
struct pn533_cmd *cmd, *n;
- nfc_unregister_device(priv->nfc_dev);
- nfc_free_device(priv->nfc_dev);
-
flush_delayed_work(&priv->poll_work);
destroy_workqueue(priv->wq);
@@ -2679,8 +2851,47 @@ void pn533_unregister_device(struct pn533 *priv)
kfree(priv);
}
-EXPORT_SYMBOL_GPL(pn533_unregister_device);
+EXPORT_SYMBOL_GPL(pn53x_common_clean);
+
+int pn532_i2c_nfc_alloc(struct pn533 *priv, u32 protocols,
+ struct device *parent)
+{
+ priv->nfc_dev = nfc_allocate_device(&pn533_nfc_ops, protocols,
+ priv->ops->tx_header_len +
+ PN533_CMD_DATAEXCH_HEAD_LEN,
+ priv->ops->tx_tail_len);
+ if (!priv->nfc_dev)
+ return -ENOMEM;
+
+ nfc_set_parent_dev(priv->nfc_dev, parent);
+ nfc_set_drvdata(priv->nfc_dev, priv);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pn532_i2c_nfc_alloc);
+int pn53x_register_nfc(struct pn533 *priv, u32 protocols,
+ struct device *parent)
+{
+ int rc;
+
+ rc = pn532_i2c_nfc_alloc(priv, protocols, parent);
+ if (rc)
+ return rc;
+
+ rc = nfc_register_device(priv->nfc_dev);
+ if (rc)
+ nfc_free_device(priv->nfc_dev);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(pn53x_register_nfc);
+
+void pn53x_unregister_nfc(struct pn533 *priv)
+{
+ nfc_unregister_device(priv->nfc_dev);
+ nfc_free_device(priv->nfc_dev);
+}
+EXPORT_SYMBOL_GPL(pn53x_unregister_nfc);
MODULE_AUTHOR("Lauro Ramos Venancio <lauro.venancio@openbossa.org>");
MODULE_AUTHOR("Aloisio Almeida Jr <aloisio.almeida@openbossa.org>");
diff --git a/drivers/nfc/pn533/pn533.h b/drivers/nfc/pn533/pn533.h
index 8bf9d6ece0f5..b66f02a53167 100644
--- a/drivers/nfc/pn533/pn533.h
+++ b/drivers/nfc/pn533/pn533.h
@@ -6,10 +6,11 @@
* Copyright (C) 2012-2013 Tieto Poland
*/
-#define PN533_DEVICE_STD 0x1
-#define PN533_DEVICE_PASORI 0x2
-#define PN533_DEVICE_ACR122U 0x3
-#define PN533_DEVICE_PN532 0x4
+#define PN533_DEVICE_STD 0x1
+#define PN533_DEVICE_PASORI 0x2
+#define PN533_DEVICE_ACR122U 0x3
+#define PN533_DEVICE_PN532 0x4
+#define PN533_DEVICE_PN532_AUTOPOLL 0x5
#define PN533_ALL_PROTOCOLS (NFC_PROTO_JEWEL_MASK | NFC_PROTO_MIFARE_MASK |\
NFC_PROTO_FELICA_MASK | NFC_PROTO_ISO14443_MASK |\
@@ -43,6 +44,11 @@
/* Preamble (1), SoPC (2), ACK Code (2), Postamble (1) */
#define PN533_STD_FRAME_ACK_SIZE 6
+/*
+ * Preamble (1), SoPC (2), Packet Length (1), Packet Length Checksum (1),
+ * Specific Application Level Error Code (1), Postamble (1)
+ */
+#define PN533_STD_ERROR_FRAME_SIZE 8
#define PN533_STD_FRAME_CHECKSUM(f) (f->data[f->datalen])
#define PN533_STD_FRAME_POSTAMBLE(f) (f->data[f->datalen + 1])
/* Half start code (3), LEN (4) should be 0xffff for extended frame */
@@ -70,6 +76,7 @@
#define PN533_CMD_IN_ATR 0x50
#define PN533_CMD_IN_RELEASE 0x52
#define PN533_CMD_IN_JUMP_FOR_DEP 0x56
+#define PN533_CMD_IN_AUTOPOLL 0x60
#define PN533_CMD_TG_INIT_AS_TARGET 0x8c
#define PN533_CMD_TG_GET_DATA 0x86
@@ -84,6 +91,9 @@
#define PN533_CMD_MI_MASK 0x40
#define PN533_CMD_RET_SUCCESS 0x00
+#define PN533_FRAME_DATALEN_ACK 0x00
+#define PN533_FRAME_DATALEN_ERROR 0x01
+#define PN533_FRAME_DATALEN_EXTENDED 0xFF
enum pn533_protocol_type {
PN533_PROTO_REQ_ACK_RESP = 0,
@@ -207,21 +217,33 @@ struct pn533_phy_ops {
struct sk_buff *out);
int (*send_ack)(struct pn533 *dev, gfp_t flags);
void (*abort_cmd)(struct pn533 *priv, gfp_t flags);
+ /*
+ * dev_up and dev_down are optional.
+ * They are used to inform the phy layer that the nfc chip
+ * is going to be used very soon. The phy layer can then
+ * bring up its interface to the chip, and otherwise keep it
+ * suspended for power-saving reasons.
+ */
+ void (*dev_up)(struct pn533 *priv);
+ void (*dev_down)(struct pn533 *priv);
};
-struct pn533 *pn533_register_device(u32 device_type,
- u32 protocols,
+struct pn533 *pn53x_common_init(u32 device_type,
enum pn533_protocol_type protocol_type,
void *phy,
struct pn533_phy_ops *phy_ops,
struct pn533_frame_ops *fops,
- struct device *dev,
- struct device *parent);
+ struct device *dev);
int pn533_finalize_setup(struct pn533 *dev);
-void pn533_unregister_device(struct pn533 *priv);
+void pn53x_common_clean(struct pn533 *priv);
void pn533_recv_frame(struct pn533 *dev, struct sk_buff *skb, int status);
+int pn532_i2c_nfc_alloc(struct pn533 *priv, u32 protocols,
+ struct device *parent);
+int pn53x_register_nfc(struct pn533 *priv, u32 protocols,
+ struct device *parent);
+void pn53x_unregister_nfc(struct pn533 *priv);
bool pn533_rx_frame_is_cmd_response(struct pn533 *dev, void *frame);
bool pn533_rx_frame_is_ack(void *_frame);
diff --git a/drivers/nfc/pn533/uart.c b/drivers/nfc/pn533/uart.c
new file mode 100644
index 000000000000..46e5ff16f699
--- /dev/null
+++ b/drivers/nfc/pn533/uart.c
@@ -0,0 +1,323 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for NXP PN532 NFC Chip - UART transport layer
+ *
+ * Copyright (C) 2018 Lemonage Software GmbH
+ * Author: Lars Pöschel <poeschel@lemonage.de>
+ * All rights reserved.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/nfc.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/serdev.h>
+#include "pn533.h"
+
+#define PN532_UART_SKB_BUFF_LEN (PN533_CMD_DATAEXCH_DATA_MAXLEN * 2)
+
+enum send_wakeup {
+ PN532_SEND_NO_WAKEUP = 0,
+ PN532_SEND_WAKEUP,
+ PN532_SEND_LAST_WAKEUP,
+};
+
+struct pn532_uart_phy {
+ struct serdev_device *serdev;
+ struct sk_buff *recv_skb;
+ struct pn533 *priv;
+ /*
+ * The send_wakeup variable controls whether we need to send a wakeup
+ * request to the pn532 chip prior to our actual command. There is a
+ * small probability of a race condition, but we chose not to guard
+ * the variable with a mutex: the worst that can happen is that we
+ * send a wakeup to a chip that is already awake, which does not hurt
+ * and is a no-op to the chip.
+ */
+ enum send_wakeup send_wakeup;
+ struct timer_list cmd_timeout;
+ struct sk_buff *cur_out_buf;
+};
+
+static int pn532_uart_send_frame(struct pn533 *dev,
+ struct sk_buff *out)
+{
+ /* wakeup sequence and dummy bytes for waiting time */
+ static const u8 wakeup[] = {
+ 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ struct pn532_uart_phy *pn532 = dev->phy;
+ int err;
+
+ print_hex_dump_debug("PN532_uart TX: ", DUMP_PREFIX_NONE, 16, 1,
+ out->data, out->len, false);
+
+ pn532->cur_out_buf = out;
+ if (pn532->send_wakeup) {
+ err = serdev_device_write(pn532->serdev,
+ wakeup, sizeof(wakeup),
+ MAX_SCHEDULE_TIMEOUT);
+ if (err < 0)
+ return err;
+ }
+
+ if (pn532->send_wakeup == PN532_SEND_LAST_WAKEUP)
+ pn532->send_wakeup = PN532_SEND_NO_WAKEUP;
+
+ err = serdev_device_write(pn532->serdev, out->data, out->len,
+ MAX_SCHEDULE_TIMEOUT);
+ if (err < 0)
+ return err;
+
+ mod_timer(&pn532->cmd_timeout, HZ / 40 + jiffies);
+ return 0;
+}
+
+static int pn532_uart_send_ack(struct pn533 *dev, gfp_t flags)
+{
+ /* spec 7.1.1.3: Preamble, SoPC (2), ACK Code (2), Postamble */
+ static const u8 ack[PN533_STD_FRAME_ACK_SIZE] = {
+ 0x00, 0x00, 0xff, 0x00, 0xff, 0x00};
+ struct pn532_uart_phy *pn532 = dev->phy;
+ int err;
+
+ err = serdev_device_write(pn532->serdev, ack, sizeof(ack),
+ MAX_SCHEDULE_TIMEOUT);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static void pn532_uart_abort_cmd(struct pn533 *dev, gfp_t flags)
+{
+ /* An ack will cancel the last issued command */
+ pn532_uart_send_ack(dev, flags);
+ /* schedule cmd_complete_work to finish current command execution */
+ pn533_recv_frame(dev, NULL, -ENOENT);
+}
+
+static void pn532_dev_up(struct pn533 *dev)
+{
+ struct pn532_uart_phy *pn532 = dev->phy;
+
+ serdev_device_open(pn532->serdev);
+ pn532->send_wakeup = PN532_SEND_LAST_WAKEUP;
+}
+
+static void pn532_dev_down(struct pn533 *dev)
+{
+ struct pn532_uart_phy *pn532 = dev->phy;
+
+ serdev_device_close(pn532->serdev);
+ pn532->send_wakeup = PN532_SEND_WAKEUP;
+}
+
+static struct pn533_phy_ops uart_phy_ops = {
+ .send_frame = pn532_uart_send_frame,
+ .send_ack = pn532_uart_send_ack,
+ .abort_cmd = pn532_uart_abort_cmd,
+ .dev_up = pn532_dev_up,
+ .dev_down = pn532_dev_down,
+};
+
+static void pn532_cmd_timeout(struct timer_list *t)
+{
+ struct pn532_uart_phy *dev = from_timer(dev, t, cmd_timeout);
+
+ pn532_uart_send_frame(dev->priv, dev->cur_out_buf);
+}
+
+/*
+ * Scans the buffer for a pn532 frame. The frame is not fully validated
+ * here; that is done later with pn533_rx_frame_is_valid(), which helps
+ * with malformed or erroneously transmitted frames. Adjusts the buffer
+ * position to where the frame starts, since pn533_recv_frame() expects
+ * a well-formed frame.
+ */
+static int pn532_uart_rx_is_frame(struct sk_buff *skb)
+{
+ struct pn533_std_frame *std;
+ struct pn533_ext_frame *ext;
+ u16 frame_len;
+ int i;
+
+ for (i = 0; i + PN533_STD_FRAME_ACK_SIZE <= skb->len; i++) {
+ std = (struct pn533_std_frame *)&skb->data[i];
+ /* search start code */
+ if (std->start_frame != cpu_to_be16(PN533_STD_FRAME_SOF))
+ continue;
+
+ /* frame type */
+ switch (std->datalen) {
+ case PN533_FRAME_DATALEN_ACK:
+ if (std->datalen_checksum == 0xff) {
+ skb_pull(skb, i);
+ return 1;
+ }
+
+ break;
+ case PN533_FRAME_DATALEN_ERROR:
+ if ((std->datalen_checksum == 0xff) &&
+ (skb->len >=
+ PN533_STD_ERROR_FRAME_SIZE)) {
+ skb_pull(skb, i);
+ return 1;
+ }
+
+ break;
+ case PN533_FRAME_DATALEN_EXTENDED:
+ ext = (struct pn533_ext_frame *)&skb->data[i];
+ frame_len = be16_to_cpu(ext->datalen);
+ if (skb->len >= frame_len +
+ sizeof(struct pn533_ext_frame) +
+ 2 /* CKS + Postamble */) {
+ skb_pull(skb, i);
+ return 1;
+ }
+
+ break;
+ default: /* normal information frame */
+ frame_len = std->datalen;
+ if (skb->len >= frame_len +
+ sizeof(struct pn533_std_frame) +
+ 2 /* CKS + Postamble */) {
+ skb_pull(skb, i);
+ return 1;
+ }
+
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int pn532_receive_buf(struct serdev_device *serdev,
+ const unsigned char *data, size_t count)
+{
+ struct pn532_uart_phy *dev = serdev_device_get_drvdata(serdev);
+ size_t i;
+
+ del_timer(&dev->cmd_timeout);
+ for (i = 0; i < count; i++) {
+ skb_put_u8(dev->recv_skb, *data++);
+ if (!pn532_uart_rx_is_frame(dev->recv_skb))
+ continue;
+
+ pn533_recv_frame(dev->priv, dev->recv_skb, 0);
+ dev->recv_skb = alloc_skb(PN532_UART_SKB_BUFF_LEN, GFP_KERNEL);
+ if (!dev->recv_skb)
+ return 0;
+ }
+
+ return i;
+}
+
+static struct serdev_device_ops pn532_serdev_ops = {
+ .receive_buf = pn532_receive_buf,
+ .write_wakeup = serdev_device_write_wakeup,
+};
+
+static const struct of_device_id pn532_uart_of_match[] = {
+ { .compatible = "nxp,pn532", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, pn532_uart_of_match);
+
+static int pn532_uart_probe(struct serdev_device *serdev)
+{
+ struct pn532_uart_phy *pn532;
+ struct pn533 *priv;
+ int err;
+
+ err = -ENOMEM;
+ pn532 = kzalloc(sizeof(*pn532), GFP_KERNEL);
+ if (!pn532)
+ goto err_exit;
+
+ pn532->recv_skb = alloc_skb(PN532_UART_SKB_BUFF_LEN, GFP_KERNEL);
+ if (!pn532->recv_skb)
+ goto err_free;
+
+ pn532->serdev = serdev;
+ serdev_device_set_drvdata(serdev, pn532);
+ serdev_device_set_client_ops(serdev, &pn532_serdev_ops);
+ err = serdev_device_open(serdev);
+ if (err) {
+ dev_err(&serdev->dev, "Unable to open device\n");
+ goto err_skb;
+ }
+
+ err = serdev_device_set_baudrate(serdev, 115200);
+ if (err != 115200) {
+ err = -EINVAL;
+ goto err_serdev;
+ }
+
+ serdev_device_set_flow_control(serdev, false);
+ pn532->send_wakeup = PN532_SEND_WAKEUP;
+ timer_setup(&pn532->cmd_timeout, pn532_cmd_timeout, 0);
+ priv = pn53x_common_init(PN533_DEVICE_PN532_AUTOPOLL,
+ PN533_PROTO_REQ_ACK_RESP,
+ pn532, &uart_phy_ops, NULL,
+ &pn532->serdev->dev);
+ if (IS_ERR(priv)) {
+ err = PTR_ERR(priv);
+ goto err_serdev;
+ }
+
+ pn532->priv = priv;
+ err = pn533_finalize_setup(pn532->priv);
+ if (err)
+ goto err_clean;
+
+ serdev_device_close(serdev);
+ err = pn53x_register_nfc(priv, PN533_NO_TYPE_B_PROTOCOLS, &serdev->dev);
+ if (err) {
+ pn53x_common_clean(pn532->priv);
+ goto err_skb;
+ }
+
+ return err;
+
+err_clean:
+ pn53x_common_clean(pn532->priv);
+err_serdev:
+ serdev_device_close(serdev);
+err_skb:
+ kfree_skb(pn532->recv_skb);
+err_free:
+ kfree(pn532);
+err_exit:
+ return err;
+}
+
+static void pn532_uart_remove(struct serdev_device *serdev)
+{
+ struct pn532_uart_phy *pn532 = serdev_device_get_drvdata(serdev);
+
+ pn53x_unregister_nfc(pn532->priv);
+ serdev_device_close(serdev);
+ pn53x_common_clean(pn532->priv);
+ kfree_skb(pn532->recv_skb);
+ kfree(pn532);
+}
+
+static struct serdev_device_driver pn532_uart_driver = {
+ .probe = pn532_uart_probe,
+ .remove = pn532_uart_remove,
+ .driver = {
+ .name = "pn532_uart",
+ .of_match_table = of_match_ptr(pn532_uart_of_match),
+ },
+};
+
+module_serdev_device_driver(pn532_uart_driver);
+
+MODULE_AUTHOR("Lars Pöschel <poeschel@lemonage.de>");
+MODULE_DESCRIPTION("PN532 UART driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c
index c5289eaf17ee..4590fbf82dc2 100644
--- a/drivers/nfc/pn533/usb.c
+++ b/drivers/nfc/pn533/usb.c
@@ -534,9 +534,9 @@ static int pn533_usb_probe(struct usb_interface *interface,
goto error;
}
- priv = pn533_register_device(id->driver_info, protocols, protocol_type,
+ priv = pn53x_common_init(id->driver_info, protocol_type,
phy, &usb_phy_ops, fops,
- &phy->udev->dev, &interface->dev);
+ &phy->udev->dev);
if (IS_ERR(priv)) {
rc = PTR_ERR(priv);
@@ -547,18 +547,28 @@ static int pn533_usb_probe(struct usb_interface *interface,
rc = pn533_finalize_setup(priv);
if (rc)
- goto error;
+ goto err_clean;
usb_set_intfdata(interface, phy);
+ rc = pn53x_register_nfc(priv, protocols, &interface->dev);
+ if (rc)
+ goto err_clean;
return 0;
+err_clean:
+ pn53x_common_clean(priv);
error:
+ usb_kill_urb(phy->in_urb);
+ usb_kill_urb(phy->out_urb);
+ usb_kill_urb(phy->ack_urb);
+
usb_free_urb(phy->in_urb);
usb_free_urb(phy->out_urb);
usb_free_urb(phy->ack_urb);
usb_put_dev(phy->udev);
kfree(in_buf);
+ kfree(phy->ack_buffer);
return rc;
}
@@ -570,7 +580,8 @@ static void pn533_usb_disconnect(struct usb_interface *interface)
if (!phy)
return;
- pn533_unregister_device(phy->priv);
+ pn53x_unregister_nfc(phy->priv);
+ pn53x_common_clean(phy->priv);
usb_set_intfdata(interface, NULL);
diff --git a/drivers/nfc/s3fwrn5/i2c.c b/drivers/nfc/s3fwrn5/i2c.c
index e4f7fa00862d..b4eb926d220a 100644
--- a/drivers/nfc/s3fwrn5/i2c.c
+++ b/drivers/nfc/s3fwrn5/i2c.c
@@ -279,7 +279,6 @@ MODULE_DEVICE_TABLE(of, of_s3fwrn5_i2c_match);
static struct i2c_driver s3fwrn5_i2c_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = S3FWRN5_I2C_DRIVER_NAME,
.of_match_table = of_match_ptr(of_s3fwrn5_i2c_match),
},
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 108f60b46804..fa7ba09dca77 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -102,10 +102,13 @@ static void nvme_set_queue_dying(struct nvme_ns *ns)
*/
if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
return;
- revalidate_disk(ns->disk);
blk_set_queue_dying(ns->queue);
/* Forcibly unquiesce queues to avoid blocking dispatch */
blk_mq_unquiesce_queue(ns->queue);
+ /*
+ * Revalidate after unblocking dispatchers that may be holding bd_mutex
+ */
+ revalidate_disk(ns->disk);
}
static void nvme_queue_scan(struct nvme_ctrl *ctrl)
@@ -113,10 +116,26 @@ static void nvme_queue_scan(struct nvme_ctrl *ctrl)
/*
* Only schedule new scan work when admin and IO queues are both alive
*/
- if (ctrl->state == NVME_CTRL_LIVE)
+ if (ctrl->state == NVME_CTRL_LIVE && ctrl->tagset)
queue_work(nvme_wq, &ctrl->scan_work);
}
+/*
+ * Use this function to proceed with scheduling reset_work for a controller
+ * that had previously been set to the resetting state. This is intended for
+ * code paths that can't be interrupted by other reset attempts. A hot removal
+ * may prevent this from succeeding.
+ */
+int nvme_try_sched_reset(struct nvme_ctrl *ctrl)
+{
+ if (ctrl->state != NVME_CTRL_RESETTING)
+ return -EBUSY;
+ if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
+ return -EBUSY;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvme_try_sched_reset);
+
int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
{
if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
@@ -134,8 +153,7 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
ret = nvme_reset_ctrl(ctrl);
if (!ret) {
flush_work(&ctrl->reset_work);
- if (ctrl->state != NVME_CTRL_LIVE &&
- ctrl->state != NVME_CTRL_ADMIN_ONLY)
+ if (ctrl->state != NVME_CTRL_LIVE)
ret = -ENETRESET;
}
@@ -312,15 +330,6 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
old_state = ctrl->state;
switch (new_state) {
- case NVME_CTRL_ADMIN_ONLY:
- switch (old_state) {
- case NVME_CTRL_CONNECTING:
- changed = true;
- /* FALLTHRU */
- default:
- break;
- }
- break;
case NVME_CTRL_LIVE:
switch (old_state) {
case NVME_CTRL_NEW:
@@ -336,7 +345,6 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
switch (old_state) {
case NVME_CTRL_NEW:
case NVME_CTRL_LIVE:
- case NVME_CTRL_ADMIN_ONLY:
changed = true;
/* FALLTHRU */
default:
@@ -356,7 +364,6 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
case NVME_CTRL_DELETING:
switch (old_state) {
case NVME_CTRL_LIVE:
- case NVME_CTRL_ADMIN_ONLY:
case NVME_CTRL_RESETTING:
case NVME_CTRL_CONNECTING:
changed = true;
@@ -378,8 +385,10 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
break;
}
- if (changed)
+ if (changed) {
ctrl->state = new_state;
+ wake_up_all(&ctrl->state_wq);
+ }
spin_unlock_irqrestore(&ctrl->lock, flags);
if (changed && ctrl->state == NVME_CTRL_LIVE)
@@ -388,6 +397,39 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
}
EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
+/*
+ * Returns true for sink states that can't ever transition back to live.
+ */
+static bool nvme_state_terminal(struct nvme_ctrl *ctrl)
+{
+ switch (ctrl->state) {
+ case NVME_CTRL_NEW:
+ case NVME_CTRL_LIVE:
+ case NVME_CTRL_RESETTING:
+ case NVME_CTRL_CONNECTING:
+ return false;
+ case NVME_CTRL_DELETING:
+ case NVME_CTRL_DEAD:
+ return true;
+ default:
+ WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state);
+ return true;
+ }
+}
+
+/*
+ * Waits for the controller state to be resetting, or returns false if it is
+ * not possible to ever transition to that state.
+ */
+bool nvme_wait_reset(struct nvme_ctrl *ctrl)
+{
+ wait_event(ctrl->state_wq,
+ nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) ||
+ nvme_state_terminal(ctrl));
+ return ctrl->state == NVME_CTRL_RESETTING;
+}
+EXPORT_SYMBOL_GPL(nvme_wait_reset);
+
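Together, nvme_wait_reset() and nvme_try_sched_reset() give transports a safe disable/reschedule pairing. An illustrative sketch of a suspend/resume path using them (demo_* names are hypothetical; the transport-specific teardown is elided):

	static int demo_suspend(struct nvme_ctrl *ctrl)
	{
		/* Enter RESETTING, or give up if the controller is in a
		 * terminal state; then quiesce and disable the device.
		 */
		if (!nvme_wait_reset(ctrl))
			return -EBUSY;
		/* ... transport-specific teardown ... */
		return 0;
	}

	static int demo_resume(struct nvme_ctrl *ctrl)
	{
		/* Schedule reset_work; only succeeds from RESETTING. */
		return nvme_try_sched_reset(ctrl);
	}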
static void nvme_free_ns_head(struct kref *ref)
{
struct nvme_ns_head *head =
@@ -847,7 +889,7 @@ out:
static int nvme_submit_user_cmd(struct request_queue *q,
struct nvme_command *cmd, void __user *ubuffer,
unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
- u32 meta_seed, u32 *result, unsigned timeout)
+ u32 meta_seed, u64 *result, unsigned timeout)
{
bool write = nvme_is_write(cmd);
struct nvme_ns *ns = q->queuedata;
@@ -888,7 +930,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
else
ret = nvme_req(req)->status;
if (result)
- *result = le32_to_cpu(nvme_req(req)->result.u32);
+ *result = le64_to_cpu(nvme_req(req)->result.u64);
if (meta && !ret && !write) {
if (copy_to_user(meta_buffer, meta, meta_len))
ret = -EFAULT;
@@ -1303,8 +1345,6 @@ static void nvme_update_formats(struct nvme_ctrl *ctrl)
if (ns->disk && nvme_revalidate_disk(ns->disk))
nvme_set_queue_dying(ns);
up_read(&ctrl->namespaces_rwsem);
-
- nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
}
static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
@@ -1320,6 +1360,7 @@ static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
nvme_unfreeze(ctrl);
nvme_mpath_unfreeze(ctrl->subsys);
mutex_unlock(&ctrl->subsys->lock);
+ nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
mutex_unlock(&ctrl->scan_lock);
}
if (effects & NVME_CMD_EFFECTS_CCC)
@@ -1335,6 +1376,54 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
struct nvme_command c;
unsigned timeout = 0;
u32 effects;
+ u64 result;
+ int status;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
+ return -EFAULT;
+ if (cmd.flags)
+ return -EINVAL;
+
+ memset(&c, 0, sizeof(c));
+ c.common.opcode = cmd.opcode;
+ c.common.flags = cmd.flags;
+ c.common.nsid = cpu_to_le32(cmd.nsid);
+ c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
+ c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
+ c.common.cdw10 = cpu_to_le32(cmd.cdw10);
+ c.common.cdw11 = cpu_to_le32(cmd.cdw11);
+ c.common.cdw12 = cpu_to_le32(cmd.cdw12);
+ c.common.cdw13 = cpu_to_le32(cmd.cdw13);
+ c.common.cdw14 = cpu_to_le32(cmd.cdw14);
+ c.common.cdw15 = cpu_to_le32(cmd.cdw15);
+
+ if (cmd.timeout_ms)
+ timeout = msecs_to_jiffies(cmd.timeout_ms);
+
+ effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
+ status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
+ (void __user *)(uintptr_t)cmd.addr, cmd.data_len,
+ (void __user *)(uintptr_t)cmd.metadata,
+ cmd.metadata_len, 0, &result, timeout);
+ nvme_passthru_end(ctrl, effects);
+
+ if (status >= 0) {
+ if (put_user(result, &ucmd->result))
+ return -EFAULT;
+ }
+
+ return status;
+}
+
+static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
+ struct nvme_passthru_cmd64 __user *ucmd)
+{
+ struct nvme_passthru_cmd64 cmd;
+ struct nvme_command c;
+ unsigned timeout = 0;
+ u32 effects;
int status;
if (!capable(CAP_SYS_ADMIN))
@@ -1405,6 +1494,41 @@ static void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx)
srcu_read_unlock(&head->srcu, idx);
}
+static bool is_ctrl_ioctl(unsigned int cmd)
+{
+ if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD)
+ return true;
+ if (is_sed_ioctl(cmd))
+ return true;
+ return false;
+}
+
+static int nvme_handle_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
+ void __user *argp,
+ struct nvme_ns_head *head,
+ int srcu_idx)
+{
+ struct nvme_ctrl *ctrl = ns->ctrl;
+ int ret;
+
+ nvme_get_ctrl(ns->ctrl);
+ nvme_put_ns_from_disk(head, srcu_idx);
+
+ switch (cmd) {
+ case NVME_IOCTL_ADMIN_CMD:
+ ret = nvme_user_cmd(ctrl, NULL, argp);
+ break;
+ case NVME_IOCTL_ADMIN64_CMD:
+ ret = nvme_user_cmd64(ctrl, NULL, argp);
+ break;
+ default:
+ ret = sed_ioctl(ctrl->opal_dev, cmd, argp);
+ break;
+ }
+ nvme_put_ctrl(ctrl);
+ return ret;
+}
+
static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
@@ -1422,20 +1546,8 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
* separately and drop the ns SRCU reference early. This avoids a
* deadlock when deleting namespaces using the passthrough interface.
*/
- if (cmd == NVME_IOCTL_ADMIN_CMD || is_sed_ioctl(cmd)) {
- struct nvme_ctrl *ctrl = ns->ctrl;
-
- nvme_get_ctrl(ns->ctrl);
- nvme_put_ns_from_disk(head, srcu_idx);
-
- if (cmd == NVME_IOCTL_ADMIN_CMD)
- ret = nvme_user_cmd(ctrl, NULL, argp);
- else
- ret = sed_ioctl(ctrl->opal_dev, cmd, argp);
-
- nvme_put_ctrl(ctrl);
- return ret;
- }
+ if (is_ctrl_ioctl(cmd))
+ return nvme_handle_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);
switch (cmd) {
case NVME_IOCTL_ID:
@@ -1448,6 +1560,9 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
case NVME_IOCTL_SUBMIT_IO:
ret = nvme_submit_io(ns, argp);
break;
+ case NVME_IOCTL_IO64_CMD:
+ ret = nvme_user_cmd64(ns->ctrl, ns, argp);
+ break;
default:
if (ns->ndev)
ret = nvme_nvm_ioctl(ns, cmd, arg);
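From userspace, the new 64-bit passthrough command is issued like the existing NVMe ioctls, with the full 64-bit completion result returned in the struct. A hedged sketch, assuming the nvme_passthru_cmd64 UAPI added by this series (fd and buf setup elided):

	#include <linux/nvme_ioctl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/ioctl.h>

	static int demo_identify_ctrl(int fd, void *buf)
	{
		struct nvme_passthru_cmd64 cmd = {
			.opcode   = 0x06,	/* Identify */
			.addr     = (uintptr_t)buf,
			.data_len = 4096,
			.cdw10    = 1,		/* CNS 01h: identify controller */
		};

		if (ioctl(fd, NVME_IOCTL_ADMIN64_CMD, &cmd) < 0)
			return -1;
		printf("result: %llu\n", (unsigned long long)cmd.result);
		return 0;
	}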
@@ -2289,6 +2404,16 @@ static const struct nvme_core_quirk_entry core_quirks[] = {
.vid = 0x14a4,
.fr = "22301111",
.quirks = NVME_QUIRK_SIMPLE_SUSPEND,
+ },
+ {
+ /*
+ * This Kingston E8FK11.T firmware version produces no interrupt
+ * after resume from suspend-to-idle:
+ * https://bugzilla.kernel.org/show_bug.cgi?id=204887
+ */
+ .vid = 0x2646,
+ .fr = "E8FK11.T",
+ .quirks = NVME_QUIRK_SIMPLE_SUSPEND,
}
};
@@ -2540,8 +2665,9 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
list_add_tail(&subsys->entry, &nvme_subsystems);
}
- if (sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj,
- dev_name(ctrl->device))) {
+ ret = sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj,
+ dev_name(ctrl->device));
+ if (ret) {
dev_err(ctrl->device,
"failed to create sysfs link from subsystem.\n");
goto out_put_subsystem;
@@ -2786,7 +2912,6 @@ static int nvme_dev_open(struct inode *inode, struct file *file)
switch (ctrl->state) {
case NVME_CTRL_LIVE:
- case NVME_CTRL_ADMIN_ONLY:
break;
default:
return -EWOULDBLOCK;
@@ -2838,6 +2963,8 @@ static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
switch (cmd) {
case NVME_IOCTL_ADMIN_CMD:
return nvme_user_cmd(ctrl, NULL, argp);
+ case NVME_IOCTL_ADMIN64_CMD:
+ return nvme_user_cmd64(ctrl, NULL, argp);
case NVME_IOCTL_IO_CMD:
return nvme_dev_user_cmd(ctrl, argp);
case NVME_IOCTL_RESET:
@@ -3045,6 +3172,8 @@ static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
nvme_show_int_function(cntlid);
nvme_show_int_function(numa_node);
+nvme_show_int_function(queue_count);
+nvme_show_int_function(sqsize);
static ssize_t nvme_sysfs_delete(struct device *dev,
struct device_attribute *attr, const char *buf,
@@ -3076,7 +3205,6 @@ static ssize_t nvme_sysfs_show_state(struct device *dev,
static const char *const state_name[] = {
[NVME_CTRL_NEW] = "new",
[NVME_CTRL_LIVE] = "live",
- [NVME_CTRL_ADMIN_ONLY] = "only-admin",
[NVME_CTRL_RESETTING] = "resetting",
[NVME_CTRL_CONNECTING] = "connecting",
[NVME_CTRL_DELETING] = "deleting",
@@ -3125,6 +3253,8 @@ static struct attribute *nvme_dev_attrs[] = {
&dev_attr_address.attr,
&dev_attr_state.attr,
&dev_attr_numa_node.attr,
+ &dev_attr_queue_count.attr,
+ &dev_attr_sqsize.attr,
NULL
};
@@ -3585,11 +3715,10 @@ static void nvme_scan_work(struct work_struct *work)
struct nvme_id_ctrl *id;
unsigned nn;
- if (ctrl->state != NVME_CTRL_LIVE)
+ /* No tagset on a live ctrl means IO queues could not be created */
+ if (ctrl->state != NVME_CTRL_LIVE || !ctrl->tagset)
return;
- WARN_ON_ONCE(!ctrl->tagset);
-
if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) {
dev_info(ctrl->device, "rescanning namespaces.\n");
nvme_clear_changed_ns_log(ctrl);
@@ -3750,13 +3879,13 @@ static void nvme_fw_act_work(struct work_struct *work)
if (time_after(jiffies, fw_act_timeout)) {
dev_warn(ctrl->device,
"Fw activation timeout, reset controller\n");
- nvme_reset_ctrl(ctrl);
- break;
+ nvme_try_sched_reset(ctrl);
+ return;
}
msleep(100);
}
- if (ctrl->state != NVME_CTRL_LIVE)
+ if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE))
return;
nvme_start_queues(ctrl);
@@ -3776,7 +3905,13 @@ static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
nvme_queue_scan(ctrl);
break;
case NVME_AER_NOTICE_FW_ACT_STARTING:
- queue_work(nvme_wq, &ctrl->fw_act_work);
+ /*
+ * We are (ab)using the RESETTING state to prevent subsequent
+ * recovery actions from interfering with the controller's
+ * firmware activation.
+ */
+ if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
+ queue_work(nvme_wq, &ctrl->fw_act_work);
break;
#ifdef CONFIG_NVME_MULTIPATH
case NVME_AER_NOTICE_ANA:
@@ -3899,6 +4034,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work);
INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work);
+ init_waitqueue_head(&ctrl->state_wq);
INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index 93f08d77c896..a0ec40ab62ee 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -182,8 +182,7 @@ bool nvmf_ip_options_match(struct nvme_ctrl *ctrl,
static inline bool nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
bool queue_live)
{
- if (likely(ctrl->state == NVME_CTRL_LIVE ||
- ctrl->state == NVME_CTRL_ADMIN_ONLY))
+ if (likely(ctrl->state == NVME_CTRL_LIVE))
return true;
return __nvmf_check_ready(ctrl, rq, queue_live);
}
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 30de7efef003..fc99a40c1ec4 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -522,14 +522,13 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
return 0;
}
-static int nvme_read_ana_log(struct nvme_ctrl *ctrl, bool groups_only)
+static int nvme_read_ana_log(struct nvme_ctrl *ctrl)
{
u32 nr_change_groups = 0;
int error;
mutex_lock(&ctrl->ana_lock);
- error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA,
- groups_only ? NVME_ANA_LOG_RGO : 0,
+ error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA, 0,
ctrl->ana_log_buf, ctrl->ana_log_size, 0);
if (error) {
dev_warn(ctrl->device, "Failed to get ANA log: %d\n", error);
@@ -565,7 +564,7 @@ static void nvme_ana_work(struct work_struct *work)
{
struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, ana_work);
- nvme_read_ana_log(ctrl, false);
+ nvme_read_ana_log(ctrl);
}
static void nvme_anatt_timeout(struct timer_list *t)
@@ -715,7 +714,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
goto out;
}
- error = nvme_read_ana_log(ctrl, true);
+ error = nvme_read_ana_log(ctrl);
if (error)
goto out_free_ana_log_buf;
return 0;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index b5013c101b35..22e8401352c2 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -15,6 +15,7 @@
#include <linux/sed-opal.h>
#include <linux/fault-inject.h>
#include <linux/rcupdate.h>
+#include <linux/wait.h>
#include <trace/events/block.h>
@@ -161,7 +162,6 @@ static inline u16 nvme_req_qid(struct request *req)
enum nvme_ctrl_state {
NVME_CTRL_NEW,
NVME_CTRL_LIVE,
- NVME_CTRL_ADMIN_ONLY, /* Only admin queue live */
NVME_CTRL_RESETTING,
NVME_CTRL_CONNECTING,
NVME_CTRL_DELETING,
@@ -199,6 +199,7 @@ struct nvme_ctrl {
struct cdev cdev;
struct work_struct reset_work;
struct work_struct delete_work;
+ wait_queue_head_t state_wq;
struct nvme_subsystem *subsys;
struct list_head subsys_entry;
@@ -221,6 +222,7 @@ struct nvme_ctrl {
u16 oacs;
u16 nssa;
u16 nr_streams;
+ u16 sqsize;
u32 max_namespaces;
atomic_t abort_limit;
u8 vwc;
@@ -269,7 +271,6 @@ struct nvme_ctrl {
u16 hmmaxd;
/* Fabrics only */
- u16 sqsize;
u32 ioccsz;
u32 iorcsz;
u16 icdoff;
@@ -449,6 +450,7 @@ void nvme_complete_rq(struct request *req);
bool nvme_cancel_request(struct request *req, void *data, bool reserved);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
enum nvme_ctrl_state new_state);
+bool nvme_wait_reset(struct nvme_ctrl *ctrl);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
@@ -499,6 +501,7 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
+int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp,
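For orientation, a minimal sketch of what the two helpers declared above plausibly do, inferred from their call sites in this series; the real bodies live in drivers/nvme/host/core.c and may differ in detail (nvme_reset_wq and nvme_state_terminal() are assumed to exist here):

	/* Sketch only -- not the verbatim implementation. */
	int nvme_try_sched_reset(struct nvme_ctrl *ctrl)
	{
		/* Only queue reset_work if RESETTING is already held. */
		if (ctrl->state != NVME_CTRL_RESETTING)
			return -EBUSY;
		if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
			return -EBUSY;
		return 0;
	}

	bool nvme_wait_reset(struct nvme_ctrl *ctrl)
	{
		/*
		 * Sleep on the new state_wq until RESETTING can be claimed
		 * or the controller reaches a terminal state.
		 */
		wait_event(ctrl->state_wq,
			   nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) ||
			   nvme_state_terminal(ctrl));
		return ctrl->state == NVME_CTRL_RESETTING;
	}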
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index c0808f9eb8ab..869f462e6b6e 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -773,7 +773,8 @@ static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev,
struct bio_vec *bv)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- unsigned int first_prp_len = dev->ctrl.page_size - bv->bv_offset;
+ unsigned int offset = bv->bv_offset & (dev->ctrl.page_size - 1);
+ unsigned int first_prp_len = dev->ctrl.page_size - offset;
iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
if (dma_mapping_error(dev->dev, iod->first_dma))
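A worked example for the masking fix above: with a 4 KiB controller page, a bio_vec may begin more than one page into its buffer, and the old subtraction underflowed (values illustrative):

	unsigned int page_size = 4096;
	unsigned int bv_offset = 5120;			/* > one page */
	/* old: first_prp_len = 4096 - 5120 underflows to a huge value */
	unsigned int offset = bv_offset & (page_size - 1);	/* 1024 */
	unsigned int first_prp_len = page_size - offset;	/* 3072 */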
@@ -2263,10 +2264,7 @@ static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode)
return true;
}
-/*
- * return error value only when tagset allocation failed
- */
-static int nvme_dev_add(struct nvme_dev *dev)
+static void nvme_dev_add(struct nvme_dev *dev)
{
int ret;
@@ -2296,7 +2294,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
if (ret) {
dev_warn(dev->ctrl.device,
"IO queues tagset allocation failed %d\n", ret);
- return ret;
+ return;
}
dev->ctrl.tagset = &dev->tagset;
} else {
@@ -2307,7 +2305,6 @@ static int nvme_dev_add(struct nvme_dev *dev)
}
nvme_dbbuf_set(dev);
- return 0;
}
static int nvme_pci_enable(struct nvme_dev *dev)
@@ -2467,6 +2464,14 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
mutex_unlock(&dev->shutdown_lock);
}
+static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown)
+{
+ if (!nvme_wait_reset(&dev->ctrl))
+ return -EBUSY;
+ nvme_dev_disable(dev, shutdown);
+ return 0;
+}
+
static int nvme_setup_prp_pools(struct nvme_dev *dev)
{
dev->prp_page_pool = dma_pool_create("prp list page", dev->dev,
@@ -2490,14 +2495,20 @@ static void nvme_release_prp_pools(struct nvme_dev *dev)
dma_pool_destroy(dev->prp_small_pool);
}
+static void nvme_free_tagset(struct nvme_dev *dev)
+{
+ if (dev->tagset.tags)
+ blk_mq_free_tag_set(&dev->tagset);
+ dev->ctrl.tagset = NULL;
+}
+
static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
{
struct nvme_dev *dev = to_nvme_dev(ctrl);
nvme_dbbuf_dma_free(dev);
put_device(dev->dev);
- if (dev->tagset.tags)
- blk_mq_free_tag_set(&dev->tagset);
+ nvme_free_tagset(dev);
if (dev->ctrl.admin_q)
blk_put_queue(dev->ctrl.admin_q);
kfree(dev->queues);
@@ -2508,6 +2519,11 @@ static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
static void nvme_remove_dead_ctrl(struct nvme_dev *dev)
{
+ /*
+ * Set state to deleting now to avoid blocking nvme_wait_reset(), which
+ * may be holding this pci_dev's device lock.
+ */
+ nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
nvme_get_ctrl(&dev->ctrl);
nvme_dev_disable(dev, false);
nvme_kill_queues(&dev->ctrl);
@@ -2521,7 +2537,6 @@ static void nvme_reset_work(struct work_struct *work)
container_of(work, struct nvme_dev, ctrl.reset_work);
bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
int result;
- enum nvme_ctrl_state new_state = NVME_CTRL_LIVE;
if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING)) {
result = -ENODEV;
@@ -2615,13 +2630,11 @@ static void nvme_reset_work(struct work_struct *work)
dev_warn(dev->ctrl.device, "IO queues not created\n");
nvme_kill_queues(&dev->ctrl);
nvme_remove_namespaces(&dev->ctrl);
- new_state = NVME_CTRL_ADMIN_ONLY;
+ nvme_free_tagset(dev);
} else {
nvme_start_queues(&dev->ctrl);
nvme_wait_freeze(&dev->ctrl);
- /* hit this only when allocate tagset fails */
- if (nvme_dev_add(dev))
- new_state = NVME_CTRL_ADMIN_ONLY;
+ nvme_dev_add(dev);
nvme_unfreeze(&dev->ctrl);
}
@@ -2629,9 +2642,9 @@ static void nvme_reset_work(struct work_struct *work)
* If only admin queue live, keep it to do further investigation or
* recovery.
*/
- if (!nvme_change_ctrl_state(&dev->ctrl, new_state)) {
+ if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) {
dev_warn(dev->ctrl.device,
- "failed to mark controller state %d\n", new_state);
+ "failed to mark controller live state\n");
result = -ENODEV;
goto out;
}
@@ -2672,7 +2685,7 @@ static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
{
- *val = readq(to_nvme_dev(ctrl)->bar + off);
+ *val = lo_hi_readq(to_nvme_dev(ctrl)->bar + off);
return 0;
}
@@ -2836,19 +2849,28 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
static void nvme_reset_prepare(struct pci_dev *pdev)
{
struct nvme_dev *dev = pci_get_drvdata(pdev);
- nvme_dev_disable(dev, false);
+
+ /*
+ * We don't need to check the return value from waiting for the reset
+ * state as pci_dev device lock is held, making it impossible to race
+ * with ->remove().
+ */
+ nvme_disable_prepare_reset(dev, false);
+ nvme_sync_queues(&dev->ctrl);
}
static void nvme_reset_done(struct pci_dev *pdev)
{
struct nvme_dev *dev = pci_get_drvdata(pdev);
- nvme_reset_ctrl_sync(&dev->ctrl);
+
+ if (!nvme_try_sched_reset(&dev->ctrl))
+ flush_work(&dev->ctrl.reset_work);
}
static void nvme_shutdown(struct pci_dev *pdev)
{
struct nvme_dev *dev = pci_get_drvdata(pdev);
- nvme_dev_disable(dev, true);
+ nvme_disable_prepare_reset(dev, true);
}
/*
@@ -2901,7 +2923,7 @@ static int nvme_resume(struct device *dev)
if (ndev->last_ps == U32_MAX ||
nvme_set_power_state(ctrl, ndev->last_ps) != 0)
- nvme_reset_ctrl(ctrl);
+ return nvme_try_sched_reset(&ndev->ctrl);
return 0;
}
@@ -2929,43 +2951,42 @@ static int nvme_suspend(struct device *dev)
*/
if (pm_suspend_via_firmware() || !ctrl->npss ||
!pcie_aspm_enabled(pdev) ||
- (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND)) {
- nvme_dev_disable(ndev, true);
- return 0;
- }
+ (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND))
+ return nvme_disable_prepare_reset(ndev, true);
nvme_start_freeze(ctrl);
nvme_wait_freeze(ctrl);
nvme_sync_queues(ctrl);
- if (ctrl->state != NVME_CTRL_LIVE &&
- ctrl->state != NVME_CTRL_ADMIN_ONLY)
+ if (ctrl->state != NVME_CTRL_LIVE)
goto unfreeze;
ret = nvme_get_power_state(ctrl, &ndev->last_ps);
if (ret < 0)
goto unfreeze;
+ /*
+ * A saved state prevents pci pm from generically controlling the
+ * device's power. If we're using protocol specific settings, we don't
+ * want pci interfering.
+ */
+ pci_save_state(pdev);
+
ret = nvme_set_power_state(ctrl, ctrl->npss);
if (ret < 0)
goto unfreeze;
if (ret) {
+ /* discard the saved state */
+ pci_load_saved_state(pdev, NULL);
+
/*
* Clearing npss forces a controller reset on resume. The
* correct value will be rediscovered then.
*/
- nvme_dev_disable(ndev, true);
+ ret = nvme_disable_prepare_reset(ndev, true);
ctrl->npss = 0;
- ret = 0;
- goto unfreeze;
}
- /*
- * A saved state prevents pci pm from generically controlling the
- * device's power. If we're using protocol specific settings, we don't
- * want pci interfering.
- */
- pci_save_state(pdev);
unfreeze:
nvme_unfreeze(ctrl);
return ret;
@@ -2974,9 +2995,7 @@ unfreeze:
static int nvme_simple_suspend(struct device *dev)
{
struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev));
-
- nvme_dev_disable(ndev, true);
- return 0;
+ return nvme_disable_prepare_reset(ndev, true);
}
static int nvme_simple_resume(struct device *dev)
@@ -2984,8 +3003,7 @@ static int nvme_simple_resume(struct device *dev)
struct pci_dev *pdev = to_pci_dev(dev);
struct nvme_dev *ndev = pci_get_drvdata(pdev);
- nvme_reset_ctrl(&ndev->ctrl);
- return 0;
+ return nvme_try_sched_reset(&ndev->ctrl);
}
static const struct dev_pm_ops nvme_dev_pm_ops = {
@@ -3090,6 +3108,9 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_LIGHTNVM, },
{ PCI_DEVICE(0x10ec, 0x5762), /* ADATA SX6000LNP */
.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ { PCI_DEVICE(0x1cc1, 0x8201), /* ADATA SX8200PNP 512GB */
+ .driver_data = NVME_QUIRK_NO_DEEPEST_PS |
+ NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index dfa07bb9dfeb..f19a28b4e997 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -427,7 +427,7 @@ static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
static int nvme_rdma_get_max_fr_pages(struct ib_device *ibdev)
{
return min_t(u32, NVME_RDMA_MAX_SEGMENTS,
- ibdev->attrs.max_fast_reg_page_list_len);
+ ibdev->attrs.max_fast_reg_page_list_len - 1);
}
static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
@@ -437,7 +437,7 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
const int cq_factor = send_wr_factor + 1; /* + RECV */
int comp_vector, idx = nvme_rdma_queue_idx(queue);
enum ib_poll_context poll_ctx;
- int ret;
+ int ret, pages_per_mr;
queue->device = nvme_rdma_find_get_device(queue->cm_id);
if (!queue->device) {
@@ -479,10 +479,16 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
goto out_destroy_qp;
}
+ /*
+ * Currently we don't use SG_GAPS MR's so if the first entry is
+ * misaligned we'll end up using two entries for a single data page,
+ * so one additional entry is required.
+ */
+ pages_per_mr = nvme_rdma_get_max_fr_pages(ibdev) + 1;
ret = ib_mr_pool_init(queue->qp, &queue->qp->rdma_mrs,
queue->queue_size,
IB_MR_TYPE_MEM_REG,
- nvme_rdma_get_max_fr_pages(ibdev), 0);
+ pages_per_mr, 0);
if (ret) {
dev_err(queue->ctrl->ctrl.device,
"failed to initialize MR pool sized %d for QID %d\n",
@@ -614,7 +620,8 @@ static int nvme_rdma_start_queue(struct nvme_rdma_ctrl *ctrl, int idx)
if (!ret) {
set_bit(NVME_RDMA_Q_LIVE, &queue->flags);
} else {
- __nvme_rdma_stop_queue(queue);
+ if (test_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
+ __nvme_rdma_stop_queue(queue);
dev_info(ctrl->ctrl.device,
"failed to connect queue: %d ret=%d\n", idx, ret);
}
@@ -820,8 +827,8 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
if (error)
goto out_stop_queue;
- ctrl->ctrl.max_hw_sectors =
- (ctrl->max_fr_pages - 1) << (ilog2(SZ_4K) - 9);
+ ctrl->ctrl.max_segments = ctrl->max_fr_pages;
+ ctrl->ctrl.max_hw_sectors = ctrl->max_fr_pages << (ilog2(SZ_4K) - 9);
blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
@@ -1694,6 +1701,14 @@ nvme_rdma_timeout(struct request *rq, bool reserved)
dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n",
rq->tag, nvme_rdma_queue_idx(queue));
+ /*
+ * Restart the timer if a controller reset is already scheduled. Any
+ * timed out commands would be handled before entering the connecting
+ * state.
+ */
+ if (ctrl->ctrl.state == NVME_CTRL_RESETTING)
+ return BLK_EH_RESET_TIMER;
+
if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
/*
* Teardown immediately if controller times out while starting
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 4ffd5957637a..7544be84ab35 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1042,7 +1042,7 @@ static void nvme_tcp_io_work(struct work_struct *w)
{
struct nvme_tcp_queue *queue =
container_of(w, struct nvme_tcp_queue, io_work);
- unsigned long start = jiffies + msecs_to_jiffies(1);
+ unsigned long deadline = jiffies + msecs_to_jiffies(1);
do {
bool pending = false;
@@ -1067,7 +1067,7 @@ static void nvme_tcp_io_work(struct work_struct *w)
if (!pending)
return;
- } while (time_after(jiffies, start)); /* quota is exhausted */
+ } while (!time_after(jiffies, deadline)); /* quota is exhausted */
queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}
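The rename and inverted test fix a quota loop that in practice ran only once: with start set to jiffies + 1 ms at entry, time_after(jiffies, start) stays false until the millisecond elapses, so the old do/while exited after the first pass. The corrected shape, in isolation:

	unsigned long deadline = jiffies + msecs_to_jiffies(1);
	do {
		/* bounded send/recv work; set `pending` if more remains */
		if (!pending)
			return;
	} while (!time_after(jiffies, deadline));	/* loop until quota ends */
	/* quota exhausted: requeue io_work to resume later */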
@@ -1386,7 +1386,9 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
queue->sock->sk->sk_state_change = nvme_tcp_state_change;
queue->sock->sk->sk_write_space = nvme_tcp_write_space;
+#ifdef CONFIG_NET_RX_BUSY_POLL
queue->sock->sk->sk_ll_usec = 1;
+#endif
write_unlock_bh(&queue->sock->sk->sk_callback_lock);
return 0;
@@ -2044,6 +2046,14 @@ nvme_tcp_timeout(struct request *rq, bool reserved)
struct nvme_tcp_ctrl *ctrl = req->queue->ctrl;
struct nvme_tcp_cmd_pdu *pdu = req->pdu;
+ /*
+ * Restart the timer if a controller reset is already scheduled. Any
+ * timed out commands would be handled before entering the connecting
+ * state.
+ */
+ if (ctrl->ctrl.state == NVME_CTRL_RESETTING)
+ return BLK_EH_RESET_TIMER;
+
dev_warn(ctrl->ctrl.device,
"queue %d: timeout request %#x type %d\n",
nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);
@@ -2126,6 +2136,7 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
ret = nvme_tcp_map_data(queue, rq);
if (unlikely(ret)) {
+ nvme_cleanup_cmd(rq);
dev_err(queue->ctrl->ctrl.device,
"Failed to map data (%d)\n", ret);
return ret;
@@ -2208,7 +2219,7 @@ static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx)
struct nvme_tcp_queue *queue = hctx->driver_data;
struct sock *sk = queue->sock->sk;
- if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue))
+ if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
sk_busy_loop(sk, true);
nvme_tcp_try_recv(queue);
return queue->nr_cqe;
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index de0bff70ebb6..32008d85172b 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -11,10 +11,10 @@
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
{
const struct queue_limits *ql = &bdev_get_queue(bdev)->limits;
- /* Number of physical blocks per logical block. */
- const u32 ppl = ql->physical_block_size / ql->logical_block_size;
- /* Physical blocks per logical block, 0's based. */
- const __le16 ppl0b = to0based(ppl);
+ /* Number of logical blocks per physical block. */
+ const u32 lpp = ql->physical_block_size / ql->logical_block_size;
+ /* Logical blocks per physical block, 0's based. */
+ const __le16 lpp0b = to0based(lpp);
/*
* For NVMe 1.2 and later, bit 1 indicates that the fields NAWUN,
@@ -25,9 +25,9 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
* field from the identify controller data structure should be used.
*/
id->nsfeat |= 1 << 1;
- id->nawun = ppl0b;
- id->nawupf = ppl0b;
- id->nacwu = ppl0b;
+ id->nawun = lpp0b;
+ id->nawupf = lpp0b;
+ id->nacwu = lpp0b;
/*
* Bit 4 indicates that the fields NPWG, NPWA, NPDG, NPDA, and
@@ -36,7 +36,7 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
*/
id->nsfeat |= 1 << 4;
/* NPWG = Namespace Preferred Write Granularity. 0's based */
- id->npwg = ppl0b;
+ id->npwg = lpp0b;
/* NPWA = Namespace Preferred Write Alignment. 0's based */
id->npwa = id->npwg;
/* NPDG = Namespace Preferred Deallocate Granularity. 0's based */
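The rename is purely descriptive; the computed ratio is unchanged. For a device with 4 KiB physical and 512-byte logical blocks:

	u32 lpp = 4096 / 512;		/* 8 logical blocks per physical block */
	__le16 lpp0b = to0based(lpp);	/* the 0's based fields report 7 */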
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 748a39fca771..11f5aea97d1b 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -157,8 +157,10 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
iod->sg_table.sgl = iod->first_sgl;
if (sg_alloc_table_chained(&iod->sg_table,
blk_rq_nr_phys_segments(req),
- iod->sg_table.sgl, SG_CHUNK_SIZE))
+ iod->sg_table.sgl, SG_CHUNK_SIZE)) {
+ nvme_cleanup_cmd(req);
return BLK_STS_RESOURCE;
+ }
iod->req.sg = iod->sg_table.sgl;
iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index bf4f03474e89..d535080b781f 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -348,8 +348,7 @@ static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
return 0;
err:
- if (cmd->req.sg_cnt)
- sgl_free(cmd->req.sg);
+ sgl_free(cmd->req.sg);
return NVME_SC_INTERNAL;
}
@@ -554,8 +553,7 @@ static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd)
if (queue->nvme_sq.sqhd_disabled) {
kfree(cmd->iov);
- if (cmd->req.sg_cnt)
- sgl_free(cmd->req.sg);
+ sgl_free(cmd->req.sg);
}
return 1;
@@ -586,8 +584,7 @@ static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
return -EAGAIN;
kfree(cmd->iov);
- if (cmd->req.sg_cnt)
- sgl_free(cmd->req.sg);
+ sgl_free(cmd->req.sg);
cmd->queue->snd_cmd = NULL;
nvmet_tcp_put_cmd(cmd);
return 1;
@@ -1310,8 +1307,7 @@ static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd)
nvmet_req_uninit(&cmd->req);
nvmet_tcp_unmap_pdu_iovec(cmd);
kfree(cmd->iov);
- if (cmd->req.sg_cnt)
- sgl_free(cmd->req.sg);
+ sgl_free(cmd->req.sg);
}
static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index bd6129db6417..c6b87ce2b0cc 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -361,8 +361,8 @@ struct phy_device *of_phy_get_and_connect(struct net_device *dev,
struct phy_device *phy;
int ret;
- iface = of_get_phy_mode(np);
- if ((int)iface < 0)
+ ret = of_get_phy_mode(np, &iface);
+ if (ret)
return NULL;
if (of_phy_is_fixed_link(np)) {
ret = of_phy_register_fixed_link(np);
diff --git a/drivers/of/of_net.c b/drivers/of/of_net.c
index b02734aff8c1..6e411821583e 100644
--- a/drivers/of/of_net.c
+++ b/drivers/of/of_net.c
@@ -15,16 +15,20 @@
/**
* of_get_phy_mode - Get phy mode for given device_node
* @np: Pointer to the given device_node
+ * @interface: Pointer to the result
*
* The function gets phy interface string from property 'phy-mode' or
- * 'phy-connection-type', and return its index in phy_modes table, or errno in
- * error case.
+ * 'phy-connection-type'. The index in phy_modes table is set in
+ * interface and 0 returned. In case of error interface is set to
+ * PHY_INTERFACE_MODE_NA and an errno is returned, e.g. -ENODEV.
*/
-int of_get_phy_mode(struct device_node *np)
+int of_get_phy_mode(struct device_node *np, phy_interface_t *interface)
{
const char *pm;
int err, i;
+ *interface = PHY_INTERFACE_MODE_NA;
+
err = of_property_read_string(np, "phy-mode", &pm);
if (err < 0)
err = of_property_read_string(np, "phy-connection-type", &pm);
@@ -32,8 +36,10 @@ int of_get_phy_mode(struct device_node *np)
return err;
for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++)
- if (!strcasecmp(pm, phy_modes(i)))
- return i;
+ if (!strcasecmp(pm, phy_modes(i))) {
+ *interface = i;
+ return 0;
+ }
return -ENODEV;
}
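A hypothetical caller, showing the migration that the of_mdio.c hunk above performs:

	phy_interface_t iface;
	int err;

	/* old style: iface = of_get_phy_mode(np); if ((int)iface < 0) ... */
	err = of_get_phy_mode(np, &iface);
	if (err)
		return err;	/* iface is PHY_INTERFACE_MODE_NA on error */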
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 7989703b883c..6bd610ee2cd7 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -324,8 +324,10 @@ int of_reserved_mem_device_init_by_idx(struct device *dev,
if (!target)
return -ENODEV;
- if (!of_device_is_available(target))
+ if (!of_device_is_available(target)) {
+ of_node_put(target);
return 0;
+ }
rmem = __find_rmem(target);
of_node_put(target);
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 480a21e2ed39..92e895d86458 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -1207,6 +1207,7 @@ static int __init unittest_data_add(void)
of_fdt_unflatten_tree(unittest_data, NULL, &unittest_data_node);
if (!unittest_data_node) {
pr_warn("%s: No tree to attach; not running tests\n", __func__);
+ kfree(unittest_data);
return -ENODATA;
}
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index 3b7ffd0234e9..9ff0538ee83a 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -1626,12 +1626,6 @@ struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
goto free_regulators;
}
- ret = regulator_enable(reg);
- if (ret < 0) {
- regulator_put(reg);
- goto free_regulators;
- }
-
opp_table->regulators[i] = reg;
}
@@ -1645,10 +1639,8 @@ struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
return opp_table;
free_regulators:
- while (i--) {
- regulator_disable(opp_table->regulators[i]);
- regulator_put(opp_table->regulators[i]);
- }
+ while (i != 0)
+ regulator_put(opp_table->regulators[--i]);
kfree(opp_table->regulators);
opp_table->regulators = NULL;
@@ -1674,10 +1666,8 @@ void dev_pm_opp_put_regulators(struct opp_table *opp_table)
/* Make sure there are no concurrent readers while updating opp_table */
WARN_ON(!list_empty(&opp_table->opp_list));
- for (i = opp_table->regulator_count - 1; i >= 0; i--) {
- regulator_disable(opp_table->regulators[i]);
+ for (i = opp_table->regulator_count - 1; i >= 0; i--)
regulator_put(opp_table->regulators[i]);
- }
_free_set_opp_data(opp_table);
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
index 1813f5ad5fa2..1cbb58240b80 100644
--- a/drivers/opp/of.c
+++ b/drivers/opp/of.c
@@ -77,8 +77,6 @@ static struct dev_pm_opp *_find_opp_of_np(struct opp_table *opp_table,
{
struct dev_pm_opp *opp;
- lockdep_assert_held(&opp_table_lock);
-
mutex_lock(&opp_table->lock);
list_for_each_entry(opp, &opp_table->opp_list, node) {
@@ -665,6 +663,13 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
return 0;
}
+ /*
+ * Re-initialize list_kref every time we add static OPPs to the OPP
+ * table as the reference count may be 0 after the last time static OPPs
+ * were removed.
+ */
+ kref_init(&opp_table->list_kref);
+
/* We have opp-table node now, iterate over it and add OPPs */
for_each_available_child_of_node(opp_table->np, np) {
opp = _opp_add_static_v2(opp_table, dev, np);
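Schematically, the lifecycle the new comment describes; the release callback name below is illustrative, not the driver's actual symbol:

	kref_init(&opp_table->list_kref);	/* count back to 1 on (re)add */
	/* ... static OPPs in use; readers take and drop references ... */
	kref_put(&opp_table->list_kref, list_release);	/* may reach 0 */
	/* a later _of_add_opp_table_v2() must re-init, hence this hunk */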
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index ed50502cc65a..de8e4e347249 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -678,14 +678,6 @@ static int sba_dma_supported( struct device *dev, u64 mask)
return(0);
}
- /* Documentation/DMA-API-HOWTO.txt tells drivers to try 64-bit
- * first, then fall back to 32-bit if that fails.
- * We are just "encouraging" 32-bit DMA masks here since we can
- * never allow IOMMU bypass unless we add special support for ZX1.
- */
- if (mask > ~0U)
- return 0;
-
ioc = GET_IOC(dev);
if (!ioc)
return 0;
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index e7982af9a5d8..a97e2571a527 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -959,19 +959,6 @@ void pci_refresh_power_state(struct pci_dev *dev)
}
/**
- * pci_power_up - Put the given device into D0 forcibly
- * @dev: PCI device to power up
- */
-void pci_power_up(struct pci_dev *dev)
-{
- if (platform_pci_power_manageable(dev))
- platform_pci_set_power_state(dev, PCI_D0);
-
- pci_raw_set_power_state(dev, PCI_D0);
- pci_update_current_state(dev, PCI_D0);
-}
-
-/**
* pci_platform_power_transition - Use platform to change device power state
* @dev: PCI device to handle.
* @state: State to put the device into.
@@ -1154,6 +1141,17 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
EXPORT_SYMBOL(pci_set_power_state);
/**
+ * pci_power_up - Put the given device into D0 forcibly
+ * @dev: PCI device to power up
+ */
+void pci_power_up(struct pci_dev *dev)
+{
+ __pci_start_power_transition(dev, PCI_D0);
+ pci_raw_set_power_state(dev, PCI_D0);
+ pci_update_current_state(dev, PCI_D0);
+}
+
+/**
* pci_choose_state - Choose the power state of a PCI device
* @dev: PCI device to be suspended
* @state: target sleep state for the whole system. This is the value
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
index 648ddb7f038a..c6800d220920 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
@@ -87,7 +87,7 @@ FUNC_GROUP_DECL(MACLINK3, L23);
#define K25 7
SIG_EXPR_LIST_DECL_SESG(K25, MACLINK4, MACLINK4, SIG_DESC_SET(SCU410, 7));
-SIG_EXPR_LIST_DECL_SESG(K25, SDA14, SDA14, SIG_DESC_SET(SCU4B0, 7));
+SIG_EXPR_LIST_DECL_SESG(K25, SDA14, I2C14, SIG_DESC_SET(SCU4B0, 7));
PIN_DECL_2(K25, GPIOA7, MACLINK4, SDA14);
FUNC_GROUP_DECL(MACLINK4, K25);
@@ -1262,13 +1262,13 @@ GROUP_DECL(SPI1, AB11, AC11, AA11);
#define AD11 206
SIG_EXPR_LIST_DECL_SEMG(AD11, SPI1DQ2, QSPI1, SPI1, SIG_DESC_SET(SCU438, 14));
SIG_EXPR_LIST_DECL_SEMG(AD11, TXD13, UART13G1, UART13,
- SIG_DESC_SET(SCU438, 14));
+ SIG_DESC_CLEAR(SCU4B8, 2), SIG_DESC_SET(SCU4D8, 14));
PIN_DECL_2(AD11, GPIOZ6, SPI1DQ2, TXD13);
#define AF10 207
SIG_EXPR_LIST_DECL_SEMG(AF10, SPI1DQ3, QSPI1, SPI1, SIG_DESC_SET(SCU438, 15));
SIG_EXPR_LIST_DECL_SEMG(AF10, RXD13, UART13G1, UART13,
- SIG_DESC_SET(SCU438, 15));
+ SIG_DESC_CLEAR(SCU4B8, 3), SIG_DESC_SET(SCU4D8, 15));
PIN_DECL_2(AF10, GPIOZ7, SPI1DQ3, RXD13);
GROUP_DECL(QSPI1, AB11, AC11, AA11, AD11, AF10);
@@ -1440,91 +1440,85 @@ FUNC_GROUP_DECL(RGMII2, D4, C2, C1, D3, E4, F5, D2, E3, D1, F4, E2, E1);
FUNC_GROUP_DECL(RMII2, D4, C2, C1, D3, D2, D1, F4, E2, E1);
#define AB4 232
-SIG_EXPR_LIST_DECL_SESG(AB4, SD3CLK, SD3, SIG_DESC_SET(SCU400, 24));
-PIN_DECL_1(AB4, GPIO18D0, SD3CLK);
+SIG_EXPR_LIST_DECL_SEMG(AB4, EMMCCLK, EMMCG1, EMMC, SIG_DESC_SET(SCU400, 24));
+PIN_DECL_1(AB4, GPIO18D0, EMMCCLK);
#define AA4 233
-SIG_EXPR_LIST_DECL_SESG(AA4, SD3CMD, SD3, SIG_DESC_SET(SCU400, 25));
-PIN_DECL_1(AA4, GPIO18D1, SD3CMD);
+SIG_EXPR_LIST_DECL_SEMG(AA4, EMMCCMD, EMMCG1, EMMC, SIG_DESC_SET(SCU400, 25));
+PIN_DECL_1(AA4, GPIO18D1, EMMCCMD);
#define AC4 234
-SIG_EXPR_LIST_DECL_SESG(AC4, SD3DAT0, SD3, SIG_DESC_SET(SCU400, 26));
-PIN_DECL_1(AC4, GPIO18D2, SD3DAT0);
+SIG_EXPR_LIST_DECL_SEMG(AC4, EMMCDAT0, EMMCG1, EMMC, SIG_DESC_SET(SCU400, 26));
+PIN_DECL_1(AC4, GPIO18D2, EMMCDAT0);
#define AA5 235
-SIG_EXPR_LIST_DECL_SESG(AA5, SD3DAT1, SD3, SIG_DESC_SET(SCU400, 27));
-PIN_DECL_1(AA5, GPIO18D3, SD3DAT1);
+SIG_EXPR_LIST_DECL_SEMG(AA5, EMMCDAT1, EMMCG4, EMMC, SIG_DESC_SET(SCU400, 27));
+PIN_DECL_1(AA5, GPIO18D3, EMMCDAT1);
#define Y5 236
-SIG_EXPR_LIST_DECL_SESG(Y5, SD3DAT2, SD3, SIG_DESC_SET(SCU400, 28));
-PIN_DECL_1(Y5, GPIO18D4, SD3DAT2);
+SIG_EXPR_LIST_DECL_SEMG(Y5, EMMCDAT2, EMMCG4, EMMC, SIG_DESC_SET(SCU400, 28));
+PIN_DECL_1(Y5, GPIO18D4, EMMCDAT2);
#define AB5 237
-SIG_EXPR_LIST_DECL_SESG(AB5, SD3DAT3, SD3, SIG_DESC_SET(SCU400, 29));
-PIN_DECL_1(AB5, GPIO18D5, SD3DAT3);
+SIG_EXPR_LIST_DECL_SEMG(AB5, EMMCDAT3, EMMCG4, EMMC, SIG_DESC_SET(SCU400, 29));
+PIN_DECL_1(AB5, GPIO18D5, EMMCDAT3);
#define AB6 238
-SIG_EXPR_LIST_DECL_SESG(AB6, SD3CD, SD3, SIG_DESC_SET(SCU400, 30));
-PIN_DECL_1(AB6, GPIO18D6, SD3CD);
+SIG_EXPR_LIST_DECL_SEMG(AB6, EMMCCD, EMMCG1, EMMC, SIG_DESC_SET(SCU400, 30));
+PIN_DECL_1(AB6, GPIO18D6, EMMCCD);
#define AC5 239
-SIG_EXPR_LIST_DECL_SESG(AC5, SD3WP, SD3, SIG_DESC_SET(SCU400, 31));
-PIN_DECL_1(AC5, GPIO18D7, SD3WP);
+SIG_EXPR_LIST_DECL_SEMG(AC5, EMMCWP, EMMCG1, EMMC, SIG_DESC_SET(SCU400, 31));
+PIN_DECL_1(AC5, GPIO18D7, EMMCWP);
-FUNC_GROUP_DECL(SD3, AB4, AA4, AC4, AA5, Y5, AB5, AB6, AC5);
+GROUP_DECL(EMMCG1, AB4, AA4, AC4, AB6, AC5);
+GROUP_DECL(EMMCG4, AB4, AA4, AC4, AA5, Y5, AB5, AB6, AC5);
#define Y1 240
SIG_EXPR_LIST_DECL_SEMG(Y1, FWSPIDCS, FWSPID, FWSPID, SIG_DESC_SET(SCU500, 3));
SIG_EXPR_LIST_DECL_SESG(Y1, VBCS, VB, SIG_DESC_SET(SCU500, 5));
-SIG_EXPR_LIST_DECL_SESG(Y1, SD3DAT4, SD3DAT4, SIG_DESC_SET(SCU404, 0));
-PIN_DECL_3(Y1, GPIO18E0, FWSPIDCS, VBCS, SD3DAT4);
-FUNC_GROUP_DECL(SD3DAT4, Y1);
+SIG_EXPR_LIST_DECL_SEMG(Y1, EMMCDAT4, EMMCG8, EMMC, SIG_DESC_SET(SCU404, 0));
+PIN_DECL_3(Y1, GPIO18E0, FWSPIDCS, VBCS, EMMCDAT4);
#define Y2 241
SIG_EXPR_LIST_DECL_SEMG(Y2, FWSPIDCK, FWSPID, FWSPID, SIG_DESC_SET(SCU500, 3));
SIG_EXPR_LIST_DECL_SESG(Y2, VBCK, VB, SIG_DESC_SET(SCU500, 5));
-SIG_EXPR_LIST_DECL_SESG(Y2, SD3DAT5, SD3DAT5, SIG_DESC_SET(SCU404, 1));
-PIN_DECL_3(Y2, GPIO18E1, FWSPIDCK, VBCK, SD3DAT5);
-FUNC_GROUP_DECL(SD3DAT5, Y2);
+SIG_EXPR_LIST_DECL_SEMG(Y2, EMMCDAT5, EMMCG8, EMMC, SIG_DESC_SET(SCU404, 1));
+PIN_DECL_3(Y2, GPIO18E1, FWSPIDCK, VBCK, EMMCDAT5);
#define Y3 242
SIG_EXPR_LIST_DECL_SEMG(Y3, FWSPIDMOSI, FWSPID, FWSPID,
SIG_DESC_SET(SCU500, 3));
SIG_EXPR_LIST_DECL_SESG(Y3, VBMOSI, VB, SIG_DESC_SET(SCU500, 5));
-SIG_EXPR_LIST_DECL_SESG(Y3, SD3DAT6, SD3DAT6, SIG_DESC_SET(SCU404, 2));
-PIN_DECL_3(Y3, GPIO18E2, FWSPIDMOSI, VBMOSI, SD3DAT6);
-FUNC_GROUP_DECL(SD3DAT6, Y3);
+SIG_EXPR_LIST_DECL_SEMG(Y3, EMMCDAT6, EMMCG8, EMMC, SIG_DESC_SET(SCU404, 2));
+PIN_DECL_3(Y3, GPIO18E2, FWSPIDMOSI, VBMOSI, EMMCDAT6);
#define Y4 243
SIG_EXPR_LIST_DECL_SEMG(Y4, FWSPIDMISO, FWSPID, FWSPID,
SIG_DESC_SET(SCU500, 3));
SIG_EXPR_LIST_DECL_SESG(Y4, VBMISO, VB, SIG_DESC_SET(SCU500, 5));
-SIG_EXPR_LIST_DECL_SESG(Y4, SD3DAT7, SD3DAT7, SIG_DESC_SET(SCU404, 3));
-PIN_DECL_3(Y4, GPIO18E3, FWSPIDMISO, VBMISO, SD3DAT7);
-FUNC_GROUP_DECL(SD3DAT7, Y4);
+SIG_EXPR_LIST_DECL_SEMG(Y4, EMMCDAT7, EMMCG8, EMMC, SIG_DESC_SET(SCU404, 3));
+PIN_DECL_3(Y4, GPIO18E3, FWSPIDMISO, VBMISO, EMMCDAT7);
GROUP_DECL(FWSPID, Y1, Y2, Y3, Y4);
GROUP_DECL(FWQSPID, Y1, Y2, Y3, Y4, AE12, AF12);
+GROUP_DECL(EMMCG8, AB4, AA4, AC4, AA5, Y5, AB5, AB6, AC5, Y1, Y2, Y3, Y4);
FUNC_DECL_2(FWSPID, FWSPID, FWQSPID);
FUNC_GROUP_DECL(VB, Y1, Y2, Y3, Y4);
-
+FUNC_DECL_3(EMMC, EMMCG1, EMMCG4, EMMCG8);
/*
* FIXME: Confirm bits and priorities are the right way around for the
* following 4 pins
*/
#define AF25 244
-SIG_EXPR_LIST_DECL_SEMG(AF25, I3C3SCL, I3C3, I3C3, SIG_DESC_SET(SCU438, 20),
- SIG_DESC_SET(SCU4D8, 20));
-SIG_EXPR_LIST_DECL_SESG(AF25, FSI1CLK, FSI1, SIG_DESC_CLEAR(SCU438, 20),
- SIG_DESC_SET(SCU4D8, 20));
+SIG_EXPR_LIST_DECL_SEMG(AF25, I3C3SCL, I3C3, I3C3, SIG_DESC_SET(SCU438, 20));
+SIG_EXPR_LIST_DECL_SESG(AF25, FSI1CLK, FSI1, SIG_DESC_SET(SCU4D8, 20));
PIN_DECL_(AF25, SIG_EXPR_LIST_PTR(AF25, I3C3SCL),
SIG_EXPR_LIST_PTR(AF25, FSI1CLK));
#define AE26 245
-SIG_EXPR_LIST_DECL_SEMG(AE26, I3C3SDA, I3C3, I3C3, SIG_DESC_SET(SCU438, 21),
- SIG_DESC_SET(SCU4D8, 21));
-SIG_EXPR_LIST_DECL_SESG(AE26, FSI1DATA, FSI1, SIG_DESC_CLEAR(SCU438, 21),
- SIG_DESC_SET(SCU4D8, 21));
+SIG_EXPR_LIST_DECL_SEMG(AE26, I3C3SDA, I3C3, I3C3, SIG_DESC_SET(SCU438, 21));
+SIG_EXPR_LIST_DECL_SESG(AE26, FSI1DATA, FSI1, SIG_DESC_SET(SCU4D8, 21));
PIN_DECL_(AE26, SIG_EXPR_LIST_PTR(AE26, I3C3SDA),
SIG_EXPR_LIST_PTR(AE26, FSI1DATA));
@@ -1533,18 +1527,14 @@ FUNC_DECL_2(I3C3, HVI3C3, I3C3);
FUNC_GROUP_DECL(FSI1, AF25, AE26);
#define AE25 246
-SIG_EXPR_LIST_DECL_SEMG(AE25, I3C4SCL, I3C4, I3C4, SIG_DESC_SET(SCU438, 22),
- SIG_DESC_SET(SCU4D8, 22));
-SIG_EXPR_LIST_DECL_SESG(AE25, FSI2CLK, FSI2, SIG_DESC_CLEAR(SCU438, 22),
- SIG_DESC_SET(SCU4D8, 22));
+SIG_EXPR_LIST_DECL_SEMG(AE25, I3C4SCL, I3C4, I3C4, SIG_DESC_SET(SCU438, 22));
+SIG_EXPR_LIST_DECL_SESG(AE25, FSI2CLK, FSI2, SIG_DESC_SET(SCU4D8, 22));
PIN_DECL_(AE25, SIG_EXPR_LIST_PTR(AE25, I3C4SCL),
SIG_EXPR_LIST_PTR(AE25, FSI2CLK));
#define AF24 247
-SIG_EXPR_LIST_DECL_SEMG(AF24, I3C4SDA, I3C4, I3C4, SIG_DESC_SET(SCU438, 23),
- SIG_DESC_SET(SCU4D8, 23));
-SIG_EXPR_LIST_DECL_SESG(AF24, FSI2DATA, FSI2, SIG_DESC_CLEAR(SCU438, 23),
- SIG_DESC_SET(SCU4D8, 23));
+SIG_EXPR_LIST_DECL_SEMG(AF24, I3C4SDA, I3C4, I3C4, SIG_DESC_SET(SCU438, 23));
+SIG_EXPR_LIST_DECL_SESG(AF24, FSI2DATA, FSI2, SIG_DESC_SET(SCU4D8, 23));
PIN_DECL_(AF24, SIG_EXPR_LIST_PTR(AF24, I3C4SDA),
SIG_EXPR_LIST_PTR(AF24, FSI2DATA));
@@ -1574,6 +1564,8 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
ASPEED_PINCTRL_PIN(A3),
ASPEED_PINCTRL_PIN(AA11),
ASPEED_PINCTRL_PIN(AA12),
+ ASPEED_PINCTRL_PIN(AA16),
+ ASPEED_PINCTRL_PIN(AA17),
ASPEED_PINCTRL_PIN(AA23),
ASPEED_PINCTRL_PIN(AA24),
ASPEED_PINCTRL_PIN(AA25),
@@ -1585,6 +1577,8 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
ASPEED_PINCTRL_PIN(AB11),
ASPEED_PINCTRL_PIN(AB12),
ASPEED_PINCTRL_PIN(AB15),
+ ASPEED_PINCTRL_PIN(AB16),
+ ASPEED_PINCTRL_PIN(AB17),
ASPEED_PINCTRL_PIN(AB18),
ASPEED_PINCTRL_PIN(AB19),
ASPEED_PINCTRL_PIN(AB22),
@@ -1602,6 +1596,7 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
ASPEED_PINCTRL_PIN(AC11),
ASPEED_PINCTRL_PIN(AC12),
ASPEED_PINCTRL_PIN(AC15),
+ ASPEED_PINCTRL_PIN(AC16),
ASPEED_PINCTRL_PIN(AC17),
ASPEED_PINCTRL_PIN(AC18),
ASPEED_PINCTRL_PIN(AC19),
@@ -1619,6 +1614,7 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
ASPEED_PINCTRL_PIN(AD12),
ASPEED_PINCTRL_PIN(AD14),
ASPEED_PINCTRL_PIN(AD15),
+ ASPEED_PINCTRL_PIN(AD16),
ASPEED_PINCTRL_PIN(AD19),
ASPEED_PINCTRL_PIN(AD20),
ASPEED_PINCTRL_PIN(AD22),
@@ -1634,8 +1630,11 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
ASPEED_PINCTRL_PIN(AE12),
ASPEED_PINCTRL_PIN(AE14),
ASPEED_PINCTRL_PIN(AE15),
+ ASPEED_PINCTRL_PIN(AE16),
ASPEED_PINCTRL_PIN(AE18),
ASPEED_PINCTRL_PIN(AE19),
+ ASPEED_PINCTRL_PIN(AE25),
+ ASPEED_PINCTRL_PIN(AE26),
ASPEED_PINCTRL_PIN(AE7),
ASPEED_PINCTRL_PIN(AE8),
ASPEED_PINCTRL_PIN(AF10),
@@ -1643,6 +1642,8 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
ASPEED_PINCTRL_PIN(AF12),
ASPEED_PINCTRL_PIN(AF14),
ASPEED_PINCTRL_PIN(AF15),
+ ASPEED_PINCTRL_PIN(AF24),
+ ASPEED_PINCTRL_PIN(AF25),
ASPEED_PINCTRL_PIN(AF7),
ASPEED_PINCTRL_PIN(AF8),
ASPEED_PINCTRL_PIN(AF9),
@@ -1792,17 +1793,6 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
ASPEED_PINCTRL_PIN(Y3),
ASPEED_PINCTRL_PIN(Y4),
ASPEED_PINCTRL_PIN(Y5),
- ASPEED_PINCTRL_PIN(AB16),
- ASPEED_PINCTRL_PIN(AA17),
- ASPEED_PINCTRL_PIN(AB17),
- ASPEED_PINCTRL_PIN(AE16),
- ASPEED_PINCTRL_PIN(AC16),
- ASPEED_PINCTRL_PIN(AA16),
- ASPEED_PINCTRL_PIN(AD16),
- ASPEED_PINCTRL_PIN(AF25),
- ASPEED_PINCTRL_PIN(AE26),
- ASPEED_PINCTRL_PIN(AE25),
- ASPEED_PINCTRL_PIN(AF24),
};
static const struct aspeed_pin_group aspeed_g6_groups[] = {
@@ -1976,11 +1966,9 @@ static const struct aspeed_pin_group aspeed_g6_groups[] = {
ASPEED_PINCTRL_GROUP(SALT9G1),
ASPEED_PINCTRL_GROUP(SD1),
ASPEED_PINCTRL_GROUP(SD2),
- ASPEED_PINCTRL_GROUP(SD3),
- ASPEED_PINCTRL_GROUP(SD3DAT4),
- ASPEED_PINCTRL_GROUP(SD3DAT5),
- ASPEED_PINCTRL_GROUP(SD3DAT6),
- ASPEED_PINCTRL_GROUP(SD3DAT7),
+ ASPEED_PINCTRL_GROUP(EMMCG1),
+ ASPEED_PINCTRL_GROUP(EMMCG4),
+ ASPEED_PINCTRL_GROUP(EMMCG8),
ASPEED_PINCTRL_GROUP(SGPM1),
ASPEED_PINCTRL_GROUP(SGPS1),
ASPEED_PINCTRL_GROUP(SIOONCTRL),
@@ -2059,6 +2047,7 @@ static const struct aspeed_pin_function aspeed_g6_functions[] = {
ASPEED_PINCTRL_FUNC(ADC8),
ASPEED_PINCTRL_FUNC(ADC9),
ASPEED_PINCTRL_FUNC(BMCINT),
+ ASPEED_PINCTRL_FUNC(EMMC),
ASPEED_PINCTRL_FUNC(ESPI),
ASPEED_PINCTRL_FUNC(ESPIALT),
ASPEED_PINCTRL_FUNC(FSI1),
@@ -2191,11 +2180,6 @@ static const struct aspeed_pin_function aspeed_g6_functions[] = {
ASPEED_PINCTRL_FUNC(SALT9),
ASPEED_PINCTRL_FUNC(SD1),
ASPEED_PINCTRL_FUNC(SD2),
- ASPEED_PINCTRL_FUNC(SD3),
- ASPEED_PINCTRL_FUNC(SD3DAT4),
- ASPEED_PINCTRL_FUNC(SD3DAT5),
- ASPEED_PINCTRL_FUNC(SD3DAT6),
- ASPEED_PINCTRL_FUNC(SD3DAT7),
ASPEED_PINCTRL_FUNC(SGPM1),
ASPEED_PINCTRL_FUNC(SGPS1),
ASPEED_PINCTRL_FUNC(SIOONCTRL),
diff --git a/drivers/pinctrl/aspeed/pinmux-aspeed.h b/drivers/pinctrl/aspeed/pinmux-aspeed.h
index a2c0d52e4f7b..140c5ce9fbc1 100644
--- a/drivers/pinctrl/aspeed/pinmux-aspeed.h
+++ b/drivers/pinctrl/aspeed/pinmux-aspeed.h
@@ -508,7 +508,7 @@ struct aspeed_pin_desc {
* @idx: The bit index in the register
*/
#define SIG_DESC_SET(reg, idx) SIG_DESC_IP_BIT(ASPEED_IP_SCU, reg, idx, 1)
-#define SIG_DESC_CLEAR(reg, idx) SIG_DESC_IP_BIT(ASPEED_IP_SCU, reg, idx, 0)
+#define SIG_DESC_CLEAR(reg, idx) { ASPEED_IP_SCU, reg, BIT_MASK(idx), 0, 0 }
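/*
 * Aside (assumption, not part of the patch): given the descriptor layout
 * { ip, reg, mask, enable, disable } used elsewhere in this header,
 * SIG_DESC_CLEAR(reg, idx) now writes 0 in both the enable and disable
 * cases, whereas the old SIG_DESC_IP_BIT(..., idx, 0) form gave the
 * disable case the complemented value, re-asserting the bit on unmux.
 */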
#define SIG_DESC_LIST_SYM(sig, group) sig_descs_ ## sig ## _ ## group
#define SIG_DESC_LIST_DECL(sig, group, ...) \
@@ -738,6 +738,7 @@ struct aspeed_pin_desc {
static const char *FUNC_SYM(func)[] = { __VA_ARGS__ }
#define FUNC_DECL_2(func, one, two) FUNC_DECL_(func, #one, #two)
+#define FUNC_DECL_3(func, one, two, three) FUNC_DECL_(func, #one, #two, #three)
#define FUNC_GROUP_DECL(func, ...) \
GROUP_DECL(func, __VA_ARGS__); \
diff --git a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
index 6f7d3a2f2e97..42f7ab383ad9 100644
--- a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2014-2017 Broadcom
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*
@@ -853,7 +845,7 @@ static int iproc_gpio_probe(struct platform_device *pdev)
/* optional GPIO interrupt support */
irq = platform_get_irq(pdev, 0);
- if (irq) {
+ if (irq > 0) {
struct irq_chip *irqc;
struct gpio_irq_chip *girq;
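The stricter test matters because platform_get_irq() reports failure as a negative errno and 0 is not a valid interrupt here, so the old `if (irq)` wrongly took the irqchip path on error:

	irq = platform_get_irq(pdev, 0);	/* negative errno on failure */
	if (irq > 0)
		/* ... optional GPIO interrupt setup ... */;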
diff --git a/drivers/pinctrl/bcm/pinctrl-ns2-mux.c b/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
index 2bf6af7df7d9..9fabc451550e 100644
--- a/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
+++ b/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
@@ -640,8 +640,8 @@ static int ns2_pinmux_enable(struct pinctrl_dev *pctrl_dev,
const struct ns2_pin_function *func;
const struct ns2_pin_group *grp;
- if (grp_select > pinctrl->num_groups ||
- func_select > pinctrl->num_functions)
+ if (grp_select >= pinctrl->num_groups ||
+ func_select >= pinctrl->num_functions)
return -EINVAL;
func = &pinctrl->functions[func_select];
diff --git a/drivers/pinctrl/berlin/pinctrl-as370.c b/drivers/pinctrl/berlin/pinctrl-as370.c
index 44f8ccdbeeff..9dfdc275ee33 100644
--- a/drivers/pinctrl/berlin/pinctrl-as370.c
+++ b/drivers/pinctrl/berlin/pinctrl-as370.c
@@ -43,7 +43,7 @@ static const struct berlin_desc_group as370_soc_pinctrl_groups[] = {
BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO5 */
BERLIN_PINCTRL_FUNCTION(0x1, "i2s1"), /* DO3 */
BERLIN_PINCTRL_FUNCTION(0x2, "pwm"), /* PWM5 */
- BERLIN_PINCTRL_FUNCTION(0x3, "spififib"), /* SPDIFIB */
+ BERLIN_PINCTRL_FUNCTION(0x3, "spdifib"), /* SPDIFIB */
BERLIN_PINCTRL_FUNCTION(0x4, "spdifo"), /* SPDIFO */
BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG5 */
BERLIN_PINCTRL_GROUP("I2S1_MCLK", 0x0, 0x3, 0x12,
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index aae51c507f59..c6251eac8946 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1513,7 +1513,6 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
},
},
{
@@ -1521,7 +1520,6 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
},
},
{
@@ -1529,7 +1527,6 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
DMI_MATCH(DMI_PRODUCT_NAME, "Cyan"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
},
},
{
@@ -1537,7 +1534,6 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
DMI_MATCH(DMI_PRODUCT_NAME, "Celes"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
},
},
{}
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 1f13bcd0e4e1..bc013599a9a3 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -96,6 +96,7 @@ struct intel_pinctrl_context {
* @pctldesc: Pin controller description
* @pctldev: Pointer to the pin controller device
* @chip: GPIO chip in this pin controller
+ * @irqchip: IRQ chip in this pin controller
* @soc: SoC/PCH specific pin configuration data
* @communities: All communities in this pin controller
* @ncommunities: Number of communities in this pin controller
@@ -108,6 +109,7 @@ struct intel_pinctrl {
struct pinctrl_desc pctldesc;
struct pinctrl_dev *pctldev;
struct gpio_chip chip;
+ struct irq_chip irqchip;
const struct intel_pinctrl_soc_data *soc;
struct intel_community *communities;
size_t ncommunities;
@@ -1139,16 +1141,6 @@ static irqreturn_t intel_gpio_irq(int irq, void *data)
return ret;
}
-static struct irq_chip intel_gpio_irqchip = {
- .name = "intel-gpio",
- .irq_ack = intel_gpio_irq_ack,
- .irq_mask = intel_gpio_irq_mask,
- .irq_unmask = intel_gpio_irq_unmask,
- .irq_set_type = intel_gpio_irq_type,
- .irq_set_wake = intel_gpio_irq_wake,
- .flags = IRQCHIP_MASK_ON_SUSPEND,
-};
-
static int intel_gpio_add_pin_ranges(struct intel_pinctrl *pctrl,
const struct intel_community *community)
{
@@ -1198,12 +1190,22 @@ static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq)
pctrl->chip = intel_gpio_chip;
+ /* Setup GPIO chip */
pctrl->chip.ngpio = intel_gpio_ngpio(pctrl);
pctrl->chip.label = dev_name(pctrl->dev);
pctrl->chip.parent = pctrl->dev;
pctrl->chip.base = -1;
pctrl->irq = irq;
+ /* Setup IRQ chip */
+ pctrl->irqchip.name = dev_name(pctrl->dev);
+ pctrl->irqchip.irq_ack = intel_gpio_irq_ack;
+ pctrl->irqchip.irq_mask = intel_gpio_irq_mask;
+ pctrl->irqchip.irq_unmask = intel_gpio_irq_unmask;
+ pctrl->irqchip.irq_set_type = intel_gpio_irq_type;
+ pctrl->irqchip.irq_set_wake = intel_gpio_irq_wake;
+ pctrl->irqchip.flags = IRQCHIP_MASK_ON_SUSPEND;
+
ret = devm_gpiochip_add_data(pctrl->dev, &pctrl->chip, pctrl);
if (ret) {
dev_err(pctrl->dev, "failed to register gpiochip\n");
@@ -1233,15 +1235,14 @@ static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq)
return ret;
}
- ret = gpiochip_irqchip_add(&pctrl->chip, &intel_gpio_irqchip, 0,
+ ret = gpiochip_irqchip_add(&pctrl->chip, &pctrl->irqchip, 0,
handle_bad_irq, IRQ_TYPE_NONE);
if (ret) {
dev_err(pctrl->dev, "failed to add irqchip\n");
return ret;
}
- gpiochip_set_chained_irqchip(&pctrl->chip, &intel_gpio_irqchip, irq,
- NULL);
+ gpiochip_set_chained_irqchip(&pctrl->chip, &pctrl->irqchip, irq, NULL);
return 0;
}
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
index 6462d3ca7ceb..f2f5fcd9a237 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
@@ -183,10 +183,10 @@ static struct armada_37xx_pin_group armada_37xx_nb_groups[] = {
PIN_GRP_EXTRA("uart2", 9, 2, BIT(1) | BIT(13) | BIT(14) | BIT(19),
BIT(1) | BIT(13) | BIT(14), BIT(1) | BIT(19),
18, 2, "gpio", "uart"),
- PIN_GRP_GPIO("led0_od", 11, 1, BIT(20), "led"),
- PIN_GRP_GPIO("led1_od", 12, 1, BIT(21), "led"),
- PIN_GRP_GPIO("led2_od", 13, 1, BIT(22), "led"),
- PIN_GRP_GPIO("led3_od", 14, 1, BIT(23), "led"),
+ PIN_GRP_GPIO_2("led0_od", 11, 1, BIT(20), BIT(20), 0, "led"),
+ PIN_GRP_GPIO_2("led1_od", 12, 1, BIT(21), BIT(21), 0, "led"),
+ PIN_GRP_GPIO_2("led2_od", 13, 1, BIT(22), BIT(22), 0, "led"),
+ PIN_GRP_GPIO_2("led3_od", 14, 1, BIT(23), BIT(23), 0, "led"),
};
@@ -221,11 +221,11 @@ static const struct armada_37xx_pin_data armada_37xx_pin_sb = {
};
static inline void armada_37xx_update_reg(unsigned int *reg,
- unsigned int offset)
+ unsigned int *offset)
{
/* We never have more than 2 registers */
- if (offset >= GPIO_PER_REG) {
- offset -= GPIO_PER_REG;
+ if (*offset >= GPIO_PER_REG) {
+ *offset -= GPIO_PER_REG;
*reg += sizeof(u32);
}
}
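Passing the offset by pointer lets callers build the bit mask from the adjusted value; assuming GPIO_PER_REG is 32 (illustrative values):

	unsigned int reg = OUTPUT_EN, offset = 35;	/* GPIO 35 */
	armada_37xx_update_reg(&reg, &offset);		/* reg += 4, offset = 3 */
	mask = BIT(offset);	/* BIT(3); the by-value version kept BIT(35) */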
@@ -376,7 +376,7 @@ static inline void armada_37xx_irq_update_reg(unsigned int *reg,
{
int offset = irqd_to_hwirq(d);
- armada_37xx_update_reg(reg, offset);
+ armada_37xx_update_reg(reg, &offset);
}
static int armada_37xx_gpio_direction_input(struct gpio_chip *chip,
@@ -386,7 +386,7 @@ static int armada_37xx_gpio_direction_input(struct gpio_chip *chip,
unsigned int reg = OUTPUT_EN;
unsigned int mask;
- armada_37xx_update_reg(&reg, offset);
+ armada_37xx_update_reg(&reg, &offset);
mask = BIT(offset);
return regmap_update_bits(info->regmap, reg, mask, 0);
@@ -399,7 +399,7 @@ static int armada_37xx_gpio_get_direction(struct gpio_chip *chip,
unsigned int reg = OUTPUT_EN;
unsigned int val, mask;
- armada_37xx_update_reg(&reg, offset);
+ armada_37xx_update_reg(&reg, &offset);
mask = BIT(offset);
regmap_read(info->regmap, reg, &val);
@@ -413,7 +413,7 @@ static int armada_37xx_gpio_direction_output(struct gpio_chip *chip,
unsigned int reg = OUTPUT_EN;
unsigned int mask, val, ret;
- armada_37xx_update_reg(&reg, offset);
+ armada_37xx_update_reg(&reg, &offset);
mask = BIT(offset);
ret = regmap_update_bits(info->regmap, reg, mask, mask);
@@ -434,7 +434,7 @@ static int armada_37xx_gpio_get(struct gpio_chip *chip, unsigned int offset)
unsigned int reg = INPUT_VAL;
unsigned int val, mask;
- armada_37xx_update_reg(&reg, offset);
+ armada_37xx_update_reg(&reg, &offset);
mask = BIT(offset);
regmap_read(info->regmap, reg, &val);
@@ -449,7 +449,7 @@ static void armada_37xx_gpio_set(struct gpio_chip *chip, unsigned int offset,
unsigned int reg = OUTPUT_VAL;
unsigned int mask, val;
- armada_37xx_update_reg(&reg, offset);
+ armada_37xx_update_reg(&reg, &offset);
mask = BIT(offset);
val = value ? mask : 0;
diff --git a/drivers/pinctrl/pinctrl-stmfx.c b/drivers/pinctrl/pinctrl-stmfx.c
index 974973777395..564660028fcc 100644
--- a/drivers/pinctrl/pinctrl-stmfx.c
+++ b/drivers/pinctrl/pinctrl-stmfx.c
@@ -705,7 +705,7 @@ static int stmfx_pinctrl_probe(struct platform_device *pdev)
static int stmfx_pinctrl_remove(struct platform_device *pdev)
{
- struct stmfx *stmfx = dev_get_platdata(&pdev->dev);
+ struct stmfx *stmfx = dev_get_drvdata(pdev->dev.parent);
return stmfx_function_disable(stmfx,
STMFX_FUNC_GPIO |
diff --git a/drivers/platform/x86/classmate-laptop.c b/drivers/platform/x86/classmate-laptop.c
index 86cc2cc68fb5..af063f690846 100644
--- a/drivers/platform/x86/classmate-laptop.c
+++ b/drivers/platform/x86/classmate-laptop.c
@@ -420,12 +420,6 @@ failed_sensitivity:
static int cmpc_accel_remove_v4(struct acpi_device *acpi)
{
- struct input_dev *inputdev;
- struct cmpc_accel *accel;
-
- inputdev = dev_get_drvdata(&acpi->dev);
- accel = dev_get_drvdata(&inputdev->dev);
-
device_remove_file(&acpi->dev, &cmpc_accel_sensitivity_attr_v4);
device_remove_file(&acpi->dev, &cmpc_accel_g_select_attr_v4);
return cmpc_remove_acpi_notify_device(acpi);
@@ -656,12 +650,6 @@ failed_file:
static int cmpc_accel_remove(struct acpi_device *acpi)
{
- struct input_dev *inputdev;
- struct cmpc_accel *accel;
-
- inputdev = dev_get_drvdata(&acpi->dev);
- accel = dev_get_drvdata(&inputdev->dev);
-
device_remove_file(&acpi->dev, &cmpc_accel_sensitivity_attr);
return cmpc_remove_acpi_notify_device(acpi);
}
diff --git a/drivers/platform/x86/i2c-multi-instantiate.c b/drivers/platform/x86/i2c-multi-instantiate.c
index ea68f6ed66ae..ffb8d5d1eb5f 100644
--- a/drivers/platform/x86/i2c-multi-instantiate.c
+++ b/drivers/platform/x86/i2c-multi-instantiate.c
@@ -108,6 +108,7 @@ static int i2c_multi_inst_probe(struct platform_device *pdev)
if (ret < 0) {
dev_dbg(dev, "Error requesting irq at index %d: %d\n",
inst_data[i].irq_idx, ret);
+ goto error;
}
board_info.irq = ret;
break;
diff --git a/drivers/platform/x86/intel_punit_ipc.c b/drivers/platform/x86/intel_punit_ipc.c
index ab7ae1950867..fa97834fdb78 100644
--- a/drivers/platform/x86/intel_punit_ipc.c
+++ b/drivers/platform/x86/intel_punit_ipc.c
@@ -293,9 +293,8 @@ static int intel_punit_ipc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, punit_ipcdev);
- irq = platform_get_irq(pdev, 0);
+ irq = platform_get_irq_optional(pdev, 0);
if (irq < 0) {
- punit_ipcdev->irq = 0;
dev_warn(&pdev->dev, "Invalid IRQ, using polling mode\n");
} else {
ret = devm_request_irq(&pdev->dev, irq, intel_punit_ioc,
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index 960961fb0d7c..c48ad23cfa3a 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -97,8 +97,8 @@ config PTP_1588_CLOCK_PCH
help
This driver adds support for using the PCH EG20T as a PTP
clock. The hardware supports time stamping of PTP packets
- when using the end-to-end delay (E2E) mechansim. The peer
- delay mechansim (P2P) is not supported.
+ when using the end-to-end delay (E2E) mechanism. The peer
+ delay mechanism (P2P) is not supported.
This clock is only useful if your PTP programs are getting
hardware time stamps on the PTP Ethernet packets using the
@@ -119,4 +119,16 @@ config PTP_1588_CLOCK_KVM
To compile this driver as a module, choose M here: the module
will be called ptp_kvm.
+config PTP_1588_CLOCK_IDTCM
+ tristate "IDT CLOCKMATRIX as PTP clock"
+ select PTP_1588_CLOCK
+ default n
+ help
+ This driver adds support for using IDT CLOCKMATRIX(TM) as a PTP
+ clock. This clock is only useful if your time stamping MAC
+ is connected to the IDT chip.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ptp_clockmatrix.
+
endmenu
diff --git a/drivers/ptp/Makefile b/drivers/ptp/Makefile
index 677d1d178a3e..69a06f86a450 100644
--- a/drivers/ptp/Makefile
+++ b/drivers/ptp/Makefile
@@ -12,3 +12,4 @@ obj-$(CONFIG_PTP_1588_CLOCK_KVM) += ptp_kvm.o
obj-$(CONFIG_PTP_1588_CLOCK_QORIQ) += ptp-qoriq.o
ptp-qoriq-y += ptp_qoriq.o
ptp-qoriq-$(CONFIG_DEBUG_FS) += ptp_qoriq_debugfs.o
+obj-$(CONFIG_PTP_1588_CLOCK_IDTCM) += ptp_clockmatrix.o
\ No newline at end of file
diff --git a/drivers/ptp/idt8a340_reg.h b/drivers/ptp/idt8a340_reg.h
new file mode 100644
index 000000000000..9263bc33b8f4
--- /dev/null
+++ b/drivers/ptp/idt8a340_reg.h
@@ -0,0 +1,659 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* idt8a340_reg.h
+ *
+ * Originally generated by regen.tcl on Thu Feb 14 19:23:44 PST 2019
+ * https://github.com/richardcochran/regen
+ *
+ * Hand modified to include some HW registers.
+ * Based on 4.8.0, SCSR rev C commit a03c7ae5
+ */
+#ifndef HAVE_IDT8A340_REG
+#define HAVE_IDT8A340_REG
+
+#define PAGE_ADDR_BASE 0x0000
+#define PAGE_ADDR 0x00fc
+
+#define HW_REVISION 0x8180
+#define REV_ID 0x007a
+
+#define HW_DPLL_0 (0x8a00)
+#define HW_DPLL_1 (0x8b00)
+#define HW_DPLL_2 (0x8c00)
+#define HW_DPLL_3 (0x8d00)
+
+#define HW_DPLL_TOD_SW_TRIG_ADDR__0 (0x080)
+#define HW_DPLL_TOD_CTRL_1 (0x089)
+#define HW_DPLL_TOD_CTRL_2 (0x08A)
+#define HW_DPLL_TOD_OVR__0 (0x098)
+#define HW_DPLL_TOD_OUT_0__0 (0x0B0)
+
+#define HW_Q0_Q1_CH_SYNC_CTRL_0 (0xa740)
+#define HW_Q0_Q1_CH_SYNC_CTRL_1 (0xa741)
+#define HW_Q2_Q3_CH_SYNC_CTRL_0 (0xa742)
+#define HW_Q2_Q3_CH_SYNC_CTRL_1 (0xa743)
+#define HW_Q4_Q5_CH_SYNC_CTRL_0 (0xa744)
+#define HW_Q4_Q5_CH_SYNC_CTRL_1 (0xa745)
+#define HW_Q6_Q7_CH_SYNC_CTRL_0 (0xa746)
+#define HW_Q6_Q7_CH_SYNC_CTRL_1 (0xa747)
+#define HW_Q8_CH_SYNC_CTRL_0 (0xa748)
+#define HW_Q8_CH_SYNC_CTRL_1 (0xa749)
+#define HW_Q9_CH_SYNC_CTRL_0 (0xa74a)
+#define HW_Q9_CH_SYNC_CTRL_1 (0xa74b)
+#define HW_Q10_CH_SYNC_CTRL_0 (0xa74c)
+#define HW_Q10_CH_SYNC_CTRL_1 (0xa74d)
+#define HW_Q11_CH_SYNC_CTRL_0 (0xa74e)
+#define HW_Q11_CH_SYNC_CTRL_1 (0xa74f)
+
+#define SYNC_SOURCE_DPLL0_TOD_PPS 0x14
+#define SYNC_SOURCE_DPLL1_TOD_PPS 0x15
+#define SYNC_SOURCE_DPLL2_TOD_PPS 0x16
+#define SYNC_SOURCE_DPLL3_TOD_PPS 0x17
+
+#define SYNCTRL1_MASTER_SYNC_RST BIT(7)
+#define SYNCTRL1_MASTER_SYNC_TRIG BIT(5)
+#define SYNCTRL1_TOD_SYNC_TRIG BIT(4)
+#define SYNCTRL1_FBDIV_FRAME_SYNC_TRIG BIT(3)
+#define SYNCTRL1_FBDIV_SYNC_TRIG BIT(2)
+#define SYNCTRL1_Q1_DIV_SYNC_TRIG BIT(1)
+#define SYNCTRL1_Q0_DIV_SYNC_TRIG BIT(0)
+
+#define RESET_CTRL 0xc000
+#define SM_RESET 0x0012
+#define SM_RESET_CMD 0x5A
+
+#define GENERAL_STATUS 0xc014
+#define HW_REV_ID 0x000A
+#define BOND_ID 0x000B
+#define HW_CSR_ID 0x000C
+#define HW_IRQ_ID 0x000E
+
+#define MAJ_REL 0x0010
+#define MIN_REL 0x0011
+#define HOTFIX_REL 0x0012
+
+#define PIPELINE_ID 0x0014
+#define BUILD_ID 0x0018
+
+#define JTAG_DEVICE_ID 0x001c
+#define PRODUCT_ID 0x001e
+
+#define STATUS 0xc03c
+#define USER_GPIO0_TO_7_STATUS 0x008a
+#define USER_GPIO8_TO_15_STATUS 0x008b
+
+#define GPIO_USER_CONTROL 0xc160
+#define GPIO0_TO_7_OUT 0x0000
+#define GPIO8_TO_15_OUT 0x0001
+
+#define STICKY_STATUS_CLEAR 0xc164
+
+#define GPIO_TOD_NOTIFICATION_CLEAR 0xc16c
+
+#define ALERT_CFG 0xc188
+
+#define SYS_DPLL_XO 0xc194
+
+#define SYS_APLL 0xc19c
+
+#define INPUT_0 0xc1b0
+
+#define INPUT_1 0xc1c0
+
+#define INPUT_2 0xc1d0
+
+#define INPUT_3 0xc200
+
+#define INPUT_4 0xc210
+
+#define INPUT_5 0xc220
+
+#define INPUT_6 0xc230
+
+#define INPUT_7 0xc240
+
+#define INPUT_8 0xc250
+
+#define INPUT_9 0xc260
+
+#define INPUT_10 0xc280
+
+#define INPUT_11 0xc290
+
+#define INPUT_12 0xc2a0
+
+#define INPUT_13 0xc2b0
+
+#define INPUT_14 0xc2c0
+
+#define INPUT_15 0xc2d0
+
+#define REF_MON_0 0xc2e0
+
+#define REF_MON_1 0xc2ec
+
+#define REF_MON_2 0xc300
+
+#define REF_MON_3 0xc30c
+
+#define REF_MON_4 0xc318
+
+#define REF_MON_5 0xc324
+
+#define REF_MON_6 0xc330
+
+#define REF_MON_7 0xc33c
+
+#define REF_MON_8 0xc348
+
+#define REF_MON_9 0xc354
+
+#define REF_MON_10 0xc360
+
+#define REF_MON_11 0xc36c
+
+#define REF_MON_12 0xc380
+
+#define REF_MON_13 0xc38c
+
+#define REF_MON_14 0xc398
+
+#define REF_MON_15 0xc3a4
+
+#define DPLL_0 0xc3b0
+#define DPLL_CTRL_REG_0 0x0002
+#define DPLL_CTRL_REG_1 0x0003
+#define DPLL_CTRL_REG_2 0x0004
+#define DPLL_TOD_SYNC_CFG 0x0031
+#define DPLL_COMBO_SLAVE_CFG_0 0x0032
+#define DPLL_COMBO_SLAVE_CFG_1 0x0033
+#define DPLL_SLAVE_REF_CFG 0x0034
+#define DPLL_REF_MODE 0x0035
+#define DPLL_PHASE_MEASUREMENT_CFG 0x0036
+#define DPLL_MODE 0x0037
+
+#define DPLL_1 0xc400
+
+#define DPLL_2 0xc438
+
+#define DPLL_3 0xc480
+
+#define DPLL_4 0xc4b8
+
+#define DPLL_5 0xc500
+
+#define DPLL_6 0xc538
+
+#define DPLL_7 0xc580
+
+#define SYS_DPLL 0xc5b8
+
+#define DPLL_CTRL_0 0xc600
+#define DPLL_CTRL_DPLL_MANU_REF_CFG 0x0001
+
+#define DPLL_CTRL_1 0xc63c
+
+#define DPLL_CTRL_2 0xc680
+
+#define DPLL_CTRL_3 0xc6bc
+
+#define DPLL_CTRL_4 0xc700
+
+#define DPLL_CTRL_5 0xc73c
+
+#define DPLL_CTRL_6 0xc780
+
+#define DPLL_CTRL_7 0xc7bc
+
+#define SYS_DPLL_CTRL 0xc800
+
+#define DPLL_PHASE_0 0xc818
+
+/* Signed 42-bit FFO in units of 2^(-53) */
+#define DPLL_WR_PHASE 0x0000
+
+#define DPLL_PHASE_1 0xc81c
+
+#define DPLL_PHASE_2 0xc820
+
+#define DPLL_PHASE_3 0xc824
+
+#define DPLL_PHASE_4 0xc828
+
+#define DPLL_PHASE_5 0xc82c
+
+#define DPLL_PHASE_6 0xc830
+
+#define DPLL_PHASE_7 0xc834
+
+#define DPLL_FREQ_0 0xc838
+
+/* Signed 42-bit FFO in units of 2^(-53) */
+#define DPLL_WR_FREQ 0x0000
+
+#define DPLL_FREQ_1 0xc840
+
+#define DPLL_FREQ_2 0xc848
+
+#define DPLL_FREQ_3 0xc850
+
+#define DPLL_FREQ_4 0xc858
+
+#define DPLL_FREQ_5 0xc860
+
+#define DPLL_FREQ_6 0xc868
+
+#define DPLL_FREQ_7 0xc870
+
+#define DPLL_PHASE_PULL_IN_0 0xc880
+#define PULL_IN_OFFSET 0x0000 /* Signed 32 bit */
+#define PULL_IN_SLOPE_LIMIT 0x0004 /* Unsigned 24 bit */
+#define PULL_IN_CTRL 0x0007
+
+#define DPLL_PHASE_PULL_IN_1 0xc888
+
+#define DPLL_PHASE_PULL_IN_2 0xc890
+
+#define DPLL_PHASE_PULL_IN_3 0xc898
+
+#define DPLL_PHASE_PULL_IN_4 0xc8a0
+
+#define DPLL_PHASE_PULL_IN_5 0xc8a8
+
+#define DPLL_PHASE_PULL_IN_6 0xc8b0
+
+#define DPLL_PHASE_PULL_IN_7 0xc8b8
+
+#define GPIO_CFG 0xc8c0
+#define GPIO_CFG_GBL 0x0000
+
+#define GPIO_0 0xc8c2
+#define GPIO_DCO_INC_DEC 0x0000
+#define GPIO_OUT_CTRL_0 0x0001
+#define GPIO_OUT_CTRL_1 0x0002
+#define GPIO_TOD_TRIG 0x0003
+#define GPIO_DPLL_INDICATOR 0x0004
+#define GPIO_LOS_INDICATOR 0x0005
+#define GPIO_REF_INPUT_DSQ_0 0x0006
+#define GPIO_REF_INPUT_DSQ_1 0x0007
+#define GPIO_REF_INPUT_DSQ_2 0x0008
+#define GPIO_REF_INPUT_DSQ_3 0x0009
+#define GPIO_MAN_CLK_SEL_0 0x000a
+#define GPIO_MAN_CLK_SEL_1 0x000b
+#define GPIO_MAN_CLK_SEL_2 0x000c
+#define GPIO_SLAVE 0x000d
+#define GPIO_ALERT_OUT_CFG 0x000e
+#define GPIO_TOD_NOTIFICATION_CFG 0x000f
+#define GPIO_CTRL 0x0010
+
+#define GPIO_1 0xc8d4
+
+#define GPIO_2 0xc8e6
+
+#define GPIO_3 0xc900
+
+#define GPIO_4 0xc912
+
+#define GPIO_5 0xc924
+
+#define GPIO_6 0xc936
+
+#define GPIO_7 0xc948
+
+#define GPIO_8 0xc95a
+
+#define GPIO_9 0xc980
+
+#define GPIO_10 0xc992
+
+#define GPIO_11 0xc9a4
+
+#define GPIO_12 0xc9b6
+
+#define GPIO_13 0xc9c8
+
+#define GPIO_14 0xc9da
+
+#define GPIO_15 0xca00
+
+#define OUT_DIV_MUX 0xca12
+
+#define OUTPUT_0 0xca14
+/* FOD frequency output divider value */
+#define OUT_DIV 0x0000
+#define OUT_DUTY_CYCLE_HIGH 0x0004
+#define OUT_CTRL_0 0x0008
+#define OUT_CTRL_1 0x0009
+/* Phase adjustment in FOD cycles */
+#define OUT_PHASE_ADJ 0x000c
+
+#define OUTPUT_1 0xca24
+
+#define OUTPUT_2 0xca34
+
+#define OUTPUT_3 0xca44
+
+#define OUTPUT_4 0xca54
+
+#define OUTPUT_5 0xca64
+
+#define OUTPUT_6 0xca80
+
+#define OUTPUT_7 0xca90
+
+#define OUTPUT_8 0xcaa0
+
+#define OUTPUT_9 0xcab0
+
+#define OUTPUT_10 0xcac0
+
+#define OUTPUT_11 0xcad0
+
+#define SERIAL 0xcae0
+
+#define PWM_ENCODER_0 0xcb00
+
+#define PWM_ENCODER_1 0xcb08
+
+#define PWM_ENCODER_2 0xcb10
+
+#define PWM_ENCODER_3 0xcb18
+
+#define PWM_ENCODER_4 0xcb20
+
+#define PWM_ENCODER_5 0xcb28
+
+#define PWM_ENCODER_6 0xcb30
+
+#define PWM_ENCODER_7 0xcb38
+
+#define PWM_DECODER_0 0xcb40
+
+#define PWM_DECODER_1 0xcb48
+
+#define PWM_DECODER_2 0xcb50
+
+#define PWM_DECODER_3 0xcb58
+
+#define PWM_DECODER_4 0xcb60
+
+#define PWM_DECODER_5 0xcb68
+
+#define PWM_DECODER_6 0xcb70
+
+#define PWM_DECODER_7 0xcb80
+
+#define PWM_DECODER_8 0xcb88
+
+#define PWM_DECODER_9 0xcb90
+
+#define PWM_DECODER_10 0xcb98
+
+#define PWM_DECODER_11 0xcba0
+
+#define PWM_DECODER_12 0xcba8
+
+#define PWM_DECODER_13 0xcbb0
+
+#define PWM_DECODER_14 0xcbb8
+
+#define PWM_DECODER_15 0xcbc0
+
+#define PWM_USER_DATA 0xcbc8
+
+#define TOD_0 0xcbcc
+
+/* Enable TOD counter, output channel sync and even-PPS mode */
+#define TOD_CFG 0x0000
+
+#define TOD_1 0xcbce
+
+#define TOD_2 0xcbd0
+
+#define TOD_3 0xcbd2
+
+#define TOD_WRITE_0 0xcc00
+/* 8-bit subns, 32-bit ns, 48-bit seconds */
+#define TOD_WRITE 0x0000
+/* Counter increments after TOD write is completed */
+#define TOD_WRITE_COUNTER 0x000c
+/* TOD write trigger configuration */
+#define TOD_WRITE_SELECT_CFG_0 0x000d
+/* TOD write trigger selection */
+#define TOD_WRITE_CMD 0x000f
+
+#define TOD_WRITE_1 0xcc10
+
+#define TOD_WRITE_2 0xcc20
+
+#define TOD_WRITE_3 0xcc30
+
+#define TOD_READ_PRIMARY_0 0xcc40
+/* 8-bit subns, 32-bit ns, 48-bit seconds */
+#define TOD_READ_PRIMARY 0x0000
+/* Counter increments after TOD read is completed */
+#define TOD_READ_PRIMARY_COUNTER 0x000b
+/* Read trigger configuration */
+#define TOD_READ_PRIMARY_SEL_CFG_0 0x000c
+/* Read trigger selection */
+#define TOD_READ_PRIMARY_CMD 0x000e
+
+#define TOD_READ_PRIMARY_1 0xcc50
+
+#define TOD_READ_PRIMARY_2 0xcc60
+
+#define TOD_READ_PRIMARY_3 0xcc80
+
+#define TOD_READ_SECONDARY_0 0xcc90
+
+#define TOD_READ_SECONDARY_1 0xcca0
+
+#define TOD_READ_SECONDARY_2 0xccb0
+
+#define TOD_READ_SECONDARY_3 0xccc0
+
+#define OUTPUT_TDC_CFG 0xccd0
+
+#define OUTPUT_TDC_0 0xcd00
+
+#define OUTPUT_TDC_1 0xcd08
+
+#define OUTPUT_TDC_2 0xcd10
+
+#define OUTPUT_TDC_3 0xcd18
+
+#define INPUT_TDC 0xcd20
+
+#define SCRATCH 0xcf50
+
+#define EEPROM 0xcf68
+
+#define OTP 0xcf70
+
+#define BYTE 0xcf80
+
+/* Bit definitions for the MAJ_REL register */
+#define MAJOR_SHIFT (1)
+#define MAJOR_MASK (0x7f)
+#define PR_BUILD BIT(0)
+
+/* Bit definitions for the USER_GPIO0_TO_7_STATUS register */
+#define GPIO0_LEVEL BIT(0)
+#define GPIO1_LEVEL BIT(1)
+#define GPIO2_LEVEL BIT(2)
+#define GPIO3_LEVEL BIT(3)
+#define GPIO4_LEVEL BIT(4)
+#define GPIO5_LEVEL BIT(5)
+#define GPIO6_LEVEL BIT(6)
+#define GPIO7_LEVEL BIT(7)
+
+/* Bit definitions for the USER_GPIO8_TO_15_STATUS register */
+#define GPIO8_LEVEL BIT(0)
+#define GPIO9_LEVEL BIT(1)
+#define GPIO10_LEVEL BIT(2)
+#define GPIO11_LEVEL BIT(3)
+#define GPIO12_LEVEL BIT(4)
+#define GPIO13_LEVEL BIT(5)
+#define GPIO14_LEVEL BIT(6)
+#define GPIO15_LEVEL BIT(7)
+
+/* Bit definitions for the GPIO0_TO_7_OUT register */
+#define GPIO0_DRIVE_LEVEL BIT(0)
+#define GPIO1_DRIVE_LEVEL BIT(1)
+#define GPIO2_DRIVE_LEVEL BIT(2)
+#define GPIO3_DRIVE_LEVEL BIT(3)
+#define GPIO4_DRIVE_LEVEL BIT(4)
+#define GPIO5_DRIVE_LEVEL BIT(5)
+#define GPIO6_DRIVE_LEVEL BIT(6)
+#define GPIO7_DRIVE_LEVEL BIT(7)
+
+/* Bit definitions for the GPIO8_TO_15_OUT register */
+#define GPIO8_DRIVE_LEVEL BIT(0)
+#define GPIO9_DRIVE_LEVEL BIT(1)
+#define GPIO10_DRIVE_LEVEL BIT(2)
+#define GPIO11_DRIVE_LEVEL BIT(3)
+#define GPIO12_DRIVE_LEVEL BIT(4)
+#define GPIO13_DRIVE_LEVEL BIT(5)
+#define GPIO14_DRIVE_LEVEL BIT(6)
+#define GPIO15_DRIVE_LEVEL BIT(7)
+
+/* Bit definitions for the DPLL_TOD_SYNC_CFG register */
+#define TOD_SYNC_SOURCE_SHIFT (1)
+#define TOD_SYNC_SOURCE_MASK (0x3)
+#define TOD_SYNC_EN BIT(0)
+
+/* Bit definitions for the DPLL_MODE register */
+#define WRITE_TIMER_MODE BIT(6)
+#define PLL_MODE_SHIFT (3)
+#define PLL_MODE_MASK (0x7)
+#define STATE_MODE_SHIFT (0)
+#define STATE_MODE_MASK (0x7)
+
+/* Bit definitions for the GPIO_CFG_GBL register */
+#define SUPPLY_MODE_SHIFT (0)
+#define SUPPLY_MODE_MASK (0x3)
+
+/* Bit definitions for the GPIO_DCO_INC_DEC register */
+#define INCDEC_DPLL_INDEX_SHIFT (0)
+#define INCDEC_DPLL_INDEX_MASK (0x7)
+
+/* Bit definitions for the GPIO_OUT_CTRL_0 register */
+#define CTRL_OUT_0 BIT(0)
+#define CTRL_OUT_1 BIT(1)
+#define CTRL_OUT_2 BIT(2)
+#define CTRL_OUT_3 BIT(3)
+#define CTRL_OUT_4 BIT(4)
+#define CTRL_OUT_5 BIT(5)
+#define CTRL_OUT_6 BIT(6)
+#define CTRL_OUT_7 BIT(7)
+
+/* Bit definitions for the GPIO_OUT_CTRL_1 register */
+#define CTRL_OUT_8 BIT(0)
+#define CTRL_OUT_9 BIT(1)
+#define CTRL_OUT_10 BIT(2)
+#define CTRL_OUT_11 BIT(3)
+#define CTRL_OUT_12 BIT(4)
+#define CTRL_OUT_13 BIT(5)
+#define CTRL_OUT_14 BIT(6)
+#define CTRL_OUT_15 BIT(7)
+
+/* Bit definitions for the GPIO_TOD_TRIG register */
+#define TOD_TRIG_0 BIT(0)
+#define TOD_TRIG_1 BIT(1)
+#define TOD_TRIG_2 BIT(2)
+#define TOD_TRIG_3 BIT(3)
+
+/* Bit definitions for the GPIO_DPLL_INDICATOR register */
+#define IND_DPLL_INDEX_SHIFT (0)
+#define IND_DPLL_INDEX_MASK (0x7)
+
+/* Bit definitions for the GPIO_LOS_INDICATOR register */
+#define REFMON_INDEX_SHIFT (0)
+#define REFMON_INDEX_MASK (0xf)
+/* Active level of LOS indicator, 0=low 1=high */
+#define ACTIVE_LEVEL BIT(4)
+
+/* Bit definitions for the GPIO_REF_INPUT_DSQ_0 register */
+#define DSQ_INP_0 BIT(0)
+#define DSQ_INP_1 BIT(1)
+#define DSQ_INP_2 BIT(2)
+#define DSQ_INP_3 BIT(3)
+#define DSQ_INP_4 BIT(4)
+#define DSQ_INP_5 BIT(5)
+#define DSQ_INP_6 BIT(6)
+#define DSQ_INP_7 BIT(7)
+
+/* Bit definitions for the GPIO_REF_INPUT_DSQ_1 register */
+#define DSQ_INP_8 BIT(0)
+#define DSQ_INP_9 BIT(1)
+#define DSQ_INP_10 BIT(2)
+#define DSQ_INP_11 BIT(3)
+#define DSQ_INP_12 BIT(4)
+#define DSQ_INP_13 BIT(5)
+#define DSQ_INP_14 BIT(6)
+#define DSQ_INP_15 BIT(7)
+
+/* Bit definitions for the GPIO_REF_INPUT_DSQ_2 register */
+#define DSQ_DPLL_0 BIT(0)
+#define DSQ_DPLL_1 BIT(1)
+#define DSQ_DPLL_2 BIT(2)
+#define DSQ_DPLL_3 BIT(3)
+#define DSQ_DPLL_4 BIT(4)
+#define DSQ_DPLL_5 BIT(5)
+#define DSQ_DPLL_6 BIT(6)
+#define DSQ_DPLL_7 BIT(7)
+
+/* Bit definitions for the GPIO_REF_INPUT_DSQ_3 register */
+#define DSQ_DPLL_SYS BIT(0)
+#define GPIO_DSQ_LEVEL BIT(1)
+
+/* Bit definitions for the GPIO_TOD_NOTIFICATION_CFG register */
+#define DPLL_TOD_SHIFT (0)
+#define DPLL_TOD_MASK (0x3)
+#define TOD_READ_SECONDARY BIT(2)
+#define GPIO_ASSERT_LEVEL BIT(3)
+
+/* Bit definitions for the GPIO_CTRL register */
+#define GPIO_FUNCTION_EN BIT(0)
+#define GPIO_CMOS_OD_MODE BIT(1)
+#define GPIO_CONTROL_DIR BIT(2)
+#define GPIO_PU_PD_MODE BIT(3)
+#define GPIO_FUNCTION_SHIFT (4)
+#define GPIO_FUNCTION_MASK (0xf)
+
+/* Bit definitions for the OUT_CTRL_1 register */
+#define OUT_SYNC_DISABLE BIT(7)
+#define SQUELCH_VALUE BIT(6)
+#define SQUELCH_DISABLE BIT(5)
+#define PAD_VDDO_SHIFT (2)
+#define PAD_VDDO_MASK (0x7)
+#define PAD_CMOSDRV_SHIFT (0)
+#define PAD_CMOSDRV_MASK (0x3)
+
+/* Bit definitions for the TOD_CFG register */
+#define TOD_EVEN_PPS_MODE BIT(2)
+#define TOD_OUT_SYNC_ENABLE BIT(1)
+#define TOD_ENABLE BIT(0)
+
+/* Bit definitions for the TOD_WRITE_SELECT_CFG_0 register */
+#define WR_PWM_DECODER_INDEX_SHIFT (4)
+#define WR_PWM_DECODER_INDEX_MASK (0xf)
+#define WR_REF_INDEX_SHIFT (0)
+#define WR_REF_INDEX_MASK (0xf)
+
+/* Bit definitions for the TOD_WRITE_CMD register */
+#define TOD_WRITE_SELECTION_SHIFT (0)
+#define TOD_WRITE_SELECTION_MASK (0xf)
+
+/* Bit definitions for the TOD_READ_PRIMARY_SEL_CFG_0 register */
+#define RD_PWM_DECODER_INDEX_SHIFT (4)
+#define RD_PWM_DECODER_INDEX_MASK (0xf)
+#define RD_REF_INDEX_SHIFT (0)
+#define RD_REF_INDEX_MASK (0xf)
+
+/* Bit definitions for the TOD_READ_PRIMARY_CMD register */
+#define TOD_READ_TRIGGER_MODE BIT(4)
+#define TOD_READ_TRIGGER_SHIFT (0)
+#define TOD_READ_TRIGGER_MASK (0xf)
+
+#endif
diff --git a/drivers/ptp/ptp_clockmatrix.c b/drivers/ptp/ptp_clockmatrix.c
new file mode 100644
index 000000000000..cf5889b7d825
--- /dev/null
+++ b/drivers/ptp/ptp_clockmatrix.c
@@ -0,0 +1,1425 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * PTP hardware clock driver for the IDT ClockMatrix(TM) family of timing and
+ * synchronization devices.
+ *
+ * Copyright (C) 2019 Integrated Device Technology, Inc., a Renesas Company.
+ */
+#include <linux/firmware.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/timekeeping.h>
+
+#include "ptp_private.h"
+#include "ptp_clockmatrix.h"
+
+MODULE_DESCRIPTION("Driver for IDT ClockMatrix(TM) family");
+MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");
+MODULE_AUTHOR("IDT support-1588 <IDT-support-1588@lm.renesas.com>");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL");
+
+#define SETTIME_CORRECTION (0)
+
+static int char_array_to_timespec(u8 *buf,
+ u8 count,
+ struct timespec64 *ts)
+{
+ u8 i;
+ u64 nsec;
+ time64_t sec;
+
+ if (count < TOD_BYTE_COUNT)
+ return 1;
+
+ /* Sub-nanoseconds are in buf[0]. */
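+	/*
+	 * Assumed wire layout, matching the "8-bit subns, 32-bit ns,
+	 * 48-bit seconds" note on TOD_READ_PRIMARY: buf[0] sub-ns,
+	 * buf[1..4] ns little-endian, buf[5..10] seconds little-endian.
+	 */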
+ nsec = buf[4];
+ for (i = 0; i < 3; i++) {
+ nsec <<= 8;
+ nsec |= buf[3 - i];
+ }
+
+ sec = buf[10];
+ for (i = 0; i < 5; i++) {
+ sec <<= 8;
+ sec |= buf[9 - i];
+ }
+
+ ts->tv_sec = sec;
+ ts->tv_nsec = nsec;
+
+ return 0;
+}
+
+static int timespec_to_char_array(struct timespec64 const *ts,
+ u8 *buf,
+ u8 count)
+{
+ u8 i;
+ s32 nsec;
+ time64_t sec;
+
+ if (count < TOD_BYTE_COUNT)
+ return 1;
+
+ nsec = ts->tv_nsec;
+ sec = ts->tv_sec;
+
+ /* Sub-nanoseconds are in buf[0]. */
+ buf[0] = 0;
+ for (i = 1; i < 5; i++) {
+ buf[i] = nsec & 0xff;
+ nsec >>= 8;
+ }
+
+ for (i = 5; i < TOD_BYTE_COUNT; i++) {
+
+ buf[i] = sec & 0xff;
+ sec >>= 8;
+ }
+
+ return 0;
+}
+
+static int idtcm_xfer(struct idtcm *idtcm,
+ u8 regaddr,
+ u8 *buf,
+ u16 count,
+ bool write)
+{
+ struct i2c_client *client = idtcm->client;
+ struct i2c_msg msg[2];
+ int cnt;
+
+ msg[0].addr = client->addr;
+ msg[0].flags = 0;
+ msg[0].len = 1;
+ msg[0].buf = &regaddr;
+
+ msg[1].addr = client->addr;
+ msg[1].flags = write ? 0 : I2C_M_RD;
+ msg[1].len = count;
+ msg[1].buf = buf;
+
+ cnt = i2c_transfer(client->adapter, msg, 2);
+
+ if (cnt < 0) {
+ dev_err(&client->dev, "i2c_transfer returned %d\n", cnt);
+ return cnt;
+ } else if (cnt != 2) {
+ dev_err(&client->dev,
+ "i2c_transfer sent only %d of %d messages\n", cnt, 2);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int idtcm_page_offset(struct idtcm *idtcm, u8 val)
+{
+ u8 buf[4];
+ int err;
+
+ if (idtcm->page_offset == val)
+ return 0;
+
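+	/*
+	 * The register map is paged: buf[1] carries the new page (upper
+	 * address byte), while buf[2]/buf[3] appear to be fixed high
+	 * address bytes, an assumption based on how this driver uses
+	 * PAGE_ADDR.
+	 */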
+ buf[0] = 0x0;
+ buf[1] = val;
+ buf[2] = 0x10;
+ buf[3] = 0x20;
+
+ err = idtcm_xfer(idtcm, PAGE_ADDR, buf, sizeof(buf), 1);
+
+ if (err)
+ dev_err(&idtcm->client->dev, "failed to set page offset\n");
+ else
+ idtcm->page_offset = val;
+
+ return err;
+}
+
+static int _idtcm_rdwr(struct idtcm *idtcm,
+ u16 regaddr,
+ u8 *buf,
+ u16 count,
+ bool write)
+{
+ u8 hi;
+ u8 lo;
+ int err;
+
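+	/* Split the 16-bit address: the high byte selects the page, the
+	 * low byte is the offset handed to the raw I2C transfer. */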
+ hi = (regaddr >> 8) & 0xff;
+ lo = regaddr & 0xff;
+
+ err = idtcm_page_offset(idtcm, hi);
+
+ if (err)
+ goto out;
+
+ err = idtcm_xfer(idtcm, lo, buf, count, write);
+out:
+ return err;
+}
+
+static int idtcm_read(struct idtcm *idtcm,
+ u16 module,
+ u16 regaddr,
+ u8 *buf,
+ u16 count)
+{
+ return _idtcm_rdwr(idtcm, module + regaddr, buf, count, false);
+}
+
+static int idtcm_write(struct idtcm *idtcm,
+ u16 module,
+ u16 regaddr,
+ u8 *buf,
+ u16 count)
+{
+ return _idtcm_rdwr(idtcm, module + regaddr, buf, count, true);
+}
+
+static int _idtcm_gettime(struct idtcm_channel *channel,
+ struct timespec64 *ts)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ u8 buf[TOD_BYTE_COUNT];
+ u8 trigger;
+ int err;
+
+ err = idtcm_read(idtcm, channel->tod_read_primary,
+ TOD_READ_PRIMARY_CMD, &trigger, sizeof(trigger));
+ if (err)
+ return err;
+
+ trigger &= ~(TOD_READ_TRIGGER_MASK << TOD_READ_TRIGGER_SHIFT);
+ trigger |= (1 << TOD_READ_TRIGGER_SHIFT);
+ trigger |= TOD_READ_TRIGGER_MODE;
+
+ err = idtcm_write(idtcm, channel->tod_read_primary,
+ TOD_READ_PRIMARY_CMD, &trigger, sizeof(trigger));
+
+ if (err)
+ return err;
+
+ if (idtcm->calculate_overhead_flag)
+ idtcm->start_time = ktime_get_raw();
+
+ err = idtcm_read(idtcm, channel->tod_read_primary,
+ TOD_READ_PRIMARY, buf, sizeof(buf));
+
+ if (err)
+ return err;
+
+ err = char_array_to_timespec(buf, sizeof(buf), ts);
+
+ return err;
+}
+
+static int _sync_pll_output(struct idtcm *idtcm,
+ u8 pll,
+ u8 sync_src,
+ u8 qn,
+ u8 qn_plus_1)
+{
+ int err;
+ u8 val;
+ u16 sync_ctrl0;
+ u16 sync_ctrl1;
+
+ if ((qn == 0) && (qn_plus_1 == 0))
+ return 0;
+
+ switch (pll) {
+ case 0:
+ sync_ctrl0 = HW_Q0_Q1_CH_SYNC_CTRL_0;
+ sync_ctrl1 = HW_Q0_Q1_CH_SYNC_CTRL_1;
+ break;
+ case 1:
+ sync_ctrl0 = HW_Q2_Q3_CH_SYNC_CTRL_0;
+ sync_ctrl1 = HW_Q2_Q3_CH_SYNC_CTRL_1;
+ break;
+ case 2:
+ sync_ctrl0 = HW_Q4_Q5_CH_SYNC_CTRL_0;
+ sync_ctrl1 = HW_Q4_Q5_CH_SYNC_CTRL_1;
+ break;
+ case 3:
+ sync_ctrl0 = HW_Q6_Q7_CH_SYNC_CTRL_0;
+ sync_ctrl1 = HW_Q6_Q7_CH_SYNC_CTRL_1;
+ break;
+ case 4:
+ sync_ctrl0 = HW_Q8_CH_SYNC_CTRL_0;
+ sync_ctrl1 = HW_Q8_CH_SYNC_CTRL_1;
+ break;
+ case 5:
+ sync_ctrl0 = HW_Q9_CH_SYNC_CTRL_0;
+ sync_ctrl1 = HW_Q9_CH_SYNC_CTRL_1;
+ break;
+ case 6:
+ sync_ctrl0 = HW_Q10_CH_SYNC_CTRL_0;
+ sync_ctrl1 = HW_Q10_CH_SYNC_CTRL_1;
+ break;
+ case 7:
+ sync_ctrl0 = HW_Q11_CH_SYNC_CTRL_0;
+ sync_ctrl1 = HW_Q11_CH_SYNC_CTRL_1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ val = SYNCTRL1_MASTER_SYNC_RST;
+
+ /* Place master sync in reset */
+ err = idtcm_write(idtcm, 0, sync_ctrl1, &val, sizeof(val));
+ if (err)
+ return err;
+
+ err = idtcm_write(idtcm, 0, sync_ctrl0, &sync_src, sizeof(sync_src));
+ if (err)
+ return err;
+
+ /* Set sync trigger mask */
+ val |= SYNCTRL1_FBDIV_FRAME_SYNC_TRIG | SYNCTRL1_FBDIV_SYNC_TRIG;
+
+ if (qn)
+ val |= SYNCTRL1_Q0_DIV_SYNC_TRIG;
+
+ if (qn_plus_1)
+ val |= SYNCTRL1_Q1_DIV_SYNC_TRIG;
+
+ err = idtcm_write(idtcm, 0, sync_ctrl1, &val, sizeof(val));
+ if (err)
+ return err;
+
+ /* Place master sync out of reset */
+ val &= ~(SYNCTRL1_MASTER_SYNC_RST);
+ err = idtcm_write(idtcm, 0, sync_ctrl1, &val, sizeof(val));
+
+ return err;
+}
+
+static int idtcm_sync_pps_output(struct idtcm_channel *channel)
+{
+ struct idtcm *idtcm = channel->idtcm;
+
+ u8 pll;
+ u8 sync_src;
+ u8 qn;
+ u8 qn_plus_1;
+ int err = 0;
+
+ u16 output_mask = channel->output_mask;
+
+ switch (channel->dpll_n) {
+ case DPLL_0:
+ sync_src = SYNC_SOURCE_DPLL0_TOD_PPS;
+ break;
+ case DPLL_1:
+ sync_src = SYNC_SOURCE_DPLL1_TOD_PPS;
+ break;
+ case DPLL_2:
+ sync_src = SYNC_SOURCE_DPLL2_TOD_PPS;
+ break;
+ case DPLL_3:
+ sync_src = SYNC_SOURCE_DPLL3_TOD_PPS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
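+	/*
+	 * Each bit of output_mask selects one of the 12 outputs; PLLs 0-3
+	 * drive output pairs (Q0/Q1 .. Q6/Q7) while PLLs 4-7 drive single
+	 * outputs (Q8 .. Q11), mirroring the HW_Qx_CH_SYNC_CTRL registers.
+	 */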
+ for (pll = 0; pll < 8; pll++) {
+
+ qn = output_mask & 0x1;
+ output_mask = output_mask >> 1;
+
+ if (pll < 4) {
+			/* The first 4 PLLs have 2 outputs each */
+ qn_plus_1 = output_mask & 0x1;
+ output_mask = output_mask >> 1;
+ } else {
+ qn_plus_1 = 0;
+ }
+
+ if ((qn != 0) || (qn_plus_1 != 0))
+ err = _sync_pll_output(idtcm, pll, sync_src, qn,
+ qn_plus_1);
+
+ if (err)
+ return err;
+ }
+
+ return err;
+}
+
+static int _idtcm_set_dpll_tod(struct idtcm_channel *channel,
+ struct timespec64 const *ts,
+ enum hw_tod_write_trig_sel wr_trig)
+{
+ struct idtcm *idtcm = channel->idtcm;
+
+ u8 buf[TOD_BYTE_COUNT];
+ u8 cmd;
+ int err;
+ struct timespec64 local_ts = *ts;
+ s64 total_overhead_ns;
+
+ /* Configure HW TOD write trigger. */
+ err = idtcm_read(idtcm, channel->hw_dpll_n, HW_DPLL_TOD_CTRL_1,
+ &cmd, sizeof(cmd));
+
+ if (err)
+ return err;
+
+ cmd &= ~(0x0f);
+ cmd |= wr_trig | 0x08;
+
+ err = idtcm_write(idtcm, channel->hw_dpll_n, HW_DPLL_TOD_CTRL_1,
+ &cmd, sizeof(cmd));
+
+ if (err)
+ return err;
+
+ if (wr_trig != HW_TOD_WR_TRIG_SEL_MSB) {
+
+ err = timespec_to_char_array(&local_ts, buf, sizeof(buf));
+
+ if (err)
+ return err;
+
+ err = idtcm_write(idtcm, channel->hw_dpll_n,
+ HW_DPLL_TOD_OVR__0, buf, sizeof(buf));
+
+ if (err)
+ return err;
+ }
+
+ /* ARM HW TOD write trigger. */
+ cmd &= ~(0x08);
+
+ err = idtcm_write(idtcm, channel->hw_dpll_n, HW_DPLL_TOD_CTRL_1,
+			  &cmd, sizeof(cmd));
+
+	if (err)
+		return err;
+
+ if (wr_trig == HW_TOD_WR_TRIG_SEL_MSB) {
+
+ if (idtcm->calculate_overhead_flag) {
+ total_overhead_ns = ktime_to_ns(ktime_get_raw()
+ - idtcm->start_time)
+ + idtcm->tod_write_overhead_ns
+ + SETTIME_CORRECTION;
+
+ timespec64_add_ns(&local_ts, total_overhead_ns);
+
+ idtcm->calculate_overhead_flag = 0;
+ }
+
+ err = timespec_to_char_array(&local_ts, buf, sizeof(buf));
+
+ if (err)
+ return err;
+
+ err = idtcm_write(idtcm, channel->hw_dpll_n,
+ HW_DPLL_TOD_OVR__0, buf, sizeof(buf));
+ }
+
+ return err;
+}
+
+static int _idtcm_settime(struct idtcm_channel *channel,
+ struct timespec64 const *ts,
+ enum hw_tod_write_trig_sel wr_trig)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ int err;
+ int i;
+ u8 trig_sel;
+
+ err = _idtcm_set_dpll_tod(channel, ts, wr_trig);
+
+ if (err)
+ return err;
+
+ /* Wait for the operation to complete. */
+ for (i = 0; i < 10000; i++) {
+ err = idtcm_read(idtcm, channel->hw_dpll_n,
+ HW_DPLL_TOD_CTRL_1, &trig_sel,
+ sizeof(trig_sel));
+
+ if (err)
+ return err;
+
+ if (trig_sel == 0x4a)
+ break;
+
+ err = 1;
+ }
+
+ if (err)
+ return err;
+
+	return idtcm_sync_pps_output(channel);
+}
+
+static int idtcm_set_phase_pull_in_offset(struct idtcm_channel *channel,
+ s32 offset_ns)
+{
+ int err;
+ int i;
+ struct idtcm *idtcm = channel->idtcm;
+
+ u8 buf[4];
+
+ for (i = 0; i < 4; i++) {
+ buf[i] = 0xff & (offset_ns);
+ offset_ns >>= 8;
+ }
+
+ err = idtcm_write(idtcm, channel->dpll_phase_pull_in, PULL_IN_OFFSET,
+ buf, sizeof(buf));
+
+ return err;
+}
+
+static int idtcm_set_phase_pull_in_slope_limit(struct idtcm_channel *channel,
+ u32 max_ffo_ppb)
+{
+ int err;
+ u8 i;
+ struct idtcm *idtcm = channel->idtcm;
+
+ u8 buf[3];
+
+ if (max_ffo_ppb & 0xff000000)
+ max_ffo_ppb = 0;
+
+ for (i = 0; i < 3; i++) {
+ buf[i] = 0xff & (max_ffo_ppb);
+ max_ffo_ppb >>= 8;
+ }
+
+ err = idtcm_write(idtcm, channel->dpll_phase_pull_in,
+ PULL_IN_SLOPE_LIMIT, buf, sizeof(buf));
+
+ return err;
+}
+
+static int idtcm_start_phase_pull_in(struct idtcm_channel *channel)
+{
+ int err;
+ struct idtcm *idtcm = channel->idtcm;
+
+ u8 buf;
+
+ err = idtcm_read(idtcm, channel->dpll_phase_pull_in, PULL_IN_CTRL,
+ &buf, sizeof(buf));
+
+ if (err)
+ return err;
+
+ if (buf == 0) {
+ buf = 0x01;
+ err = idtcm_write(idtcm, channel->dpll_phase_pull_in,
+ PULL_IN_CTRL, &buf, sizeof(buf));
+ } else {
+ err = -EBUSY;
+ }
+
+ return err;
+}
+
+static int idtcm_do_phase_pull_in(struct idtcm_channel *channel,
+ s32 offset_ns,
+ u32 max_ffo_ppb)
+{
+ int err;
+
+ err = idtcm_set_phase_pull_in_offset(channel, -offset_ns);
+
+ if (err)
+ return err;
+
+ err = idtcm_set_phase_pull_in_slope_limit(channel, max_ffo_ppb);
+
+ if (err)
+ return err;
+
+ err = idtcm_start_phase_pull_in(channel);
+
+ return err;
+}
+
+static int _idtcm_adjtime(struct idtcm_channel *channel, s64 delta)
+{
+ int err;
+ struct idtcm *idtcm = channel->idtcm;
+ struct timespec64 ts;
+ s64 now;
+
+ if (abs(delta) < PHASE_PULL_IN_THRESHOLD_NS) {
+ err = idtcm_do_phase_pull_in(channel, delta, 0);
+ } else {
+ idtcm->calculate_overhead_flag = 1;
+
+ err = _idtcm_gettime(channel, &ts);
+
+ if (err)
+ return err;
+
+ now = timespec64_to_ns(&ts);
+ now += delta;
+
+ ts = ns_to_timespec64(now);
+
+ err = _idtcm_settime(channel, &ts, HW_TOD_WR_TRIG_SEL_MSB);
+ }
+
+ return err;
+}
+
+static int idtcm_state_machine_reset(struct idtcm *idtcm)
+{
+ int err;
+ u8 byte = SM_RESET_CMD;
+
+ err = idtcm_write(idtcm, RESET_CTRL, SM_RESET, &byte, sizeof(byte));
+
+ if (!err)
+ msleep_interruptible(POST_SM_RESET_DELAY_MS);
+
+ return err;
+}
+
+static int idtcm_read_hw_rev_id(struct idtcm *idtcm, u8 *hw_rev_id)
+{
+ return idtcm_read(idtcm,
+ GENERAL_STATUS,
+ HW_REV_ID,
+ hw_rev_id,
+ sizeof(u8));
+}
+
+static int idtcm_read_bond_id(struct idtcm *idtcm, u8 *bond_id)
+{
+ return idtcm_read(idtcm,
+ GENERAL_STATUS,
+ BOND_ID,
+ bond_id,
+ sizeof(u8));
+}
+
+static int idtcm_read_hw_csr_id(struct idtcm *idtcm, u16 *hw_csr_id)
+{
+ int err;
+ u8 buf[2] = {0};
+
+ err = idtcm_read(idtcm, GENERAL_STATUS, HW_CSR_ID, buf, sizeof(buf));
+
+ *hw_csr_id = (buf[1] << 8) | buf[0];
+
+ return err;
+}
+
+static int idtcm_read_hw_irq_id(struct idtcm *idtcm, u16 *hw_irq_id)
+{
+ int err;
+ u8 buf[2] = {0};
+
+ err = idtcm_read(idtcm, GENERAL_STATUS, HW_IRQ_ID, buf, sizeof(buf));
+
+ *hw_irq_id = (buf[1] << 8) | buf[0];
+
+ return err;
+}
+
+static int idtcm_read_product_id(struct idtcm *idtcm, u16 *product_id)
+{
+ int err;
+ u8 buf[2] = {0};
+
+ err = idtcm_read(idtcm, GENERAL_STATUS, PRODUCT_ID, buf, sizeof(buf));
+
+ *product_id = (buf[1] << 8) | buf[0];
+
+ return err;
+}
+
+static int idtcm_read_major_release(struct idtcm *idtcm, u8 *major)
+{
+ int err;
+ u8 buf = 0;
+
+ err = idtcm_read(idtcm, GENERAL_STATUS, MAJ_REL, &buf, sizeof(buf));
+
+ *major = buf >> 1;
+
+ return err;
+}
+
+static int idtcm_read_minor_release(struct idtcm *idtcm, u8 *minor)
+{
+ return idtcm_read(idtcm, GENERAL_STATUS, MIN_REL, minor, sizeof(u8));
+}
+
+static int idtcm_read_hotfix_release(struct idtcm *idtcm, u8 *hotfix)
+{
+ return idtcm_read(idtcm,
+ GENERAL_STATUS,
+ HOTFIX_REL,
+ hotfix,
+ sizeof(u8));
+}
+
+static int idtcm_read_pipeline(struct idtcm *idtcm, u32 *pipeline)
+{
+ int err;
+ u8 buf[4] = {0};
+
+ err = idtcm_read(idtcm,
+ GENERAL_STATUS,
+ PIPELINE_ID,
+ &buf[0],
+ sizeof(buf));
+
+ *pipeline = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
+
+ return err;
+}
+
+static int process_pll_mask(struct idtcm *idtcm, u32 addr, u8 val, u8 *mask)
+{
+ int err = 0;
+
+ if (addr == PLL_MASK_ADDR) {
+ if ((val & 0xf0) || !(val & 0xf)) {
+ dev_err(&idtcm->client->dev,
+ "Invalid PLL mask 0x%hhx\n", val);
+ err = -EINVAL;
+ }
+ *mask = val;
+ }
+
+ return err;
+}
+
+static int set_pll_output_mask(struct idtcm *idtcm, u16 addr, u8 val)
+{
+ int err = 0;
+
+ switch (addr) {
+ case OUTPUT_MASK_PLL0_ADDR:
+ SET_U16_LSB(idtcm->channel[0].output_mask, val);
+ break;
+ case OUTPUT_MASK_PLL0_ADDR + 1:
+ SET_U16_MSB(idtcm->channel[0].output_mask, val);
+ break;
+ case OUTPUT_MASK_PLL1_ADDR:
+ SET_U16_LSB(idtcm->channel[1].output_mask, val);
+ break;
+ case OUTPUT_MASK_PLL1_ADDR + 1:
+ SET_U16_MSB(idtcm->channel[1].output_mask, val);
+ break;
+ case OUTPUT_MASK_PLL2_ADDR:
+ SET_U16_LSB(idtcm->channel[2].output_mask, val);
+ break;
+ case OUTPUT_MASK_PLL2_ADDR + 1:
+ SET_U16_MSB(idtcm->channel[2].output_mask, val);
+ break;
+ case OUTPUT_MASK_PLL3_ADDR:
+ SET_U16_LSB(idtcm->channel[3].output_mask, val);
+ break;
+ case OUTPUT_MASK_PLL3_ADDR + 1:
+ SET_U16_MSB(idtcm->channel[3].output_mask, val);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+static int check_and_set_masks(struct idtcm *idtcm,
+ u16 regaddr,
+ u8 val)
+{
+ int err = 0;
+
+ if (set_pll_output_mask(idtcm, regaddr, val)) {
+ /* Not an output mask, check for pll mask */
+ err = process_pll_mask(idtcm, regaddr, val, &idtcm->pll_mask);
+ }
+
+ return err;
+}
+
+static void display_pll_and_output_masks(struct idtcm *idtcm)
+{
+ u8 i;
+ u8 mask;
+
+ dev_dbg(&idtcm->client->dev, "pllmask = 0x%02x\n", idtcm->pll_mask);
+
+ for (i = 0; i < MAX_PHC_PLL; i++) {
+ mask = 1 << i;
+
+ if (mask & idtcm->pll_mask)
+ dev_dbg(&idtcm->client->dev,
+ "PLL%d output_mask = 0x%04x\n",
+ i, idtcm->channel[i].output_mask);
+ }
+}
+
+static int idtcm_load_firmware(struct idtcm *idtcm,
+ struct device *dev)
+{
+ const struct firmware *fw;
+ struct idtcm_fwrc *rec;
+ u32 regaddr;
+ int err;
+ s32 len;
+ u8 val;
+ u8 loaddr;
+
+ dev_dbg(&idtcm->client->dev, "requesting firmware '%s'\n", FW_FILENAME);
+
+ err = request_firmware(&fw, FW_FILENAME, dev);
+
+ if (err)
+ return err;
+
+ dev_dbg(&idtcm->client->dev, "firmware size %zu bytes\n", fw->size);
+
+ rec = (struct idtcm_fwrc *) fw->data;
+
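+	/* The image is a flat array of 4-byte idtcm_fwrc records, each one
+	 * describing a single register write: {hiaddr, loaddr, value, 0}. */
+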
+ if (fw->size > 0)
+ idtcm_state_machine_reset(idtcm);
+
+ for (len = fw->size; len > 0; len -= sizeof(*rec)) {
+
+ if (rec->reserved) {
+ dev_err(&idtcm->client->dev,
+ "bad firmware, reserved field non-zero\n");
+ err = -EINVAL;
+ } else {
+ regaddr = rec->hiaddr << 8;
+ regaddr |= rec->loaddr;
+
+ val = rec->value;
+ loaddr = rec->loaddr;
+
+ rec++;
+
+ err = check_and_set_masks(idtcm, regaddr, val);
+ }
+
+ if (err == 0) {
+ /* Top (status registers) and bottom are read-only */
+ if ((regaddr < GPIO_USER_CONTROL)
+ || (regaddr >= SCRATCH))
+ continue;
+
+ /* Page size 128, last 4 bytes of page skipped */
+ if (((loaddr > 0x7b) && (loaddr <= 0x7f))
+ || ((loaddr > 0xfb) && (loaddr <= 0xff)))
+ continue;
+
+ err = idtcm_write(idtcm, regaddr, 0, &val, sizeof(val));
+ }
+
+ if (err)
+ goto out;
+ }
+
+ display_pll_and_output_masks(idtcm);
+
+out:
+ release_firmware(fw);
+ return err;
+}
+
+static int idtcm_pps_enable(struct idtcm_channel *channel, bool enable)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ u32 module;
+ u8 val;
+ int err;
+
+ /*
+	 * This assumes that the 1-PPS is on the second of each DPLL's two
+	 * outputs, which may not hold for every configuration.
+ */
+ switch (channel->dpll_n) {
+ case DPLL_0:
+ module = OUTPUT_1;
+ break;
+ case DPLL_1:
+ module = OUTPUT_3;
+ break;
+ case DPLL_2:
+ module = OUTPUT_5;
+ break;
+ case DPLL_3:
+ module = OUTPUT_7;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ err = idtcm_read(idtcm, module, OUT_CTRL_1, &val, sizeof(val));
+
+ if (err)
+ return err;
+
+ if (enable)
+ val |= SQUELCH_DISABLE;
+ else
+ val &= ~SQUELCH_DISABLE;
+
+ err = idtcm_write(idtcm, module, OUT_CTRL_1, &val, sizeof(val));
+
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int idtcm_set_pll_mode(struct idtcm_channel *channel,
+ enum pll_mode pll_mode)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ int err;
+ u8 dpll_mode;
+
+ err = idtcm_read(idtcm, channel->dpll_n, DPLL_MODE,
+ &dpll_mode, sizeof(dpll_mode));
+ if (err)
+ return err;
+
+ dpll_mode &= ~(PLL_MODE_MASK << PLL_MODE_SHIFT);
+
+ dpll_mode |= (pll_mode << PLL_MODE_SHIFT);
+
+ channel->pll_mode = pll_mode;
+
+ err = idtcm_write(idtcm, channel->dpll_n, DPLL_MODE,
+ &dpll_mode, sizeof(dpll_mode));
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/* PTP Hardware Clock interface */
+
+static int idtcm_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ struct idtcm_channel *channel =
+ container_of(ptp, struct idtcm_channel, caps);
+ struct idtcm *idtcm = channel->idtcm;
+ u8 i;
+	bool neg_adj = false;
+ int err;
+ u8 buf[6] = {0};
+ s64 fcw;
+
+ if (channel->pll_mode != PLL_MODE_WRITE_FREQUENCY) {
+ err = idtcm_set_pll_mode(channel, PLL_MODE_WRITE_FREQUENCY);
+ if (err)
+ return err;
+ }
+
+ /*
+ * Frequency Control Word unit is: 1.11 * 10^-10 ppm
+ *
+ * adjfreq:
+ * ppb * 10^9
+ * FCW = ----------
+ * 111
+ *
+ * adjfine:
+ * ppm_16 * 5^12
+ * FCW = -------------
+ * 111 * 2^4
+ */
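+	/*
+	 * Worked example (illustrative only): ppb = 1000 (1 ppm) gives
+	 * fcw = 1000 * 10^12 / 111022 ~= 9.007e9 units of 2^-53.
+	 */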
+ if (ppb < 0) {
+ neg_adj = 1;
+ ppb = -ppb;
+ }
+
+ /* 2 ^ -53 = 1.1102230246251565404236316680908e-16 */
+ fcw = ppb * 1000000000000ULL;
+
+ fcw = div_u64(fcw, 111022);
+
+ if (neg_adj)
+ fcw = -fcw;
+
+ for (i = 0; i < 6; i++) {
+ buf[i] = fcw & 0xff;
+ fcw >>= 8;
+ }
+
+ mutex_lock(&idtcm->reg_lock);
+
+ err = idtcm_write(idtcm, channel->dpll_freq, DPLL_WR_FREQ,
+ buf, sizeof(buf));
+
+ mutex_unlock(&idtcm->reg_lock);
+ return err;
+}
+
+static int idtcm_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ struct idtcm_channel *channel =
+ container_of(ptp, struct idtcm_channel, caps);
+ struct idtcm *idtcm = channel->idtcm;
+ int err;
+
+ mutex_lock(&idtcm->reg_lock);
+
+ err = _idtcm_gettime(channel, ts);
+
+ mutex_unlock(&idtcm->reg_lock);
+
+ return err;
+}
+
+static int idtcm_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct idtcm_channel *channel =
+ container_of(ptp, struct idtcm_channel, caps);
+ struct idtcm *idtcm = channel->idtcm;
+ int err;
+
+ mutex_lock(&idtcm->reg_lock);
+
+ err = _idtcm_settime(channel, ts, HW_TOD_WR_TRIG_SEL_MSB);
+
+ mutex_unlock(&idtcm->reg_lock);
+
+ return err;
+}
+
+static int idtcm_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct idtcm_channel *channel =
+ container_of(ptp, struct idtcm_channel, caps);
+ struct idtcm *idtcm = channel->idtcm;
+ int err;
+
+ mutex_lock(&idtcm->reg_lock);
+
+ err = _idtcm_adjtime(channel, delta);
+
+ mutex_unlock(&idtcm->reg_lock);
+
+ return err;
+}
+
+static int idtcm_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct idtcm_channel *channel =
+ container_of(ptp, struct idtcm_channel, caps);
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_PEROUT:
+ if (!on)
+ return idtcm_pps_enable(channel, false);
+
+ /* Only accept a 1-PPS aligned to the second. */
+ if (rq->perout.start.nsec || rq->perout.period.sec != 1 ||
+ rq->perout.period.nsec)
+ return -ERANGE;
+
+ return idtcm_pps_enable(channel, true);
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int idtcm_enable_tod(struct idtcm_channel *channel)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ struct timespec64 ts = {0, 0};
+ u8 cfg;
+ int err;
+
+ err = idtcm_pps_enable(channel, false);
+ if (err)
+ return err;
+
+ /*
+ * Start the TOD clock ticking.
+ */
+ err = idtcm_read(idtcm, channel->tod_n, TOD_CFG, &cfg, sizeof(cfg));
+ if (err)
+ return err;
+
+ cfg |= TOD_ENABLE;
+
+ err = idtcm_write(idtcm, channel->tod_n, TOD_CFG, &cfg, sizeof(cfg));
+ if (err)
+ return err;
+
+ return _idtcm_settime(channel, &ts, HW_TOD_WR_TRIG_SEL_MSB);
+}
+
+static void idtcm_display_version_info(struct idtcm *idtcm)
+{
+ u8 major;
+ u8 minor;
+ u8 hotfix;
+ u32 pipeline;
+ u16 product_id;
+ u16 csr_id;
+ u16 irq_id;
+ u8 hw_rev_id;
+ u8 bond_id;
+
+ idtcm_read_major_release(idtcm, &major);
+ idtcm_read_minor_release(idtcm, &minor);
+ idtcm_read_hotfix_release(idtcm, &hotfix);
+ idtcm_read_pipeline(idtcm, &pipeline);
+
+ idtcm_read_product_id(idtcm, &product_id);
+ idtcm_read_hw_rev_id(idtcm, &hw_rev_id);
+ idtcm_read_bond_id(idtcm, &bond_id);
+ idtcm_read_hw_csr_id(idtcm, &csr_id);
+ idtcm_read_hw_irq_id(idtcm, &irq_id);
+
+	dev_info(&idtcm->client->dev,
+		 "Version: %d.%d.%d, Pipeline %u, Product 0x%04x, Rev %d, Bond %d, CSR %d, IRQ %d\n",
+ major, minor, hotfix, pipeline,
+ product_id, hw_rev_id, bond_id, csr_id, irq_id);
+}
+
+static struct ptp_clock_info idtcm_caps = {
+ .owner = THIS_MODULE,
+ .max_adj = 244000,
+ .n_per_out = 1,
+ .adjfreq = &idtcm_adjfreq,
+ .adjtime = &idtcm_adjtime,
+ .gettime64 = &idtcm_gettime,
+ .settime64 = &idtcm_settime,
+ .enable = &idtcm_enable,
+};
+
+static int idtcm_enable_channel(struct idtcm *idtcm, u32 index)
+{
+ struct idtcm_channel *channel;
+ int err;
+
+	if (index >= MAX_PHC_PLL)
+ return -EINVAL;
+
+ channel = &idtcm->channel[index];
+
+ switch (index) {
+ case 0:
+ channel->dpll_freq = DPLL_FREQ_0;
+ channel->dpll_n = DPLL_0;
+ channel->tod_read_primary = TOD_READ_PRIMARY_0;
+ channel->tod_write = TOD_WRITE_0;
+ channel->tod_n = TOD_0;
+ channel->hw_dpll_n = HW_DPLL_0;
+ channel->dpll_phase = DPLL_PHASE_0;
+ channel->dpll_ctrl_n = DPLL_CTRL_0;
+ channel->dpll_phase_pull_in = DPLL_PHASE_PULL_IN_0;
+ break;
+ case 1:
+ channel->dpll_freq = DPLL_FREQ_1;
+ channel->dpll_n = DPLL_1;
+ channel->tod_read_primary = TOD_READ_PRIMARY_1;
+ channel->tod_write = TOD_WRITE_1;
+ channel->tod_n = TOD_1;
+ channel->hw_dpll_n = HW_DPLL_1;
+ channel->dpll_phase = DPLL_PHASE_1;
+ channel->dpll_ctrl_n = DPLL_CTRL_1;
+ channel->dpll_phase_pull_in = DPLL_PHASE_PULL_IN_1;
+ break;
+ case 2:
+ channel->dpll_freq = DPLL_FREQ_2;
+ channel->dpll_n = DPLL_2;
+ channel->tod_read_primary = TOD_READ_PRIMARY_2;
+ channel->tod_write = TOD_WRITE_2;
+ channel->tod_n = TOD_2;
+ channel->hw_dpll_n = HW_DPLL_2;
+ channel->dpll_phase = DPLL_PHASE_2;
+ channel->dpll_ctrl_n = DPLL_CTRL_2;
+ channel->dpll_phase_pull_in = DPLL_PHASE_PULL_IN_2;
+ break;
+ case 3:
+ channel->dpll_freq = DPLL_FREQ_3;
+ channel->dpll_n = DPLL_3;
+ channel->tod_read_primary = TOD_READ_PRIMARY_3;
+ channel->tod_write = TOD_WRITE_3;
+ channel->tod_n = TOD_3;
+ channel->hw_dpll_n = HW_DPLL_3;
+ channel->dpll_phase = DPLL_PHASE_3;
+ channel->dpll_ctrl_n = DPLL_CTRL_3;
+ channel->dpll_phase_pull_in = DPLL_PHASE_PULL_IN_3;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ channel->idtcm = idtcm;
+
+ channel->caps = idtcm_caps;
+ snprintf(channel->caps.name, sizeof(channel->caps.name),
+ "IDT CM PLL%u", index);
+
+ err = idtcm_set_pll_mode(channel, PLL_MODE_WRITE_FREQUENCY);
+ if (err)
+ return err;
+
+ err = idtcm_enable_tod(channel);
+ if (err)
+ return err;
+
+ channel->ptp_clock = ptp_clock_register(&channel->caps, NULL);
+
+ if (IS_ERR(channel->ptp_clock)) {
+ err = PTR_ERR(channel->ptp_clock);
+ channel->ptp_clock = NULL;
+ return err;
+ }
+
+ if (!channel->ptp_clock)
+ return -ENOTSUPP;
+
+ dev_info(&idtcm->client->dev, "PLL%d registered as ptp%d\n",
+ index, channel->ptp_clock->index);
+
+ return 0;
+}
+
+static void ptp_clock_unregister_all(struct idtcm *idtcm)
+{
+ u8 i;
+ struct idtcm_channel *channel;
+
+ for (i = 0; i < MAX_PHC_PLL; i++) {
+
+ channel = &idtcm->channel[i];
+
+ if (channel->ptp_clock)
+ ptp_clock_unregister(channel->ptp_clock);
+ }
+}
+
+static void set_default_masks(struct idtcm *idtcm)
+{
+ idtcm->pll_mask = DEFAULT_PLL_MASK;
+
+ idtcm->channel[0].output_mask = DEFAULT_OUTPUT_MASK_PLL0;
+ idtcm->channel[1].output_mask = DEFAULT_OUTPUT_MASK_PLL1;
+ idtcm->channel[2].output_mask = DEFAULT_OUTPUT_MASK_PLL2;
+ idtcm->channel[3].output_mask = DEFAULT_OUTPUT_MASK_PLL3;
+}
+
+static int set_tod_write_overhead(struct idtcm *idtcm)
+{
+ int err;
+ u8 i;
+
+ s64 total_ns = 0;
+
+ ktime_t start;
+ ktime_t stop;
+
+	u8 buf[TOD_BYTE_COUNT] = {0}; /* zeroed: the contents reach the device */
+
+ struct idtcm_channel *channel = &idtcm->channel[2];
+
+ /* Set page offset */
+ idtcm_write(idtcm, channel->hw_dpll_n, HW_DPLL_TOD_OVR__0,
+ buf, sizeof(buf));
+
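+	/*
+	 * Time TOD_WRITE_OVERHEAD_COUNT_MAX dummy TOD writes and average
+	 * the cost; _idtcm_set_dpll_tod() later adds this to settime
+	 * requests issued from adjtime so the step lands closer to the
+	 * intended value.
+	 */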
+ for (i = 0; i < TOD_WRITE_OVERHEAD_COUNT_MAX; i++) {
+
+ start = ktime_get_raw();
+
+ err = idtcm_write(idtcm, channel->hw_dpll_n,
+ HW_DPLL_TOD_OVR__0, buf, sizeof(buf));
+
+ if (err)
+ return err;
+
+ stop = ktime_get_raw();
+
+ total_ns += ktime_to_ns(stop - start);
+ }
+
+ idtcm->tod_write_overhead_ns = div_s64(total_ns,
+ TOD_WRITE_OVERHEAD_COUNT_MAX);
+
+ return err;
+}
+
+static int idtcm_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct idtcm *idtcm;
+ int err;
+ u8 i;
+
+ /* Unused for now */
+ (void)id;
+
+ idtcm = devm_kzalloc(&client->dev, sizeof(struct idtcm), GFP_KERNEL);
+
+ if (!idtcm)
+ return -ENOMEM;
+
+ idtcm->client = client;
+ idtcm->page_offset = 0xff;
+ idtcm->calculate_overhead_flag = 0;
+
+ set_default_masks(idtcm);
+
+ mutex_init(&idtcm->reg_lock);
+ mutex_lock(&idtcm->reg_lock);
+
+ idtcm_display_version_info(idtcm);
+
+ err = set_tod_write_overhead(idtcm);
+
+	if (err) {
+		mutex_unlock(&idtcm->reg_lock);
+		return err;
+	}
+
+ err = idtcm_load_firmware(idtcm, &client->dev);
+
+ if (err)
+ dev_warn(&idtcm->client->dev,
+ "loading firmware failed with %d\n", err);
+
+ if (idtcm->pll_mask) {
+ for (i = 0; i < MAX_PHC_PLL; i++) {
+ if (idtcm->pll_mask & (1 << i)) {
+ err = idtcm_enable_channel(idtcm, i);
+ if (err)
+ break;
+ }
+ }
+ } else {
+ dev_err(&idtcm->client->dev,
+ "no PLLs flagged as PHCs, nothing to do\n");
+ err = -ENODEV;
+ }
+
+ mutex_unlock(&idtcm->reg_lock);
+
+ if (err) {
+ ptp_clock_unregister_all(idtcm);
+ return err;
+ }
+
+ i2c_set_clientdata(client, idtcm);
+
+ return 0;
+}
+
+static int idtcm_remove(struct i2c_client *client)
+{
+ struct idtcm *idtcm = i2c_get_clientdata(client);
+
+ ptp_clock_unregister_all(idtcm);
+
+ mutex_destroy(&idtcm->reg_lock);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id idtcm_dt_id[] = {
+ { .compatible = "idt,8a34000" },
+ { .compatible = "idt,8a34001" },
+ { .compatible = "idt,8a34002" },
+ { .compatible = "idt,8a34003" },
+ { .compatible = "idt,8a34004" },
+ { .compatible = "idt,8a34005" },
+ { .compatible = "idt,8a34006" },
+ { .compatible = "idt,8a34007" },
+ { .compatible = "idt,8a34008" },
+ { .compatible = "idt,8a34009" },
+ { .compatible = "idt,8a34010" },
+ { .compatible = "idt,8a34011" },
+ { .compatible = "idt,8a34012" },
+ { .compatible = "idt,8a34013" },
+ { .compatible = "idt,8a34014" },
+ { .compatible = "idt,8a34015" },
+ { .compatible = "idt,8a34016" },
+ { .compatible = "idt,8a34017" },
+ { .compatible = "idt,8a34018" },
+ { .compatible = "idt,8a34019" },
+ { .compatible = "idt,8a34040" },
+ { .compatible = "idt,8a34041" },
+ { .compatible = "idt,8a34042" },
+ { .compatible = "idt,8a34043" },
+ { .compatible = "idt,8a34044" },
+ { .compatible = "idt,8a34045" },
+ { .compatible = "idt,8a34046" },
+ { .compatible = "idt,8a34047" },
+ { .compatible = "idt,8a34048" },
+ { .compatible = "idt,8a34049" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, idtcm_dt_id);
+#endif
+
+static const struct i2c_device_id idtcm_i2c_id[] = {
+ { "8a34000" },
+ { "8a34001" },
+ { "8a34002" },
+ { "8a34003" },
+ { "8a34004" },
+ { "8a34005" },
+ { "8a34006" },
+ { "8a34007" },
+ { "8a34008" },
+ { "8a34009" },
+ { "8a34010" },
+ { "8a34011" },
+ { "8a34012" },
+ { "8a34013" },
+ { "8a34014" },
+ { "8a34015" },
+ { "8a34016" },
+ { "8a34017" },
+ { "8a34018" },
+ { "8a34019" },
+ { "8a34040" },
+ { "8a34041" },
+ { "8a34042" },
+ { "8a34043" },
+ { "8a34044" },
+ { "8a34045" },
+ { "8a34046" },
+ { "8a34047" },
+ { "8a34048" },
+ { "8a34049" },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, idtcm_i2c_id);
+
+static struct i2c_driver idtcm_driver = {
+ .driver = {
+ .of_match_table = of_match_ptr(idtcm_dt_id),
+ .name = "idtcm",
+ },
+ .probe = idtcm_probe,
+ .remove = idtcm_remove,
+ .id_table = idtcm_i2c_id,
+};
+
+module_i2c_driver(idtcm_driver);
diff --git a/drivers/ptp/ptp_clockmatrix.h b/drivers/ptp/ptp_clockmatrix.h
new file mode 100644
index 000000000000..6c1f93ab46f3
--- /dev/null
+++ b/drivers/ptp/ptp_clockmatrix.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * PTP hardware clock driver for the IDT ClockMatrix(TM) family of timing and
+ * synchronization devices.
+ *
+ * Copyright (C) 2019 Integrated Device Technology, Inc., a Renesas Company.
+ */
+#ifndef PTP_IDTCLOCKMATRIX_H
+#define PTP_IDTCLOCKMATRIX_H
+
+#include <linux/ktime.h>
+
+#include "idt8a340_reg.h"
+
+#define FW_FILENAME "idtcm.bin"
+#define MAX_PHC_PLL 4
+
+#define PLL_MASK_ADDR (0xFFA5)
+#define DEFAULT_PLL_MASK (0x04)
+
+#define SET_U16_LSB(orig, val8) (orig = (0xff00 & (orig)) | (val8))
+#define SET_U16_MSB(orig, val8) (orig = (0x00ff & (orig)) | ((val8) << 8))
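+/* e.g. SET_U16_MSB(mask, 0x12) turns 0x0034 into 0x1234 */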
+
+#define OUTPUT_MASK_PLL0_ADDR (0xFFB0)
+#define OUTPUT_MASK_PLL1_ADDR (0xFFB2)
+#define OUTPUT_MASK_PLL2_ADDR (0xFFB4)
+#define OUTPUT_MASK_PLL3_ADDR (0xFFB6)
+
+#define DEFAULT_OUTPUT_MASK_PLL0 (0x003)
+#define DEFAULT_OUTPUT_MASK_PLL1 (0x00c)
+#define DEFAULT_OUTPUT_MASK_PLL2 (0x030)
+#define DEFAULT_OUTPUT_MASK_PLL3 (0x0c0)
+
+#define POST_SM_RESET_DELAY_MS (3000)
+#define PHASE_PULL_IN_THRESHOLD_NS (150000)
+#define TOD_WRITE_OVERHEAD_COUNT_MAX (5)
+#define TOD_BYTE_COUNT (11)
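+/* 11 = 1 sub-ns byte + 4 ns bytes + 6 seconds bytes */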
+
+/* Values of DPLL_N.DPLL_MODE.PLL_MODE */
+enum pll_mode {
+ PLL_MODE_MIN = 0,
+ PLL_MODE_NORMAL = PLL_MODE_MIN,
+ PLL_MODE_WRITE_PHASE = 1,
+ PLL_MODE_WRITE_FREQUENCY = 2,
+ PLL_MODE_GPIO_INC_DEC = 3,
+ PLL_MODE_SYNTHESIS = 4,
+ PLL_MODE_PHASE_MEASUREMENT = 5,
+ PLL_MODE_MAX = PLL_MODE_PHASE_MEASUREMENT,
+};
+
+enum hw_tod_write_trig_sel {
+ HW_TOD_WR_TRIG_SEL_MIN = 0,
+ HW_TOD_WR_TRIG_SEL_MSB = HW_TOD_WR_TRIG_SEL_MIN,
+ HW_TOD_WR_TRIG_SEL_RESERVED = 1,
+ HW_TOD_WR_TRIG_SEL_TOD_PPS = 2,
+ HW_TOD_WR_TRIG_SEL_IRIGB_PPS = 3,
+ HW_TOD_WR_TRIG_SEL_PWM_PPS = 4,
+ HW_TOD_WR_TRIG_SEL_GPIO = 5,
+ HW_TOD_WR_TRIG_SEL_FOD_SYNC = 6,
+ WR_TRIG_SEL_MAX = HW_TOD_WR_TRIG_SEL_FOD_SYNC,
+};
+
+struct idtcm;
+
+struct idtcm_channel {
+ struct ptp_clock_info caps;
+ struct ptp_clock *ptp_clock;
+ struct idtcm *idtcm;
+ u16 dpll_phase;
+ u16 dpll_freq;
+ u16 dpll_n;
+ u16 dpll_ctrl_n;
+ u16 dpll_phase_pull_in;
+ u16 tod_read_primary;
+ u16 tod_write;
+ u16 tod_n;
+ u16 hw_dpll_n;
+ enum pll_mode pll_mode;
+ u16 output_mask;
+};
+
+struct idtcm {
+ struct idtcm_channel channel[MAX_PHC_PLL];
+ struct i2c_client *client;
+ u8 page_offset;
+ u8 pll_mask;
+
+ /* Overhead calculation for adjtime */
+ u8 calculate_overhead_flag;
+ s64 tod_write_overhead_ns;
+ ktime_t start_time;
+
+ /* Protects I2C read/modify/write registers from concurrent access */
+ struct mutex reg_lock;
+};
+
+struct idtcm_fwrc {
+ u8 hiaddr;
+ u8 loaddr;
+ u8 value;
+ u8 reserved;
+} __packed;
+
+#endif /* PTP_IDTCLOCKMATRIX_H */
diff --git a/drivers/ptp/ptp_dte.c b/drivers/ptp/ptp_dte.c
index 0dcfdc806f57..82d31ba32690 100644
--- a/drivers/ptp/ptp_dte.c
+++ b/drivers/ptp/ptp_dte.c
@@ -240,14 +240,12 @@ static int ptp_dte_probe(struct platform_device *pdev)
{
struct ptp_dte *ptp_dte;
struct device *dev = &pdev->dev;
- struct resource *res;
ptp_dte = devm_kzalloc(dev, sizeof(struct ptp_dte), GFP_KERNEL);
if (!ptp_dte)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ptp_dte->regs = devm_ioremap_resource(dev, res);
+ ptp_dte->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ptp_dte->regs))
return PTR_ERR(ptp_dte->regs);
diff --git a/drivers/ptp/ptp_qoriq.c b/drivers/ptp/ptp_qoriq.c
index c61f00b72e15..a577218d1ab7 100644
--- a/drivers/ptp/ptp_qoriq.c
+++ b/drivers/ptp/ptp_qoriq.c
@@ -507,6 +507,8 @@ int ptp_qoriq_init(struct ptp_qoriq *ptp_qoriq, void __iomem *base,
ptp_qoriq->regs.etts_regs = base + ETTS_REGS_OFFSET;
}
+ spin_lock_init(&ptp_qoriq->lock);
+
ktime_get_real_ts64(&now);
ptp_qoriq_settime(&ptp_qoriq->caps, &now);
@@ -514,7 +516,6 @@ int ptp_qoriq_init(struct ptp_qoriq *ptp_qoriq, void __iomem *base,
(ptp_qoriq->tclk_period & TCLK_PERIOD_MASK) << TCLK_PERIOD_SHIFT |
(ptp_qoriq->cksel & CKSEL_MASK) << CKSEL_SHIFT;
- spin_lock_init(&ptp_qoriq->lock);
spin_lock_irqsave(&ptp_qoriq->lock, flags);
regs = &ptp_qoriq->regs;
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index afe94470b67f..a46be221dbdc 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -5053,6 +5053,19 @@ regulator_register(const struct regulator_desc *regulator_desc,
init_data = regulator_of_get_init_data(dev, regulator_desc, config,
&rdev->dev.of_node);
+
+ /*
+	 * Sometimes not all resources are probed yet, so we need to take
+	 * that into account. This happens most of the time when the ena_gpiod
+	 * comes from a GPIO extender or something similar.
+ */
+ if (PTR_ERR(init_data) == -EPROBE_DEFER) {
+ kfree(config);
+ kfree(rdev);
+ ret = -EPROBE_DEFER;
+ goto rinse;
+ }
+
/*
* We need to keep track of any GPIO descriptor coming from the
* device tree until we have handled it over to the core. If the
diff --git a/drivers/regulator/da9062-regulator.c b/drivers/regulator/da9062-regulator.c
index 56f3f72d7707..710e67081d53 100644
--- a/drivers/regulator/da9062-regulator.c
+++ b/drivers/regulator/da9062-regulator.c
@@ -136,7 +136,6 @@ static int da9062_buck_set_mode(struct regulator_dev *rdev, unsigned mode)
static unsigned da9062_buck_get_mode(struct regulator_dev *rdev)
{
struct da9062_regulator *regl = rdev_get_drvdata(rdev);
- struct regmap_field *field;
unsigned int val, mode = 0;
int ret;
@@ -158,18 +157,7 @@ static unsigned da9062_buck_get_mode(struct regulator_dev *rdev)
return REGULATOR_MODE_NORMAL;
}
- /* Detect current regulator state */
- ret = regmap_field_read(regl->suspend, &val);
- if (ret < 0)
- return 0;
-
- /* Read regulator mode from proper register, depending on state */
- if (val)
- field = regl->suspend_sleep;
- else
- field = regl->sleep;
-
- ret = regmap_field_read(field, &val);
+ ret = regmap_field_read(regl->sleep, &val);
if (ret < 0)
return 0;
@@ -208,21 +196,9 @@ static int da9062_ldo_set_mode(struct regulator_dev *rdev, unsigned mode)
static unsigned da9062_ldo_get_mode(struct regulator_dev *rdev)
{
struct da9062_regulator *regl = rdev_get_drvdata(rdev);
- struct regmap_field *field;
int ret, val;
- /* Detect current regulator state */
- ret = regmap_field_read(regl->suspend, &val);
- if (ret < 0)
- return 0;
-
- /* Read regulator mode from proper register, depending on state */
- if (val)
- field = regl->suspend_sleep;
- else
- field = regl->sleep;
-
- ret = regmap_field_read(field, &val);
+ ret = regmap_field_read(regl->sleep, &val);
if (ret < 0)
return 0;
@@ -408,10 +384,10 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = {
__builtin_ffs((int)DA9062AA_BUCK1_MODE_MASK) - 1,
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_BUCK1_MODE_MASK)) - 1),
- .suspend = REG_FIELD(DA9062AA_DVC_1,
- __builtin_ffs((int)DA9062AA_VBUCK1_SEL_MASK) - 1,
+ .suspend = REG_FIELD(DA9062AA_BUCK1_CONT,
+ __builtin_ffs((int)DA9062AA_BUCK1_CONF_MASK) - 1,
sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_VBUCK1_SEL_MASK)) - 1),
+ __builtin_clz(DA9062AA_BUCK1_CONF_MASK) - 1),
},
{
.desc.id = DA9061_ID_BUCK2,
@@ -444,10 +420,10 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = {
__builtin_ffs((int)DA9062AA_BUCK3_MODE_MASK) - 1,
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_BUCK3_MODE_MASK)) - 1),
- .suspend = REG_FIELD(DA9062AA_DVC_1,
- __builtin_ffs((int)DA9062AA_VBUCK3_SEL_MASK) - 1,
+ .suspend = REG_FIELD(DA9062AA_BUCK3_CONT,
+ __builtin_ffs((int)DA9062AA_BUCK3_CONF_MASK) - 1,
sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_VBUCK3_SEL_MASK)) - 1),
+ __builtin_clz(DA9062AA_BUCK3_CONF_MASK) - 1),
},
{
.desc.id = DA9061_ID_BUCK3,
@@ -480,10 +456,10 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = {
__builtin_ffs((int)DA9062AA_BUCK4_MODE_MASK) - 1,
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_BUCK4_MODE_MASK)) - 1),
- .suspend = REG_FIELD(DA9062AA_DVC_1,
- __builtin_ffs((int)DA9062AA_VBUCK4_SEL_MASK) - 1,
+ .suspend = REG_FIELD(DA9062AA_BUCK4_CONT,
+ __builtin_ffs((int)DA9062AA_BUCK4_CONF_MASK) - 1,
sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_VBUCK4_SEL_MASK)) - 1),
+ __builtin_clz(DA9062AA_BUCK4_CONF_MASK) - 1),
},
{
.desc.id = DA9061_ID_LDO1,
@@ -509,10 +485,10 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = {
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_LDO1_SL_B_MASK)) - 1),
.suspend_vsel_reg = DA9062AA_VLDO1_B,
- .suspend = REG_FIELD(DA9062AA_DVC_1,
- __builtin_ffs((int)DA9062AA_VLDO1_SEL_MASK) - 1,
+ .suspend = REG_FIELD(DA9062AA_LDO1_CONT,
+ __builtin_ffs((int)DA9062AA_LDO1_CONF_MASK) - 1,
sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_VLDO1_SEL_MASK)) - 1),
+ __builtin_clz(DA9062AA_LDO1_CONF_MASK) - 1),
.oc_event = REG_FIELD(DA9062AA_STATUS_D,
__builtin_ffs((int)DA9062AA_LDO1_ILIM_MASK) - 1,
sizeof(unsigned int) * 8 -
@@ -542,10 +518,10 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = {
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_LDO2_SL_B_MASK)) - 1),
.suspend_vsel_reg = DA9062AA_VLDO2_B,
- .suspend = REG_FIELD(DA9062AA_DVC_1,
- __builtin_ffs((int)DA9062AA_VLDO2_SEL_MASK) - 1,
+ .suspend = REG_FIELD(DA9062AA_LDO2_CONT,
+ __builtin_ffs((int)DA9062AA_LDO2_CONF_MASK) - 1,
sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_VLDO2_SEL_MASK)) - 1),
+ __builtin_clz(DA9062AA_LDO2_CONF_MASK) - 1),
.oc_event = REG_FIELD(DA9062AA_STATUS_D,
__builtin_ffs((int)DA9062AA_LDO2_ILIM_MASK) - 1,
sizeof(unsigned int) * 8 -
@@ -575,10 +551,10 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = {
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_LDO3_SL_B_MASK)) - 1),
.suspend_vsel_reg = DA9062AA_VLDO3_B,
- .suspend = REG_FIELD(DA9062AA_DVC_1,
- __builtin_ffs((int)DA9062AA_VLDO3_SEL_MASK) - 1,
+ .suspend = REG_FIELD(DA9062AA_LDO3_CONT,
+ __builtin_ffs((int)DA9062AA_LDO3_CONF_MASK) - 1,
sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_VLDO3_SEL_MASK)) - 1),
+ __builtin_clz(DA9062AA_LDO3_CONF_MASK) - 1),
.oc_event = REG_FIELD(DA9062AA_STATUS_D,
__builtin_ffs((int)DA9062AA_LDO3_ILIM_MASK) - 1,
sizeof(unsigned int) * 8 -
@@ -608,10 +584,10 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = {
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_LDO4_SL_B_MASK)) - 1),
.suspend_vsel_reg = DA9062AA_VLDO4_B,
- .suspend = REG_FIELD(DA9062AA_DVC_1,
- __builtin_ffs((int)DA9062AA_VLDO4_SEL_MASK) - 1,
+ .suspend = REG_FIELD(DA9062AA_LDO4_CONT,
+ __builtin_ffs((int)DA9062AA_LDO4_CONF_MASK) - 1,
sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_VLDO4_SEL_MASK)) - 1),
+ __builtin_clz(DA9062AA_LDO4_CONF_MASK) - 1),
.oc_event = REG_FIELD(DA9062AA_STATUS_D,
__builtin_ffs((int)DA9062AA_LDO4_ILIM_MASK) - 1,
sizeof(unsigned int) * 8 -
@@ -652,10 +628,10 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
__builtin_ffs((int)DA9062AA_BUCK1_MODE_MASK) - 1,
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_BUCK1_MODE_MASK)) - 1),
- .suspend = REG_FIELD(DA9062AA_DVC_1,
- __builtin_ffs((int)DA9062AA_VBUCK1_SEL_MASK) - 1,
+ .suspend = REG_FIELD(DA9062AA_BUCK1_CONT,
+ __builtin_ffs((int)DA9062AA_BUCK1_CONF_MASK) - 1,
sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_VBUCK1_SEL_MASK)) - 1),
+ __builtin_clz(DA9062AA_BUCK1_CONF_MASK) - 1),
},
{
.desc.id = DA9062_ID_BUCK2,
@@ -688,10 +664,10 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
__builtin_ffs((int)DA9062AA_BUCK2_MODE_MASK) - 1,
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_BUCK2_MODE_MASK)) - 1),
- .suspend = REG_FIELD(DA9062AA_DVC_1,
- __builtin_ffs((int)DA9062AA_VBUCK2_SEL_MASK) - 1,
+ .suspend = REG_FIELD(DA9062AA_BUCK2_CONT,
+ __builtin_ffs((int)DA9062AA_BUCK2_CONF_MASK) - 1,
sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_VBUCK2_SEL_MASK)) - 1),
+ __builtin_clz(DA9062AA_BUCK2_CONF_MASK) - 1),
},
{
.desc.id = DA9062_ID_BUCK3,
@@ -724,10 +700,10 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
__builtin_ffs((int)DA9062AA_BUCK3_MODE_MASK) - 1,
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_BUCK3_MODE_MASK)) - 1),
- .suspend = REG_FIELD(DA9062AA_DVC_1,
- __builtin_ffs((int)DA9062AA_VBUCK3_SEL_MASK) - 1,
+ .suspend = REG_FIELD(DA9062AA_BUCK3_CONT,
+ __builtin_ffs((int)DA9062AA_BUCK3_CONF_MASK) - 1,
sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_VBUCK3_SEL_MASK)) - 1),
+ __builtin_clz(DA9062AA_BUCK3_CONF_MASK) - 1),
},
{
.desc.id = DA9062_ID_BUCK4,
@@ -760,10 +736,10 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
__builtin_ffs((int)DA9062AA_BUCK4_MODE_MASK) - 1,
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_BUCK4_MODE_MASK)) - 1),
- .suspend = REG_FIELD(DA9062AA_DVC_1,
- __builtin_ffs((int)DA9062AA_VBUCK4_SEL_MASK) - 1,
+ .suspend = REG_FIELD(DA9062AA_BUCK4_CONT,
+ __builtin_ffs((int)DA9062AA_BUCK4_CONF_MASK) - 1,
sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_VBUCK4_SEL_MASK)) - 1),
+ __builtin_clz(DA9062AA_BUCK4_CONF_MASK) - 1),
},
{
.desc.id = DA9062_ID_LDO1,
@@ -789,10 +765,10 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_LDO1_SL_B_MASK)) - 1),
.suspend_vsel_reg = DA9062AA_VLDO1_B,
- .suspend = REG_FIELD(DA9062AA_DVC_1,
- __builtin_ffs((int)DA9062AA_VLDO1_SEL_MASK) - 1,
+ .suspend = REG_FIELD(DA9062AA_LDO1_CONT,
+ __builtin_ffs((int)DA9062AA_LDO1_CONF_MASK) - 1,
sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_VLDO1_SEL_MASK)) - 1),
+ __builtin_clz(DA9062AA_LDO1_CONF_MASK) - 1),
.oc_event = REG_FIELD(DA9062AA_STATUS_D,
__builtin_ffs((int)DA9062AA_LDO1_ILIM_MASK) - 1,
sizeof(unsigned int) * 8 -
@@ -822,10 +798,10 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_LDO2_SL_B_MASK)) - 1),
.suspend_vsel_reg = DA9062AA_VLDO2_B,
- .suspend = REG_FIELD(DA9062AA_DVC_1,
- __builtin_ffs((int)DA9062AA_VLDO2_SEL_MASK) - 1,
+ .suspend = REG_FIELD(DA9062AA_LDO2_CONT,
+ __builtin_ffs((int)DA9062AA_LDO2_CONF_MASK) - 1,
sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_VLDO2_SEL_MASK)) - 1),
+ __builtin_clz(DA9062AA_LDO2_CONF_MASK) - 1),
.oc_event = REG_FIELD(DA9062AA_STATUS_D,
__builtin_ffs((int)DA9062AA_LDO2_ILIM_MASK) - 1,
sizeof(unsigned int) * 8 -
@@ -855,10 +831,10 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_LDO3_SL_B_MASK)) - 1),
.suspend_vsel_reg = DA9062AA_VLDO3_B,
- .suspend = REG_FIELD(DA9062AA_DVC_1,
- __builtin_ffs((int)DA9062AA_VLDO3_SEL_MASK) - 1,
+ .suspend = REG_FIELD(DA9062AA_LDO3_CONT,
+ __builtin_ffs((int)DA9062AA_LDO3_CONF_MASK) - 1,
sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_VLDO3_SEL_MASK)) - 1),
+ __builtin_clz(DA9062AA_LDO3_CONF_MASK) - 1),
.oc_event = REG_FIELD(DA9062AA_STATUS_D,
__builtin_ffs((int)DA9062AA_LDO3_ILIM_MASK) - 1,
sizeof(unsigned int) * 8 -
@@ -888,10 +864,10 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
sizeof(unsigned int) * 8 -
__builtin_clz((DA9062AA_LDO4_SL_B_MASK)) - 1),
.suspend_vsel_reg = DA9062AA_VLDO4_B,
- .suspend = REG_FIELD(DA9062AA_DVC_1,
- __builtin_ffs((int)DA9062AA_VLDO4_SEL_MASK) - 1,
+ .suspend = REG_FIELD(DA9062AA_LDO4_CONT,
+ __builtin_ffs((int)DA9062AA_LDO4_CONF_MASK) - 1,
sizeof(unsigned int) * 8 -
- __builtin_clz((DA9062AA_VLDO4_SEL_MASK)) - 1),
+ __builtin_clz(DA9062AA_LDO4_CONF_MASK) - 1),
.oc_event = REG_FIELD(DA9062AA_STATUS_D,
__builtin_ffs((int)DA9062AA_LDO4_ILIM_MASK) - 1,
sizeof(unsigned int) * 8 -
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index d90a6fd8cbc7..f81533070058 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -144,8 +144,7 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct fixed_voltage_config *config;
struct fixed_voltage_data *drvdata;
- const struct fixed_dev_type *drvtype =
- of_match_device(dev->driver->of_match_table, dev)->data;
+ const struct fixed_dev_type *drvtype = of_device_get_match_data(dev);
struct regulator_config cfg = { };
enum gpiod_flags gflags;
int ret;
@@ -177,7 +176,7 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
drvdata->desc.type = REGULATOR_VOLTAGE;
drvdata->desc.owner = THIS_MODULE;
- if (drvtype->has_enable_clock) {
+ if (drvtype && drvtype->has_enable_clock) {
drvdata->desc.ops = &fixed_voltage_clkenabled_ops;
drvdata->enable_clock = devm_clk_get(dev, NULL);
diff --git a/drivers/regulator/lochnagar-regulator.c b/drivers/regulator/lochnagar-regulator.c
index ff97cc50f2eb..9b05e03ba830 100644
--- a/drivers/regulator/lochnagar-regulator.c
+++ b/drivers/regulator/lochnagar-regulator.c
@@ -210,6 +210,7 @@ static const struct regulator_desc lochnagar_regulators[] = {
.enable_time = 3000,
.ramp_delay = 1000,
+ .off_on_delay = 15000,
.owner = THIS_MODULE,
},
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index afefb29ce1b0..87637eb6bcbc 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -231,12 +231,12 @@ static int of_get_regulation_constraints(struct device *dev,
"regulator-off-in-suspend"))
suspend_state->enabled = DISABLE_IN_SUSPEND;
- if (!of_property_read_u32(np, "regulator-suspend-min-microvolt",
- &pval))
+ if (!of_property_read_u32(suspend_np,
+ "regulator-suspend-min-microvolt", &pval))
suspend_state->min_uV = pval;
- if (!of_property_read_u32(np, "regulator-suspend-max-microvolt",
- &pval))
+ if (!of_property_read_u32(suspend_np,
+ "regulator-suspend-max-microvolt", &pval))
suspend_state->max_uV = pval;
if (!of_property_read_u32(suspend_np,
@@ -445,11 +445,20 @@ struct regulator_init_data *regulator_of_get_init_data(struct device *dev,
goto error;
}
- if (desc->of_parse_cb && desc->of_parse_cb(child, desc, config)) {
- dev_err(dev,
- "driver callback failed to parse DT for regulator %pOFn\n",
- child);
- goto error;
+ if (desc->of_parse_cb) {
+ int ret;
+
+ ret = desc->of_parse_cb(child, desc, config);
+ if (ret) {
+ if (ret == -EPROBE_DEFER) {
+ of_node_put(child);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+ dev_err(dev,
+ "driver callback failed to parse DT for regulator %pOFn\n",
+ child);
+ goto error;
+ }
}
*node = child;
diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
index df5df1c495ad..689537927f6f 100644
--- a/drivers/regulator/pfuze100-regulator.c
+++ b/drivers/regulator/pfuze100-regulator.c
@@ -788,7 +788,13 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
/* SW2~SW4 high bit check and modify the voltage value table */
if (i >= sw_check_start && i <= sw_check_end) {
- regmap_read(pfuze_chip->regmap, desc->vsel_reg, &val);
+ ret = regmap_read(pfuze_chip->regmap,
+ desc->vsel_reg, &val);
+ if (ret) {
+				dev_err(&client->dev, "Failed to read from the register.\n");
+ return ret;
+ }
+
if (val & sw_hi) {
if (pfuze_chip->chip_id == PFUZE3000 ||
pfuze_chip->chip_id == PFUZE3001) {
diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
index db6c085da65e..0246b6f99fb5 100644
--- a/drivers/regulator/qcom-rpmh-regulator.c
+++ b/drivers/regulator/qcom-rpmh-regulator.c
@@ -735,8 +735,8 @@ static const struct rpmh_vreg_hw_data pmic5_hfsmps515 = {
static const struct rpmh_vreg_hw_data pmic5_bob = {
.regulator_type = VRM,
.ops = &rpmh_regulator_vrm_bypass_ops,
- .voltage_range = REGULATOR_LINEAR_RANGE(300000, 0, 135, 32000),
- .n_voltages = 136,
+ .voltage_range = REGULATOR_LINEAR_RANGE(3000000, 0, 31, 32000),
+ .n_voltages = 32,
.pmic_mode_map = pmic_mode_map_pmic5_bob,
.of_map_mode = rpmh_regulator_pmic4_bob_of_map_mode,
};
diff --git a/drivers/regulator/ti-abb-regulator.c b/drivers/regulator/ti-abb-regulator.c
index cced1ffb896c..89b9314d64c9 100644
--- a/drivers/regulator/ti-abb-regulator.c
+++ b/drivers/regulator/ti-abb-regulator.c
@@ -173,19 +173,14 @@ static int ti_abb_wait_txdone(struct device *dev, struct ti_abb *abb)
while (timeout++ <= abb->settling_time) {
status = ti_abb_check_txdone(abb);
if (status)
- break;
+ return 0;
udelay(1);
}
- if (timeout > abb->settling_time) {
- dev_warn_ratelimited(dev,
- "%s:TRANXDONE timeout(%duS) int=0x%08x\n",
- __func__, timeout, readl(abb->int_base));
- return -ETIMEDOUT;
- }
-
- return 0;
+ dev_warn_ratelimited(dev, "%s:TRANXDONE timeout(%duS) int=0x%08x\n",
+ __func__, timeout, readl(abb->int_base));
+ return -ETIMEDOUT;
}
/**
@@ -205,19 +200,14 @@ static int ti_abb_clear_all_txdone(struct device *dev, const struct ti_abb *abb)
status = ti_abb_check_txdone(abb);
if (!status)
- break;
+ return 0;
udelay(1);
}
- if (timeout > abb->settling_time) {
- dev_warn_ratelimited(dev,
- "%s:TRANXDONE timeout(%duS) int=0x%08x\n",
- __func__, timeout, readl(abb->int_base));
- return -ETIMEDOUT;
- }
-
- return 0;
+ dev_warn_ratelimited(dev, "%s:TRANXDONE timeout(%duS) int=0x%08x\n",
+ __func__, timeout, readl(abb->int_base));
+ return -ETIMEDOUT;
}
/**
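
Both ti-abb hunks restructure the polling loops the same way: success returns 0 from inside the loop, so only the timeout path falls through to the warning and -ETIMEDOUT, and the redundant "timeout > settling_time" re-check disappears. A runnable sketch of the early-return shape (the status source is simulated):

#include <stdio.h>

/* Simulated status register: becomes ready after a few polls. */
static int polls_left = 5;

static int check_txdone(void)
{
	return --polls_left <= 0;
}

static int wait_txdone(int settling_time_us)
{
	int timeout = 0;

	while (timeout++ <= settling_time_us) {
		if (check_txdone())
			return 0;	/* done: no post-loop checks needed */
		/* udelay(1) would go here in the kernel */
	}

	fprintf(stderr, "TRANXDONE timeout(%duS)\n", timeout);
	return -1;			/* stand-in for -ETIMEDOUT */
}

int main(void)
{
	printf("wait_txdone -> %d\n", wait_txdone(50));	/* succeeds */
	polls_left = 1000;
	printf("wait_txdone -> %d\n", wait_txdone(50));	/* times out */
	return 0;
}
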
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index fc53e1e221f0..c94184d080f8 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1553,8 +1553,8 @@ static int dasd_eckd_read_vol_info(struct dasd_device *device)
if (rc == 0) {
memcpy(&private->vsq, vsq, sizeof(*vsq));
} else {
- dev_warn(&device->cdev->dev,
- "Reading the volume storage information failed with rc=%d\n", rc);
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+ "Reading the volume storage information failed with rc=%d", rc);
}
if (useglobal)
@@ -1737,8 +1737,8 @@ static int dasd_eckd_read_ext_pool_info(struct dasd_device *device)
if (rc == 0) {
dasd_eckd_cpy_ext_pool_data(device, lcq);
} else {
- dev_warn(&device->cdev->dev,
- "Reading the logical configuration failed with rc=%d\n", rc);
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+ "Reading the logical configuration failed with rc=%d", rc);
}
dasd_sfree_request(cqr, cqr->memdev);
@@ -2020,14 +2020,10 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
dasd_eckd_read_features(device);
/* Read Volume Information */
- rc = dasd_eckd_read_vol_info(device);
- if (rc)
- goto out_err3;
+ dasd_eckd_read_vol_info(device);
/* Read Extent Pool Information */
- rc = dasd_eckd_read_ext_pool_info(device);
- if (rc)
- goto out_err3;
+ dasd_eckd_read_ext_pool_info(device);
/* Read Device Characteristics */
rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
@@ -2059,9 +2055,6 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
if (readonly)
set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
- if (dasd_eckd_is_ese(device))
- dasd_set_feature(device->cdev, DASD_FEATURE_DISCARD, 1);
-
dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) "
"with %d cylinders, %d heads, %d sectors%s\n",
private->rdc_data.dev_type,
@@ -3695,14 +3688,6 @@ static int dasd_eckd_release_space(struct dasd_device *device,
return -EINVAL;
}
-static struct dasd_ccw_req *
-dasd_eckd_build_cp_discard(struct dasd_device *device, struct dasd_block *block,
- struct request *req, sector_t first_trk,
- sector_t last_trk)
-{
- return dasd_eckd_dso_ras(device, block, req, first_trk, last_trk, 1);
-}
-
static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
struct dasd_device *startdev,
struct dasd_block *block,
@@ -4447,10 +4432,6 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
cmdwtd = private->features.feature[12] & 0x40;
use_prefix = private->features.feature[8] & 0x01;
- if (req_op(req) == REQ_OP_DISCARD)
- return dasd_eckd_build_cp_discard(startdev, block, req,
- first_trk, last_trk);
-
cqr = NULL;
if (cdlspecial || dasd_page_cache) {
/* do nothing, just fall through to the cmd mode single case */
@@ -4729,14 +4710,12 @@ static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
struct dasd_block *block,
struct request *req)
{
- struct dasd_device *startdev = NULL;
struct dasd_eckd_private *private;
- struct dasd_ccw_req *cqr;
+ struct dasd_device *startdev;
unsigned long flags;
+ struct dasd_ccw_req *cqr;
- /* Discard requests can only be processed on base devices */
- if (req_op(req) != REQ_OP_DISCARD)
- startdev = dasd_alias_get_start_dev(base);
+ startdev = dasd_alias_get_start_dev(base);
if (!startdev)
startdev = base;
private = startdev->private;
@@ -5663,14 +5642,10 @@ static int dasd_eckd_restore_device(struct dasd_device *device)
dasd_eckd_read_features(device);
/* Read Volume Information */
- rc = dasd_eckd_read_vol_info(device);
- if (rc)
- goto out_err2;
+ dasd_eckd_read_vol_info(device);
/* Read Extent Pool Information */
- rc = dasd_eckd_read_ext_pool_info(device);
- if (rc)
- goto out_err2;
+ dasd_eckd_read_ext_pool_info(device);
/* Read Device Characteristics */
rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
@@ -6521,20 +6496,8 @@ static void dasd_eckd_setup_blk_queue(struct dasd_block *block)
unsigned int logical_block_size = block->bp_block;
struct request_queue *q = block->request_queue;
struct dasd_device *device = block->base;
- struct dasd_eckd_private *private;
- unsigned int max_discard_sectors;
- unsigned int max_bytes;
- unsigned int ext_bytes; /* Extent Size in Bytes */
- int recs_per_trk;
- int trks_per_cyl;
- int ext_limit;
- int ext_size; /* Extent Size in Cylinders */
int max;
- private = device->private;
- trks_per_cyl = private->rdc_data.trk_per_cyl;
- recs_per_trk = recs_per_track(&private->rdc_data, 0, logical_block_size);
-
if (device->features & DASD_FEATURE_USERAW) {
/*
* the max_blocks value for raw_track access is 256
@@ -6555,28 +6518,6 @@ static void dasd_eckd_setup_blk_queue(struct dasd_block *block)
/* With page sized segments each segment can be translated into one idaw/tidaw */
blk_queue_max_segment_size(q, PAGE_SIZE);
blk_queue_segment_boundary(q, PAGE_SIZE - 1);
-
- if (dasd_eckd_is_ese(device)) {
- /*
- * Depending on the extent size, up to UINT_MAX bytes can be
- * accepted. However, neither DASD_ECKD_RAS_EXTS_MAX nor the
- * device limits should be exceeded.
- */
- ext_size = dasd_eckd_ext_size(device);
- ext_limit = min(private->real_cyl / ext_size, DASD_ECKD_RAS_EXTS_MAX);
- ext_bytes = ext_size * trks_per_cyl * recs_per_trk *
- logical_block_size;
- max_bytes = UINT_MAX - (UINT_MAX % ext_bytes);
- if (max_bytes / ext_bytes > ext_limit)
- max_bytes = ext_bytes * ext_limit;
-
- max_discard_sectors = max_bytes / 512;
-
- blk_queue_max_discard_sectors(q, max_discard_sectors);
- blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
- q->limits.discard_granularity = ext_bytes;
- q->limits.discard_alignment = ext_bytes;
- }
}
static struct ccw_driver dasd_eckd_driver = {
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index ba7d2480613b..dcdaba689b20 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -113,6 +113,7 @@ struct subchannel {
enum sch_todo todo;
struct work_struct todo_work;
struct schib_config config;
+ u64 dma_mask;
char *driver_override; /* Driver name to force a match */
} __attribute__ ((aligned(8)));
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 1fbfb0a93f5f..831850435c23 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -232,7 +232,12 @@ struct subchannel *css_alloc_subchannel(struct subchannel_id schid,
* belong to a subchannel need to fit 31 bit width (e.g. ccw).
*/
sch->dev.coherent_dma_mask = DMA_BIT_MASK(31);
- sch->dev.dma_mask = &sch->dev.coherent_dma_mask;
+ /*
+ * But we don't have such restrictions imposed on the stuff that
+ * is handled by the streaming API.
+ */
+ sch->dma_mask = DMA_BIT_MASK(64);
+ sch->dev.dma_mask = &sch->dma_mask;
return sch;
err:
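
The css.c hunk splits the subchannel's DMA masks: coherent allocations stay limited to 31 bits (CCWs and friends must live below 2 GiB) while the streaming API gets a full 64-bit mask stored in the new sch->dma_mask field, which device.c then shares with the ccw_device. The macro behind those masks, reproduced as a runnable sketch (the n == 64 special case avoids the undefined 1ULL << 64 shift):

#include <stdio.h>

/* Mirrors the kernel macro: all ones for n == 64, else the low n bits. */
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	/* Coherent allocations (ccw etc.) must stay below 2 GiB ... */
	printf("DMA_BIT_MASK(31) = 0x%016llx\n", DMA_BIT_MASK(31));
	/* ... while streaming mappings may use the full address space. */
	printf("DMA_BIT_MASK(64) = 0x%016llx\n", DMA_BIT_MASK(64));
	return 0;
}
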
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 131430bd48d9..0c6245fc7706 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -710,7 +710,7 @@ static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
if (!cdev->private)
goto err_priv;
cdev->dev.coherent_dma_mask = sch->dev.coherent_dma_mask;
- cdev->dev.dma_mask = &cdev->dev.coherent_dma_mask;
+ cdev->dev.dma_mask = sch->dev.dma_mask;
dma_pool = cio_gp_dma_create(&cdev->dev, 1);
if (!dma_pool)
goto err_dma_pool;
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index a58b45df95d7..2a34a2ab9bf7 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -82,6 +82,7 @@ enum qdio_irq_states {
#define QDIO_SIGA_WRITE 0x00
#define QDIO_SIGA_READ 0x01
#define QDIO_SIGA_SYNC 0x02
+#define QDIO_SIGA_WRITEM 0x03
#define QDIO_SIGA_WRITEQ 0x04
#define QDIO_SIGA_QEBSM_FLAG 0x80
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 5b63c505a2f7..7368407030b0 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -310,18 +310,19 @@ static inline int qdio_siga_sync_q(struct qdio_q *q)
return qdio_siga_sync(q, q->mask, 0);
}
-static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit,
- unsigned long aob)
+static int qdio_siga_output(struct qdio_q *q, unsigned int count,
+ unsigned int *busy_bit, unsigned long aob)
{
unsigned long schid = *((u32 *) &q->irq_ptr->schid);
unsigned int fc = QDIO_SIGA_WRITE;
u64 start_time = 0;
int retries = 0, cc;
- unsigned long laob = 0;
- if (aob) {
- fc = QDIO_SIGA_WRITEQ;
- laob = aob;
+ if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q)) {
+ if (count > 1)
+ fc = QDIO_SIGA_WRITEM;
+ else if (aob)
+ fc = QDIO_SIGA_WRITEQ;
}
if (is_qebsm(q)) {
@@ -329,7 +330,7 @@ static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit,
fc |= QDIO_SIGA_QEBSM_FLAG;
}
again:
- cc = do_siga_output(schid, q->mask, busy_bit, fc, laob);
+ cc = do_siga_output(schid, q->mask, busy_bit, fc, aob);
/* hipersocket busy condition */
if (unlikely(*busy_bit)) {
@@ -781,7 +782,8 @@ static inline int qdio_outbound_q_moved(struct qdio_q *q, unsigned int start)
return count;
}
-static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob)
+static int qdio_kick_outbound_q(struct qdio_q *q, unsigned int count,
+ unsigned long aob)
{
int retries = 0, cc;
unsigned int busy_bit;
@@ -793,7 +795,7 @@ static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob)
retry:
qperf_inc(q, siga_write);
- cc = qdio_siga_output(q, &busy_bit, aob);
+ cc = qdio_siga_output(q, count, &busy_bit, aob);
switch (cc) {
case 0:
break;
@@ -1526,7 +1528,7 @@ set:
* @count: how many buffers are filled
*/
static int handle_outbound(struct qdio_q *q, unsigned int callflags,
- int bufnr, int count)
+ unsigned int bufnr, unsigned int count)
{
const unsigned int scan_threshold = q->irq_ptr->scan_threshold;
unsigned char state = 0;
@@ -1549,13 +1551,10 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
if (queue_type(q) == QDIO_IQDIO_QFMT) {
unsigned long phys_aob = 0;
- /* One SIGA-W per buffer required for unicast HSI */
- WARN_ON_ONCE(count > 1 && !multicast_outbound(q));
-
- if (q->u.out.use_cq)
+ if (q->u.out.use_cq && count == 1)
phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);
- rc = qdio_kick_outbound_q(q, phys_aob);
+ rc = qdio_kick_outbound_q(q, count, phys_aob);
} else if (need_siga_sync(q)) {
rc = qdio_siga_sync_q(q);
} else if (count < QDIO_MAX_BUFFERS_PER_Q &&
@@ -1564,7 +1563,7 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
/* The previous buffer is not processed yet, tack on. */
qperf_inc(q, fast_requeue);
} else {
- rc = qdio_kick_outbound_q(q, 0);
+ rc = qdio_kick_outbound_q(q, count, 0);
}
/* Let drivers implement their own completion scanning: */
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index f4ca1d29d61b..cd164886132f 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -113,7 +113,7 @@ static void set_impl_params(struct qdio_irq *irq_ptr,
irq_ptr->qib.pfmt = qib_param_field_format;
if (qib_param_field)
memcpy(irq_ptr->qib.parm, qib_param_field,
- QDIO_MAX_BUFFERS_PER_Q);
+ sizeof(irq_ptr->qib.parm));
if (!input_slib_elements)
goto output;
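
The qdio_setup.c fix bounds the memcpy() by the size of the destination field instead of the unrelated QDIO_MAX_BUFFERS_PER_Q constant, which only matched the parm area's size by coincidence. A sketch of the sizeof(destination) idiom (struct layout illustrative; the source buffer must be at least as large):

#include <stdio.h>
#include <string.h>

#define QDIO_MAX_BUFFERS_PER_Q 128

/* Illustrative layout; the real struct qib has a 128-byte parm area. */
struct qib {
	unsigned char parm[128];
};

int main(void)
{
	struct qib qib;
	unsigned char param_field[QDIO_MAX_BUFFERS_PER_Q] = { 0xaa };

	/*
	 * Bounding the copy by the destination field keeps the code
	 * correct even if QDIO_MAX_BUFFERS_PER_Q and the parm area
	 * ever diverge.
	 */
	memcpy(qib.parm, param_field, sizeof(qib.parm));

	printf("copied %zu bytes\n", sizeof(qib.parm));
	return 0;
}
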
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 45bdb47f84c1..9157e728a362 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -522,8 +522,7 @@ static int zcrypt_release(struct inode *inode, struct file *filp)
if (filp->f_inode->i_cdev == &zcrypt_cdev) {
struct zcdn_device *zcdndev;
- if (mutex_lock_interruptible(&ap_perms_mutex))
- return -ERESTARTSYS;
+ mutex_lock(&ap_perms_mutex);
zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev);
mutex_unlock(&ap_perms_mutex);
if (zcdndev) {
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index e4b55f9aa062..d08154502b15 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -532,6 +532,8 @@ struct qeth_qdio_out_q {
struct timer_list timer;
struct qeth_hdr *prev_hdr;
u8 bulk_start;
+ u8 bulk_count;
+ u8 bulk_max;
};
#define qeth_for_each_output_queue(card, q, i) \
@@ -878,6 +880,13 @@ static inline u16 qeth_iqd_translate_txq(struct net_device *dev, u16 txq)
return txq;
}
+static inline bool qeth_iqd_is_mcast_queue(struct qeth_card *card,
+ struct qeth_qdio_out_q *queue)
+{
+ return qeth_iqd_translate_txq(card->dev, queue->queue_no) ==
+ QETH_IQD_MCAST_TXQ;
+}
+
static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf,
unsigned int elements)
{
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index a7868c8133ee..9e8bd8e08146 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -1513,7 +1513,6 @@ int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
rc = qeth_clear_halt_card(card, use_halt);
if (rc)
QETH_CARD_TEXT_(card, 3, "2err%d", rc);
- card->state = CARD_STATE_DOWN;
return rc;
}
EXPORT_SYMBOL_GPL(qeth_qdio_clear_card);
@@ -2634,6 +2633,18 @@ static int qeth_init_input_buffer(struct qeth_card *card,
return 0;
}
+static unsigned int qeth_tx_select_bulk_max(struct qeth_card *card,
+ struct qeth_qdio_out_q *queue)
+{
+ if (!IS_IQD(card) ||
+ qeth_iqd_is_mcast_queue(card, queue) ||
+ card->options.cq == QETH_CQ_ENABLED ||
+ qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd))
+ return 1;
+
+ return card->ssqd.mmwc ? card->ssqd.mmwc : 1;
+}
+
int qeth_init_qdio_queues(struct qeth_card *card)
{
unsigned int i;
@@ -2673,6 +2684,8 @@ int qeth_init_qdio_queues(struct qeth_card *card)
queue->do_pack = 0;
queue->prev_hdr = NULL;
queue->bulk_start = 0;
+ queue->bulk_count = 0;
+ queue->bulk_max = qeth_tx_select_bulk_max(card, queue);
atomic_set(&queue->used_buffers, 0);
atomic_set(&queue->set_pci_flags_count, 0);
atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
@@ -3107,7 +3120,7 @@ static void qeth_queue_input_buffer(struct qeth_card *card, int index)
for (i = queue->next_buf_to_init;
i < queue->next_buf_to_init + count; ++i) {
if (qeth_init_input_buffer(card,
- &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q])) {
+ &queue->bufs[QDIO_BUFNR(i)])) {
break;
} else {
newcount++;
@@ -3149,8 +3162,8 @@ static void qeth_queue_input_buffer(struct qeth_card *card, int index)
if (rc) {
QETH_CARD_TEXT(card, 2, "qinberr");
}
- queue->next_buf_to_init = (queue->next_buf_to_init + count) %
- QDIO_MAX_BUFFERS_PER_Q;
+ queue->next_buf_to_init = QDIO_BUFNR(queue->next_buf_to_init +
+ count);
}
}
@@ -3198,7 +3211,7 @@ static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue)
/* it's a packing buffer */
atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
queue->next_buf_to_fill =
- (queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q;
+ QDIO_BUFNR(queue->next_buf_to_fill + 1);
return 1;
}
return 0;
@@ -3252,7 +3265,8 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
unsigned int qdio_flags;
for (i = index; i < index + count; ++i) {
- int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
+ unsigned int bidx = QDIO_BUFNR(i);
+
buf = queue->bufs[bidx];
buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
SBAL_EFLAGS_LAST_ENTRY;
@@ -3318,10 +3332,11 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
static void qeth_flush_queue(struct qeth_qdio_out_q *queue)
{
- qeth_flush_buffers(queue, queue->bulk_start, 1);
+ qeth_flush_buffers(queue, queue->bulk_start, queue->bulk_count);
- queue->bulk_start = QDIO_BUFNR(queue->bulk_start + 1);
+ queue->bulk_start = QDIO_BUFNR(queue->bulk_start + queue->bulk_count);
queue->prev_hdr = NULL;
+ queue->bulk_count = 0;
}
static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
@@ -3419,8 +3434,7 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
}
for (i = first_element; i < first_element + count; ++i) {
- int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
- struct qdio_buffer *buffer = cq->qdio_bufs[bidx];
+ struct qdio_buffer *buffer = cq->qdio_bufs[QDIO_BUFNR(i)];
int e = 0;
while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
@@ -3441,8 +3455,8 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
"QDIO reported an error, rc=%i\n", rc);
QETH_CARD_TEXT(card, 2, "qcqherr");
}
- card->qdio.c_q->next_buf_to_init = (card->qdio.c_q->next_buf_to_init
- + count) % QDIO_MAX_BUFFERS_PER_Q;
+
+ cq->next_buf_to_init = QDIO_BUFNR(cq->next_buf_to_init + count);
}
static void qeth_qdio_input_handler(struct ccw_device *ccwdev,
@@ -3468,7 +3482,6 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
{
struct qeth_card *card = (struct qeth_card *) card_ptr;
struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
- struct qeth_qdio_out_buffer *buffer;
struct net_device *dev = card->dev;
struct netdev_queue *txq;
int i;
@@ -3482,10 +3495,10 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
}
for (i = first_element; i < (first_element + count); ++i) {
- int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
- buffer = queue->bufs[bidx];
- qeth_handle_send_error(card, buffer, qdio_error);
- qeth_clear_output_buffer(queue, buffer, qdio_error, 0);
+ struct qeth_qdio_out_buffer *buf = queue->bufs[QDIO_BUFNR(i)];
+
+ qeth_handle_send_error(card, buf, qdio_error);
+ qeth_clear_output_buffer(queue, buf, qdio_error, 0);
}
atomic_sub(count, &queue->used_buffers);
@@ -3680,10 +3693,10 @@ check_layout:
}
static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue,
- struct qeth_qdio_out_buffer *buffer,
struct sk_buff *curr_skb,
struct qeth_hdr *curr_hdr)
{
+ struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start];
struct qeth_hdr *prev_hdr = queue->prev_hdr;
if (!prev_hdr)
@@ -3803,13 +3816,14 @@ static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
struct qeth_hdr *hdr, unsigned int offset,
unsigned int hd_len)
{
- struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start];
unsigned int bytes = qdisc_pkt_len(skb);
+ struct qeth_qdio_out_buffer *buffer;
unsigned int next_element;
struct netdev_queue *txq;
bool stopped = false;
bool flush;
+ buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + queue->bulk_count)];
txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
/* Just a sanity check, the wake/stop logic should ensure that we always
@@ -3818,11 +3832,23 @@ static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
return -EBUSY;
- if ((buffer->next_element_to_fill + elements > queue->max_elements) ||
- !qeth_iqd_may_bulk(queue, buffer, skb, hdr)) {
- atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
- qeth_flush_queue(queue);
- buffer = queue->bufs[queue->bulk_start];
+ flush = !qeth_iqd_may_bulk(queue, skb, hdr);
+
+ if (flush ||
+ (buffer->next_element_to_fill + elements > queue->max_elements)) {
+ if (buffer->next_element_to_fill > 0) {
+ atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
+ queue->bulk_count++;
+ }
+
+ if (queue->bulk_count >= queue->bulk_max)
+ flush = true;
+
+ if (flush)
+ qeth_flush_queue(queue);
+
+ buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start +
+ queue->bulk_count)];
/* Sanity-check again: */
if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
@@ -3848,7 +3874,13 @@ static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
if (flush || next_element >= queue->max_elements) {
atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
- qeth_flush_queue(queue);
+ queue->bulk_count++;
+
+ if (queue->bulk_count >= queue->bulk_max)
+ flush = true;
+
+ if (flush)
+ qeth_flush_queue(queue);
}
if (stopped && !qeth_out_queue_is_full(queue))
@@ -3898,8 +3930,7 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
flush_count++;
queue->next_buf_to_fill =
- (queue->next_buf_to_fill + 1) %
- QDIO_MAX_BUFFERS_PER_Q;
+ QDIO_BUFNR(queue->next_buf_to_fill + 1);
buffer = queue->bufs[queue->next_buf_to_fill];
/* We stepped forward, so sanity-check again: */
@@ -3932,8 +3963,8 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
if (!queue->do_pack || stopped || next_element >= queue->max_elements) {
flush_count++;
atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
- queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
- QDIO_MAX_BUFFERS_PER_Q;
+ queue->next_buf_to_fill =
+ QDIO_BUFNR(queue->next_buf_to_fill + 1);
}
if (flush_count)
@@ -4261,7 +4292,6 @@ int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback)
}
return rc;
}
-EXPORT_SYMBOL_GPL(qeth_set_access_ctrl_online);
void qeth_tx_timeout(struct net_device *dev)
{
@@ -4715,8 +4745,7 @@ static int qeth_qdio_establish(struct qeth_card *card)
QETH_CARD_TEXT(card, 2, "qdioest");
- qib_param_field = kzalloc(QDIO_MAX_BUFFERS_PER_Q,
- GFP_KERNEL);
+ qib_param_field = kzalloc(FIELD_SIZEOF(struct qib, parm), GFP_KERNEL);
if (!qib_param_field) {
rc = -ENOMEM;
goto out_free_nothing;
@@ -4978,6 +5007,15 @@ retriable:
goto out;
}
}
+
+ if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) ||
+ (card->info.hwtrap && qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM)))
+ card->info.hwtrap = 0;
+
+ rc = qeth_set_access_ctrl_online(card, 0);
+ if (rc)
+ goto out;
+
return 0;
out:
dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
@@ -5166,8 +5204,7 @@ int qeth_poll(struct napi_struct *napi, int budget)
card->rx.b_count--;
if (card->rx.b_count) {
card->rx.b_index =
- (card->rx.b_index + 1) %
- QDIO_MAX_BUFFERS_PER_Q;
+ QDIO_BUFNR(card->rx.b_index + 1);
card->rx.b_element =
&card->qdio.in_q
->bufs[card->rx.b_index]
@@ -5183,9 +5220,9 @@ int qeth_poll(struct napi_struct *napi, int budget)
}
}
- napi_complete_done(napi, work_done);
- if (qdio_start_irq(card->data.ccwdev, 0))
- napi_schedule(&card->napi);
+ if (napi_complete_done(napi, work_done) &&
+ qdio_start_irq(CARD_DDEV(card), 0))
+ napi_schedule(napi);
out:
return work_done;
}
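
Several qeth_core_main.c hunks above replace "% QDIO_MAX_BUFFERS_PER_Q" with QDIO_BUFNR() when wrapping ring indices. Assuming the helper is the usual power-of-two mask (as its use alongside QDIO_MAX_BUFFERS_PER_Q = 128 suggests), the two forms agree for any unsigned index, and the mask avoids a division on the hot path:

#include <stdio.h>

#define QDIO_MAX_BUFFERS_PER_Q 128			/* power of two */
#define QDIO_MAX_BUFFERS_MASK  (QDIO_MAX_BUFFERS_PER_Q - 1)
/* Believed to match the kernel helper: wrap an index into the ring. */
#define QDIO_BUFNR(num)        ((num) & QDIO_MAX_BUFFERS_MASK)

int main(void)
{
	/*
	 * The AND only equals '% QDIO_MAX_BUFFERS_PER_Q' because the
	 * queue size is a power of two; it also stays correct for any
	 * unsigned index that ran past the end of the ring.
	 */
	for (unsigned int i = 125; i < 131; i++)
		printf("index %3u -> buffer %3u (mod: %3u)\n",
		       i, QDIO_BUFNR(i), i % QDIO_MAX_BUFFERS_PER_Q);
	return 0;
}
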
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 6420b58cf42b..9ad0d6f9d48b 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -11,6 +11,7 @@
#include <asm/qeth.h>
#include <uapi/linux/if_ether.h>
+#include <uapi/linux/in6.h>
#define IPA_PDU_HEADER_SIZE 0x40
#define QETH_IPA_PDU_LEN_TOTAL(buffer) (buffer + 0x0e)
@@ -365,8 +366,7 @@ struct qeth_ipacmd_setdelip6 {
struct qeth_ipacmd_setdelipm {
__u8 mac[6];
__u8 padding[2];
- __u8 ip6[12];
- __u8 ip4[4];
+ struct in6_addr ip;
} __attribute__ ((packed));
struct qeth_ipacmd_layer2setdelmac {
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index b8799cd3e7aa..8f3093d24b12 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -759,14 +759,6 @@ add_napi:
return rc;
}
-static int qeth_l2_start_ipassists(struct qeth_card *card)
-{
- /* configure isolation level */
- if (qeth_set_access_ctrl_online(card, 0))
- return -ENODEV;
- return 0;
-}
-
static void qeth_l2_trace_features(struct qeth_card *card)
{
/* Set BridgePort features */
@@ -797,13 +789,6 @@ static int qeth_l2_set_online(struct ccwgroup_device *gdev)
goto out_remove;
}
- if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) {
- if (card->info.hwtrap &&
- qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM))
- card->info.hwtrap = 0;
- } else
- card->info.hwtrap = 0;
-
qeth_bridgeport_query_support(card);
if (card->options.sbp.supported_funcs)
dev_info(&card->gdev->dev,
@@ -825,12 +810,6 @@ static int qeth_l2_set_online(struct ccwgroup_device *gdev)
/* softsetup */
QETH_CARD_TEXT(card, 2, "softsetp");
- if (IS_OSD(card) || IS_OSX(card)) {
- rc = qeth_l2_start_ipassists(card);
- if (rc)
- goto out_remove;
- }
-
rc = qeth_init_qdio_queues(card);
if (rc) {
QETH_CARD_TEXT_(card, 2, "6err%d", rc);
@@ -2021,10 +2000,10 @@ static bool qeth_l2_vnicc_recover_char(struct qeth_card *card, u32 vnicc,
static void qeth_l2_vnicc_init(struct qeth_card *card)
{
u32 *timeout = &card->options.vnicc.learning_timeout;
+ bool enable, error = false;
unsigned int chars_len, i;
unsigned long chars_tmp;
u32 sup_cmds, vnicc;
- bool enable, error;
QETH_CARD_TEXT(card, 2, "vniccini");
/* reset rx_bcast */
@@ -2045,17 +2024,24 @@ static void qeth_l2_vnicc_init(struct qeth_card *card)
chars_len = sizeof(card->options.vnicc.sup_chars) * BITS_PER_BYTE;
for_each_set_bit(i, &chars_tmp, chars_len) {
vnicc = BIT(i);
- qeth_l2_vnicc_query_cmds(card, vnicc, &sup_cmds);
- if (!(sup_cmds & IPA_VNICC_SET_TIMEOUT) ||
- !(sup_cmds & IPA_VNICC_GET_TIMEOUT))
+ if (qeth_l2_vnicc_query_cmds(card, vnicc, &sup_cmds)) {
+ sup_cmds = 0;
+ error = true;
+ }
+ if ((sup_cmds & IPA_VNICC_SET_TIMEOUT) &&
+ (sup_cmds & IPA_VNICC_GET_TIMEOUT))
+ card->options.vnicc.getset_timeout_sup |= vnicc;
+ else
card->options.vnicc.getset_timeout_sup &= ~vnicc;
- if (!(sup_cmds & IPA_VNICC_ENABLE) ||
- !(sup_cmds & IPA_VNICC_DISABLE))
+ if ((sup_cmds & IPA_VNICC_ENABLE) &&
+ (sup_cmds & IPA_VNICC_DISABLE))
+ card->options.vnicc.set_char_sup |= vnicc;
+ else
card->options.vnicc.set_char_sup &= ~vnicc;
}
/* enforce assumed default values and recover settings, if changed */
- error = qeth_l2_vnicc_recover_timeout(card, QETH_VNICC_LEARNING,
- timeout);
+ error |= qeth_l2_vnicc_recover_timeout(card, QETH_VNICC_LEARNING,
+ timeout);
chars_tmp = card->options.vnicc.wanted_chars ^ QETH_VNICC_DEFAULT;
chars_tmp |= QETH_VNICC_BRIDGE_INVISIBLE;
chars_len = sizeof(card->options.vnicc.wanted_chars) * BITS_PER_BYTE;
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index 87659cfc9066..ba913d1ab88d 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -24,7 +24,6 @@ enum qeth_ip_types {
struct qeth_ipaddr {
struct hlist_node hnode;
enum qeth_ip_types type;
- unsigned char mac[ETH_ALEN];
u8 is_multicast:1;
u8 in_progress:1;
u8 disp_flag:2;
@@ -37,7 +36,7 @@ struct qeth_ipaddr {
enum qeth_prot_versions proto;
union {
struct {
- unsigned int addr;
+ __be32 addr;
unsigned int mask;
} a4;
struct {
@@ -74,12 +73,10 @@ static inline bool qeth_l3_addr_match_all(struct qeth_ipaddr *a1,
* so 'proto' and 'addr' match for sure.
*
* For ucast:
- * - 'mac' is always 0.
* - 'mask'/'pfxlen' for RXIP/VIPA is always 0. For NORMAL, matching
* values are required to avoid mixups in takeover eligibility.
*
* For mcast,
- * - 'mac' is mapped from the IP, and thus always matches.
* - 'mask'/'pfxlen' is always 0.
*/
if (a1->type != a2->type)
@@ -89,21 +86,12 @@ static inline bool qeth_l3_addr_match_all(struct qeth_ipaddr *a1,
return a1->u.a4.mask == a2->u.a4.mask;
}
-static inline u64 qeth_l3_ipaddr_hash(struct qeth_ipaddr *addr)
+static inline u32 qeth_l3_ipaddr_hash(struct qeth_ipaddr *addr)
{
- u64 ret = 0;
- u8 *point;
-
- if (addr->proto == QETH_PROT_IPV6) {
- point = (u8 *) &addr->u.a6.addr;
- ret = get_unaligned((u64 *)point) ^
- get_unaligned((u64 *) (point + 8));
- }
- if (addr->proto == QETH_PROT_IPV4) {
- point = (u8 *) &addr->u.a4.addr;
- ret = get_unaligned((u32 *) point);
- }
- return ret;
+ if (addr->proto == QETH_PROT_IPV6)
+ return ipv6_addr_hash(&addr->u.a6.addr);
+ else
+ return ipv4_addr_hash(addr->u.a4.addr);
}
struct qeth_ipato_entry {
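
qeth_l3_ipaddr_hash() now leans on the generic helpers instead of get_unaligned() games, and the result narrows naturally to the new u32 return type. A standalone rendition, assuming ipv6_addr_hash() is the usual XOR fold of the four 32-bit address words and ipv4_addr_hash() returns the (big-endian) address itself; the printed values depend on host endianness and are purely illustrative:

#include <stdio.h>
#include <stdint.h>

/* Assumed to mirror the include/net helpers: XOR-fold the 128 bits. */
static uint32_t ipv6_addr_hash(const uint32_t s6_addr32[4])
{
	return s6_addr32[0] ^ s6_addr32[1] ^ s6_addr32[2] ^ s6_addr32[3];
}

static uint32_t ipv4_addr_hash(uint32_t be_addr)
{
	return be_addr;	/* the address is its own hash key */
}

int main(void)
{
	/* ff02::1 as four 32-bit words in little-endian memory order. */
	const uint32_t ff02_1[4] = { 0x000002ff, 0, 0, 0x01000000 };

	printf("hash(ff02::1) = %08x\n", ipv6_addr_hash(ff02_1));
	printf("hash(1.2.3.4) = %08x\n", ipv4_addr_hash(0x04030201));
	return 0;
}
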
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index d7bfc7a0e4c0..70d4586dc779 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -76,7 +76,7 @@ static struct qeth_ipaddr *qeth_l3_get_addr_buffer(enum qeth_prot_versions prot)
static struct qeth_ipaddr *qeth_l3_find_addr_by_ip(struct qeth_card *card,
struct qeth_ipaddr *query)
{
- u64 key = qeth_l3_ipaddr_hash(query);
+ u32 key = qeth_l3_ipaddr_hash(query);
struct qeth_ipaddr *addr;
if (query->is_multicast) {
@@ -381,12 +381,13 @@ static int qeth_l3_send_setdelmc(struct qeth_card *card,
if (!iob)
return -ENOMEM;
cmd = __ipa_cmd(iob);
- ether_addr_copy(cmd->data.setdelipm.mac, addr->mac);
- if (addr->proto == QETH_PROT_IPV6)
- memcpy(cmd->data.setdelipm.ip6, &addr->u.a6.addr,
- sizeof(struct in6_addr));
- else
- memcpy(&cmd->data.setdelipm.ip4, &addr->u.a4.addr, 4);
+ if (addr->proto == QETH_PROT_IPV6) {
+ cmd->data.setdelipm.ip = addr->u.a6.addr;
+ ipv6_eth_mc_map(&addr->u.a6.addr, cmd->data.setdelipm.mac);
+ } else {
+ cmd->data.setdelipm.ip.s6_addr32[3] = addr->u.a4.addr;
+ ip_eth_mc_map(addr->u.a4.addr, cmd->data.setdelipm.mac);
+ }
return qeth_send_ipa_cmd(card, iob, qeth_l3_setdelip_cb, NULL);
}
@@ -953,8 +954,6 @@ static int qeth_l3_start_ipassists(struct qeth_card *card)
{
QETH_CARD_TEXT(card, 3, "strtipas");
- if (qeth_set_access_ctrl_online(card, 0))
- return -EIO;
qeth_l3_start_ipa_arp_processing(card); /* go on*/
qeth_l3_start_ipa_source_mac(card); /* go on*/
qeth_l3_start_ipa_vlan(card); /* go on*/
@@ -1129,8 +1128,7 @@ qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev)
for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL;
im4 = rcu_dereference(im4->next_rcu)) {
- ip_eth_mc_map(im4->multiaddr, tmp->mac);
- tmp->u.a4.addr = be32_to_cpu(im4->multiaddr);
+ tmp->u.a4.addr = im4->multiaddr;
tmp->is_multicast = 1;
ipm = qeth_l3_find_addr_by_ip(card, tmp);
@@ -1141,8 +1139,8 @@ qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev)
ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
if (!ipm)
continue;
- ether_addr_copy(ipm->mac, tmp->mac);
- ipm->u.a4.addr = be32_to_cpu(im4->multiaddr);
+
+ ipm->u.a4.addr = im4->multiaddr;
ipm->is_multicast = 1;
ipm->disp_flag = QETH_DISP_ADDR_ADD;
hash_add(card->ip_mc_htable,
@@ -1209,9 +1207,7 @@ static void qeth_l3_add_mc6_to_hash(struct qeth_card *card,
return;
for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) {
- ipv6_eth_mc_map(&im6->mca_addr, tmp->mac);
- memcpy(&tmp->u.a6.addr, &im6->mca_addr.s6_addr,
- sizeof(struct in6_addr));
+ tmp->u.a6.addr = im6->mca_addr;
tmp->is_multicast = 1;
ipm = qeth_l3_find_addr_by_ip(card, tmp);
@@ -1225,9 +1221,7 @@ static void qeth_l3_add_mc6_to_hash(struct qeth_card *card,
if (!ipm)
continue;
- ether_addr_copy(ipm->mac, tmp->mac);
- memcpy(&ipm->u.a6.addr, &im6->mca_addr.s6_addr,
- sizeof(struct in6_addr));
+ ipm->u.a6.addr = im6->mca_addr;
ipm->is_multicast = 1;
ipm->disp_flag = QETH_DISP_ADDR_ADD;
hash_add(card->ip_mc_htable,
@@ -2313,13 +2307,6 @@ static int qeth_l3_set_online(struct ccwgroup_device *gdev)
goto out_remove;
}
- if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) {
- if (card->info.hwtrap &&
- qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM))
- card->info.hwtrap = 0;
- } else
- card->info.hwtrap = 0;
-
card->state = CARD_STATE_HARDSETUP;
qeth_print_status_message(card);
@@ -2557,7 +2544,7 @@ static int qeth_l3_ip_event(struct notifier_block *this,
QETH_CARD_TEXT(card, 3, "ipevent");
qeth_l3_init_ipaddr(&addr, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV4);
- addr.u.a4.addr = be32_to_cpu(ifa->ifa_address);
+ addr.u.a4.addr = ifa->ifa_address;
addr.u.a4.mask = be32_to_cpu(ifa->ifa_mask);
return qeth_l3_handle_ip_event(card, &addr, event);
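
With the mac[] field gone from struct qeth_ipaddr, qeth_l3_send_setdelmc() derives the multicast MAC from the IP address at command-build time via ip_eth_mc_map()/ipv6_eth_mc_map(). Those mappings follow RFC 1112 (01:00:5e plus the low 23 bits) and RFC 2464 (33:33 plus the low 32 bits); a compilable re-implementation for illustration:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* IPv4 multicast -> 01:00:5e + low 23 bits (RFC 1112); addr host order. */
static void ip_eth_mc_map(uint32_t addr, uint8_t *mac)
{
	mac[0] = 0x01; mac[1] = 0x00; mac[2] = 0x5e;
	mac[3] = (addr >> 16) & 0x7f;
	mac[4] = (addr >> 8) & 0xff;
	mac[5] = addr & 0xff;
}

/* IPv6 multicast -> 33:33 + low 32 bits (RFC 2464). */
static void ipv6_eth_mc_map(const uint8_t addr[16], uint8_t *mac)
{
	mac[0] = 0x33; mac[1] = 0x33;
	memcpy(&mac[2], &addr[12], 4);
}

int main(void)
{
	uint8_t mac[6];
	const uint8_t ff02_1[16] = { 0xff, 0x02, [15] = 0x01 };

	ip_eth_mc_map(0xe0000001, mac);		/* 224.0.0.1 */
	printf("224.0.0.1 -> %02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);

	ipv6_eth_mc_map(ff02_1, mac);		/* ff02::1 */
	printf("ff02::1   -> %02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}
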
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 296bbc3c4606..cf63916814cc 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -27,6 +27,11 @@
struct kmem_cache *zfcp_fsf_qtcb_cache;
+static bool ber_stop = true;
+module_param(ber_stop, bool, 0600);
+MODULE_PARM_DESC(ber_stop,
+	"Shuts down FCP devices for FCP channels that report a bit-error count in excess of their threshold (default on)");
+
static void zfcp_fsf_request_timeout_handler(struct timer_list *t)
{
struct zfcp_fsf_req *fsf_req = from_timer(fsf_req, t, timer);
@@ -236,10 +241,15 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
case FSF_STATUS_READ_SENSE_DATA_AVAIL:
break;
case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
- dev_warn(&adapter->ccw_device->dev,
- "The error threshold for checksum statistics "
- "has been exceeded\n");
zfcp_dbf_hba_bit_err("fssrh_3", req);
+ if (ber_stop) {
+ dev_warn(&adapter->ccw_device->dev,
+ "All paths over this FCP device are disused because of excessive bit errors\n");
+ zfcp_erp_adapter_shutdown(adapter, 0, "fssrh_b");
+ } else {
+ dev_warn(&adapter->ccw_device->dev,
+ "The error threshold for checksum statistics has been exceeded\n");
+ }
break;
case FSF_STATUS_READ_LINK_DOWN:
zfcp_fsf_status_read_link_down(req);
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 1b92f3c19ff3..90cf4691b8c3 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -898,7 +898,7 @@ config SCSI_SNI_53C710
config 53C700_LE_ON_BE
bool
- depends on SCSI_LASI700
+ depends on SCSI_LASI700 || SCSI_SNI_53C710
default y
config SCSI_STEX
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index da00ca5fa5dc..401743e2b429 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1923,6 +1923,7 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
struct fcoe_fcp_rsp_payload *fcp_rsp;
struct bnx2fc_rport *tgt = io_req->tgt;
struct scsi_cmnd *sc_cmd;
+ u16 scope = 0, qualifier = 0;
/* scsi_cmd_cmpl is called with tgt lock held */
@@ -1990,12 +1991,30 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL ||
io_req->cdb_status == SAM_STAT_BUSY) {
- /* Set the jiffies + retry_delay_timer * 100ms
- for the rport/tgt */
- tgt->retry_delay_timestamp = jiffies +
- fcp_rsp->retry_delay_timer * HZ / 10;
+ /* Newer array firmware with BUSY or
+ * TASK_SET_FULL may return a status that needs
+ * the scope bits masked.
+ * Or a huge delay timestamp up to 27 minutes
+ * can result.
+ */
+ if (fcp_rsp->retry_delay_timer) {
+ /* Upper 2 bits */
+ scope = fcp_rsp->retry_delay_timer
+ & 0xC000;
+ /* Lower 14 bits */
+ qualifier = fcp_rsp->retry_delay_timer
+ & 0x3FFF;
+ }
+ if (scope > 0 && qualifier > 0 &&
+ qualifier <= 0x3FEF) {
+ /* Set the jiffies +
+ * retry_delay_timer * 100ms
+ * for the rport/tgt
+ */
+ tgt->retry_delay_timestamp = jiffies +
+ (qualifier * HZ / 10);
+ }
}
-
}
if (io_req->fcp_resid)
scsi_set_resid(sc_cmd, io_req->fcp_resid);
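
The bnx2fc hunk stops programming retry_delay_timestamp from the raw FCP_RSP field: newer array firmware sets scope bits in the top two bits, so the 14-bit qualifier has to be masked out and range-checked before it is turned into a delay, otherwise a bogus delay of up to roughly 27 minutes could result. The masking, as a runnable sketch (sample value arbitrary):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* retry_delay_timer from the FCP_RSP: top two bits are a scope
	 * field, the low 14 bits the actual delay qualifier. */
	uint16_t retry_delay_timer = 0x4012;

	uint16_t scope     = retry_delay_timer & 0xC000;	/* upper 2 bits */
	uint16_t qualifier = retry_delay_timer & 0x3FFF;	/* lower 14 bits */

	/* Only a bounded, non-zero qualifier programs a retry delay;
	 * treating the raw field as a delay could stall I/O for minutes. */
	if (scope > 0 && qualifier > 0 && qualifier <= 0x3FEF)
		printf("delay %u * 100ms\n", qualifier);
	else
		printf("no retry delay\n");
	return 0;
}
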
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index 5f8153c37f77..76751d6c7f0d 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -579,7 +579,6 @@ ch_release(struct inode *inode, struct file *file)
scsi_changer *ch = file->private_data;
scsi_device_put(ch->device);
- ch->device = NULL;
file->private_data = NULL;
kref_put(&ch->ref, ch_destroy);
return 0;
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 4971104b1817..f32da0ca529e 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -512,6 +512,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
unsigned int tpg_desc_tbl_off;
unsigned char orig_transition_tmo;
unsigned long flags;
+ bool transitioning_sense = false;
if (!pg->expiry) {
unsigned long transition_tmo = ALUA_FAILOVER_TIMEOUT * HZ;
@@ -572,13 +573,19 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
goto retry;
}
/*
- * Retry on ALUA state transition or if any
- * UNIT ATTENTION occurred.
+ * If the array returns with 'ALUA state transition'
+ * sense code here it cannot return RTPG data during
+ * transition. So set the state to 'transitioning' directly.
*/
if (sense_hdr.sense_key == NOT_READY &&
- sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a)
- err = SCSI_DH_RETRY;
- else if (sense_hdr.sense_key == UNIT_ATTENTION)
+ sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a) {
+ transitioning_sense = true;
+ goto skip_rtpg;
+ }
+ /*
+ * Retry on any other UNIT ATTENTION occurred.
+ */
+ if (sense_hdr.sense_key == UNIT_ATTENTION)
err = SCSI_DH_RETRY;
if (err == SCSI_DH_RETRY &&
pg->expiry != 0 && time_before(jiffies, pg->expiry)) {
@@ -666,7 +673,11 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
off = 8 + (desc[7] * 4);
}
+ skip_rtpg:
spin_lock_irqsave(&pg->lock, flags);
+ if (transitioning_sense)
+ pg->state = SCSI_ACCESS_STATE_TRANSITIONING;
+
sdev_printk(KERN_INFO, sdev,
"%s: port group %02x state %c %s supports %c%c%c%c%c%c%c\n",
ALUA_DH_NAME, pg->group_id, print_alua_state(pg->state),
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index d1513fdf1e00..0847e682797b 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -3683,7 +3683,7 @@ void hisi_sas_debugfs_work_handler(struct work_struct *work)
}
EXPORT_SYMBOL_GPL(hisi_sas_debugfs_work_handler);
-void hisi_sas_debugfs_release(struct hisi_hba *hisi_hba)
+static void hisi_sas_debugfs_release(struct hisi_hba *hisi_hba)
{
struct device *dev = hisi_hba->dev;
int i;
@@ -3705,7 +3705,7 @@ void hisi_sas_debugfs_release(struct hisi_hba *hisi_hba)
devm_kfree(dev, hisi_hba->debugfs_port_reg[i]);
}
-int hisi_sas_debugfs_alloc(struct hisi_hba *hisi_hba)
+static int hisi_sas_debugfs_alloc(struct hisi_hba *hisi_hba)
{
const struct hisi_sas_hw *hw = hisi_hba->hw;
struct device *dev = hisi_hba->dev;
@@ -3796,7 +3796,7 @@ fail:
return -ENOMEM;
}
-void hisi_sas_debugfs_bist_init(struct hisi_hba *hisi_hba)
+static void hisi_sas_debugfs_bist_init(struct hisi_hba *hisi_hba)
{
hisi_hba->debugfs_bist_dentry =
debugfs_create_dir("bist", hisi_hba->debugfs_dir);
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index ac39ed79ccaa..216e557f703e 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -5477,6 +5477,8 @@ static int hpsa_ciss_submit(struct ctlr_info *h,
return SCSI_MLQUEUE_HOST_BUSY;
}
+ c->device = dev;
+
enqueue_cmd_and_start_io(h, c);
/* the cmd'll come back via intr handler in complete_scsi_command() */
return 0;
@@ -5548,6 +5550,7 @@ static int hpsa_ioaccel_submit(struct ctlr_info *h,
hpsa_cmd_init(h, c->cmdindex, c);
c->cmd_type = CMD_SCSI;
c->scsi_cmd = cmd;
+ c->device = dev;
rc = hpsa_scsi_ioaccel_raid_map(h, c);
if (rc < 0) /* scsi_dma_map failed. */
rc = SCSI_MLQUEUE_HOST_BUSY;
@@ -5555,6 +5558,7 @@ static int hpsa_ioaccel_submit(struct ctlr_info *h,
hpsa_cmd_init(h, c->cmdindex, c);
c->cmd_type = CMD_SCSI;
c->scsi_cmd = cmd;
+ c->device = dev;
rc = hpsa_scsi_ioaccel_direct_map(h, c);
if (rc < 0) /* scsi_dma_map failed. */
rc = SCSI_MLQUEUE_HOST_BUSY;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index e91377a4cafe..e8813d26e594 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -9055,7 +9055,6 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
}
}
-#if defined(BUILD_NVME)
/* Clear NVME stats */
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
@@ -9063,7 +9062,6 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat));
}
}
-#endif
/* Clear SCSI stats */
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index fe1097666de4..6822cd9ff8f1 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -528,7 +528,6 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
list_del_init(&psb->list);
psb->exch_busy = 0;
psb->status = IOSTAT_SUCCESS;
-#ifdef BUILD_NVME
if (psb->cur_iocbq.iocb_flag == LPFC_IO_NVME) {
qp->abts_nvme_io_bufs--;
spin_unlock(&qp->abts_io_buf_list_lock);
@@ -536,7 +535,6 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
lpfc_sli4_nvme_xri_aborted(phba, axri, psb);
return;
}
-#endif
qp->abts_scsi_io_bufs--;
spin_unlock(&qp->abts_io_buf_list_lock);
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 45a66048801b..ff6d4aa92421 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -4183,11 +4183,11 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
*/
if (pdev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ &&
pdev->subsystem_device == 0xC000)
- return -ENODEV;
+ goto out_disable_device;
/* Now check the magic signature byte */
pci_read_config_word(pdev, PCI_CONF_AMISIG, &magic);
if (magic != HBA_SIGNATURE_471 && magic != HBA_SIGNATURE)
- return -ENODEV;
+ goto out_disable_device;
/* Ok it is probably a megaraid */
}
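
The megaraid change converts two early "return -ENODEV" statements into "goto out_disable_device" so that the PCI device enabled earlier in the probe is disabled again on these error paths. The underlying single-exit cleanup idiom, sketched standalone with simulated helpers:

#include <stdio.h>

static int enable_device(void)   { puts("device enabled");  return 0; }
static void disable_device(void) { puts("device disabled"); }
static int check_signature(void) { return -1; /* simulate bad magic */ }

/*
 * Once the device is enabled, every later failure must leave through
 * a label that undoes the enable, instead of returning directly and
 * leaking the enabled state.
 */
static int probe_one(void)
{
	int rc = enable_device();
	if (rc)
		return rc;

	if (check_signature()) {
		rc = -19;	/* -ENODEV */
		goto out_disable_device;
	}
	return 0;

out_disable_device:
	disable_device();
	return rc;
}

int main(void)
{
	printf("probe_one -> %d\n", probe_one());
	return 0;
}
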
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 1659d35cd37b..59ca98f12afd 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -596,7 +596,7 @@ static void qedf_dcbx_handler(void *dev, struct qed_dcbx_get *get, u32 mib_type)
tmp_prio = get->operational.app_prio.fcoe;
if (qedf_default_prio > -1)
qedf->prio = qedf_default_prio;
- else if (tmp_prio < 0 || tmp_prio > 7) {
+ else if (tmp_prio > 7) {
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
"FIP/FCoE prio %d out of range, setting to %d.\n",
tmp_prio, QEDF_DEFAULT_PRIO);
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 8190c2a27584..30bafd9d21e9 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -2920,6 +2920,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
struct qla_hw_data *ha = vha->hw;
uint16_t id = vha->vp_idx;
+ set_bit(VPORT_DELETE, &vha->dpc_flags);
+
while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) ||
test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))
msleep(1000);
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 873a6aef1c5c..6ffa9877c28b 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2396,6 +2396,7 @@ typedef struct fc_port {
unsigned int query:1;
unsigned int id_changed:1;
unsigned int scan_needed:1;
+ unsigned int n2n_flag:1;
struct completion nvme_del_done;
uint32_t nvme_prli_service_param;
@@ -2446,7 +2447,6 @@ typedef struct fc_port {
uint8_t fc4_type;
uint8_t fc4f_nvme;
uint8_t scan_state;
- uint8_t n2n_flag;
unsigned long last_queue_full;
unsigned long last_ramp_up;
@@ -3036,6 +3036,7 @@ enum scan_flags_t {
enum fc4type_t {
FS_FC4TYPE_FCP = BIT_0,
FS_FC4TYPE_NVME = BIT_1,
+ FS_FCP_IS_N2N = BIT_7,
};
struct fab_scan_rp {
@@ -4394,6 +4395,7 @@ typedef struct scsi_qla_host {
#define IOCB_WORK_ACTIVE 31
#define SET_ZIO_THRESHOLD_NEEDED 32
#define ISP_ABORT_TO_ROM 33
+#define VPORT_DELETE 34
unsigned long pci_flags;
#define PFLG_DISCONNECTED 0 /* PCI device removed */
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index dc0e36676313..5298ed10059f 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -3102,7 +3102,8 @@ int qla24xx_post_gpnid_work(struct scsi_qla_host *vha, port_id_t *id)
{
struct qla_work_evt *e;
- if (test_bit(UNLOADING, &vha->dpc_flags))
+ if (test_bit(UNLOADING, &vha->dpc_flags) ||
+ (vha->vp_idx && test_bit(VPORT_DELETE, &vha->dpc_flags)))
return 0;
e = qla2x00_alloc_work(vha, QLA_EVT_GPNID);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 643d2324082e..1d041313ec52 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -746,12 +746,15 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
break;
default:
if ((id.b24 != fcport->d_id.b24 &&
- fcport->d_id.b24) ||
+ fcport->d_id.b24 &&
+ fcport->loop_id != FC_NO_LOOP_ID) ||
(fcport->loop_id != FC_NO_LOOP_ID &&
fcport->loop_id != loop_id)) {
ql_dbg(ql_dbg_disc, vha, 0x20e3,
"%s %d %8phC post del sess\n",
__func__, __LINE__, fcport->port_name);
+ if (fcport->n2n_flag)
+ fcport->d_id.b24 = 0;
qlt_schedule_sess_for_deletion(fcport);
return;
}
@@ -759,6 +762,8 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
}
fcport->loop_id = loop_id;
+ if (fcport->n2n_flag)
+ fcport->d_id.b24 = id.b24;
wwn = wwn_to_u64(fcport->port_name);
qlt_find_sess_invalidate_other(vha, wwn,
@@ -972,7 +977,7 @@ static void qla24xx_async_gnl_sp_done(srb_t *sp, int res)
wwn = wwn_to_u64(e->port_name);
ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x20e8,
- "%s %8phC %02x:%02x:%02x state %d/%d lid %x \n",
+ "%s %8phC %02x:%02x:%02x CLS %x/%x lid %x \n",
__func__, (void *)&wwn, e->port_id[2], e->port_id[1],
e->port_id[0], e->current_login_state, e->last_login_state,
(loop_id & 0x7fff));
@@ -1499,7 +1504,8 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
(fcport->fw_login_state == DSC_LS_PRLI_PEND)))
return 0;
- if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
+ if (fcport->fw_login_state == DSC_LS_PLOGI_COMP &&
+ !N2N_TOPO(vha->hw)) {
if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) {
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
return 0;
@@ -1570,8 +1576,9 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
qla24xx_post_gpdb_work(vha, fcport, 0);
} else {
ql_dbg(ql_dbg_disc, vha, 0x2118,
- "%s %d %8phC post NVMe PRLI\n",
- __func__, __LINE__, fcport->port_name);
+ "%s %d %8phC post %s PRLI\n",
+ __func__, __LINE__, fcport->port_name,
+ fcport->fc4f_nvme ? "NVME" : "FC");
qla24xx_post_prli_work(vha, fcport);
}
break;
@@ -1853,17 +1860,38 @@ qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
break;
}
- if (ea->fcport->n2n_flag) {
+ if (ea->fcport->fc4f_nvme) {
ql_dbg(ql_dbg_disc, vha, 0x2118,
"%s %d %8phC post fc4 prli\n",
__func__, __LINE__, ea->fcport->port_name);
ea->fcport->fc4f_nvme = 0;
- ea->fcport->n2n_flag = 0;
qla24xx_post_prli_work(vha, ea->fcport);
+ return;
+ }
+
+ /* at this point both PRLI NVME & PRLI FCP failed */
+ if (N2N_TOPO(vha->hw)) {
+ if (ea->fcport->n2n_link_reset_cnt < 3) {
+ ea->fcport->n2n_link_reset_cnt++;
+ /*
+ * remote port is not sending Plogi. Reset
+ * link to kick start his state machine
+ */
+ set_bit(N2N_LINK_RESET, &vha->dpc_flags);
+ } else {
+ ql_log(ql_log_warn, vha, 0x2119,
+ "%s %d %8phC Unable to reconnect\n",
+ __func__, __LINE__, ea->fcport->port_name);
+ }
+ } else {
+ /*
+ * switch connect. login failed. Take connection
+ * down and allow relogin to retrigger
+ */
+ ea->fcport->flags &= ~FCF_ASYNC_SENT;
+ ea->fcport->keep_nport_handle = 0;
+ qlt_schedule_sess_for_deletion(ea->fcport);
}
- ql_dbg(ql_dbg_disc, vha, 0x2119,
- "%s %d %8phC unhandle event of %x\n",
- __func__, __LINE__, ea->fcport->port_name, ea->data[0]);
break;
}
}
@@ -3190,7 +3218,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
for (j = 0; j < 2; j++, fwdt++) {
if (!fwdt->template) {
- ql_log(ql_log_warn, vha, 0x00ba,
+ ql_dbg(ql_dbg_init, vha, 0x00ba,
"-> fwdt%u no template\n", j);
continue;
}
@@ -4986,28 +5014,47 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
unsigned long flags;
/* Inititae N2N login. */
- if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) {
- /* borrowing */
- u32 *bp, i, sz;
-
- memset(ha->init_cb, 0, ha->init_cb_size);
- sz = min_t(int, sizeof(struct els_plogi_payload),
- ha->init_cb_size);
- rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma,
- (void *)ha->init_cb, sz);
- if (rval == QLA_SUCCESS) {
- bp = (uint32_t *)ha->init_cb;
- for (i = 0; i < sz/4 ; i++, bp++)
- *bp = cpu_to_be32(*bp);
+ if (N2N_TOPO(ha)) {
+ if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) {
+ /* borrowing */
+ u32 *bp, i, sz;
+
+ memset(ha->init_cb, 0, ha->init_cb_size);
+ sz = min_t(int, sizeof(struct els_plogi_payload),
+ ha->init_cb_size);
+ rval = qla24xx_get_port_login_templ(vha,
+ ha->init_cb_dma, (void *)ha->init_cb, sz);
+ if (rval == QLA_SUCCESS) {
+ bp = (uint32_t *)ha->init_cb;
+ for (i = 0; i < sz/4 ; i++, bp++)
+ *bp = cpu_to_be32(*bp);
- memcpy(&ha->plogi_els_payld.data, (void *)ha->init_cb,
- sizeof(ha->plogi_els_payld.data));
- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
- } else {
- ql_dbg(ql_dbg_init, vha, 0x00d1,
- "PLOGI ELS param read fail.\n");
+ memcpy(&ha->plogi_els_payld.data,
+ (void *)ha->init_cb,
+ sizeof(ha->plogi_els_payld.data));
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ } else {
+ ql_dbg(ql_dbg_init, vha, 0x00d1,
+ "PLOGI ELS param read fail.\n");
+ goto skip_login;
+ }
+ }
+
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (fcport->n2n_flag) {
+ qla24xx_fcport_handle_login(vha, fcport);
+ return QLA_SUCCESS;
+ }
+ }
+skip_login:
+ spin_lock_irqsave(&vha->work_lock, flags);
+ vha->scan.scan_retry++;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+
+ if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
}
- return QLA_SUCCESS;
}
found_devs = 0;
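
Inside the restructured N2N block above, the borrowed init_cb buffer is still converted word by word with cpu_to_be32() before the PLOGI payload is copied out. The same in-place swap, runnable in userspace with htonl() standing in for cpu_to_be32():

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>	/* htonl(): host -> big endian, like cpu_to_be32 */

int main(void)
{
	uint32_t buf[4] = { 0x11223344, 0xaabbccdd, 0x01020304, 0xdeadbeef };
	size_t sz = sizeof(buf);
	uint32_t *bp = buf;

	/* Walk the buffer a word at a time and swap in place, the same
	 * shape as the driver's "for (i = 0; i < sz/4; i++, bp++)
	 * *bp = cpu_to_be32(*bp);" loop. */
	for (size_t i = 0; i < sz / 4; i++, bp++)
		*bp = htonl(*bp);

	for (size_t i = 0; i < 4; i++)
		printf("buf[%zu] = 0x%08x\n", i, buf[i]);
	return 0;
}
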
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index e92e52aa6e9b..518eb954cf42 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -2656,9 +2656,10 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
els_iocb->port_id[1] = sp->fcport->d_id.b.area;
els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
- els_iocb->s_id[0] = vha->d_id.b.al_pa;
- els_iocb->s_id[1] = vha->d_id.b.area;
- els_iocb->s_id[2] = vha->d_id.b.domain;
+ /* For SID the byte order is different than DID */
+ els_iocb->s_id[1] = vha->d_id.b.al_pa;
+ els_iocb->s_id[2] = vha->d_id.b.area;
+ els_iocb->s_id[0] = vha->d_id.b.domain;
if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
els_iocb->control_flags = 0;
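
The qla_iocb.c fix reorders the S_ID bytes because, per the new comment, the firmware expects a different byte order for S_ID than for D_ID in this ELS IOCB: domain lands in byte 0, al_pa in byte 1, area in byte 2. A small sketch contrasting the two packings of a 24-bit FC address (sample values arbitrary):

#include <stdio.h>
#include <stdint.h>

struct fc_id { uint8_t domain, area, al_pa; };

int main(void)
{
	struct fc_id id = { .domain = 0x01, .area = 0x02, .al_pa = 0xef };
	uint8_t did[3], sid[3];

	/* D_ID: al_pa / area / domain, low byte first. */
	did[0] = id.al_pa; did[1] = id.area; did[2] = id.domain;

	/* S_ID uses a different byte order in this IOCB (per the fix):
	 * domain in byte 0, al_pa in byte 1, area in byte 2. */
	sid[0] = id.domain; sid[1] = id.al_pa; sid[2] = id.area;

	printf("D_ID bytes: %02x %02x %02x\n", did[0], did[1], did[2]);
	printf("S_ID bytes: %02x %02x %02x\n", sid[0], sid[1], sid[2]);
	return 0;
}
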
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 4c26630c1c3e..009fd5a33fcd 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -2837,8 +2837,6 @@ qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
if (sense_len == 0) {
rsp->status_srb = NULL;
sp->done(sp, cp->result);
- } else {
- WARN_ON_ONCE(true);
}
}
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 4c858e2d0ea8..1cc6913f76c4 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -2249,7 +2249,7 @@ qla2x00_lip_reset(scsi_qla_host_t *vha)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105a,
+ ql_dbg(ql_dbg_disc, vha, 0x105a,
"Entered %s.\n", __func__);
if (IS_CNA_CAPABLE(vha->hw)) {
@@ -3883,14 +3883,24 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
case TOPO_N2N:
ha->current_topology = ISP_CFG_N;
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ fcport->scan_state = QLA_FCPORT_SCAN;
+ fcport->n2n_flag = 0;
+ }
+
fcport = qla2x00_find_fcport_by_wwpn(vha,
rptid_entry->u.f1.port_name, 1);
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
if (fcport) {
fcport->plogi_nack_done_deadline = jiffies + HZ;
- fcport->dm_login_expire = jiffies + 3*HZ;
+ fcport->dm_login_expire = jiffies + 2*HZ;
fcport->scan_state = QLA_FCPORT_FOUND;
+ fcport->n2n_flag = 1;
+ fcport->keep_nport_handle = 1;
+ if (vha->flags.nvme_enabled)
+ fcport->fc4f_nvme = 1;
+
switch (fcport->disc_state) {
case DSC_DELETED:
set_bit(RELOGIN_NEEDED,
@@ -3924,7 +3934,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
rptid_entry->u.f1.port_name,
rptid_entry->u.f1.node_name,
NULL,
- FC4_TYPE_UNKNOWN);
+ FS_FCP_IS_N2N);
}
/* if our portname is higher then initiate N2N login */
@@ -4023,6 +4033,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
list_for_each_entry(fcport, &vha->vp_fcports, list) {
fcport->scan_state = QLA_FCPORT_SCAN;
+ fcport->n2n_flag = 0;
}
fcport = qla2x00_find_fcport_by_wwpn(vha,
@@ -4032,6 +4043,14 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
fcport->login_retry = vha->hw->login_retry_count;
fcport->plogi_nack_done_deadline = jiffies + HZ;
fcport->scan_state = QLA_FCPORT_FOUND;
+ fcport->keep_nport_handle = 1;
+ fcport->n2n_flag = 1;
+ fcport->d_id.b.domain =
+ rptid_entry->u.f2.remote_nport_id[2];
+ fcport->d_id.b.area =
+ rptid_entry->u.f2.remote_nport_id[1];
+ fcport->d_id.b.al_pa =
+ rptid_entry->u.f2.remote_nport_id[0];
}
}
}
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 1a9a11ae7285..6afad68e5ba2 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -66,6 +66,7 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
uint16_t vp_id;
struct qla_hw_data *ha = vha->hw;
unsigned long flags = 0;
+ u8 i;
mutex_lock(&ha->vport_lock);
/*
@@ -75,8 +76,9 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
* ensures no active vp_list traversal while the vport is removed
* from the queue)
*/
- wait_event_timeout(vha->vref_waitq, !atomic_read(&vha->vref_count),
- 10*HZ);
+ for (i = 0; i < 10 && atomic_read(&vha->vref_count); i++)
+ wait_event_timeout(vha->vref_waitq,
+ atomic_read(&vha->vref_count), HZ);
spin_lock_irqsave(&ha->vport_slock, flags);
if (atomic_read(&vha->vref_count)) {
@@ -262,6 +264,9 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
spin_lock_irqsave(&ha->vport_slock, flags);
list_for_each_entry(vha, &ha->vp_list, list) {
if (vha->vp_idx) {
+ if (test_bit(VPORT_DELETE, &vha->dpc_flags))
+ continue;
+
atomic_inc(&vha->vref_count);
spin_unlock_irqrestore(&ha->vport_slock, flags);
@@ -300,6 +305,20 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
int
qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
{
+ fc_port_t *fcport;
+
+ /*
+ * To exclusively reset vport, we need to log it out first.
+ * Note: This control_vp can fail if ISP reset is already
+ * issued, this is expected, as the vp would be already
+ * logged out due to ISP reset.
+ */
+ if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
+ qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
+ list_for_each_entry(fcport, &vha->vp_fcports, list)
+ fcport->logout_on_delete = 0;
+ }
+
/*
* Physical port will do most of the abort and recovery work. We can
* just treat it as a loop down
@@ -312,16 +331,9 @@ qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
}
- /*
- * To exclusively reset vport, we need to log it out first. Note: this
- * control_vp can fail if ISP reset is already issued, this is
- * expected, as the vp would be already logged out due to ISP reset.
- */
- if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
- qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
-
ql_dbg(ql_dbg_taskm, vha, 0x801d,
"Scheduling enable of Vport %d.\n", vha->vp_idx);
+
return qla24xx_enable_vp(vha);
}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 73db01e3b4e4..bcb1e8598888 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1115,9 +1115,15 @@ static inline int test_fcport_count(scsi_qla_host_t *vha)
void
qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha)
{
+ u8 i;
+
qla2x00_mark_all_devices_lost(vha, 0);
- wait_event_timeout(vha->fcport_waitQ, test_fcport_count(vha), 10*HZ);
+ for (i = 0; i < 10; i++)
+ wait_event_timeout(vha->fcport_waitQ, test_fcport_count(vha),
+ HZ);
+
+ flush_workqueue(vha->hw->wq);
}
/*
@@ -3218,6 +3224,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out);
ha->wq = alloc_workqueue("qla2xxx_wq", 0, 0);
+ if (unlikely(!ha->wq)) {
+ ret = -ENOMEM;
+ goto probe_failed;
+ }
if (ha->isp_ops->initialize_adapter(base_vha)) {
ql_log(ql_log_fatal, base_vha, 0x00d6,
@@ -5036,6 +5046,10 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
memcpy(fcport->port_name, e->u.new_sess.port_name,
WWN_SIZE);
+
+ if (e->u.new_sess.fc4_type & FS_FCP_IS_N2N)
+ fcport->n2n_flag = 1;
+
} else {
ql_dbg(ql_dbg_disc, vha, 0xffff,
"%s %8phC mem alloc fail.\n",
@@ -5134,11 +5148,9 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
if (dfcp)
qlt_schedule_sess_for_deletion(tfcp);
-
- if (N2N_TOPO(vha->hw))
- fcport->flags &= ~FCF_FABRIC_DEVICE;
-
if (N2N_TOPO(vha->hw)) {
+ fcport->flags &= ~FCF_FABRIC_DEVICE;
+ fcport->keep_nport_handle = 1;
if (vha->flags.nvme_enabled) {
fcport->fc4f_nvme = 1;
fcport->n2n_flag = 1;
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 0ffda6171614..a06e56224a55 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -953,7 +953,7 @@ void qlt_free_session_done(struct work_struct *work)
struct qla_hw_data *ha = vha->hw;
unsigned long flags;
bool logout_started = false;
- scsi_qla_host_t *base_vha;
+ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
struct qlt_plogi_ack_t *own =
sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];
@@ -1020,6 +1020,7 @@ void qlt_free_session_done(struct work_struct *work)
if (logout_started) {
bool traced = false;
+ u16 cnt = 0;
while (!READ_ONCE(sess->logout_completed)) {
if (!traced) {
@@ -1029,6 +1030,9 @@ void qlt_free_session_done(struct work_struct *work)
traced = true;
}
msleep(100);
+ cnt++;
+ if (cnt > 200)
+ break;
}
ql_dbg(ql_dbg_disc, vha, 0xf087,
@@ -1101,6 +1105,7 @@ void qlt_free_session_done(struct work_struct *work)
}
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+ sess->free_pending = 0;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
"Unregistration of sess %p %8phC finished fcp_cnt %d\n",
@@ -1109,17 +1114,9 @@ void qlt_free_session_done(struct work_struct *work)
if (tgt && (tgt->sess_count == 0))
wake_up_all(&tgt->waitQ);
- if (vha->fcport_count == 0)
- wake_up_all(&vha->fcport_waitQ);
-
- base_vha = pci_get_drvdata(ha->pdev);
-
- sess->free_pending = 0;
-
- if (test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags))
- return;
-
- if ((!tgt || !tgt->tgt_stop) && !LOOP_TRANSITION(vha)) {
+ if (!test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags) &&
+ !(vha->vp_idx && test_bit(VPORT_DELETE, &vha->dpc_flags)) &&
+ (!tgt || !tgt->tgt_stop) && !LOOP_TRANSITION(vha)) {
switch (vha->host->active_mode) {
case MODE_INITIATOR:
case MODE_DUAL:
@@ -1132,6 +1129,9 @@ void qlt_free_session_done(struct work_struct *work)
break;
}
}
+
+ if (vha->fcport_count == 0)
+ wake_up_all(&vha->fcport_waitQ);
}
/* ha->tgt.sess_lock supposed to be held on entry */
@@ -1161,7 +1161,7 @@ void qlt_unreg_sess(struct fc_port *sess)
sess->last_login_gen = sess->login_gen;
INIT_WORK(&sess->free_work, qlt_free_session_done);
- schedule_work(&sess->free_work);
+ queue_work(sess->vha->hw->wq, &sess->free_work);
}
EXPORT_SYMBOL(qlt_unreg_sess);
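
Two patterns recur in the qla_target.c hunks above: the logout wait is now bounded to roughly 20 seconds (200 iterations of msleep(100)) instead of potentially unbounded, and session teardown is queued on the driver's own workqueue so that qla2x00_wait_for_sess_deletion() can flush it deterministically. A condensed sketch of the bounded-poll half (condition name assumed):

	u16 cnt = 0;

	while (!READ_ONCE(logout_completed)) {
		msleep(100);
		if (++cnt > 200)	/* give up after ~20 seconds */
			break;
	}
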
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 1c470e31ae81..ae2fa170f6ad 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -967,6 +967,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
ses->data_direction = scmd->sc_data_direction;
ses->sdb = scmd->sdb;
ses->result = scmd->result;
+ ses->resid_len = scmd->req.resid_len;
ses->underflow = scmd->underflow;
ses->prot_op = scmd->prot_op;
ses->eh_eflags = scmd->eh_eflags;
@@ -977,6 +978,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
memset(scmd->cmnd, 0, BLK_MAX_CDB);
memset(&scmd->sdb, 0, sizeof(scmd->sdb));
scmd->result = 0;
+ scmd->req.resid_len = 0;
if (sense_bytes) {
scmd->sdb.length = min_t(unsigned, SCSI_SENSE_BUFFERSIZE,
@@ -1029,6 +1031,7 @@ void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses)
scmd->sc_data_direction = ses->data_direction;
scmd->sdb = ses->sdb;
scmd->result = ses->result;
+ scmd->req.resid_len = ses->resid_len;
scmd->underflow = ses->underflow;
scmd->prot_op = ses->prot_op;
scmd->eh_eflags = ses->eh_eflags;
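
The residual length now follows the same three-step discipline as every other field the error handler touches: save it into the scratch area, zero it for the internally issued command, and restore it afterwards. Condensed from the hunks above:

	/* scsi_eh_prep_cmnd(): stash the value, then reset it for the EH command */
	ses->resid_len = scmd->req.resid_len;
	scmd->req.resid_len = 0;

	/* scsi_eh_restore_cmnd(): put the original value back */
	scmd->req.resid_len = ses->resid_len;
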
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index dc210b9d4896..5447738906ac 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1834,6 +1834,7 @@ static const struct blk_mq_ops scsi_mq_ops_no_commit = {
.init_request = scsi_mq_init_request,
.exit_request = scsi_mq_exit_request,
.initialize_rq_fn = scsi_initialize_rq,
+ .cleanup_rq = scsi_cleanup_rq,
.busy = scsi_mq_lld_busy,
.map_queues = scsi_map_queues,
};
@@ -1921,7 +1922,8 @@ struct scsi_device *scsi_device_from_queue(struct request_queue *q)
{
struct scsi_device *sdev = NULL;
- if (q->mq_ops == &scsi_mq_ops)
+ if (q->mq_ops == &scsi_mq_ops_no_commit ||
+ q->mq_ops == &scsi_mq_ops)
sdev = q->queuedata;
if (!sdev || !get_device(&sdev->sdev_gendev))
sdev = NULL;
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 64c96c7828ee..6d7362e7367e 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -730,6 +730,14 @@ sdev_store_delete(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct kernfs_node *kn;
+ struct scsi_device *sdev = to_scsi_device(dev);
+
+ /*
+ * We need to take a reference to the module here, to avoid the
+ * module being removed while the delete is in progress.
+ */
+ if (scsi_device_get(sdev))
+ return -ENODEV;
kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
WARN_ON_ONCE(!kn);
@@ -744,9 +752,10 @@ sdev_store_delete(struct device *dev, struct device_attribute *attr,
* state into SDEV_DEL.
*/
device_remove_file(dev, attr);
- scsi_remove_device(to_scsi_device(dev));
+ scsi_remove_device(sdev);
if (kn)
sysfs_unbreak_active_protection(kn);
+ scsi_device_put(sdev);
return count;
};
static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete);
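
The handler above must pin the scsi_device, and with it the owning LLD module, before initiating its own removal; otherwise the module could be unloaded while sdev_store_delete() is still executing. A minimal sketch of the required ordering:

	if (scsi_device_get(sdev))	/* also takes a module reference */
		return -ENODEV;
	/* drop sysfs active protection so self-removal cannot deadlock */
	kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
	scsi_remove_device(sdev);
	if (kn)
		sysfs_unbreak_active_protection(kn);
	scsi_device_put(sdev);		/* last: may drop the final reference */
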
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 50928bc266eb..03163ac5fe95 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1654,7 +1654,8 @@ static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
/* we need to evaluate the error return */
if (scsi_sense_valid(sshdr) &&
(sshdr->asc == 0x3a || /* medium not present */
- sshdr->asc == 0x20)) /* invalid command */
+ sshdr->asc == 0x20 || /* invalid command */
+ (sshdr->asc == 0x74 && sshdr->ascq == 0x71))) /* drive is password locked */
/* this is no error here */
return 0;
diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c
index aef4881d8e21..a85d52b5dc32 100644
--- a/drivers/scsi/sni_53c710.c
+++ b/drivers/scsi/sni_53c710.c
@@ -66,10 +66,8 @@ static int snirm710_probe(struct platform_device *dev)
base = res->start;
hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL);
- if (!hostdata) {
- dev_printk(KERN_ERR, dev, "Failed to allocate host data\n");
+ if (!hostdata)
return -ENOMEM;
- }
hostdata->dev = &dev->dev;
dma_set_mask(&dev->dev, DMA_BIT_MASK(32));
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index ed8b9ac805e6..542d2bac2922 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1837,8 +1837,7 @@ static int storvsc_probe(struct hv_device *device,
/*
* Set the number of HW queues we are supporting.
*/
- if (stor_device->num_sc != 0)
- host->nr_hw_queues = stor_device->num_sc + 1;
+ host->nr_hw_queues = num_present_cpus();
/*
* Set the error handler work queue.
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 034dd9cb9ec8..11a87f51c442 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -8143,6 +8143,9 @@ int ufshcd_shutdown(struct ufs_hba *hba)
{
int ret = 0;
+ if (!hba->is_powered)
+ goto out;
+
if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
goto out;
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index bf68d86d80ee..1e164e03410a 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -1749,6 +1749,13 @@ struct qman_portal *qman_get_affine_portal(int cpu)
}
EXPORT_SYMBOL(qman_get_affine_portal);
+int qman_start_using_portal(struct qman_portal *p, struct device *dev)
+{
+ return (!device_link_add(dev, p->config->dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER)) ? -EINVAL : 0;
+}
+EXPORT_SYMBOL(qman_start_using_portal);
+
int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
{
return __poll_portal_fast(p, limit);
diff --git a/drivers/soc/imx/soc-imx-scu.c b/drivers/soc/imx/soc-imx-scu.c
index 50831ebf126a..c68882eb80f7 100644
--- a/drivers/soc/imx/soc-imx-scu.c
+++ b/drivers/soc/imx/soc-imx-scu.c
@@ -46,7 +46,7 @@ static ssize_t soc_uid_show(struct device *dev,
hdr->func = IMX_SC_MISC_FUNC_UNIQUE_ID;
hdr->size = 1;
- ret = imx_scu_call_rpc(soc_ipc_handle, &msg, false);
+ ret = imx_scu_call_rpc(soc_ipc_handle, &msg, true);
if (ret) {
pr_err("%s: get soc uid failed, ret %d\n", __func__, ret);
return ret;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index f9502dbbb5c1..9bb36c32cbf9 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1171,6 +1171,11 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
spi_statistics_add_transfer_stats(statm, xfer, ctlr);
spi_statistics_add_transfer_stats(stats, xfer, ctlr);
+ if (!ctlr->ptp_sts_supported) {
+ xfer->ptp_sts_word_pre = 0;
+ ptp_read_system_prets(xfer->ptp_sts);
+ }
+
if (xfer->tx_buf || xfer->rx_buf) {
reinit_completion(&ctlr->xfer_completion);
@@ -1197,6 +1202,11 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
xfer->len);
}
+ if (!ctlr->ptp_sts_supported) {
+ ptp_read_system_postts(xfer->ptp_sts);
+ xfer->ptp_sts_word_post = xfer->len;
+ }
+
trace_spi_transfer_stop(msg, xfer);
if (msg->status != -EINPROGRESS)
@@ -1265,6 +1275,7 @@ EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
*/
static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
{
+ struct spi_transfer *xfer;
struct spi_message *msg;
bool was_busy = false;
unsigned long flags;
@@ -1391,6 +1402,13 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
goto out;
}
+ if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ xfer->ptp_sts_word_pre = 0;
+ ptp_read_system_prets(xfer->ptp_sts);
+ }
+ }
+
ret = ctlr->transfer_one_message(ctlr, msg);
if (ret) {
dev_err(&ctlr->dev,
@@ -1419,6 +1437,99 @@ static void spi_pump_messages(struct kthread_work *work)
}
/**
+ * spi_take_timestamp_pre - helper for drivers to collect the beginning of the
+ * TX timestamp for the requested byte from the SPI
+ * transfer. The frequency with which this function
+ * must be called (once per word, once for the whole
+ * transfer, once per batch of words, etc.) is arbitrary
+ * as long as the @tx buffer offset is greater than or
+ * equal to the requested byte at the time of the
+ * call. The timestamp is only taken once, at the
+ * first such call. It is assumed that the driver
+ * advances its @tx buffer pointer monotonically.
+ * @ctlr: Pointer to the spi_controller structure of the driver
+ * @xfer: Pointer to the transfer being timestamped
+ * @tx: Pointer to the current word within the xfer->tx_buf that the driver is
+ * preparing to transmit right now.
+ * @irqs_off: If true, will disable IRQs and preemption for the duration of the
+ * transfer, for less jitter in time measurement. Only compatible
+ * with PIO drivers. If true, the driver must follow up with
+ * spi_take_timestamp_post, otherwise the system will crash.
+ * WARNING: for fully predictable results, the CPU frequency must
+ * also be under control (governor).
+ */
+void spi_take_timestamp_pre(struct spi_controller *ctlr,
+ struct spi_transfer *xfer,
+ const void *tx, bool irqs_off)
+{
+ u8 bytes_per_word = DIV_ROUND_UP(xfer->bits_per_word, 8);
+
+ if (!xfer->ptp_sts)
+ return;
+
+ if (xfer->timestamped_pre)
+ return;
+
+ if (tx < (xfer->tx_buf + xfer->ptp_sts_word_pre * bytes_per_word))
+ return;
+
+ /* Capture the resolution of the timestamp */
+ xfer->ptp_sts_word_pre = (tx - xfer->tx_buf) / bytes_per_word;
+
+ xfer->timestamped_pre = true;
+
+ if (irqs_off) {
+ local_irq_save(ctlr->irq_flags);
+ preempt_disable();
+ }
+
+ ptp_read_system_prets(xfer->ptp_sts);
+}
+EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);
+
+/**
+ * spi_take_timestamp_post - helper for drivers to collect the end of the
+ * TX timestamp for the requested byte from the SPI
+ * transfer. Can be called with an arbitrary
+ * frequency: only the first call where @tx exceeds
+ * or is equal to the requested word will be
+ * timestamped.
+ * @ctlr: Pointer to the spi_controller structure of the driver
+ * @xfer: Pointer to the transfer being timestamped
+ * @tx: Pointer to the current word within the xfer->tx_buf that the driver has
+ * just transmitted.
+ * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
+ */
+void spi_take_timestamp_post(struct spi_controller *ctlr,
+ struct spi_transfer *xfer,
+ const void *tx, bool irqs_off)
+{
+ u8 bytes_per_word = DIV_ROUND_UP(xfer->bits_per_word, 8);
+
+ if (!xfer->ptp_sts)
+ return;
+
+ if (xfer->timestamped_post)
+ return;
+
+ if (tx < (xfer->tx_buf + xfer->ptp_sts_word_post * bytes_per_word))
+ return;
+
+ ptp_read_system_postts(xfer->ptp_sts);
+
+ if (irqs_off) {
+ local_irq_restore(ctlr->irq_flags);
+ preempt_enable();
+ }
+
+ /* Capture the resolution of the timestamp */
+ xfer->ptp_sts_word_post = (tx - xfer->tx_buf) / bytes_per_word;
+
+ xfer->timestamped_post = true;
+}
+EXPORT_SYMBOL_GPL(spi_take_timestamp_post);
+
+/**
* spi_set_thread_rt - set the controller to pump at realtime priority
* @ctlr: controller to boost priority of
*
@@ -1503,6 +1614,7 @@ EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
*/
void spi_finalize_current_message(struct spi_controller *ctlr)
{
+ struct spi_transfer *xfer;
struct spi_message *mesg;
unsigned long flags;
int ret;
@@ -1511,6 +1623,13 @@ void spi_finalize_current_message(struct spi_controller *ctlr)
mesg = ctlr->cur_msg;
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
+ if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
+ list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
+ ptp_read_system_postts(xfer->ptp_sts);
+ xfer->ptp_sts_word_post = xfer->len;
+ }
+ }
+
spi_unmap_msg(ctlr, mesg);
if (ctlr->cur_msg_prepared && ctlr->unprepare_message) {
@@ -3273,6 +3392,7 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
struct spi_controller *ctlr = spi->controller;
+ struct spi_transfer *xfer;
/*
* Some controllers do not support doing regular SPI transfers. Return
@@ -3288,6 +3408,13 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
trace_spi_message_submit(message);
+ if (!ctlr->ptp_sts_supported) {
+ list_for_each_entry(xfer, &message->transfers, transfer_list) {
+ xfer->ptp_sts_word_pre = 0;
+ ptp_read_system_prets(xfer->ptp_sts);
+ }
+ }
+
return ctlr->transfer(spi, message);
}
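
The two helpers above are meant to be called from a controller driver's PIO transmit path, bracketing the word for which the client requested a timestamp; both are cheap no-ops until @tx reaches the requested offset. A minimal calling sketch, assuming a hypothetical memory-mapped TX register:

	static void my_pio_tx(struct spi_controller *ctlr,
			      struct spi_transfer *xfer, void __iomem *tx_reg)
	{
		const u8 *tx = xfer->tx_buf;
		int i;

		for (i = 0; i < xfer->len; i++) {
			spi_take_timestamp_pre(ctlr, xfer, &tx[i], false);
			writeb(tx[i], tx_reg);	/* hypothetical TX register */
			spi_take_timestamp_post(ctlr, xfer, &tx[i], false);
		}
	}
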
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 6f1fa4c849a1..333308fe807e 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -125,4 +125,6 @@ source "drivers/staging/exfat/Kconfig"
source "drivers/staging/qlge/Kconfig"
+source "drivers/staging/hp/Kconfig"
+
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index a90f9b308c8d..e4943cd63e98 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -53,3 +53,4 @@ obj-$(CONFIG_UWB) += uwb/
obj-$(CONFIG_USB_WUSB) += wusbcore/
obj-$(CONFIG_EXFAT_FS) += exfat/
obj-$(CONFIG_QLGE) += qlge/
+obj-$(CONFIG_NET_VENDOR_HP) += hp/
diff --git a/drivers/staging/exfat/Kconfig b/drivers/staging/exfat/Kconfig
index 290dbfc7ace1..ce32dfe33bec 100644
--- a/drivers/staging/exfat/Kconfig
+++ b/drivers/staging/exfat/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config EXFAT_FS
tristate "exFAT fs support"
depends on BLOCK
@@ -6,7 +7,7 @@ config EXFAT_FS
This adds support for the exFAT file system.
config EXFAT_DONT_MOUNT_VFAT
- bool "Prohibit mounting of fat/vfat filesysems by exFAT"
+ bool "Prohibit mounting of fat/vfat filesystems by exFAT"
depends on EXFAT_FS
default y
help
diff --git a/drivers/staging/exfat/Makefile b/drivers/staging/exfat/Makefile
index 84944dfbae28..6c90aec83feb 100644
--- a/drivers/staging/exfat/Makefile
+++ b/drivers/staging/exfat/Makefile
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: GPL-2.0-or-later
obj-$(CONFIG_EXFAT_FS) += exfat.o
diff --git a/drivers/staging/exfat/exfat.h b/drivers/staging/exfat/exfat.h
index 6c12f2d79f4d..3abab33e932c 100644
--- a/drivers/staging/exfat/exfat.h
+++ b/drivers/staging/exfat/exfat.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
*/
diff --git a/drivers/staging/exfat/exfat_blkdev.c b/drivers/staging/exfat/exfat_blkdev.c
index f086c75e7076..81d20e6241c6 100644
--- a/drivers/staging/exfat/exfat_blkdev.c
+++ b/drivers/staging/exfat/exfat_blkdev.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
*/
diff --git a/drivers/staging/exfat/exfat_cache.c b/drivers/staging/exfat/exfat_cache.c
index 1565ce65d39f..e1b001718709 100644
--- a/drivers/staging/exfat/exfat_cache.c
+++ b/drivers/staging/exfat/exfat_cache.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
*/
diff --git a/drivers/staging/exfat/exfat_core.c b/drivers/staging/exfat/exfat_core.c
index b3e9cf725cf5..79174e5c4145 100644
--- a/drivers/staging/exfat/exfat_core.c
+++ b/drivers/staging/exfat/exfat_core.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
*/
diff --git a/drivers/staging/exfat/exfat_nls.c b/drivers/staging/exfat/exfat_nls.c
index 03cb8290b5d2..a5c4b68925fb 100644
--- a/drivers/staging/exfat/exfat_nls.c
+++ b/drivers/staging/exfat/exfat_nls.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
*/
diff --git a/drivers/staging/exfat/exfat_super.c b/drivers/staging/exfat/exfat_super.c
index 5f6caee819a6..3b2b0ceb7297 100644
--- a/drivers/staging/exfat/exfat_super.c
+++ b/drivers/staging/exfat/exfat_super.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
*/
@@ -7,6 +7,7 @@
#include <linux/init.h>
#include <linux/time.h>
#include <linux/slab.h>
+#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/mpage.h>
@@ -3450,7 +3451,7 @@ static void exfat_free_super(struct exfat_sb_info *sbi)
kfree(sbi->options.iocharset);
/* mutex_init is in exfat_fill_super function. only for 3.7+ */
mutex_destroy(&sbi->s_lock);
- kfree(sbi);
+ kvfree(sbi);
}
static void exfat_put_super(struct super_block *sb)
@@ -3845,7 +3846,7 @@ static int exfat_fill_super(struct super_block *sb, void *data, int silent)
* the filesystem, since we're only just about to mount
* it and have no inodes etc active!
*/
- sbi = kzalloc(sizeof(struct exfat_sb_info), GFP_KERNEL);
+ sbi = kvzalloc(sizeof(*sbi), GFP_KERNEL);
if (!sbi)
return -ENOMEM;
mutex_init(&sbi->s_lock);
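
The allocation switch matters because kvzalloc() may satisfy the request from vmalloc space when contiguous pages are scarce; the matching release must then be kvfree(), which the earlier exfat_free_super() hunk applies. Freeing a vmalloc-backed pointer with plain kfree() corrupts memory. The pairing in short:

	sbi = kvzalloc(sizeof(*sbi), GFP_KERNEL);	/* may be vmalloc-backed */
	if (!sbi)
		return -ENOMEM;
	/* ... */
	kvfree(sbi);	/* correct for kmalloc- and vmalloc-backed memory alike */
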
diff --git a/drivers/staging/exfat/exfat_upcase.c b/drivers/staging/exfat/exfat_upcase.c
index 366082fb3dab..b91a1faa0e50 100644
--- a/drivers/staging/exfat/exfat_upcase.c
+++ b/drivers/staging/exfat/exfat_upcase.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
*/
diff --git a/drivers/staging/fbtft/Kconfig b/drivers/staging/fbtft/Kconfig
index 8ec524a95ec8..cb61c2a772bd 100644
--- a/drivers/staging/fbtft/Kconfig
+++ b/drivers/staging/fbtft/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
menuconfig FB_TFT
tristate "Support for small TFT LCD display modules"
- depends on FB && SPI
+ depends on FB && SPI && OF
depends on GPIOLIB || COMPILE_TEST
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
@@ -199,13 +199,3 @@ config FB_TFT_WATTEROTT
depends on FB_TFT
help
Generic Framebuffer support for WATTEROTT
-
-config FB_FLEX
- tristate "Generic FB driver for TFT LCD displays"
- depends on FB_TFT
- help
- Generic Framebuffer support for TFT LCD displays.
-
-config FB_TFT_FBTFT_DEVICE
- tristate "Module to for adding FBTFT devices"
- depends on FB_TFT
diff --git a/drivers/staging/fbtft/Makefile b/drivers/staging/fbtft/Makefile
index 6bc03311c9c7..27af43f32f81 100644
--- a/drivers/staging/fbtft/Makefile
+++ b/drivers/staging/fbtft/Makefile
@@ -36,7 +36,3 @@ obj-$(CONFIG_FB_TFT_UC1611) += fb_uc1611.o
obj-$(CONFIG_FB_TFT_UC1701) += fb_uc1701.o
obj-$(CONFIG_FB_TFT_UPD161704) += fb_upd161704.o
obj-$(CONFIG_FB_TFT_WATTEROTT) += fb_watterott.o
-obj-$(CONFIG_FB_FLEX) += flexfb.o
-
-# Device modules
-obj-$(CONFIG_FB_TFT_FBTFT_DEVICE) += fbtft_device.o
diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
index cf5700a2ea66..a0a67aa517f0 100644
--- a/drivers/staging/fbtft/fbtft-core.c
+++ b/drivers/staging/fbtft/fbtft-core.c
@@ -714,7 +714,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
if (par->gamma.curves && gamma) {
if (fbtft_gamma_parse_str(par, par->gamma.curves, gamma,
strlen(gamma)))
- goto alloc_fail;
+ goto release_framebuf;
}
/* Transmit buffer */
@@ -731,7 +731,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
if (txbuflen > 0) {
txbuf = devm_kzalloc(par->info->device, txbuflen, GFP_KERNEL);
if (!txbuf)
- goto alloc_fail;
+ goto release_framebuf;
par->txbuf.buf = txbuf;
par->txbuf.len = txbuflen;
}
@@ -753,6 +753,9 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
return info;
+release_framebuf:
+ framebuffer_release(info);
+
alloc_fail:
vfree(vmem);
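
The fbtft-core.c fix retargets the failure gotos so that anything that fails after framebuffer_alloc() unwinds through framebuffer_release() before the video memory is freed. A condensed sketch of the unwind order assumed by the hunk above (the failing step is a placeholder):

	info = framebuffer_alloc(sizeof(struct fbtft_par), dev);
	if (!info)
		goto alloc_fail;
	if (setup_fails)	/* e.g. gamma parse or txbuf allocation */
		goto release_framebuf;
	return info;

	release_framebuf:
		framebuffer_release(info);
	alloc_fail:
		vfree(vmem);
		return NULL;
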
diff --git a/drivers/staging/fbtft/fbtft_device.c b/drivers/staging/fbtft/fbtft_device.c
deleted file mode 100644
index 44e1410eb3fe..000000000000
--- a/drivers/staging/fbtft/fbtft_device.c
+++ /dev/null
@@ -1,1261 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- *
- * Copyright (C) 2013, Noralf Tronnes
- */
-
-#define pr_fmt(fmt) "fbtft_device: " fmt
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/gpio/consumer.h>
-#include <linux/spi/spi.h>
-#include <video/mipi_display.h>
-
-#include "fbtft.h"
-
-#define MAX_GPIOS 32
-
-static struct spi_device *spi_device;
-static struct platform_device *p_device;
-
-static char *name;
-module_param(name, charp, 0000);
-MODULE_PARM_DESC(name,
- "Devicename (required). name=list => list all supported devices.");
-
-static unsigned int rotate;
-module_param(rotate, uint, 0000);
-MODULE_PARM_DESC(rotate,
- "Angle to rotate display counter clockwise: 0, 90, 180, 270");
-
-static unsigned int busnum;
-module_param(busnum, uint, 0000);
-MODULE_PARM_DESC(busnum, "SPI bus number (default=0)");
-
-static unsigned int cs;
-module_param(cs, uint, 0000);
-MODULE_PARM_DESC(cs, "SPI chip select (default=0)");
-
-static unsigned int speed;
-module_param(speed, uint, 0000);
-MODULE_PARM_DESC(speed, "SPI speed (override device default)");
-
-static int mode = -1;
-module_param(mode, int, 0000);
-MODULE_PARM_DESC(mode, "SPI mode (override device default)");
-
-static unsigned int fps;
-module_param(fps, uint, 0000);
-MODULE_PARM_DESC(fps, "Frames per second (override driver default)");
-
-static char *gamma;
-module_param(gamma, charp, 0000);
-MODULE_PARM_DESC(gamma,
- "String representation of Gamma Curve(s). Driver specific.");
-
-static int txbuflen;
-module_param(txbuflen, int, 0000);
-MODULE_PARM_DESC(txbuflen, "txbuflen (override driver default)");
-
-static int bgr = -1;
-module_param(bgr, int, 0000);
-MODULE_PARM_DESC(bgr,
- "BGR bit (supported by some drivers).");
-
-static unsigned int startbyte;
-module_param(startbyte, uint, 0000);
-MODULE_PARM_DESC(startbyte, "Sets the Start byte used by some SPI displays.");
-
-static bool custom;
-module_param(custom, bool, 0000);
-MODULE_PARM_DESC(custom, "Add a custom display device. Use speed= argument to make it a SPI device, else platform_device");
-
-static unsigned int width;
-module_param(width, uint, 0000);
-MODULE_PARM_DESC(width, "Display width, used with the custom argument");
-
-static unsigned int height;
-module_param(height, uint, 0000);
-MODULE_PARM_DESC(height, "Display height, used with the custom argument");
-
-static unsigned int buswidth = 8;
-module_param(buswidth, uint, 0000);
-MODULE_PARM_DESC(buswidth, "Display bus width, used with the custom argument");
-
-static s16 init[FBTFT_MAX_INIT_SEQUENCE];
-static int init_num;
-module_param_array(init, short, &init_num, 0000);
-MODULE_PARM_DESC(init, "Init sequence, used with the custom argument");
-
-static unsigned long debug;
-module_param(debug, ulong, 0000);
-MODULE_PARM_DESC(debug,
- "level: 0-7 (the remaining 29 bits is for advanced usage)");
-
-static unsigned int verbose = 3;
-module_param(verbose, uint, 0000);
-MODULE_PARM_DESC(verbose,
- "0 silent, >1 show devices, >2 show devices before (default=3)");
-
-struct fbtft_device_display {
- char *name;
- struct spi_board_info *spi;
- struct platform_device *pdev;
-};
-
-static void fbtft_device_pdev_release(struct device *dev);
-
-static int write_gpio16_wr_slow(struct fbtft_par *par, void *buf, size_t len);
-static void adafruit18_green_tab_set_addr_win(struct fbtft_par *par,
- int xs, int ys, int xe, int ye);
-
-#define ADAFRUIT18_GAMMA \
- "02 1c 07 12 37 32 29 2d 29 25 2B 39 00 01 03 10\n" \
- "03 1d 07 06 2E 2C 29 2D 2E 2E 37 3F 00 00 02 10"
-
-#define CBERRY28_GAMMA \
- "D0 00 14 15 13 2C 42 43 4E 09 16 14 18 21\n" \
- "D0 00 14 15 13 0B 43 55 53 0C 17 14 23 20"
-
-static const s16 cberry28_init_sequence[] = {
- /* turn off sleep mode */
- -1, MIPI_DCS_EXIT_SLEEP_MODE,
- -2, 120,
-
- /* set pixel format to RGB-565 */
- -1, MIPI_DCS_SET_PIXEL_FORMAT, MIPI_DCS_PIXEL_FMT_16BIT,
-
- -1, 0xB2, 0x0C, 0x0C, 0x00, 0x33, 0x33,
-
- /*
- * VGH = 13.26V
- * VGL = -10.43V
- */
- -1, 0xB7, 0x35,
-
- /*
- * VDV and VRH register values come from command write
- * (instead of NVM)
- */
- -1, 0xC2, 0x01, 0xFF,
-
- /*
- * VAP = 4.7V + (VCOM + VCOM offset + 0.5 * VDV)
- * VAN = -4.7V + (VCOM + VCOM offset + 0.5 * VDV)
- */
- -1, 0xC3, 0x17,
-
- /* VDV = 0V */
- -1, 0xC4, 0x20,
-
- /* VCOM = 0.675V */
- -1, 0xBB, 0x17,
-
- /* VCOM offset = 0V */
- -1, 0xC5, 0x20,
-
- /*
- * AVDD = 6.8V
- * AVCL = -4.8V
- * VDS = 2.3V
- */
- -1, 0xD0, 0xA4, 0xA1,
-
- -1, MIPI_DCS_SET_DISPLAY_ON,
-
- -3,
-};
-
-static const s16 hy28b_init_sequence[] = {
- -1, 0x00e7, 0x0010, -1, 0x0000, 0x0001,
- -1, 0x0001, 0x0100, -1, 0x0002, 0x0700,
- -1, 0x0003, 0x1030, -1, 0x0004, 0x0000,
- -1, 0x0008, 0x0207, -1, 0x0009, 0x0000,
- -1, 0x000a, 0x0000, -1, 0x000c, 0x0001,
- -1, 0x000d, 0x0000, -1, 0x000f, 0x0000,
- -1, 0x0010, 0x0000, -1, 0x0011, 0x0007,
- -1, 0x0012, 0x0000, -1, 0x0013, 0x0000,
- -2, 50, -1, 0x0010, 0x1590, -1, 0x0011,
- 0x0227, -2, 50, -1, 0x0012, 0x009c, -2, 50,
- -1, 0x0013, 0x1900, -1, 0x0029, 0x0023,
- -1, 0x002b, 0x000e, -2, 50,
- -1, 0x0020, 0x0000, -1, 0x0021, 0x0000,
- -2, 50, -1, 0x0050, 0x0000,
- -1, 0x0051, 0x00ef, -1, 0x0052, 0x0000,
- -1, 0x0053, 0x013f, -1, 0x0060, 0xa700,
- -1, 0x0061, 0x0001, -1, 0x006a, 0x0000,
- -1, 0x0080, 0x0000, -1, 0x0081, 0x0000,
- -1, 0x0082, 0x0000, -1, 0x0083, 0x0000,
- -1, 0x0084, 0x0000, -1, 0x0085, 0x0000,
- -1, 0x0090, 0x0010, -1, 0x0092, 0x0000,
- -1, 0x0093, 0x0003, -1, 0x0095, 0x0110,
- -1, 0x0097, 0x0000, -1, 0x0098, 0x0000,
- -1, 0x0007, 0x0133, -1, 0x0020, 0x0000,
- -1, 0x0021, 0x0000, -2, 100, -3 };
-
-#define HY28B_GAMMA \
- "04 1F 4 7 7 0 7 7 6 0\n" \
- "0F 00 1 7 4 0 0 0 6 7"
-
-static const s16 pitft_init_sequence[] = {
- -1, MIPI_DCS_SOFT_RESET,
- -2, 5,
- -1, MIPI_DCS_SET_DISPLAY_OFF,
- -1, 0xEF, 0x03, 0x80, 0x02,
- -1, 0xCF, 0x00, 0xC1, 0x30,
- -1, 0xED, 0x64, 0x03, 0x12, 0x81,
- -1, 0xE8, 0x85, 0x00, 0x78,
- -1, 0xCB, 0x39, 0x2C, 0x00, 0x34, 0x02,
- -1, 0xF7, 0x20,
- -1, 0xEA, 0x00, 0x00,
- -1, 0xC0, 0x23,
- -1, 0xC1, 0x10,
- -1, 0xC5, 0x3E, 0x28,
- -1, 0xC7, 0x86,
- -1, MIPI_DCS_SET_PIXEL_FORMAT, 0x55,
- -1, 0xB1, 0x00, 0x18,
- -1, 0xB6, 0x08, 0x82, 0x27,
- -1, 0xF2, 0x00,
- -1, MIPI_DCS_SET_GAMMA_CURVE, 0x01,
- -1, 0xE0, 0x0F, 0x31, 0x2B, 0x0C, 0x0E, 0x08, 0x4E,
- 0xF1, 0x37, 0x07, 0x10, 0x03, 0x0E, 0x09, 0x00,
- -1, 0xE1, 0x00, 0x0E, 0x14, 0x03, 0x11, 0x07, 0x31,
- 0xC1, 0x48, 0x08, 0x0F, 0x0C, 0x31, 0x36, 0x0F,
- -1, MIPI_DCS_EXIT_SLEEP_MODE,
- -2, 100,
- -1, MIPI_DCS_SET_DISPLAY_ON,
- -2, 20,
- -3
-};
-
-static const s16 waveshare32b_init_sequence[] = {
- -1, 0xCB, 0x39, 0x2C, 0x00, 0x34, 0x02,
- -1, 0xCF, 0x00, 0xC1, 0x30,
- -1, 0xE8, 0x85, 0x00, 0x78,
- -1, 0xEA, 0x00, 0x00,
- -1, 0xED, 0x64, 0x03, 0x12, 0x81,
- -1, 0xF7, 0x20,
- -1, 0xC0, 0x23,
- -1, 0xC1, 0x10,
- -1, 0xC5, 0x3E, 0x28,
- -1, 0xC7, 0x86,
- -1, MIPI_DCS_SET_ADDRESS_MODE, 0x28,
- -1, MIPI_DCS_SET_PIXEL_FORMAT, 0x55,
- -1, 0xB1, 0x00, 0x18,
- -1, 0xB6, 0x08, 0x82, 0x27,
- -1, 0xF2, 0x00,
- -1, MIPI_DCS_SET_GAMMA_CURVE, 0x01,
- -1, 0xE0, 0x0F, 0x31, 0x2B, 0x0C, 0x0E, 0x08, 0x4E,
- 0xF1, 0x37, 0x07, 0x10, 0x03, 0x0E, 0x09, 0x00,
- -1, 0xE1, 0x00, 0x0E, 0x14, 0x03, 0x11, 0x07, 0x31,
- 0xC1, 0x48, 0x08, 0x0F, 0x0C, 0x31, 0x36, 0x0F,
- -1, MIPI_DCS_EXIT_SLEEP_MODE,
- -2, 120,
- -1, MIPI_DCS_SET_DISPLAY_ON,
- -1, MIPI_DCS_WRITE_MEMORY_START,
- -3
-};
-
-#define PIOLED_GAMMA "0 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 " \
- "2 2 2 2 2 2 2 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 " \
- "3 3 3 4 4 4 4 4 4 4 4 4 4 4 4"
-
-/* Supported displays in alphabetical order */
-static struct fbtft_device_display displays[] = {
- {
- .name = "adafruit18",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_st7735r",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- },
- .gamma = ADAFRUIT18_GAMMA,
- }
- }
- }, {
- .name = "adafruit18_green",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_st7735r",
- .max_speed_hz = 4000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- .fbtftops.set_addr_win =
- adafruit18_green_tab_set_addr_win,
- },
- .bgr = true,
- .gamma = ADAFRUIT18_GAMMA,
- }
- }
- }, {
- .name = "adafruit22",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_hx8340bn",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 9,
- .backlight = 1,
- },
- .bgr = true,
- }
- }
- }, {
- .name = "adafruit22a",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ili9340",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- },
- .bgr = true,
- }
- }
- }, {
- .name = "adafruit28",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ili9341",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- },
- .bgr = true,
- }
- }
- }, {
- .name = "adafruit13m",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ssd1306",
- .max_speed_hz = 16000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- },
- }
- }
- }, {
- .name = "admatec_c-berry28",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_st7789v",
- .max_speed_hz = 48000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- .init_sequence = cberry28_init_sequence,
- },
- .gamma = CBERRY28_GAMMA,
- }
- }
- }, {
- .name = "agm1264k-fl",
- .pdev = &(struct platform_device) {
- .name = "fb_agm1264k-fl",
- .id = 0,
- .dev = {
- .release = fbtft_device_pdev_release,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = FBTFT_ONBOARD_BACKLIGHT,
- },
- },
- }
- }
- }, {
- .name = "dogs102",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_uc1701",
- .max_speed_hz = 8000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- },
- .bgr = true,
- }
- }
- }, {
- .name = "er_tftm050_2",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ra8875",
- .max_speed_hz = 5000000,
- .mode = SPI_MODE_3,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- .width = 480,
- .height = 272,
- },
- .bgr = true,
- }
- }
- }, {
- .name = "er_tftm070_5",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ra8875",
- .max_speed_hz = 5000000,
- .mode = SPI_MODE_3,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- .width = 800,
- .height = 480,
- },
- .bgr = true,
- }
- }
- }, {
- .name = "ew24ha0",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_uc1611",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_3,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- },
- }
- }
- }, {
- .name = "ew24ha0_9bit",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_uc1611",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_3,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 9,
- },
- }
- }
- }, {
- .name = "flexfb",
- .spi = &(struct spi_board_info) {
- .modalias = "flexfb",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_0,
- }
- }, {
- .name = "flexpfb",
- .pdev = &(struct platform_device) {
- .name = "flexpfb",
- .id = 0,
- .dev = {
- .release = fbtft_device_pdev_release,
- }
- }
- }, {
- .name = "freetronicsoled128",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ssd1351",
- .max_speed_hz = 20000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = FBTFT_ONBOARD_BACKLIGHT,
- },
- .bgr = true,
- }
- }
- }, {
- .name = "hx8353d",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_hx8353d",
- .max_speed_hz = 16000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- },
- }
- }
- }, {
- .name = "hy28a",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ili9320",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_3,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- },
- .startbyte = 0x70,
- .bgr = true,
- }
- }
- }, {
- .name = "hy28b",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ili9325",
- .max_speed_hz = 48000000,
- .mode = SPI_MODE_3,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- .init_sequence = hy28b_init_sequence,
- },
- .startbyte = 0x70,
- .bgr = true,
- .fps = 50,
- .gamma = HY28B_GAMMA,
- }
- }
- }, {
- .name = "ili9481",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ili9481",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .regwidth = 16,
- .buswidth = 8,
- .backlight = 1,
- },
- .bgr = true,
- }
- }
- }, {
- .name = "itdb24",
- .pdev = &(struct platform_device) {
- .name = "fb_s6d1121",
- .id = 0,
- .dev = {
- .release = fbtft_device_pdev_release,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- },
- .bgr = false,
- },
- }
- }
- }, {
- .name = "itdb28",
- .pdev = &(struct platform_device) {
- .name = "fb_ili9325",
- .id = 0,
- .dev = {
- .release = fbtft_device_pdev_release,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- },
- .bgr = true,
- },
- }
- }
- }, {
- .name = "itdb28_spi",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ili9325",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- },
- .bgr = true,
- }
- }
- }, {
- .name = "mi0283qt-2",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_hx8347d",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- },
- .startbyte = 0x70,
- .bgr = true,
- }
- }
- }, {
- .name = "mi0283qt-9a",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ili9341",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 9,
- .backlight = 1,
- },
- .bgr = true,
- }
- }
- }, {
- .name = "mi0283qt-v2",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_watterott",
- .max_speed_hz = 4000000,
- .mode = SPI_MODE_3,
- .platform_data = &(struct fbtft_platform_data) {
- }
- }
- }, {
- .name = "nokia3310",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_pcd8544",
- .max_speed_hz = 400000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- },
- }
- }
- }, {
- .name = "nokia3310a",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_tls8204",
- .max_speed_hz = 1000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- },
- }
- }
- }, {
- .name = "nokia5110",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ili9163",
- .max_speed_hz = 12000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- },
- .bgr = true,
- }
- }
- }, {
- .name = "piscreen",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ili9486",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .regwidth = 16,
- .buswidth = 8,
- .backlight = 1,
- },
- .bgr = true,
- }
- }
- }, {
- .name = "pitft",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ili9340",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_0,
- .chip_select = 0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- .init_sequence = pitft_init_sequence,
- },
- .bgr = true,
- }
- }
- }, {
- .name = "pioled",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ssd1351",
- .max_speed_hz = 20000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- },
- .bgr = true,
- .gamma = PIOLED_GAMMA
- }
- }
- }, {
- .name = "rpi-display",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ili9341",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- },
- .bgr = true,
- }
- }
- }, {
- .name = "s6d02a1",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_s6d02a1",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- },
- .bgr = true,
- }
- }
- }, {
- .name = "sainsmart18",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_st7735r",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- },
- }
- }
- }, {
- .name = "sainsmart32",
- .pdev = &(struct platform_device) {
- .name = "fb_ssd1289",
- .id = 0,
- .dev = {
- .release = fbtft_device_pdev_release,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 16,
- .txbuflen = -2, /* disable buffer */
- .backlight = 1,
- .fbtftops.write = write_gpio16_wr_slow,
- },
- .bgr = true,
- },
- },
- }
- }, {
- .name = "sainsmart32_fast",
- .pdev = &(struct platform_device) {
- .name = "fb_ssd1289",
- .id = 0,
- .dev = {
- .release = fbtft_device_pdev_release,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 16,
- .txbuflen = -2, /* disable buffer */
- .backlight = 1,
- },
- .bgr = true,
- },
- },
- }
- }, {
- .name = "sainsmart32_latched",
- .pdev = &(struct platform_device) {
- .name = "fb_ssd1289",
- .id = 0,
- .dev = {
- .release = fbtft_device_pdev_release,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 16,
- .txbuflen = -2, /* disable buffer */
- .backlight = 1,
- .fbtftops.write =
- fbtft_write_gpio16_wr_latched,
- },
- .bgr = true,
- },
- },
- }
- }, {
- .name = "sainsmart32_spi",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ssd1289",
- .max_speed_hz = 16000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- },
- .bgr = true,
- }
- }
- }, {
- .name = "spidev",
- .spi = &(struct spi_board_info) {
- .modalias = "spidev",
- .max_speed_hz = 500000,
- .bus_num = 0,
- .chip_select = 0,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- }
- }
- }, {
- .name = "ssd1331",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ssd1331",
- .max_speed_hz = 20000000,
- .mode = SPI_MODE_3,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- },
- }
- }
- }, {
- .name = "tinylcd35",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_tinylcd",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- },
- .bgr = true,
- }
- }
- }, {
- .name = "tm022hdh26",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ili9341",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- },
- .bgr = true,
- }
- }
- }, {
- .name = "tontec35_9481", /* boards before 02 July 2014 */
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ili9481",
- .max_speed_hz = 128000000,
- .mode = SPI_MODE_3,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- },
- .bgr = true,
- }
- }
- }, {
- .name = "tontec35_9486", /* boards after 02 July 2014 */
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ili9486",
- .max_speed_hz = 128000000,
- .mode = SPI_MODE_3,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- },
- .bgr = true,
- }
- }
- }, {
- .name = "upd161704",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_upd161704",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- },
- }
- }
- }, {
- .name = "waveshare32b",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_ili9340",
- .max_speed_hz = 48000000,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- .backlight = 1,
- .init_sequence =
- waveshare32b_init_sequence,
- },
- .bgr = true,
- }
- }
- }, {
- .name = "waveshare22",
- .spi = &(struct spi_board_info) {
- .modalias = "fb_bd663474",
- .max_speed_hz = 32000000,
- .mode = SPI_MODE_3,
- .platform_data = &(struct fbtft_platform_data) {
- .display = {
- .buswidth = 8,
- },
- }
- }
- }, {
- /* This should be the last item.
- * Used with the custom argument
- */
- .name = "",
- .spi = &(struct spi_board_info) {
- .modalias = "",
- .max_speed_hz = 0,
- .mode = SPI_MODE_0,
- .platform_data = &(struct fbtft_platform_data) {
- }
- },
- .pdev = &(struct platform_device) {
- .name = "",
- .id = 0,
- .dev = {
- .release = fbtft_device_pdev_release,
- .platform_data = &(struct fbtft_platform_data) {
- },
- },
- },
- }
-};
-
-static int write_gpio16_wr_slow(struct fbtft_par *par, void *buf, size_t len)
-{
- u16 data;
- int i;
-#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
- static u16 prev_data;
-#endif
-
- fbtft_par_dbg_hex(DEBUG_WRITE, par, par->info->device, u8, buf, len,
- "%s(len=%zu): ", __func__, len);
-
- while (len) {
- data = *(u16 *)buf;
-
- /* Start writing by pulling down /WR */
- gpiod_set_value(par->gpio.wr, 0);
-
- /* Set data */
-#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
- if (data == prev_data) {
- gpiod_set_value(par->gpio.wr, 0); /* used as delay */
- } else {
- for (i = 0; i < 16; i++) {
- if ((data & 1) != (prev_data & 1))
- gpiod_set_value(par->gpio.db[i],
- data & 1);
- data >>= 1;
- prev_data >>= 1;
- }
- }
-#else
- for (i = 0; i < 16; i++) {
- gpiod_set_value(par->gpio.db[i], data & 1);
- data >>= 1;
- }
-#endif
-
- /* Pullup /WR */
- gpiod_set_value(par->gpio.wr, 1);
-
-#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
- prev_data = *(u16 *)buf;
-#endif
- buf += 2;
- len -= 2;
- }
-
- return 0;
-}
-
-static void adafruit18_green_tab_set_addr_win(struct fbtft_par *par,
- int xs, int ys, int xe, int ye)
-{
- write_reg(par, 0x2A, 0, xs + 2, 0, xe + 2);
- write_reg(par, 0x2B, 0, ys + 1, 0, ye + 1);
- write_reg(par, 0x2C);
-}
-
-static void fbtft_device_pdev_release(struct device *dev)
-{
-/* Needed to silence this message:
- * Device 'xxx' does not have a release() function,
- * it is broken and must be fixed
- */
-}
-
-static int spi_device_found(struct device *dev, void *data)
-{
- struct spi_device *spi = to_spi_device(dev);
-
- dev_info(dev, "%s %s %dkHz %d bits mode=0x%02X\n", spi->modalias,
- dev_name(dev), spi->max_speed_hz / 1000, spi->bits_per_word,
- spi->mode);
-
- return 0;
-}
-
-static void pr_spi_devices(void)
-{
- pr_debug("SPI devices registered:\n");
- bus_for_each_dev(&spi_bus_type, NULL, NULL, spi_device_found);
-}
-
-static int p_device_found(struct device *dev, void *data)
-{
- struct platform_device
- *pdev = to_platform_device(dev);
-
- if (strstr(pdev->name, "fb"))
- dev_info(dev, "%s id=%d pdata? %s\n", pdev->name, pdev->id,
- pdev->dev.platform_data ? "yes" : "no");
-
- return 0;
-}
-
-static void pr_p_devices(void)
-{
- pr_debug("'fb' Platform devices registered:\n");
- bus_for_each_dev(&platform_bus_type, NULL, NULL, p_device_found);
-}
-
-#ifdef MODULE
-static void fbtft_device_spi_delete(struct spi_master *master, unsigned int cs)
-{
- struct device *dev;
- char str[32];
-
- snprintf(str, sizeof(str), "%s.%u", dev_name(&master->dev), cs);
-
- dev = bus_find_device_by_name(&spi_bus_type, NULL, str);
- if (dev) {
- if (verbose)
- dev_info(dev, "Deleting %s\n", str);
- device_del(dev);
- }
-}
-
-static int fbtft_device_spi_device_register(struct spi_board_info *spi)
-{
- struct spi_master *master;
-
- master = spi_busnum_to_master(spi->bus_num);
- if (!master) {
- pr_err("spi_busnum_to_master(%d) returned NULL\n",
- spi->bus_num);
- return -EINVAL;
- }
- /* make sure it's available */
- fbtft_device_spi_delete(master, spi->chip_select);
- spi_device = spi_new_device(master, spi);
- put_device(&master->dev);
- if (!spi_device) {
- dev_err(&master->dev, "spi_new_device() returned NULL\n");
- return -EPERM;
- }
- return 0;
-}
-#else
-static int fbtft_device_spi_device_register(struct spi_board_info *spi)
-{
- return spi_register_board_info(spi, 1);
-}
-#endif
-
-static int __init fbtft_device_init(void)
-{
- struct spi_board_info *spi = NULL;
- struct fbtft_platform_data *pdata;
- bool found = false;
- int i = 0;
- int ret = 0;
-
- if (!name) {
-#ifdef MODULE
- pr_err("missing module parameter: 'name'\n");
- return -EINVAL;
-#else
- return 0;
-#endif
- }
-
- if (init_num > FBTFT_MAX_INIT_SEQUENCE) {
- pr_err("init parameter: exceeded max array size: %d\n",
- FBTFT_MAX_INIT_SEQUENCE);
- return -EINVAL;
- }
-
- if (verbose > 2) {
- pr_spi_devices(); /* print list of registered SPI devices */
- pr_p_devices(); /* print list of 'fb' platform devices */
- }
-
- pr_debug("name='%s', busnum=%d, cs=%d\n", name, busnum, cs);
-
- if (rotate > 0 && rotate < 4) {
- rotate = (4 - rotate) * 90;
- pr_warn("argument 'rotate' should be an angle. Values 1-3 is deprecated. Setting it to %d.\n",
- rotate);
- }
- if (rotate != 0 && rotate != 90 && rotate != 180 && rotate != 270) {
- pr_warn("argument 'rotate' illegal value: %d. Setting it to 0.\n",
- rotate);
- rotate = 0;
- }
-
- /* name=list lists all supported displays */
- if (strcmp(name, "list") == 0) {
- pr_info("Supported displays:\n");
-
- for (i = 0; i < ARRAY_SIZE(displays); i++)
- pr_info("%s\n", displays[i].name);
- return -ECANCELED;
- }
-
- if (custom) {
- i = ARRAY_SIZE(displays) - 1;
- displays[i].name = name;
- if (speed == 0) {
- displays[i].pdev->name = name;
- displays[i].spi = NULL;
- } else {
- size_t len;
-
- len = strlcpy(displays[i].spi->modalias, name,
- SPI_NAME_SIZE);
- if (len >= SPI_NAME_SIZE)
- pr_warn("modalias (name) truncated to: %s\n",
- displays[i].spi->modalias);
- displays[i].pdev = NULL;
- }
- }
-
- for (i = 0; i < ARRAY_SIZE(displays); i++) {
- if (strncmp(name, displays[i].name, SPI_NAME_SIZE) == 0) {
- if (displays[i].spi) {
- spi = displays[i].spi;
- spi->chip_select = cs;
- spi->bus_num = busnum;
- if (speed)
- spi->max_speed_hz = speed;
- if (mode != -1)
- spi->mode = mode;
- pdata = (void *)spi->platform_data;
- } else if (displays[i].pdev) {
- p_device = displays[i].pdev;
- pdata = p_device->dev.platform_data;
- } else {
- pr_err("broken displays array\n");
- return -EINVAL;
- }
-
- pdata->rotate = rotate;
- if (bgr == 0)
- pdata->bgr = false;
- else if (bgr == 1)
- pdata->bgr = true;
- if (startbyte)
- pdata->startbyte = startbyte;
- if (gamma)
- pdata->gamma = gamma;
- pdata->display.debug = debug;
- if (fps)
- pdata->fps = fps;
- if (txbuflen)
- pdata->txbuflen = txbuflen;
- if (init_num)
- pdata->display.init_sequence = init;
- if (custom) {
- pdata->display.width = width;
- pdata->display.height = height;
- pdata->display.buswidth = buswidth;
- pdata->display.backlight = 1;
- }
-
- if (displays[i].spi) {
- ret = fbtft_device_spi_device_register(spi);
- if (ret) {
- pr_err("failed to register SPI device\n");
- return ret;
- }
- } else {
- ret = platform_device_register(p_device);
- if (ret < 0) {
- pr_err("platform_device_register() returned %d\n",
- ret);
- return ret;
- }
- }
- found = true;
- break;
- }
- }
-
- if (!found) {
- pr_err("display not supported: '%s'\n", name);
- return -EINVAL;
- }
-
- if (spi_device && (verbose > 1))
- pr_spi_devices();
- if (p_device && (verbose > 1))
- pr_p_devices();
-
- return 0;
-}
-
-static void __exit fbtft_device_exit(void)
-{
- if (spi_device) {
- device_del(&spi_device->dev);
- kfree(spi_device);
- }
-
- if (p_device)
- platform_device_unregister(p_device);
-}
-
-arch_initcall(fbtft_device_init);
-module_exit(fbtft_device_exit);
-
-MODULE_DESCRIPTION("Add a FBTFT device.");
-MODULE_AUTHOR("Noralf Tronnes");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/fbtft/flexfb.c b/drivers/staging/fbtft/flexfb.c
deleted file mode 100644
index 3747321011fa..000000000000
--- a/drivers/staging/fbtft/flexfb.c
+++ /dev/null
@@ -1,851 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Generic FB driver for TFT LCD displays
- *
- * Copyright (C) 2013 Noralf Tronnes
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/vmalloc.h>
-#include <linux/gpio/consumer.h>
-#include <linux/spi/spi.h>
-#include <linux/delay.h>
-
-#include "fbtft.h"
-
-#define DRVNAME "flexfb"
-
-static char *chip;
-module_param(chip, charp, 0000);
-MODULE_PARM_DESC(chip, "LCD controller");
-
-static unsigned int width;
-module_param(width, uint, 0000);
-MODULE_PARM_DESC(width, "Display width");
-
-static unsigned int height;
-module_param(height, uint, 0000);
-MODULE_PARM_DESC(height, "Display height");
-
-static s16 init[512];
-static int init_num;
-module_param_array(init, short, &init_num, 0000);
-MODULE_PARM_DESC(init, "Init sequence");
-
-static unsigned int setaddrwin;
-module_param(setaddrwin, uint, 0000);
-MODULE_PARM_DESC(setaddrwin, "Which set_addr_win() implementation to use");
-
-static unsigned int buswidth = 8;
-module_param(buswidth, uint, 0000);
-MODULE_PARM_DESC(buswidth, "Width of databus (default: 8)");
-
-static unsigned int regwidth = 8;
-module_param(regwidth, uint, 0000);
-MODULE_PARM_DESC(regwidth, "Width of controller register (default: 8)");
-
-static bool nobacklight;
-module_param(nobacklight, bool, 0000);
-MODULE_PARM_DESC(nobacklight, "Turn off backlight functionality.");
-
-static bool latched;
-module_param(latched, bool, 0000);
-MODULE_PARM_DESC(latched, "Use with latched 16-bit databus");
-
-static const s16 *initp;
-static int initp_num;
-
-/* default init sequences */
-static const s16 st7735r_init[] = {
- -1, 0x01,
- -2, 150,
- -1, 0x11,
- -2, 500,
- -1, 0xB1, 0x01, 0x2C, 0x2D,
- -1, 0xB2, 0x01, 0x2C, 0x2D,
- -1, 0xB3, 0x01, 0x2C, 0x2D, 0x01, 0x2C, 0x2D,
- -1, 0xB4, 0x07,
- -1, 0xC0, 0xA2, 0x02, 0x84,
- -1, 0xC1, 0xC5,
- -1, 0xC2, 0x0A, 0x00,
- -1, 0xC3, 0x8A, 0x2A,
- -1, 0xC4, 0x8A, 0xEE,
- -1, 0xC5, 0x0E,
- -1, 0x20,
- -1, 0x36, 0xC0,
- -1, 0x3A, 0x05,
- -1, 0xE0, 0x0f, 0x1a, 0x0f, 0x18, 0x2f, 0x28, 0x20, 0x22,
- 0x1f, 0x1b, 0x23, 0x37, 0x00, 0x07, 0x02, 0x10,
- -1, 0xE1, 0x0f, 0x1b, 0x0f, 0x17, 0x33, 0x2c, 0x29, 0x2e,
- 0x30, 0x30, 0x39, 0x3f, 0x00, 0x07, 0x03, 0x10,
- -1, 0x29,
- -2, 100,
- -1, 0x13,
- -2, 10,
- -3
-};
-
-static const s16 ssd1289_init[] = {
- -1, 0x00, 0x0001,
- -1, 0x03, 0xA8A4,
- -1, 0x0C, 0x0000,
- -1, 0x0D, 0x080C,
- -1, 0x0E, 0x2B00,
- -1, 0x1E, 0x00B7,
- -1, 0x01, 0x2B3F,
- -1, 0x02, 0x0600,
- -1, 0x10, 0x0000,
- -1, 0x11, 0x6070,
- -1, 0x05, 0x0000,
- -1, 0x06, 0x0000,
- -1, 0x16, 0xEF1C,
- -1, 0x17, 0x0003,
- -1, 0x07, 0x0233,
- -1, 0x0B, 0x0000,
- -1, 0x0F, 0x0000,
- -1, 0x41, 0x0000,
- -1, 0x42, 0x0000,
- -1, 0x48, 0x0000,
- -1, 0x49, 0x013F,
- -1, 0x4A, 0x0000,
- -1, 0x4B, 0x0000,
- -1, 0x44, 0xEF00,
- -1, 0x45, 0x0000,
- -1, 0x46, 0x013F,
- -1, 0x30, 0x0707,
- -1, 0x31, 0x0204,
- -1, 0x32, 0x0204,
- -1, 0x33, 0x0502,
- -1, 0x34, 0x0507,
- -1, 0x35, 0x0204,
- -1, 0x36, 0x0204,
- -1, 0x37, 0x0502,
- -1, 0x3A, 0x0302,
- -1, 0x3B, 0x0302,
- -1, 0x23, 0x0000,
- -1, 0x24, 0x0000,
- -1, 0x25, 0x8000,
- -1, 0x4f, 0x0000,
- -1, 0x4e, 0x0000,
- -1, 0x22,
- -3
-};
-
-static const s16 hx8340bn_init[] = {
- -1, 0xC1, 0xFF, 0x83, 0x40,
- -1, 0x11,
- -2, 150,
- -1, 0xCA, 0x70, 0x00, 0xD9,
- -1, 0xB0, 0x01, 0x11,
- -1, 0xC9, 0x90, 0x49, 0x10, 0x28, 0x28, 0x10, 0x00, 0x06,
- -2, 20,
- -1, 0xC2, 0x60, 0x71, 0x01, 0x0E, 0x05, 0x02, 0x09, 0x31, 0x0A,
- -1, 0xC3, 0x67, 0x30, 0x61, 0x17, 0x48, 0x07, 0x05, 0x33,
- -2, 10,
- -1, 0xB5, 0x35, 0x20, 0x45,
- -1, 0xB4, 0x33, 0x25, 0x4C,
- -2, 10,
- -1, 0x3A, 0x05,
- -1, 0x29,
- -2, 10,
- -3
-};
-
-static const s16 ili9225_init[] = {
- -1, 0x0001, 0x011C,
- -1, 0x0002, 0x0100,
- -1, 0x0003, 0x1030,
- -1, 0x0008, 0x0808,
- -1, 0x000C, 0x0000,
- -1, 0x000F, 0x0A01,
- -1, 0x0020, 0x0000,
- -1, 0x0021, 0x0000,
- -2, 50,
- -1, 0x0010, 0x0A00,
- -1, 0x0011, 0x1038,
- -2, 50,
- -1, 0x0012, 0x1121,
- -1, 0x0013, 0x004E,
- -1, 0x0014, 0x676F,
- -1, 0x0030, 0x0000,
- -1, 0x0031, 0x00DB,
- -1, 0x0032, 0x0000,
- -1, 0x0033, 0x0000,
- -1, 0x0034, 0x00DB,
- -1, 0x0035, 0x0000,
- -1, 0x0036, 0x00AF,
- -1, 0x0037, 0x0000,
- -1, 0x0038, 0x00DB,
- -1, 0x0039, 0x0000,
- -1, 0x0050, 0x0000,
- -1, 0x0051, 0x060A,
- -1, 0x0052, 0x0D0A,
- -1, 0x0053, 0x0303,
- -1, 0x0054, 0x0A0D,
- -1, 0x0055, 0x0A06,
- -1, 0x0056, 0x0000,
- -1, 0x0057, 0x0303,
- -1, 0x0058, 0x0000,
- -1, 0x0059, 0x0000,
- -2, 50,
- -1, 0x0007, 0x1017,
- -2, 50,
- -3
-};
-
-static const s16 ili9320_init[] = {
- -1, 0x00E5, 0x8000,
- -1, 0x0000, 0x0001,
- -1, 0x0001, 0x0100,
- -1, 0x0002, 0x0700,
- -1, 0x0003, 0x1030,
- -1, 0x0004, 0x0000,
- -1, 0x0008, 0x0202,
- -1, 0x0009, 0x0000,
- -1, 0x000A, 0x0000,
- -1, 0x000C, 0x0000,
- -1, 0x000D, 0x0000,
- -1, 0x000F, 0x0000,
- -1, 0x0010, 0x0000,
- -1, 0x0011, 0x0007,
- -1, 0x0012, 0x0000,
- -1, 0x0013, 0x0000,
- -2, 200,
- -1, 0x0010, 0x17B0,
- -1, 0x0011, 0x0031,
- -2, 50,
- -1, 0x0012, 0x0138,
- -2, 50,
- -1, 0x0013, 0x1800,
- -1, 0x0029, 0x0008,
- -2, 50,
- -1, 0x0020, 0x0000,
- -1, 0x0021, 0x0000,
- -1, 0x0030, 0x0000,
- -1, 0x0031, 0x0505,
- -1, 0x0032, 0x0004,
- -1, 0x0035, 0x0006,
- -1, 0x0036, 0x0707,
- -1, 0x0037, 0x0105,
- -1, 0x0038, 0x0002,
- -1, 0x0039, 0x0707,
- -1, 0x003C, 0x0704,
- -1, 0x003D, 0x0807,
- -1, 0x0050, 0x0000,
- -1, 0x0051, 0x00EF,
- -1, 0x0052, 0x0000,
- -1, 0x0053, 0x013F,
- -1, 0x0060, 0x2700,
- -1, 0x0061, 0x0001,
- -1, 0x006A, 0x0000,
- -1, 0x0080, 0x0000,
- -1, 0x0081, 0x0000,
- -1, 0x0082, 0x0000,
- -1, 0x0083, 0x0000,
- -1, 0x0084, 0x0000,
- -1, 0x0085, 0x0000,
- -1, 0x0090, 0x0010,
- -1, 0x0092, 0x0000,
- -1, 0x0093, 0x0003,
- -1, 0x0095, 0x0110,
- -1, 0x0097, 0x0000,
- -1, 0x0098, 0x0000,
- -1, 0x0007, 0x0173,
- -3
-};
-
-static const s16 ili9325_init[] = {
- -1, 0x00E3, 0x3008,
- -1, 0x00E7, 0x0012,
- -1, 0x00EF, 0x1231,
- -1, 0x0001, 0x0100,
- -1, 0x0002, 0x0700,
- -1, 0x0003, 0x1030,
- -1, 0x0004, 0x0000,
- -1, 0x0008, 0x0207,
- -1, 0x0009, 0x0000,
- -1, 0x000A, 0x0000,
- -1, 0x000C, 0x0000,
- -1, 0x000D, 0x0000,
- -1, 0x000F, 0x0000,
- -1, 0x0010, 0x0000,
- -1, 0x0011, 0x0007,
- -1, 0x0012, 0x0000,
- -1, 0x0013, 0x0000,
- -2, 200,
- -1, 0x0010, 0x1690,
- -1, 0x0011, 0x0223,
- -2, 50,
- -1, 0x0012, 0x000D,
- -2, 50,
- -1, 0x0013, 0x1200,
- -1, 0x0029, 0x000A,
- -1, 0x002B, 0x000C,
- -2, 50,
- -1, 0x0020, 0x0000,
- -1, 0x0021, 0x0000,
- -1, 0x0030, 0x0000,
- -1, 0x0031, 0x0506,
- -1, 0x0032, 0x0104,
- -1, 0x0035, 0x0207,
- -1, 0x0036, 0x000F,
- -1, 0x0037, 0x0306,
- -1, 0x0038, 0x0102,
- -1, 0x0039, 0x0707,
- -1, 0x003C, 0x0702,
- -1, 0x003D, 0x1604,
- -1, 0x0050, 0x0000,
- -1, 0x0051, 0x00EF,
- -1, 0x0052, 0x0000,
- -1, 0x0053, 0x013F,
- -1, 0x0060, 0xA700,
- -1, 0x0061, 0x0001,
- -1, 0x006A, 0x0000,
- -1, 0x0080, 0x0000,
- -1, 0x0081, 0x0000,
- -1, 0x0082, 0x0000,
- -1, 0x0083, 0x0000,
- -1, 0x0084, 0x0000,
- -1, 0x0085, 0x0000,
- -1, 0x0090, 0x0010,
- -1, 0x0092, 0x0600,
- -1, 0x0007, 0x0133,
- -3
-};
-
-static const s16 ili9341_init[] = {
- -1, 0x28,
- -2, 20,
- -1, 0xCF, 0x00, 0x83, 0x30,
- -1, 0xED, 0x64, 0x03, 0x12, 0x81,
- -1, 0xE8, 0x85, 0x01, 0x79,
- -1, 0xCB, 0x39, 0x2c, 0x00, 0x34, 0x02,
- -1, 0xF7, 0x20,
- -1, 0xEA, 0x00, 0x00,
- -1, 0xC0, 0x26,
- -1, 0xC1, 0x11,
- -1, 0xC5, 0x35, 0x3E,
- -1, 0xC7, 0xBE,
- -1, 0xB1, 0x00, 0x1B,
- -1, 0xB6, 0x0a, 0x82, 0x27, 0x00,
- -1, 0xB7, 0x07,
- -1, 0x3A, 0x55,
- -1, 0x36, 0x48,
- -1, 0x11,
- -2, 120,
- -1, 0x29,
- -2, 20,
- -3
-};
-
-static const s16 ssd1351_init[] = {
- -1, 0xfd, 0x12,
- -1, 0xfd, 0xb1,
- -1, 0xae,
- -1, 0xb3, 0xf1,
- -1, 0xca, 0x7f,
- -1, 0xa0, 0x74,
- -1, 0x15, 0x00, 0x7f,
- -1, 0x75, 0x00, 0x7f,
- -1, 0xa1, 0x00,
- -1, 0xa2, 0x00,
- -1, 0xb5, 0x00,
- -1, 0xab, 0x01,
- -1, 0xb1, 0x32,
- -1, 0xb4, 0xa0, 0xb5, 0x55,
- -1, 0xbb, 0x17,
- -1, 0xbe, 0x05,
- -1, 0xc1, 0xc8, 0x80, 0xc8,
- -1, 0xc7, 0x0f,
- -1, 0xb6, 0x01,
- -1, 0xa6,
- -1, 0xaf,
- -3
-};
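
All of the init arrays above share one encoding: -1 opens a register write (the non-negative values that follow are the command and its parameters), -2 sleeps for the next value in milliseconds, and -3 terminates the sequence. A minimal sketch of a walker for this encoding; emit_write() and sleep_ms() are hypothetical stand-ins for the driver's write_reg() and mdelay() calls:

static void emit_write(const s16 *vals, int n);	/* hypothetical */
static void sleep_ms(int ms);			/* hypothetical */

static void run_init_seq(const s16 *seq, int len)
{
	int i = 0;

	while (i < len && seq[i] != -3) {
		if (seq[i] == -1) {		/* register write follows */
			int start = ++i;

			while (i < len && seq[i] >= 0)
				i++;
			emit_write(&seq[start], i - start);
		} else if (seq[i] == -2) {	/* delay in milliseconds */
			sleep_ms(seq[++i]);
			i++;
		}
	}
}

run_init_seq(ili9341_init, ARRAY_SIZE(ili9341_init)) would then replay the ili9341 bring-up sequence below.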
-
-/**
- * struct flexfb_lcd_controller - Describes the LCD controller properties
- * @name: Model name of the chip
- * @width: Width of display in pixels
- * @height: Height of display in pixels
- * @setaddrwin: Which set_addr_win() implementation to use
- * @regwidth: LCD Controller Register width in bits
- * @init_seq: LCD initialization sequence
- * @init_seq_sz: Size of LCD initialization sequence
- */
-struct flexfb_lcd_controller {
- const char *name;
- unsigned int width;
- unsigned int height;
- unsigned int setaddrwin;
- unsigned int regwidth;
- const s16 *init_seq;
- int init_seq_sz;
-};
-
-static const struct flexfb_lcd_controller flexfb_chip_table[] = {
- {
- .name = "st7735r",
- .width = 120,
- .height = 160,
- .init_seq = st7735r_init,
- .init_seq_sz = ARRAY_SIZE(st7735r_init),
- },
- {
- .name = "hx8340bn",
- .width = 176,
- .height = 220,
- .init_seq = hx8340bn_init,
- .init_seq_sz = ARRAY_SIZE(hx8340bn_init),
- },
- {
- .name = "ili9225",
- .width = 176,
- .height = 220,
- .regwidth = 16,
- .init_seq = ili9225_init,
- .init_seq_sz = ARRAY_SIZE(ili9225_init),
- },
- {
- .name = "ili9320",
- .width = 240,
- .height = 320,
- .setaddrwin = 1,
- .regwidth = 16,
- .init_seq = ili9320_init,
- .init_seq_sz = ARRAY_SIZE(ili9320_init),
- },
- {
- .name = "ili9325",
- .width = 240,
- .height = 320,
- .setaddrwin = 1,
- .regwidth = 16,
- .init_seq = ili9325_init,
- .init_seq_sz = ARRAY_SIZE(ili9325_init),
- },
- {
- .name = "ili9341",
- .width = 240,
- .height = 320,
- .init_seq = ili9341_init,
- .init_seq_sz = ARRAY_SIZE(ili9341_init),
- },
- {
- .name = "ssd1289",
- .width = 240,
- .height = 320,
- .setaddrwin = 2,
- .regwidth = 16,
- .init_seq = ssd1289_init,
- .init_seq_sz = ARRAY_SIZE(ssd1289_init),
- },
- {
- .name = "ssd1351",
- .width = 128,
- .height = 128,
- .setaddrwin = 3,
- .init_seq = ssd1351_init,
- .init_seq_sz = ARRAY_SIZE(ssd1351_init),
- },
-};
-
-/* ili9320, ili9325 */
-static void flexfb_set_addr_win_1(struct fbtft_par *par,
- int xs, int ys, int xe, int ye)
-{
- switch (par->info->var.rotate) {
- /* R20h = Horizontal GRAM Start Address */
- /* R21h = Vertical GRAM Start Address */
- case 0:
- write_reg(par, 0x0020, xs);
- write_reg(par, 0x0021, ys);
- break;
- case 180:
- write_reg(par, 0x0020, width - 1 - xs);
- write_reg(par, 0x0021, height - 1 - ys);
- break;
- case 270:
- write_reg(par, 0x0020, width - 1 - ys);
- write_reg(par, 0x0021, xs);
- break;
- case 90:
- write_reg(par, 0x0020, ys);
- write_reg(par, 0x0021, height - 1 - xs);
- break;
- }
- write_reg(par, 0x0022); /* Write Data to GRAM */
-}
-
-/* ssd1289 */
-static void flexfb_set_addr_win_2(struct fbtft_par *par,
- int xs, int ys, int xe, int ye)
-{
- switch (par->info->var.rotate) {
- /* R4Eh - Set GDDRAM X address counter */
- /* R4Fh - Set GDDRAM Y address counter */
- case 0:
- write_reg(par, 0x4e, xs);
- write_reg(par, 0x4f, ys);
- break;
- case 180:
- write_reg(par, 0x4e, par->info->var.xres - 1 - xs);
- write_reg(par, 0x4f, par->info->var.yres - 1 - ys);
- break;
- case 270:
- write_reg(par, 0x4e, par->info->var.yres - 1 - ys);
- write_reg(par, 0x4f, xs);
- break;
- case 90:
- write_reg(par, 0x4e, ys);
- write_reg(par, 0x4f, par->info->var.xres - 1 - xs);
- break;
- }
-
- /* R22h - RAM data write */
- write_reg(par, 0x22, 0);
-}
-
-/* ssd1351 */
-static void set_addr_win_3(struct fbtft_par *par,
- int xs, int ys, int xe, int ye)
-{
- write_reg(par, 0x15, xs, xe);
- write_reg(par, 0x75, ys, ye);
- write_reg(par, 0x5C);
-}
-
-static int flexfb_verify_gpios_dc(struct fbtft_par *par)
-{
- fbtft_par_dbg(DEBUG_VERIFY_GPIOS, par, "%s()\n", __func__);
-
- if (!par->gpio.dc) {
- dev_err(par->info->device,
- "Missing info about 'dc' gpio. Aborting.\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int flexfb_verify_gpios_db(struct fbtft_par *par)
-{
- int i;
- int num_db = buswidth;
-
- fbtft_par_dbg(DEBUG_VERIFY_GPIOS, par, "%s()\n", __func__);
-
- if (!par->gpio.dc) {
- dev_err(par->info->device, "Missing info about 'dc' gpio. Aborting.\n");
- return -EINVAL;
- }
- if (!par->gpio.wr) {
- dev_err(par->info->device, "Missing info about 'wr' gpio. Aborting.\n");
- return -EINVAL;
- }
- if (latched && !par->gpio.latch) {
- dev_err(par->info->device, "Missing info about 'latch' gpio. Aborting.\n");
- return -EINVAL;
- }
- if (latched)
- num_db = buswidth / 2;
- for (i = 0; i < num_db; i++) {
- if (!par->gpio.db[i]) {
- dev_err(par->info->device,
- "Missing info about 'db%02d' gpio. Aborting.\n",
- i);
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-static void flexfb_chip_load_param(const struct flexfb_lcd_controller *chip)
-{
- if (!width)
- width = chip->width;
- if (!height)
- height = chip->height;
- setaddrwin = chip->setaddrwin;
- if (chip->regwidth)
- regwidth = chip->regwidth;
- if (!init_num) {
- initp = chip->init_seq;
- initp_num = chip->init_seq_sz;
- }
-}
-
-static struct fbtft_display flex_display = { };
-
-static int flexfb_chip_init(const struct device *dev)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(flexfb_chip_table); i++)
- if (!strcmp(chip, flexfb_chip_table[i].name)) {
- flexfb_chip_load_param(&flexfb_chip_table[i]);
- return 0;
- }
-
- dev_err(dev, "chip=%s is not supported\n", chip);
-
- return -EINVAL;
-}
-
-static int flexfb_probe_common(struct spi_device *sdev,
- struct platform_device *pdev)
-{
- struct device *dev;
- struct fb_info *info;
- struct fbtft_par *par;
- int ret;
-
- initp = init;
- initp_num = init_num;
-
- if (sdev)
- dev = &sdev->dev;
- else
- dev = &pdev->dev;
-
- fbtft_init_dbg(dev, "%s(%s)\n", __func__,
- sdev ? "'SPI device'" : "'Platform device'");
-
- if (chip) {
- ret = flexfb_chip_init(dev);
- if (ret)
- return ret;
- }
-
- if (width == 0 || height == 0) {
- dev_err(dev, "argument(s) missing: width and height has to be set.\n");
- return -EINVAL;
- }
- flex_display.width = width;
- flex_display.height = height;
- fbtft_init_dbg(dev, "Display resolution: %dx%d\n", width, height);
- fbtft_init_dbg(dev, "chip = %s\n", chip ? chip : "not set");
- fbtft_init_dbg(dev, "setaddrwin = %d\n", setaddrwin);
- fbtft_init_dbg(dev, "regwidth = %d\n", regwidth);
- fbtft_init_dbg(dev, "buswidth = %d\n", buswidth);
-
- info = fbtft_framebuffer_alloc(&flex_display, dev, dev->platform_data);
- if (!info)
- return -ENOMEM;
-
- par = info->par;
- if (sdev)
- par->spi = sdev;
- else
- par->pdev = pdev;
- if (!par->init_sequence)
- par->init_sequence = initp;
- par->fbtftops.init_display = fbtft_init_display;
-
- /* registerwrite functions */
- switch (regwidth) {
- case 8:
- par->fbtftops.write_register = fbtft_write_reg8_bus8;
- break;
- case 16:
- par->fbtftops.write_register = fbtft_write_reg16_bus8;
- break;
- default:
- dev_err(dev,
- "argument 'regwidth': %d is not supported.\n",
- regwidth);
- return -EINVAL;
- }
-
- /* bus functions */
- if (sdev) {
- par->fbtftops.write = fbtft_write_spi;
- switch (buswidth) {
- case 8:
- par->fbtftops.write_vmem = fbtft_write_vmem16_bus8;
- if (!par->startbyte)
- par->fbtftops.verify_gpios = flexfb_verify_gpios_dc;
- break;
- case 9:
- if (regwidth == 16) {
- dev_err(dev, "argument 'regwidth': %d is not supported with buswidth=%d and SPI.\n",
- regwidth, buswidth);
- return -EINVAL;
- }
- par->fbtftops.write_register = fbtft_write_reg8_bus9;
- par->fbtftops.write_vmem = fbtft_write_vmem16_bus9;
- if (par->spi->master->bits_per_word_mask
- & SPI_BPW_MASK(9)) {
- par->spi->bits_per_word = 9;
- break;
- }
-
- dev_warn(dev,
- "9-bit SPI not available, emulating using 8-bit.\n");
- /* allocate buffer with room for dc bits */
- par->extra = devm_kzalloc(par->info->device,
- par->txbuf.len
- + (par->txbuf.len / 8) + 8,
- GFP_KERNEL);
- if (!par->extra) {
- ret = -ENOMEM;
- goto out_release;
- }
- par->fbtftops.write = fbtft_write_spi_emulate_9;
-
- break;
- default:
- dev_err(dev,
- "argument 'buswidth': %d is not supported with SPI.\n",
- buswidth);
- return -EINVAL;
- }
- } else {
- par->fbtftops.verify_gpios = flexfb_verify_gpios_db;
- switch (buswidth) {
- case 8:
- par->fbtftops.write = fbtft_write_gpio8_wr;
- par->fbtftops.write_vmem = fbtft_write_vmem16_bus8;
- break;
- case 16:
- par->fbtftops.write_register = fbtft_write_reg16_bus16;
- if (latched)
- par->fbtftops.write = fbtft_write_gpio16_wr_latched;
- else
- par->fbtftops.write = fbtft_write_gpio16_wr;
- par->fbtftops.write_vmem = fbtft_write_vmem16_bus16;
- break;
- default:
- dev_err(dev,
- "argument 'buswidth': %d is not supported with parallel.\n",
- buswidth);
- return -EINVAL;
- }
- }
-
- /* set_addr_win function */
- switch (setaddrwin) {
- case 0:
- /* use default */
- break;
- case 1:
- par->fbtftops.set_addr_win = flexfb_set_addr_win_1;
- break;
- case 2:
- par->fbtftops.set_addr_win = flexfb_set_addr_win_2;
- break;
- case 3:
- par->fbtftops.set_addr_win = set_addr_win_3;
- break;
- default:
- dev_err(dev, "argument 'setaddrwin': unknown value %d.\n",
- setaddrwin);
- return -EINVAL;
- }
-
- if (!nobacklight)
- par->fbtftops.register_backlight = fbtft_register_backlight;
-
- ret = fbtft_register_framebuffer(info);
- if (ret < 0)
- goto out_release;
-
- return 0;
-
-out_release:
- fbtft_framebuffer_release(info);
-
- return ret;
-}
-
-static int flexfb_remove_common(struct device *dev, struct fb_info *info)
-{
- struct fbtft_par *par;
-
- if (!info)
- return -EINVAL;
- par = info->par;
- if (par)
- fbtft_par_dbg(DEBUG_DRIVER_INIT_FUNCTIONS, par, "%s()\n",
- __func__);
- fbtft_unregister_framebuffer(info);
- fbtft_framebuffer_release(info);
-
- return 0;
-}
-
-static int flexfb_probe_spi(struct spi_device *spi)
-{
- return flexfb_probe_common(spi, NULL);
-}
-
-static int flexfb_remove_spi(struct spi_device *spi)
-{
- struct fb_info *info = spi_get_drvdata(spi);
-
- return flexfb_remove_common(&spi->dev, info);
-}
-
-static int flexfb_probe_pdev(struct platform_device *pdev)
-{
- return flexfb_probe_common(NULL, pdev);
-}
-
-static int flexfb_remove_pdev(struct platform_device *pdev)
-{
- struct fb_info *info = platform_get_drvdata(pdev);
-
- return flexfb_remove_common(&pdev->dev, info);
-}
-
-static struct spi_driver flexfb_spi_driver = {
- .driver = {
- .name = DRVNAME,
- },
- .probe = flexfb_probe_spi,
- .remove = flexfb_remove_spi,
-};
-
-static const struct platform_device_id flexfb_platform_ids[] = {
- { "flexpfb", 0 },
- { },
-};
-MODULE_DEVICE_TABLE(platform, flexfb_platform_ids);
-
-static struct platform_driver flexfb_platform_driver = {
- .driver = {
- .name = DRVNAME,
- },
- .id_table = flexfb_platform_ids,
- .probe = flexfb_probe_pdev,
- .remove = flexfb_remove_pdev,
-};
-
-static int __init flexfb_init(void)
-{
- int ret, ret2;
-
- ret = spi_register_driver(&flexfb_spi_driver);
- ret2 = platform_driver_register(&flexfb_platform_driver);
- if (ret < 0)
- return ret;
- return ret2;
-}
-
-static void __exit flexfb_exit(void)
-{
- spi_unregister_driver(&flexfb_spi_driver);
- platform_driver_unregister(&flexfb_platform_driver);
-}
-
-/* ------------------------------------------------------------------------- */
-
-module_init(flexfb_init);
-module_exit(flexfb_exit);
-
-MODULE_DESCRIPTION("Generic FB driver for TFT LCD displays");
-MODULE_AUTHOR("Noralf Tronnes");
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/hp/Kconfig b/drivers/staging/hp/Kconfig
index fb395cfe6b92..fb395cfe6b92 100644
--- a/drivers/net/ethernet/hp/Kconfig
+++ b/drivers/staging/hp/Kconfig
diff --git a/drivers/net/ethernet/hp/Makefile b/drivers/staging/hp/Makefile
index 5ed723bb11e2..5ed723bb11e2 100644
--- a/drivers/net/ethernet/hp/Makefile
+++ b/drivers/staging/hp/Makefile
diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/staging/hp/hp100.c
index 6ec78f5c602f..6ec78f5c602f 100644
--- a/drivers/net/ethernet/hp/hp100.c
+++ b/drivers/staging/hp/hp100.c
diff --git a/drivers/net/ethernet/hp/hp100.h b/drivers/staging/hp/hp100.h
index 7239b94c9de5..7239b94c9de5 100644
--- a/drivers/net/ethernet/hp/hp100.h
+++ b/drivers/staging/hp/hp100.h
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index c64728fc21f2..83469061a542 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -261,11 +261,11 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
/* Build the PKO buffer pointer */
hw_buffer.u64 = 0;
if (skb_shinfo(skb)->nr_frags == 0) {
- hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data);
+ hw_buffer.s.addr = XKPHYS_TO_PHYS((uintptr_t)skb->data);
hw_buffer.s.pool = 0;
hw_buffer.s.size = skb->len;
} else {
- hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data);
+ hw_buffer.s.addr = XKPHYS_TO_PHYS((uintptr_t)skb->data);
hw_buffer.s.pool = 0;
hw_buffer.s.size = skb_headlen(skb);
CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64;
@@ -273,11 +273,12 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
skb_frag_t *fs = skb_shinfo(skb)->frags + i;
hw_buffer.s.addr =
- XKPHYS_TO_PHYS((u64)skb_frag_address(fs));
+ XKPHYS_TO_PHYS((uintptr_t)skb_frag_address(fs));
hw_buffer.s.size = skb_frag_size(fs);
CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64;
}
- hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)CVM_OCT_SKB_CB(skb));
+ hw_buffer.s.addr =
+ XKPHYS_TO_PHYS((uintptr_t)CVM_OCT_SKB_CB(skb));
hw_buffer.s.size = skb_shinfo(skb)->nr_frags + 1;
pko_command.s.segs = skb_shinfo(skb)->nr_frags + 1;
pko_command.s.gather = 1;
@@ -349,10 +350,8 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
*/
dst_release(skb_dst(skb));
skb_dst_set(skb, NULL);
-#ifdef CONFIG_XFRM
- secpath_reset(skb);
-#endif
- nf_reset(skb);
+ skb_ext_reset(skb);
+ nf_reset_ct(skb);
#ifdef CONFIG_NET_SCHED
skb->tc_index = 0;
diff --git a/drivers/staging/octeon/octeon-stubs.h b/drivers/staging/octeon/octeon-stubs.h
index a4ac3bfb62a8..b78ce9eaab85 100644
--- a/drivers/staging/octeon/octeon-stubs.h
+++ b/drivers/staging/octeon/octeon-stubs.h
@@ -1202,7 +1202,7 @@ static inline int cvmx_wqe_get_grp(cvmx_wqe_t *work)
static inline void *cvmx_phys_to_ptr(uint64_t physical_address)
{
- return (void *)(physical_address);
+ return (void *)(uintptr_t)(physical_address);
}
static inline uint64_t cvmx_ptr_to_phys(void *ptr)
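
Both octeon hunks make the same point: casting a pointer straight to u64 (or a u64 back to a pointer) is only clean on 64-bit builds, while routing the conversion through uintptr_t keeps it well-defined and warning-free at any word size. A minimal illustrative sketch of the pattern (the helper names are not from the driver):

#include <stdint.h>

/* Widen a pointer losslessly: pointer -> pointer-sized int -> u64. */
static inline uint64_t ptr_to_u64(const void *p)
{
	return (uint64_t)(uintptr_t)p;
}

/* And back: narrow to the pointer-sized type before the pointer cast. */
static inline void *u64_to_ptr(uint64_t v)
{
	return (void *)(uintptr_t)v;
}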
diff --git a/drivers/staging/rtl8188eu/hal/hal8188e_rate_adaptive.c b/drivers/staging/rtl8188eu/hal/hal8188e_rate_adaptive.c
index 9ddd51685063..5792f491b59a 100644
--- a/drivers/staging/rtl8188eu/hal/hal8188e_rate_adaptive.c
+++ b/drivers/staging/rtl8188eu/hal/hal8188e_rate_adaptive.c
@@ -409,7 +409,7 @@ static int odm_ARFBRefresh_8188E(struct odm_dm_struct *dm_odm, struct odm_ra_inf
pRaInfo->PTModeSS = 3;
else if (pRaInfo->HighestRate > 0x0b)
pRaInfo->PTModeSS = 2;
- else if (pRaInfo->HighestRate > 0x0b)
+ else if (pRaInfo->HighestRate > 0x03)
pRaInfo->PTModeSS = 1;
else
pRaInfo->PTModeSS = 0;
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index 664d93a7f90d..4fac9dca798e 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -348,8 +348,10 @@ static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
}
padapter->HalData = kzalloc(sizeof(struct hal_data_8188e), GFP_KERNEL);
- if (!padapter->HalData)
- DBG_88E("cant not alloc memory for HAL DATA\n");
+ if (!padapter->HalData) {
+ DBG_88E("Failed to allocate memory for HAL data\n");
+ goto free_adapter;
+ }
/* step read_chip_version */
rtw_hal_read_chip_version(padapter);
diff --git a/drivers/staging/speakup/sysfs-driver-speakup b/drivers/staging/speakup/sysfs-driver-speakup
new file mode 100644
index 000000000000..be3f5d6962e9
--- /dev/null
+++ b/drivers/staging/speakup/sysfs-driver-speakup
@@ -0,0 +1,369 @@
+What: /sys/accessibility/speakup/attrib_bleep
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: Beeps the PC speaker when there is an attribute change such as
+ foreground or background color when using speakup review
+ commands. One = on, zero = off.
+
+What: /sys/accessibility/speakup/bell_pos
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description:	This works much like a typewriter bell. If, for example, 72 is
+		echoed to bell_pos, it will beep the PC speaker when typing on
+		a line past character 72.
+
+What: /sys/accessibility/speakup/bleeps
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: This controls whether one hears beeps through the PC speaker
+ when using speakup's review commands.
+ TODO: what values does it accept?
+
+What: /sys/accessibility/speakup/bleep_time
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: This controls the duration of the PC speaker beeps speakup
+ produces.
+ TODO: What are the units? Jiffies?
+
+What: /sys/accessibility/speakup/cursor_time
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description:	This controls the cursor delay when using arrow keys. On a
+		very slow connection, the default setting can cause speakup to
+		speak the wrong characters when moving with the arrows or when
+		backspacing. Set this to a higher value to compensate for the
+		delay and keep the cursor position and speech in sync.
+
+What: /sys/accessibility/speakup/delimiters
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description:	The list of characters speakup treats as word delimiters.
+ TODO: add more info
+
+What: /sys/accessibility/speakup/ex_num
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: TODO:
+
+What: /sys/accessibility/speakup/key_echo
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description:	Controls if speakup speaks keys when they are typed. One = on,
+		zero = off (keys are not echoed).
+
+What: /sys/accessibility/speakup/keymap
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description:	Speakup keymap remaps keys to Speakup functions. It uses a
+		binary format. A special program called genmap is needed to
+		compile a textual keymap into the binary format, which is then
+		loaded into /sys/accessibility/speakup/keymap.
+
+What: /sys/accessibility/speakup/no_interrupt
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description:	Controls if typing interrupts output from speakup. With
+		no_interrupt set to zero, typing on the keyboard will interrupt
+		speakup if, for example, the say screen command is used before
+		the entire screen is read. With no_interrupt set to one, if the
+		say screen command is used and one then types on the keyboard,
+		speakup will continue to say the whole screen until it
+		finishes.
+
+What: /sys/accessibility/speakup/punc_all
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: This is a list of all the punctuation speakup should speak when
+ punc_level is set to four.
+
+What: /sys/accessibility/speakup/punc_level
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description:	Controls the level of punctuation spoken as the screen is
+		displayed, not reviewed. Levels range from zero (no
+		punctuation) to four (all punctuation). One corresponds to
+		punc_some, two corresponds to punc_most, and three as well as
+		four both correspond to punc_all. Some hardware synthesizers
+		may have distinct levels for three and four. Also note that if
+		punc_level is set to zero and key_echo is set to one, typed
+		punctuation is still spoken as it is typed.
+
+What: /sys/accessibility/speakup/punc_most
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: This is a list of all the punctuation speakup should speak when
+ punc_level is set to two.
+
+What: /sys/accessibility/speakup/punc_some
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: This is a list of all the punctuation speakup should speak when
+ punc_level is set to one.
+
+What: /sys/accessibility/speakup/reading_punc
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description:	Almost the same as punc_level; one difference is that
+		reading_punc controls the level of punctuation when reviewing
+		the screen with speakup's screen review commands. The other
+		difference is that reading_punc set to three speaks punc_all,
+		and reading_punc set to four speaks all punctuation, including
+		spaces.
+
+What: /sys/accessibility/speakup/repeats
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description:	A list of characters speakup repeats. Normally, when there are
+		more than three of the same character in a row, speakup just
+		reads three of those characters. For example, "......" would be
+		read as dot, dot, dot. If a . is added to the list of
+		characters in repeats, "......" would be read as dot, dot, dot,
+		times six.
+
+What: /sys/accessibility/speakup/say_control
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: If set to one, speakup speaks shift, alt and control when those
+ keys are pressed. If say_control is set to zero, shift, ctrl,
+ and alt are not spoken when they are pressed.
+
+What: /sys/accessibility/speakup/say_word_ctl
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: TODO:
+
+What: /sys/accessibility/speakup/silent
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: TODO:
+
+What: /sys/accessibility/speakup/spell_delay
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description:	This controls how fast a word is spelled when speakup's say
+		word review command is pressed twice quickly to speak the
+		current word being reviewed. Zero just speaks the letters one
+		after another, while values one through four seem to introduce
+		more of a pause between the spelling of each letter by speakup.
+
+What: /sys/accessibility/speakup/synth
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: Gets or sets the synthesizer driver currently in use. Reading
+ synth returns the synthesizer driver currently in use. Writing
+ synth switches to the given synthesizer driver, provided it is
+ either built into the kernel, or already loaded as a module.
+
+What: /sys/accessibility/speakup/synth_direct
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description:	Sends whatever is written to synth_direct directly to the
+		speech synthesizer in use, bypassing speakup. This could be
+		used to make the synthesizer speak a string, or to send
+		control sequences to the synthesizer to change how the
+		synthesizer behaves.
+
+What: /sys/accessibility/speakup/version
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: Reading version returns the version of speakup, and the version
+ of the synthesizer driver currently in use.
+
+What: /sys/accessibility/speakup/i18n/announcements
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: This file contains various general announcements, most of which
+ cannot be categorized. You will find messages such as "You
+ killed Speakup", "I'm alive", "leaving help", "parked",
+ "unparked", and others. You will also find the names of the
+ screen edges and cursor tracking modes here.
+
+What: /sys/accessibility/speakup/i18n/chartab
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: TODO
+
+What: /sys/accessibility/speakup/i18n/ctl_keys
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: Here, you will find names of control keys. These are used with
+ Speakup's say_control feature.
+
+What: /sys/accessibility/speakup/i18n/function_names
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description:	Here, you will find a list of names for Speakup functions.
+		These are used by the help system. For example, suppose that
+		you have activated help mode and pressed keypad 3. Speakup
+		says: "keypad 3 is character, say next." The message
+		"character, say next" names a Speakup function, and it comes
+		from this function_names file.
+
+What: /sys/accessibility/speakup/i18n/states
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description:	This file contains names for key states. Again, these are part
+		of the help system. For instance, if you had pressed speakup +
+		keypad 3, you would hear: "speakup keypad 3 is go to bottom
+		edge." The speakup key is depressed, so the name of the key
+		state is speakup. This part of the message comes from the
+		states collection.
+
+What: /sys/accessibility/speakup/i18n/characters
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: Through this sys entry, Speakup gives you the ability to change
+ how Speakup pronounces a given character. You could, for
+ example, change how some punctuation characters are spoken. You
+ can even change how Speakup will pronounce certain letters. For
+ further details see '12. Changing the Pronunciation of
+ Characters' in Speakup User's Guide (file spkguide.txt in
+ source).
+
+What: /sys/accessibility/speakup/i18n/colors
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: When you use the "say attributes" function, Speakup says the
+ name of the foreground and background colors. These names come
+ from the i18n/colors file.
+
+What: /sys/accessibility/speakup/i18n/formatted
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: This group of messages contains embedded formatting codes, to
+ specify the type and width of displayed data. If you change
+ these, you must preserve all of the formatting codes, and they
+ must appear in the order used by the default messages.
+
+What: /sys/accessibility/speakup/i18n/key_names
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: Again, key_names is used by Speakup's help system. In the
+ previous example, Speakup said that you pressed "keypad 3."
+ This name came from the key_names file.
+
+What: /sys/accessibility/speakup/<synth-name>/
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description:	In `/sys/accessibility/speakup` is a directory corresponding to
+		the synthesizer driver currently in use, e.g. `soft` for the
+		soft driver. This directory contains files which control the
+		speech synthesizer itself, as opposed to controlling the
+		speakup screen reader. The parameters in this directory have
+		the same names and functions across all supported synthesizers.
+		The range of values for freq, pitch, rate, and vol is the same
+		for all supported synthesizers, with the given range being
+		internally mapped by the driver to more or less fit the range
+		of values supported for a given parameter by the individual
+		synthesizer. Below is a description of values and parameters
+		for the soft synthesizer, which is currently the most commonly
+		used.
+
+What: /sys/accessibility/speakup/soft/caps_start
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: This is the string that is sent to the synthesizer to cause it
+ to start speaking uppercase letters. For the soft synthesizer
+ and most others, this causes the pitch of the voice to rise
+ above the currently set pitch.
+
+What: /sys/accessibility/speakup/soft/caps_stop
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description:	This is the string sent to the synthesizer to cause it to stop
+		speaking uppercase letters. In the case of the soft synthesizer
+		and most others, this returns the pitch of the voice down to
+		the currently set pitch.
+
+What: /sys/accessibility/speakup/soft/delay_time
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: TODO:
+
+What: /sys/accessibility/speakup/soft/direct
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description:	Controls if punctuation is spoken by speakup, or by the
+		synthesizer. For example, speakup speaks ">" as "greater",
+		while the espeak synthesizer used by the soft driver speaks
+		"greater than". Zero lets speakup speak the punctuation. One
+		lets the synthesizer itself speak punctuation.
+
+What: /sys/accessibility/speakup/soft/freq
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: Gets or sets the frequency of the speech synthesizer. Range is
+ 0-9.
+
+What: /sys/accessibility/speakup/soft/full_time
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: TODO:
+
+What: /sys/accessibility/speakup/soft/jiffy_delta
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description:	This controls how many jiffies the kernel gives to the
+		synthesizer. Setting this too high can make a system unstable,
+		or even crash it.
+
+What: /sys/accessibility/speakup/soft/pitch
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: Gets or sets the pitch of the synthesizer. The range is 0-9.
+
+What: /sys/accessibility/speakup/soft/punct
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description:	Gets or sets the amount of punctuation spoken by the
+		synthesizer. The range for the soft driver seems to be 0-2.
+		TODO: How is this related to speakup's punc_level or
+		reading_punc?
+
+What: /sys/accessibility/speakup/soft/rate
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: Gets or sets the rate of the synthesizer. Range is from zero
+ slowest, to nine fastest.
+
+What: /sys/accessibility/speakup/soft/tone
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: Gets or sets the tone of the speech synthesizer. The range for
+ the soft driver seems to be 0-2. This seems to make no
+ difference if using espeak and the espeakup connector.
+ TODO: does espeakup support different tonalities?
+
+What: /sys/accessibility/speakup/soft/trigger_time
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: TODO:
+
+What: /sys/accessibility/speakup/soft/voice
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: Gets or sets the voice used by the synthesizer if the
+ synthesizer can speak in more than one voice. The range for the
+ soft driver is 0-7. Note that while espeak supports multiple
+ voices, this parameter will not set the voice when the espeakup
+ connector is used between speakup and espeak.
+
+What: /sys/accessibility/speakup/soft/vol
+KernelVersion: 2.6
+Contact: speakup@linux-speakup.org
+Description: Gets or sets the volume of the speech synthesizer. Range is 0-9,
+ with zero being the softest, and nine being the loudest.
+
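
As a worked example of the attributes above, a small userspace sketch that switches the active synthesizer by writing to the synth file; this assumes the speakup module is loaded and the caller has write permission on the sysfs entry:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/accessibility/speakup/synth", "w");

	if (!f) {
		perror("open synth");
		return 1;
	}
	fputs("soft", f);	/* driver must be built in or already loaded */
	fclose(f);
	return 0;
}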
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c
index bc1eaa3a0773..826016c3431a 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c
@@ -12,7 +12,7 @@
static const struct snd_pcm_hardware snd_bcm2835_playback_hw = {
.info = (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
- SNDRV_PCM_INFO_DRAIN_TRIGGER | SNDRV_PCM_INFO_SYNC_APPLPTR),
+ SNDRV_PCM_INFO_SYNC_APPLPTR),
.formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE,
.rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000,
.rate_min = 8000,
@@ -29,7 +29,7 @@ static const struct snd_pcm_hardware snd_bcm2835_playback_hw = {
static const struct snd_pcm_hardware snd_bcm2835_playback_spdif_hw = {
.info = (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
- SNDRV_PCM_INFO_DRAIN_TRIGGER | SNDRV_PCM_INFO_SYNC_APPLPTR),
+ SNDRV_PCM_INFO_SYNC_APPLPTR),
.formats = SNDRV_PCM_FMTBIT_S16_LE,
.rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_44100 |
SNDRV_PCM_RATE_48000,
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
index 23fba01107b9..c6f9cf1913d2 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
@@ -289,6 +289,7 @@ int bcm2835_audio_stop(struct bcm2835_alsa_stream *alsa_stream)
VC_AUDIO_MSG_TYPE_STOP, false);
}
+/* FIXME: this doesn't seem to work as expected for "draining" */
int bcm2835_audio_drain(struct bcm2835_alsa_stream *alsa_stream)
{
struct vc_audio_msg m = {
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index c6bb4aaf9bd0..082302944c37 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -1748,8 +1748,10 @@ vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent)
priv->hw->max_signal = 100;
- if (vnt_init(priv))
+ if (vnt_init(priv)) {
+ device_free_info(priv);
return -ENODEV;
+ }
device_print_info(priv);
pci_set_drvdata(pcid, priv);
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index eee1998c4b18..fac38c842ac5 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -469,10 +469,8 @@ static int prism2_connect(struct wiphy *wiphy, struct net_device *dev,
/* Set the encryption - we only support wep */
if (is_wep) {
if (sme->key) {
- if (sme->key_idx >= NUM_WEPKEYS) {
- err = -EINVAL;
- goto exit;
- }
+ if (sme->key_idx >= NUM_WEPKEYS)
+ return -EINVAL;
result = prism2_domibset_uint32(wlandev,
DIDMIB_DOT11SMT_PRIVACYTABLE_WEPDEFAULTKEYID,
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 04bf2acd3800..2d19f0e332b0 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -1075,27 +1075,6 @@ passthrough_parse_cdb(struct se_cmd *cmd,
unsigned int size;
/*
- * Clear a lun set in the cdb if the initiator talking to use spoke
- * and old standards version, as we can't assume the underlying device
- * won't choke up on it.
- */
- switch (cdb[0]) {
- case READ_10: /* SBC - RDProtect */
- case READ_12: /* SBC - RDProtect */
- case READ_16: /* SBC - RDProtect */
- case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
- case VERIFY: /* SBC - VRProtect */
- case VERIFY_16: /* SBC - VRProtect */
- case WRITE_VERIFY: /* SBC - VRProtect */
- case WRITE_VERIFY_12: /* SBC - VRProtect */
- case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
- break;
- default:
- cdb[1] &= 0x1f; /* clear logical unit number */
- break;
- }
-
- /*
* For REPORT LUNS we always need to emulate the response, for everything
* else, pass it up.
*/
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 391f39776c6a..6b9865c786ba 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -88,7 +88,7 @@ struct cpufreq_cooling_device {
struct cpufreq_policy *policy;
struct list_head node;
struct time_in_idle *idle_time;
- struct dev_pm_qos_request qos_req;
+ struct freq_qos_request qos_req;
};
static DEFINE_IDA(cpufreq_ida);
@@ -331,7 +331,7 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
cpufreq_cdev->cpufreq_state = state;
- return dev_pm_qos_update_request(&cpufreq_cdev->qos_req,
+ return freq_qos_update_request(&cpufreq_cdev->qos_req,
cpufreq_cdev->freq_table[state].frequency);
}
@@ -615,9 +615,9 @@ __cpufreq_cooling_register(struct device_node *np,
cooling_ops = &cpufreq_cooling_ops;
}
- ret = dev_pm_qos_add_request(dev, &cpufreq_cdev->qos_req,
- DEV_PM_QOS_MAX_FREQUENCY,
- cpufreq_cdev->freq_table[0].frequency);
+ ret = freq_qos_add_request(&policy->constraints,
+ &cpufreq_cdev->qos_req, FREQ_QOS_MAX,
+ cpufreq_cdev->freq_table[0].frequency);
if (ret < 0) {
pr_err("%s: Failed to add freq constraint (%d)\n", __func__,
ret);
@@ -637,7 +637,7 @@ __cpufreq_cooling_register(struct device_node *np,
return cdev;
remove_qos_req:
- dev_pm_qos_remove_request(&cpufreq_cdev->qos_req);
+ freq_qos_remove_request(&cpufreq_cdev->qos_req);
remove_ida:
ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);
free_table:
@@ -736,7 +736,7 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
mutex_unlock(&cooling_list_lock);
thermal_cooling_device_unregister(cdev);
- dev_pm_qos_remove_request(&cpufreq_cdev->qos_req);
+ freq_qos_remove_request(&cpufreq_cdev->qos_req);
ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);
kfree(cpufreq_cdev->idle_time);
kfree(cpufreq_cdev->freq_table);
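
The cpu_cooling hunks above replace the per-device PM QoS API with frequency QoS requests hung off the cpufreq policy's constraints. The lifecycle is the same in all three places the patch touches: add a FREQ_QOS_MAX request at registration, update it whenever the cooling state changes, and remove it at teardown. A minimal sketch of that lifecycle (error handling trimmed; not a drop-in from the driver):

static struct freq_qos_request req;

static int cap_init(struct cpufreq_policy *policy, unsigned int max_khz)
{
	/* Register a max-frequency constraint against the policy */
	return freq_qos_add_request(&policy->constraints, &req,
				    FREQ_QOS_MAX, max_khz);
}

static int cap_set(unsigned int new_max_khz)
{
	/* Re-aggregates all requests and applies the strictest cap */
	return freq_qos_update_request(&req, new_max_khz);
}

static void cap_exit(void)
{
	freq_qos_remove_request(&req);
}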
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index e55c79eb6430..98361acd3053 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -968,6 +968,11 @@ static int __init n_hdlc_init(void)
} /* end of init_module() */
+#ifdef CONFIG_SPARC
+#undef __exitdata
+#define __exitdata
+#endif
+
static const char hdlc_unregister_ok[] __exitdata =
KERN_INFO "N_HDLC: line discipline unregistered\n";
static const char hdlc_unregister_fail[] __exitdata =
diff --git a/drivers/tty/serial/8250/8250_men_mcb.c b/drivers/tty/serial/8250/8250_men_mcb.c
index 02c5aff58a74..8df89e9cd254 100644
--- a/drivers/tty/serial/8250/8250_men_mcb.c
+++ b/drivers/tty/serial/8250/8250_men_mcb.c
@@ -72,8 +72,8 @@ static int serial_8250_men_mcb_probe(struct mcb_device *mdev,
{
struct serial_8250_men_mcb_data *data;
struct resource *mem;
- unsigned int num_ports;
- unsigned int i;
+ int num_ports;
+ int i;
void __iomem *membase;
mem = mcb_get_resource(mdev, IORESOURCE_MEM);
@@ -88,7 +88,7 @@ static int serial_8250_men_mcb_probe(struct mcb_device *mdev,
dev_dbg(&mdev->dev, "found a 16z%03u with %u ports\n",
mdev->id, num_ports);
- if (num_ports == 0 || num_ports > 4) {
+ if (num_ports <= 0 || num_ports > 4) {
dev_err(&mdev->dev, "unexpected number of ports: %u\n",
num_ports);
return -ENODEV;
@@ -133,7 +133,7 @@ static int serial_8250_men_mcb_probe(struct mcb_device *mdev,
static void serial_8250_men_mcb_remove(struct mcb_device *mdev)
{
- unsigned int num_ports, i;
+ int num_ports, i;
struct serial_8250_men_mcb_data *data = mcb_get_drvdata(mdev);
if (!data)
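
The type change matters because the port count can be an error code: a negative errno stored in an unsigned int becomes a huge positive value, so the count is misreported as absurd rather than recognized as negative, and `num_ports <= 0` could never fire. A sketch of the signed version of the check, with get_num_ports() as a hypothetical helper returning a count or a negative errno:

static int get_num_ports(void);	/* hypothetical: count or -ENODEV */

static int check_ports(void)
{
	int num_ports = get_num_ports();

	if (num_ports <= 0 || num_ports > 4)	/* catches errors and 0 alike */
		return -ENODEV;
	return num_ports;
}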
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index c68e2b3a1634..836e736ae188 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -141,7 +141,7 @@ static void omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
serial8250_do_set_mctrl(port, mctrl);
- if (!up->gpios) {
+ if (!mctrl_gpio_to_gpiod(up->gpios, UART_GPIO_RTS)) {
/*
* Turn off autoRTS if RTS is lowered and restore autoRTS
* setting if RTS is raised
@@ -456,7 +456,8 @@ static void omap_8250_set_termios(struct uart_port *port,
up->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS | UPSTAT_AUTOXOFF);
if (termios->c_cflag & CRTSCTS && up->port.flags & UPF_HARD_FLOW &&
- !up->gpios) {
+ !mctrl_gpio_to_gpiod(up->gpios, UART_GPIO_RTS) &&
+ !mctrl_gpio_to_gpiod(up->gpios, UART_GPIO_CTS)) {
/* Enable AUTOCTS (autoRTS is enabled when RTS is raised) */
up->port.status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
priv->efr |= UART_EFR_CTS;
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 4789b5d62f63..67a9eb3f94ce 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1032,6 +1032,7 @@ config SERIAL_SIFIVE_CONSOLE
bool "Console on SiFive UART"
depends on SERIAL_SIFIVE=y
select SERIAL_CORE_CONSOLE
+ select SERIAL_EARLYCON
help
Select this option if you would like to use a SiFive UART as the
system console.
diff --git a/drivers/tty/serial/fsl_linflexuart.c b/drivers/tty/serial/fsl_linflexuart.c
index 68d74f2b5106..a32f0d2afd59 100644
--- a/drivers/tty/serial/fsl_linflexuart.c
+++ b/drivers/tty/serial/fsl_linflexuart.c
@@ -3,7 +3,7 @@
* Freescale linflexuart serial port driver
*
* Copyright 2012-2016 Freescale Semiconductor, Inc.
- * Copyright 2017-2018 NXP
+ * Copyright 2017-2019 NXP
*/
#if defined(CONFIG_SERIAL_FSL_LINFLEXUART_CONSOLE) && \
@@ -246,12 +246,14 @@ static irqreturn_t linflex_rxint(int irq, void *dev_id)
struct tty_port *port = &sport->state->port;
unsigned long flags, status;
unsigned char rx;
+ bool brk;
spin_lock_irqsave(&sport->lock, flags);
status = readl(sport->membase + UARTSR);
while (status & LINFLEXD_UARTSR_RMB) {
rx = readb(sport->membase + BDRM);
+ brk = false;
flg = TTY_NORMAL;
sport->icount.rx++;
@@ -261,8 +263,11 @@ static irqreturn_t linflex_rxint(int irq, void *dev_id)
status |= LINFLEXD_UARTSR_SZF;
if (status & LINFLEXD_UARTSR_BOF)
status |= LINFLEXD_UARTSR_BOF;
- if (status & LINFLEXD_UARTSR_FEF)
+ if (status & LINFLEXD_UARTSR_FEF) {
+ if (!rx)
+ brk = true;
status |= LINFLEXD_UARTSR_FEF;
+ }
if (status & LINFLEXD_UARTSR_PE)
status |= LINFLEXD_UARTSR_PE;
}
@@ -271,13 +276,15 @@ static irqreturn_t linflex_rxint(int irq, void *dev_id)
sport->membase + UARTSR);
status = readl(sport->membase + UARTSR);
- if (uart_handle_sysrq_char(sport, (unsigned char)rx))
- continue;
-
+ if (brk) {
+ uart_handle_break(sport);
+ } else {
#ifdef SUPPORT_SYSRQ
- sport->sysrq = 0;
+ if (uart_handle_sysrq_char(sport, (unsigned char)rx))
+ continue;
#endif
- tty_insert_flip_char(port, rx, flg);
+ tty_insert_flip_char(port, rx, flg);
+ }
}
spin_unlock_irqrestore(&sport->lock, flags);
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 3e17bb8a0b16..537896c4d887 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -548,7 +548,7 @@ static void lpuart_flush_buffer(struct uart_port *port)
val |= UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH;
lpuart32_write(&sport->port, val, UARTFIFO);
} else {
- val = readb(sport->port.membase + UARTPFIFO);
+ val = readb(sport->port.membase + UARTCFIFO);
val |= UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH;
writeb(val, sport->port.membase + UARTCFIFO);
}
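
The one-character register swap above is a classic read-modify-write bug: the code read UARTPFIFO (the FIFO parameter register) but wrote the result to UARTCFIFO (the FIFO control register), ORing unrelated parameter bits into the control word. A sketch of the corrected shape, assuming the usual readb()/writeb() accessors:

static void lpuart_fifo_flush(void __iomem *base)
{
	u8 val = readb(base + UARTCFIFO);	/* read what you will write */

	val |= UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH;
	writeb(val, base + UARTCFIFO);
}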
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 87c58f9f6390..5e08f2657b90 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -2222,8 +2222,8 @@ static int imx_uart_probe(struct platform_device *pdev)
return PTR_ERR(base);
rxirq = platform_get_irq(pdev, 0);
- txirq = platform_get_irq(pdev, 1);
- rtsirq = platform_get_irq(pdev, 2);
+ txirq = platform_get_irq_optional(pdev, 1);
+ rtsirq = platform_get_irq_optional(pdev, 2);
sport->port.dev = &pdev->dev;
sport->port.mapbase = res->start;
diff --git a/drivers/tty/serial/owl-uart.c b/drivers/tty/serial/owl-uart.c
index 03963af77b15..d2d8b3494685 100644
--- a/drivers/tty/serial/owl-uart.c
+++ b/drivers/tty/serial/owl-uart.c
@@ -740,7 +740,7 @@ static int __init owl_uart_init(void)
return ret;
}
-static void __init owl_uart_exit(void)
+static void __exit owl_uart_exit(void)
{
platform_driver_unregister(&owl_uart_platform_driver);
uart_unregister_driver(&owl_uart_driver);
diff --git a/drivers/tty/serial/rda-uart.c b/drivers/tty/serial/rda-uart.c
index c1b0d7662ef9..ff9a27d48bca 100644
--- a/drivers/tty/serial/rda-uart.c
+++ b/drivers/tty/serial/rda-uart.c
@@ -815,7 +815,7 @@ static int __init rda_uart_init(void)
return ret;
}
-static void __init rda_uart_exit(void)
+static void __exit rda_uart_exit(void)
{
platform_driver_unregister(&rda_uart_platform_driver);
uart_unregister_driver(&rda_uart_driver);
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 6e713be1d4e9..c4a414a46c7f 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -1964,8 +1964,10 @@ uart_get_console(struct uart_port *ports, int nr, struct console *co)
* console=<name>,io|mmio|mmio16|mmio32|mmio32be|mmio32native,<addr>,<options>
*
* The optional form
+ *
* earlycon=<name>,0x<addr>,<options>
* console=<name>,0x<addr>,<options>
+ *
* is also accepted; the returned @iotype will be UPIO_MEM.
*
* Returns 0 on success or -EINVAL on failure
diff --git a/drivers/tty/serial/serial_mctrl_gpio.c b/drivers/tty/serial/serial_mctrl_gpio.c
index d9074303c88e..fb4781292d40 100644
--- a/drivers/tty/serial/serial_mctrl_gpio.c
+++ b/drivers/tty/serial/serial_mctrl_gpio.c
@@ -66,6 +66,9 @@ EXPORT_SYMBOL_GPL(mctrl_gpio_set);
struct gpio_desc *mctrl_gpio_to_gpiod(struct mctrl_gpios *gpios,
enum mctrl_gpio_idx gidx)
{
+ if (gpios == NULL)
+ return NULL;
+
return gpios->gpio[gidx];
}
EXPORT_SYMBOL_GPL(mctrl_gpio_to_gpiod);
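
With this NULL guard in place, callers such as the 8250_omap hunks earlier no longer need to test up->gpios before asking for a specific line: a missing mctrl-gpio block and a missing individual GPIO both come back as NULL. A minimal sketch of the resulting caller-side idiom (the helper name is illustrative only):

/* True only when an RTS GPIO actually exists; safe with gpios == NULL. */
static bool have_rts_gpio(struct mctrl_gpios *gpios)
{
	return mctrl_gpio_to_gpiod(gpios, UART_GPIO_RTS) != NULL;
}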
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 4e754a4850e6..22e5d4e13714 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -2894,8 +2894,12 @@ static int sci_init_single(struct platform_device *dev,
port->mapbase = res->start;
sci_port->reg_size = resource_size(res);
- for (i = 0; i < ARRAY_SIZE(sci_port->irqs); ++i)
- sci_port->irqs[i] = platform_get_irq(dev, i);
+ for (i = 0; i < ARRAY_SIZE(sci_port->irqs); ++i) {
+ if (i)
+ sci_port->irqs[i] = platform_get_irq_optional(dev, i);
+ else
+ sci_port->irqs[i] = platform_get_irq(dev, i);
+ }
/* The SCI generates several interrupts. They can be muxed together or
* connected to different interrupt lines. In the muxed case only one
diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
index b8b912b5a8b9..06e79c11141d 100644
--- a/drivers/tty/serial/uartlite.c
+++ b/drivers/tty/serial/uartlite.c
@@ -897,7 +897,8 @@ static int __init ulite_init(void)
static void __exit ulite_exit(void)
{
platform_driver_unregister(&ulite_platform_driver);
- uart_unregister_driver(&ulite_uart_driver);
+ if (ulite_uart_driver.state)
+ uart_unregister_driver(&ulite_uart_driver);
}
module_init(ulite_init);
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index da4563aaaf5c..4e55bc327a54 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -1550,7 +1550,6 @@ static int cdns_uart_probe(struct platform_device *pdev)
goto err_out_id;
}
- uartps_major = cdns_uart_uart_driver->tty_driver->major;
cdns_uart_data->cdns_uart_driver = cdns_uart_uart_driver;
/*
@@ -1680,6 +1679,7 @@ static int cdns_uart_probe(struct platform_device *pdev)
console_port = NULL;
#endif
+ uartps_major = cdns_uart_uart_driver->tty_driver->major;
cdns_uart_data->cts_override = of_property_read_bool(pdev->dev.of_node,
"cts-override");
return 0;
@@ -1741,6 +1741,12 @@ static int cdns_uart_remove(struct platform_device *pdev)
console_port = NULL;
#endif
+	/* If this is the last instance, reset the major number */
+ mutex_lock(&bitmap_lock);
+ if (bitmap_empty(bitmap, MAX_UART_INSTANCES))
+ uartps_major = 0;
+ mutex_unlock(&bitmap_lock);
+
uart_unregister_driver(cdns_uart_data->cdns_uart_driver);
return rc;
}
diff --git a/drivers/usb/cdns3/cdns3-pci-wrap.c b/drivers/usb/cdns3/cdns3-pci-wrap.c
index c41ddb61b857..b0a29efe7d31 100644
--- a/drivers/usb/cdns3/cdns3-pci-wrap.c
+++ b/drivers/usb/cdns3/cdns3-pci-wrap.c
@@ -159,8 +159,9 @@ static int cdns3_pci_probe(struct pci_dev *pdev,
wrap->plat_dev = platform_device_register_full(&plat_info);
if (IS_ERR(wrap->plat_dev)) {
pci_disable_device(pdev);
+ err = PTR_ERR(wrap->plat_dev);
kfree(wrap);
- return PTR_ERR(wrap->plat_dev);
+ return err;
}
}
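
The reordering above fixes a use-after-free: PTR_ERR(wrap->plat_dev) reads through wrap, so it must be evaluated before kfree(wrap). The general shape, sketched with a hypothetical obj/child pair:

if (IS_ERR(obj->child)) {
	int err = PTR_ERR(obj->child);	/* read through obj first... */

	kfree(obj);			/* ...then free it */
	return err;
}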
diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c
index 06f1e105be4e..c2123ef8d8a3 100644
--- a/drivers/usb/cdns3/core.c
+++ b/drivers/usb/cdns3/core.c
@@ -160,10 +160,30 @@ static int cdns3_core_init_role(struct cdns3 *cdns)
if (ret)
goto err;
- if (cdns->dr_mode != USB_DR_MODE_OTG) {
+ /* Initialize idle role to start with */
+ ret = cdns3_role_start(cdns, USB_ROLE_NONE);
+ if (ret)
+ goto err;
+
+ switch (cdns->dr_mode) {
+ case USB_DR_MODE_OTG:
ret = cdns3_hw_role_switch(cdns);
if (ret)
goto err;
+ break;
+ case USB_DR_MODE_PERIPHERAL:
+ ret = cdns3_role_start(cdns, USB_ROLE_DEVICE);
+ if (ret)
+ goto err;
+ break;
+ case USB_DR_MODE_HOST:
+ ret = cdns3_role_start(cdns, USB_ROLE_HOST);
+ if (ret)
+ goto err;
+ break;
+ default:
+ ret = -EINVAL;
+ goto err;
}
return ret;
diff --git a/drivers/usb/cdns3/ep0.c b/drivers/usb/cdns3/ep0.c
index 44f652e8b5a2..e71240b386b4 100644
--- a/drivers/usb/cdns3/ep0.c
+++ b/drivers/usb/cdns3/ep0.c
@@ -234,9 +234,11 @@ static int cdns3_req_ep0_set_address(struct cdns3_device *priv_dev,
static int cdns3_req_ep0_get_status(struct cdns3_device *priv_dev,
struct usb_ctrlrequest *ctrl)
{
+ struct cdns3_endpoint *priv_ep;
__le16 *response_pkt;
u16 usb_status = 0;
u32 recip;
+ u8 index;
recip = ctrl->bRequestType & USB_RECIP_MASK;
@@ -262,9 +264,13 @@ static int cdns3_req_ep0_get_status(struct cdns3_device *priv_dev,
case USB_RECIP_INTERFACE:
return cdns3_ep0_delegate_req(priv_dev, ctrl);
case USB_RECIP_ENDPOINT:
- /* check if endpoint is stalled */
+ index = cdns3_ep_addr_to_index(ctrl->wIndex);
+ priv_ep = priv_dev->eps[index];
+
+ /* check if endpoint is stalled or stall is pending */
cdns3_select_ep(priv_dev, ctrl->wIndex);
- if (EP_STS_STALL(readl(&priv_dev->regs->ep_sts)))
+ if (EP_STS_STALL(readl(&priv_dev->regs->ep_sts)) ||
+ (priv_ep->flags & EP_STALL_PENDING))
usb_status = BIT(USB_ENDPOINT_HALT);
break;
default:
@@ -332,7 +338,7 @@ static int cdns3_ep0_feature_handle_device(struct cdns3_device *priv_dev,
* for sending status stage.
* This time should be less then 3ms.
*/
- usleep_range(1000, 2000);
+ mdelay(1);
cdns3_set_register_bit(&priv_dev->regs->usb_cmd,
USB_CMD_STMODE |
USB_STS_TMODE_SEL(tmode - 1));
diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c
index 228cdc4ab886..9050b380ab83 100644
--- a/drivers/usb/cdns3/gadget.c
+++ b/drivers/usb/cdns3/gadget.c
@@ -1145,6 +1145,14 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev,
request = cdns3_next_request(&priv_ep->pending_req_list);
priv_req = to_cdns3_request(request);
+ trb = priv_ep->trb_pool + priv_ep->dequeue;
+
+ /* Request was dequeued and TRB was changed to TRB_LINK. */
+ if (TRB_FIELD_TO_TYPE(trb->control) == TRB_LINK) {
+ trace_cdns3_complete_trb(priv_ep, trb);
+ cdns3_move_deq_to_next_trb(priv_req);
+ }
+
/* Re-select endpoint. It could be changed by other CPU during
* handling usb_gadget_giveback_request.
*/
@@ -2067,6 +2075,7 @@ int cdns3_gadget_ep_dequeue(struct usb_ep *ep,
struct usb_request *req, *req_temp;
struct cdns3_request *priv_req;
struct cdns3_trb *link_trb;
+ u8 req_on_hw_ring = 0;
unsigned long flags;
int ret = 0;
@@ -2083,8 +2092,10 @@ int cdns3_gadget_ep_dequeue(struct usb_ep *ep,
list_for_each_entry_safe(req, req_temp, &priv_ep->pending_req_list,
list) {
- if (request == req)
+ if (request == req) {
+ req_on_hw_ring = 1;
goto found;
+ }
}
list_for_each_entry_safe(req, req_temp, &priv_ep->deferred_req_list,
@@ -2096,27 +2107,21 @@ int cdns3_gadget_ep_dequeue(struct usb_ep *ep,
goto not_found;
found:
-
- if (priv_ep->wa1_trb == priv_req->trb)
- cdns3_wa1_restore_cycle_bit(priv_ep);
-
link_trb = priv_req->trb;
- cdns3_move_deq_to_next_trb(priv_req);
- cdns3_gadget_giveback(priv_ep, priv_req, -ECONNRESET);
-
- /* Update ring */
- request = cdns3_next_request(&priv_ep->deferred_req_list);
- if (request) {
- priv_req = to_cdns3_request(request);
+ /* Update ring only if removed request is on pending_req_list list */
+ if (req_on_hw_ring) {
link_trb->buffer = TRB_BUFFER(priv_ep->trb_pool_dma +
(priv_req->start_trb * TRB_SIZE));
link_trb->control = (link_trb->control & TRB_CYCLE) |
- TRB_TYPE(TRB_LINK) | TRB_CHAIN | TRB_TOGGLE;
- } else {
- priv_ep->flags |= EP_UPDATE_EP_TRBADDR;
+ TRB_TYPE(TRB_LINK) | TRB_CHAIN;
+
+ if (priv_ep->wa1_trb == priv_req->trb)
+ cdns3_wa1_restore_cycle_bit(priv_ep);
}
+ cdns3_gadget_giveback(priv_ep, priv_req, -ECONNRESET);
+
not_found:
spin_unlock_irqrestore(&priv_dev->lock, flags);
return ret;
@@ -2571,6 +2576,7 @@ static int cdns3_gadget_start(struct cdns3 *cdns)
switch (max_speed) {
case USB_SPEED_FULL:
writel(USB_CONF_SFORCE_FS, &priv_dev->regs->usb_conf);
+ writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf);
break;
case USB_SPEED_HIGH:
writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf);
@@ -2662,6 +2668,13 @@ static int __cdns3_gadget_init(struct cdns3 *cdns)
{
int ret = 0;
+ /* Ensure 32-bit DMA Mask in case we switched back from Host mode */
+ ret = dma_set_mask_and_coherent(cdns->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(cdns->dev, "Failed to set dma mask: %d\n", ret);
+ return ret;
+ }
+
cdns3_drd_switch_gadget(cdns, 1);
pm_runtime_get_sync(cdns->dev);
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index 7fea4999d352..0d8e3f3804a3 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -445,6 +445,7 @@ static void usblp_cleanup(struct usblp *usblp)
kfree(usblp->readbuf);
kfree(usblp->device_id_string);
kfree(usblp->statusbuf);
+ usb_put_intf(usblp->intf);
kfree(usblp);
}
@@ -461,10 +462,12 @@ static int usblp_release(struct inode *inode, struct file *file)
mutex_lock(&usblp_mutex);
usblp->used = 0;
- if (usblp->present) {
+ if (usblp->present)
usblp_unlink_urbs(usblp);
- usb_autopm_put_interface(usblp->intf);
- } else /* finish cleanup from disconnect */
+
+ usb_autopm_put_interface(usblp->intf);
+
+ if (!usblp->present) /* finish cleanup from disconnect */
usblp_cleanup(usblp);
mutex_unlock(&usblp_mutex);
return 0;
@@ -1111,7 +1114,7 @@ static int usblp_probe(struct usb_interface *intf,
init_waitqueue_head(&usblp->wwait);
init_usb_anchor(&usblp->urbs);
usblp->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
- usblp->intf = intf;
+ usblp->intf = usb_get_intf(intf);
/* Malloc device ID string buffer to the largest expected length,
* since we can re-query it on an ioctl and a dynamic string
@@ -1196,6 +1199,7 @@ abort:
kfree(usblp->readbuf);
kfree(usblp->statusbuf);
kfree(usblp->device_id_string);
+ usb_put_intf(usblp->intf);
kfree(usblp);
abort_ret:
return retval;
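
The usblp hunks take a reference on the USB interface at probe and drop it on every teardown path, so the interface cannot disappear while the character device is still open. An illustrative sketch of the pairing (types and names are not from the driver):

struct my_priv {
	struct usb_interface *intf;
};

static void my_bind(struct my_priv *priv, struct usb_interface *intf)
{
	priv->intf = usb_get_intf(intf);	/* take a reference */
}

static void my_cleanup(struct my_priv *priv)
{
	usb_put_intf(priv->intf);	/* release with the last user */
	kfree(priv);
}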
diff --git a/drivers/usb/dwc3/drd.c b/drivers/usb/dwc3/drd.c
index 726100d1ac0d..c946d64142ad 100644
--- a/drivers/usb/dwc3/drd.c
+++ b/drivers/usb/dwc3/drd.c
@@ -139,14 +139,14 @@ static int dwc3_otg_get_irq(struct dwc3 *dwc)
struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
int irq;
- irq = platform_get_irq_byname(dwc3_pdev, "otg");
+ irq = platform_get_irq_byname_optional(dwc3_pdev, "otg");
if (irq > 0)
goto out;
if (irq == -EPROBE_DEFER)
goto out;
- irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3");
+ irq = platform_get_irq_byname_optional(dwc3_pdev, "dwc_usb3");
if (irq > 0)
goto out;
@@ -157,9 +157,6 @@ static int dwc3_otg_get_irq(struct dwc3 *dwc)
if (irq > 0)
goto out;
- if (irq != -EPROBE_DEFER)
- dev_err(dwc->dev, "missing OTG IRQ\n");
-
if (!irq)
irq = -EINVAL;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 8adb59f8e4f1..86dc1db788a9 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -3264,14 +3264,14 @@ static int dwc3_gadget_get_irq(struct dwc3 *dwc)
struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
int irq;
- irq = platform_get_irq_byname(dwc3_pdev, "peripheral");
+ irq = platform_get_irq_byname_optional(dwc3_pdev, "peripheral");
if (irq > 0)
goto out;
if (irq == -EPROBE_DEFER)
goto out;
- irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3");
+ irq = platform_get_irq_byname_optional(dwc3_pdev, "dwc_usb3");
if (irq > 0)
goto out;
@@ -3282,9 +3282,6 @@ static int dwc3_gadget_get_irq(struct dwc3 *dwc)
if (irq > 0)
goto out;
- if (irq != -EPROBE_DEFER)
- dev_err(dwc->dev, "missing peripheral IRQ\n");
-
if (!irq)
irq = -EINVAL;
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index 8deea8c91e03..5567ed2cddbe 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -16,14 +16,14 @@ static int dwc3_host_get_irq(struct dwc3 *dwc)
struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
int irq;
- irq = platform_get_irq_byname(dwc3_pdev, "host");
+ irq = platform_get_irq_byname_optional(dwc3_pdev, "host");
if (irq > 0)
goto out;
if (irq == -EPROBE_DEFER)
goto out;
- irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3");
+ irq = platform_get_irq_byname_optional(dwc3_pdev, "dwc_usb3");
if (irq > 0)
goto out;
@@ -34,9 +34,6 @@ static int dwc3_host_get_irq(struct dwc3 *dwc)
if (irq > 0)
goto out;
- if (irq != -EPROBE_DEFER)
- dev_err(dwc->dev, "missing host IRQ\n");
-
if (!irq)
irq = -EINVAL;
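
All three dwc3 hunks switch the named lookups to platform_get_irq_byname_optional(), which stays silent when the name is simply absent; only the final, mandatory index-0 lookup is left to warn. A sketch of the resulting fallback chain, with name standing for "host", "peripheral", or "otg" (simplified: the real code also maps 0 to -EINVAL):

static int get_block_irq(struct platform_device *pdev, const char *name)
{
	int irq;

	irq = platform_get_irq_byname_optional(pdev, name);	/* silent */
	if (irq > 0 || irq == -EPROBE_DEFER)
		return irq;

	irq = platform_get_irq_byname_optional(pdev, "dwc_usb3");
	if (irq > 0 || irq == -EPROBE_DEFER)
		return irq;

	return platform_get_irq(pdev, 0);	/* last resort may warn */
}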
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
index d7e611645533..d354036ff6c8 100644
--- a/drivers/usb/gadget/udc/Kconfig
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -45,7 +45,7 @@ config USB_AT91
config USB_LPC32XX
tristate "LPC32XX USB Peripheral Controller"
- depends on ARCH_LPC32XX
+ depends on ARCH_LPC32XX || COMPILE_TEST
depends on I2C
select USB_ISP1301
help
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index 8414fac74493..3d499d93c083 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -48,6 +48,7 @@
#define DRIVER_VERSION "02 May 2005"
#define POWER_BUDGET 500 /* in mA; use 8 for low-power port testing */
+#define POWER_BUDGET_3 900 /* in mA */
static const char driver_name[] = "dummy_hcd";
static const char driver_desc[] = "USB Host+Gadget Emulator";
@@ -2432,7 +2433,7 @@ static int dummy_start_ss(struct dummy_hcd *dum_hcd)
dum_hcd->rh_state = DUMMY_RH_RUNNING;
dum_hcd->stream_en_ep = 0;
INIT_LIST_HEAD(&dum_hcd->urbp_list);
- dummy_hcd_to_hcd(dum_hcd)->power_budget = POWER_BUDGET;
+ dummy_hcd_to_hcd(dum_hcd)->power_budget = POWER_BUDGET_3;
dummy_hcd_to_hcd(dum_hcd)->state = HC_STATE_RUNNING;
dummy_hcd_to_hcd(dum_hcd)->uses_new_polling = 1;
#ifdef CONFIG_USB_OTG
diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c
index b3e073fb88c6..bf6c81e2f8cc 100644
--- a/drivers/usb/gadget/udc/lpc32xx_udc.c
+++ b/drivers/usb/gadget/udc/lpc32xx_udc.c
@@ -1151,7 +1151,7 @@ static void udc_pop_fifo(struct lpc32xx_udc *udc, u8 *data, u32 bytes)
u32 *p32, tmp, cbytes;
/* Use optimal data transfer method based on source address and size */
- switch (((u32) data) & 0x3) {
+ switch (((uintptr_t) data) & 0x3) {
case 0: /* 32-bit aligned */
p32 = (u32 *) data;
cbytes = (bytes & ~0x3);
@@ -1177,11 +1177,11 @@ static void udc_pop_fifo(struct lpc32xx_udc *udc, u8 *data, u32 bytes)
tmp = readl(USBD_RXDATA(udc->udp_baseaddr));
bl = bytes - n;
- if (bl > 3)
- bl = 3;
+ if (bl > 4)
+ bl = 4;
for (i = 0; i < bl; i++)
- data[n + i] = (u8) ((tmp >> (n * 8)) & 0xFF);
+ data[n + i] = (u8) ((tmp >> (i * 8)) & 0xFF);
}
break;
@@ -1252,7 +1252,7 @@ static void udc_stuff_fifo(struct lpc32xx_udc *udc, u8 *data, u32 bytes)
u32 *p32, tmp, cbytes;
/* Use optimal data transfer method based on source address and size */
- switch (((u32) data) & 0x3) {
+ switch (((uintptr_t) data) & 0x3) {
case 0: /* 32-bit aligned */
p32 = (u32 *) data;
cbytes = (bytes & ~0x3);
diff --git a/drivers/usb/host/xhci-ext-caps.c b/drivers/usb/host/xhci-ext-caps.c
index f498160df969..3351d07c431f 100644
--- a/drivers/usb/host/xhci-ext-caps.c
+++ b/drivers/usb/host/xhci-ext-caps.c
@@ -57,6 +57,7 @@ static int xhci_create_intel_xhci_sw_pdev(struct xhci_hcd *xhci, u32 cap_offset)
ret = platform_device_add_properties(pdev, role_switch_props);
if (ret) {
dev_err(dev, "failed to register device properties\n");
+ platform_device_put(pdev);
return ret;
}
}
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 9741cdeea9d7..85ceb43e3405 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -3202,10 +3202,10 @@ static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
if (usb_urb_dir_out(urb)) {
len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
seg->bounce_buf, new_buff_len, enqd_len);
- if (len != seg->bounce_len)
+ if (len != new_buff_len)
xhci_warn(xhci,
"WARN Wrong bounce buffer write length: %zu != %d\n",
- len, seg->bounce_len);
+ len, new_buff_len);
seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
max_pkt, DMA_TO_DEVICE);
} else {
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 500865975687..517ec3206f6e 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1032,7 +1032,7 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
writel(command, &xhci->op_regs->command);
xhci->broken_suspend = 0;
if (xhci_handshake(&xhci->op_regs->status,
- STS_SAVE, 0, 10 * 1000)) {
+ STS_SAVE, 0, 20 * 1000)) {
/*
* AMD SNPS xHC 3.0 occasionally does not clear the
* SSS bit of USBSTS and when driver tries to poll
@@ -1108,6 +1108,18 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
hibernated = true;
if (!hibernated) {
+ /*
+ * Some controllers might lose power during suspend, so wait
+ * for the controller-not-ready (CNR) bit to clear, just as in xHC init.
+ */
+ retval = xhci_handshake(&xhci->op_regs->status,
+ STS_CNR, 0, 10 * 1000 * 1000);
+ if (retval) {
+ xhci_warn(xhci, "Controller not ready at resume %d\n",
+ retval);
+ spin_unlock_irq(&xhci->lock);
+ return retval;
+ }
/* step 1: restore register */
xhci_restore_registers(xhci);
/* step 2: initialize command ring buffer */
@@ -3083,6 +3095,7 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
unsigned int ep_index;
unsigned long flags;
u32 ep_flag;
+ int err;
xhci = hcd_to_xhci(hcd);
if (!host_ep->hcpriv)
@@ -3142,7 +3155,17 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
xhci_free_command(xhci, cfg_cmd);
goto cleanup;
}
- xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id, ep_index, 0);
+
+ err = xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id,
+ ep_index, 0);
+ if (err < 0) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_free_command(xhci, cfg_cmd);
+ xhci_dbg(xhci, "%s: Failed to queue stop ep command, %d ",
+ __func__, err);
+ goto cleanup;
+ }
+
xhci_ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
@@ -3156,8 +3179,16 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
ctrl_ctx, ep_flag, ep_flag);
xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index);
- xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma,
+ err = xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma,
udev->slot_id, false);
+ if (err < 0) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_free_command(xhci, cfg_cmd);
+ xhci_dbg(xhci, "%s: Failed to queue config ep command, %d ",
+ __func__, err);
+ goto cleanup;
+ }
+
xhci_ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
@@ -4674,12 +4705,12 @@ static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci,
alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev,
desc, state, timeout);
- /* If we found we can't enable hub-initiated LPM, or
+ /* If we found we can't enable hub-initiated LPM, and
* the U1 or U2 exit latency was too high to allow
- * device-initiated LPM as well, just stop searching.
+ * device-initiated LPM as well, then we will disable LPM
+ * for this device, so stop searching any further.
*/
- if (alt_timeout == USB3_LPM_DISABLED ||
- alt_timeout == USB3_LPM_DEVICE_INITIATED) {
+ if (alt_timeout == USB3_LPM_DISABLED) {
*timeout = alt_timeout;
return -E2BIG;
}
@@ -4790,10 +4821,12 @@ static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,
if (intf->dev.driver) {
driver = to_usb_driver(intf->dev.driver);
if (driver && driver->disable_hub_initiated_lpm) {
- dev_dbg(&udev->dev, "Hub-initiated %s disabled "
- "at request of driver %s\n",
- state_name, driver->name);
- return xhci_get_timeout_no_hub_lpm(udev, state);
+ dev_dbg(&udev->dev, "Hub-initiated %s disabled at request of driver %s\n",
+ state_name, driver->name);
+ timeout = xhci_get_timeout_no_hub_lpm(udev,
+ state);
+ if (timeout == USB3_LPM_DISABLED)
+ return timeout;
}
}
@@ -5077,11 +5110,18 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
hcd->has_tt = 1;
} else {
/*
- * Some 3.1 hosts return sbrn 0x30, use xhci supported protocol
- * minor revision instead of sbrn. Minor revision is a two digit
- * BCD containing minor and sub-minor numbers, only show minor.
+ * Early xHCI 1.1 spec did not mention USB 3.1 capable hosts
+ * should return 0x31 for sbrn, or that the minor revision
+ * is a two-digit BCD containing minor and sub-minor numbers.
+ * This was later clarified in xHCI 1.2.
+ *
+ * Some USB 3.1 capable hosts therefore have sbrn 0x30, and
+ * minor revision set to 0x1 instead of 0x10.
*/
- minor_rev = xhci->usb3_rhub.min_rev / 0x10;
+ if (xhci->usb3_rhub.min_rev == 0x1)
+ minor_rev = 1;
+ else
+ minor_rev = xhci->usb3_rhub.min_rev / 0x10;
switch (minor_rev) {
case 2:
@@ -5198,8 +5238,16 @@ static void xhci_clear_tt_buffer_complete(struct usb_hcd *hcd,
unsigned int ep_index;
unsigned long flags;
+ /*
+ * udev might be NULL if the TT buffer is cleared during a failed device
+ * enumeration due to a halted control endpoint. The USB core might
+ * have allocated a new udev for the next enumeration attempt.
+ */
+
xhci = hcd_to_xhci(hcd);
udev = (struct usb_device *)ep->hcpriv;
+ if (!udev)
+ return;
slot_id = udev->slot_id;
ep_index = xhci_get_endpoint_index(&ep->desc);
diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c
index 0a57c2cc8e5a..7a6b122c833f 100644
--- a/drivers/usb/image/microtek.c
+++ b/drivers/usb/image/microtek.c
@@ -716,6 +716,10 @@ static int mts_usb_probe(struct usb_interface *intf,
}
+ if (ep_in_current != &ep_in_set[2]) {
+ MTS_WARNING("couldn't find two input bulk endpoints. Bailing out.\n");
+ return -ENODEV;
+ }
if ( ep_out == -1 ) {
MTS_WARNING( "couldn't find an output bulk endpoint. Bailing out.\n" );
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index bdae62b2ffe0..9bce583aada3 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -47,16 +47,6 @@ config USB_SEVSEG
To compile this driver as a module, choose M here: the
module will be called usbsevseg.
-config USB_RIO500
- tristate "USB Diamond Rio500 support"
- help
- Say Y here if you want to connect a USB Rio500 mp3 player to your
- computer's USB port. Please read <file:Documentation/usb/rio.rst>
- for more information.
-
- To compile this driver as a module, choose M here: the
- module will be called rio500.
-
config USB_LEGOTOWER
tristate "USB Lego Infrared Tower support"
help
diff --git a/drivers/usb/misc/Makefile b/drivers/usb/misc/Makefile
index 109f54f5b9aa..0d416eb624bb 100644
--- a/drivers/usb/misc/Makefile
+++ b/drivers/usb/misc/Makefile
@@ -17,7 +17,6 @@ obj-$(CONFIG_USB_ISIGHTFW) += isight_firmware.o
obj-$(CONFIG_USB_LCD) += usblcd.o
obj-$(CONFIG_USB_LD) += ldusb.o
obj-$(CONFIG_USB_LEGOTOWER) += legousbtower.o
-obj-$(CONFIG_USB_RIO500) += rio500.o
obj-$(CONFIG_USB_TEST) += usbtest.o
obj-$(CONFIG_USB_EHSET_TEST_FIXTURE) += ehset.o
obj-$(CONFIG_USB_TRANCEVIBRATOR) += trancevibrator.o
diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c
index 344d523b0502..6f5edb9fc61e 100644
--- a/drivers/usb/misc/adutux.c
+++ b/drivers/usb/misc/adutux.c
@@ -75,6 +75,7 @@ struct adu_device {
char serial_number[8];
int open_count; /* number of times this port has been opened */
+ unsigned long disconnected:1;
char *read_buffer_primary;
int read_buffer_length;
@@ -116,7 +117,7 @@ static void adu_abort_transfers(struct adu_device *dev)
{
unsigned long flags;
- if (dev->udev == NULL)
+ if (dev->disconnected)
return;
/* shutdown transfer */
@@ -148,6 +149,7 @@ static void adu_delete(struct adu_device *dev)
kfree(dev->read_buffer_secondary);
kfree(dev->interrupt_in_buffer);
kfree(dev->interrupt_out_buffer);
+ usb_put_dev(dev->udev);
kfree(dev);
}
@@ -243,7 +245,7 @@ static int adu_open(struct inode *inode, struct file *file)
}
dev = usb_get_intfdata(interface);
- if (!dev || !dev->udev) {
+ if (!dev) {
retval = -ENODEV;
goto exit_no_device;
}
@@ -326,7 +328,7 @@ static int adu_release(struct inode *inode, struct file *file)
}
adu_release_internal(dev);
- if (dev->udev == NULL) {
+ if (dev->disconnected) {
/* the device was unplugged before the file was released */
if (!dev->open_count) /* ... and we're the last user */
adu_delete(dev);
@@ -354,7 +356,7 @@ static ssize_t adu_read(struct file *file, __user char *buffer, size_t count,
return -ERESTARTSYS;
/* verify that the device wasn't unplugged */
- if (dev->udev == NULL) {
+ if (dev->disconnected) {
retval = -ENODEV;
pr_err("No device or device unplugged %d\n", retval);
goto exit;
@@ -518,7 +520,7 @@ static ssize_t adu_write(struct file *file, const __user char *buffer,
goto exit_nolock;
/* verify that the device wasn't unplugged */
- if (dev->udev == NULL) {
+ if (dev->disconnected) {
retval = -ENODEV;
pr_err("No device or device unplugged %d\n", retval);
goto exit;
@@ -663,7 +665,7 @@ static int adu_probe(struct usb_interface *interface,
mutex_init(&dev->mtx);
spin_lock_init(&dev->buflock);
- dev->udev = udev;
+ dev->udev = usb_get_dev(udev);
init_waitqueue_head(&dev->read_wait);
init_waitqueue_head(&dev->write_wait);
@@ -762,14 +764,18 @@ static void adu_disconnect(struct usb_interface *interface)
dev = usb_get_intfdata(interface);
- mutex_lock(&dev->mtx); /* not interruptible */
- dev->udev = NULL; /* poison */
usb_deregister_dev(interface, &adu_class);
- mutex_unlock(&dev->mtx);
+
+ usb_poison_urb(dev->interrupt_in_urb);
+ usb_poison_urb(dev->interrupt_out_urb);
mutex_lock(&adutux_mutex);
usb_set_intfdata(interface, NULL);
+ mutex_lock(&dev->mtx); /* not interruptible */
+ dev->disconnected = 1;
+ mutex_unlock(&dev->mtx);
+
/* if the device is not opened, then we clean up right now */
if (!dev->open_count)
adu_delete(dev);
diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c
index cf5828ce927a..34e6cd6f40d3 100644
--- a/drivers/usb/misc/chaoskey.c
+++ b/drivers/usb/misc/chaoskey.c
@@ -98,6 +98,7 @@ static void chaoskey_free(struct chaoskey *dev)
usb_free_urb(dev->urb);
kfree(dev->name);
kfree(dev->buf);
+ usb_put_intf(dev->interface);
kfree(dev);
}
}
@@ -145,6 +146,8 @@ static int chaoskey_probe(struct usb_interface *interface,
if (dev == NULL)
goto out;
+ dev->interface = usb_get_intf(interface);
+
dev->buf = kmalloc(size, GFP_KERNEL);
if (dev->buf == NULL)
@@ -174,8 +177,6 @@ static int chaoskey_probe(struct usb_interface *interface,
goto out;
}
- dev->interface = interface;
-
dev->in_ep = in_ep;
if (le16_to_cpu(udev->descriptor.idVendor) != ALEA_VENDOR_ID)
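
usblp, chaoskey, iowarrior and yurex all gain the same interface-refcount fix in this series. A minimal sketch of the pattern follows, with illustrative "sample" names; only the core usb_get_intf()/usb_put_intf() helpers are assumed (adutux and legousbtower do the analogous thing with usb_get_dev()/usb_put_dev()).

#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/usb.h>

struct sample_dev {
	struct usb_interface *intf;
	struct kref kref;
};

/* Runs at the last kref_put(), possibly long after disconnect();
 * the reference taken in probe keeps ->intf valid until here. */
static void sample_delete(struct kref *kref)
{
	struct sample_dev *dev = container_of(kref, struct sample_dev, kref);

	usb_put_intf(dev->intf);	/* pairs with usb_get_intf() below */
	kfree(dev);
}

static int sample_probe(struct usb_interface *intf,
			const struct usb_device_id *id)
{
	struct sample_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (!dev)
		return -ENOMEM;

	kref_init(&dev->kref);
	dev->intf = usb_get_intf(intf);	/* pin the interface */
	usb_set_intfdata(intf, dev);
	return 0;
}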
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index f5bed9f29e56..dce44fbf031f 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -54,11 +54,7 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
-/* Module parameters */
-static DEFINE_MUTEX(iowarrior_mutex);
-
static struct usb_driver iowarrior_driver;
-static DEFINE_MUTEX(iowarrior_open_disc_lock);
/*--------------*/
/* data */
@@ -87,6 +83,7 @@ struct iowarrior {
char chip_serial[9]; /* the serial number string of the chip connected */
int report_size; /* number of bytes in a report */
u16 product_id;
+ struct usb_anchor submitted;
};
/*--------------*/
@@ -243,6 +240,7 @@ static inline void iowarrior_delete(struct iowarrior *dev)
kfree(dev->int_in_buffer);
usb_free_urb(dev->int_in_urb);
kfree(dev->read_queue);
+ usb_put_intf(dev->interface);
kfree(dev);
}
@@ -424,11 +422,13 @@ static ssize_t iowarrior_write(struct file *file,
retval = -EFAULT;
goto error;
}
+ usb_anchor_urb(int_out_urb, &dev->submitted);
retval = usb_submit_urb(int_out_urb, GFP_KERNEL);
if (retval) {
dev_dbg(&dev->interface->dev,
"submit error %d for urb nr.%d\n",
retval, atomic_read(&dev->write_busy));
+ usb_unanchor_urb(int_out_urb);
goto error;
}
/* submit was ok */
@@ -477,8 +477,6 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd,
if (!buffer)
return -ENOMEM;
- /* lock this object */
- mutex_lock(&iowarrior_mutex);
mutex_lock(&dev->mutex);
/* verify that the device wasn't unplugged */
@@ -571,7 +569,6 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd,
error_out:
/* unlock the device */
mutex_unlock(&dev->mutex);
- mutex_unlock(&iowarrior_mutex);
kfree(buffer);
return retval;
}
@@ -586,27 +583,20 @@ static int iowarrior_open(struct inode *inode, struct file *file)
int subminor;
int retval = 0;
- mutex_lock(&iowarrior_mutex);
subminor = iminor(inode);
interface = usb_find_interface(&iowarrior_driver, subminor);
if (!interface) {
- mutex_unlock(&iowarrior_mutex);
- printk(KERN_ERR "%s - error, can't find device for minor %d\n",
+ pr_err("%s - error, can't find device for minor %d\n",
__func__, subminor);
return -ENODEV;
}
- mutex_lock(&iowarrior_open_disc_lock);
dev = usb_get_intfdata(interface);
- if (!dev) {
- mutex_unlock(&iowarrior_open_disc_lock);
- mutex_unlock(&iowarrior_mutex);
+ if (!dev)
return -ENODEV;
- }
mutex_lock(&dev->mutex);
- mutex_unlock(&iowarrior_open_disc_lock);
/* Only one process can open each device, no sharing. */
if (dev->opened) {
@@ -628,7 +618,6 @@ static int iowarrior_open(struct inode *inode, struct file *file)
out:
mutex_unlock(&dev->mutex);
- mutex_unlock(&iowarrior_mutex);
return retval;
}
@@ -764,11 +753,13 @@ static int iowarrior_probe(struct usb_interface *interface,
init_waitqueue_head(&dev->write_wait);
dev->udev = udev;
- dev->interface = interface;
+ dev->interface = usb_get_intf(interface);
iface_desc = interface->cur_altsetting;
dev->product_id = le16_to_cpu(udev->descriptor.idProduct);
+ init_usb_anchor(&dev->submitted);
+
res = usb_find_last_int_in_endpoint(iface_desc, &dev->int_in_endpoint);
if (res) {
dev_err(&interface->dev, "no interrupt-in endpoint found\n");
@@ -836,7 +827,6 @@ static int iowarrior_probe(struct usb_interface *interface,
if (retval) {
/* something prevented us from registering this driver */
dev_err(&interface->dev, "Not able to get a minor for this device.\n");
- usb_set_intfdata(interface, NULL);
goto error;
}
@@ -860,26 +850,15 @@ error:
*/
static void iowarrior_disconnect(struct usb_interface *interface)
{
- struct iowarrior *dev;
- int minor;
-
- dev = usb_get_intfdata(interface);
- mutex_lock(&iowarrior_open_disc_lock);
- usb_set_intfdata(interface, NULL);
- /* prevent device read, write and ioctl */
- dev->present = 0;
-
- minor = dev->minor;
- mutex_unlock(&iowarrior_open_disc_lock);
- /* give back our minor - this will call close() locks need to be dropped at this point*/
+ struct iowarrior *dev = usb_get_intfdata(interface);
+ int minor = dev->minor;
usb_deregister_dev(interface, &iowarrior_class);
mutex_lock(&dev->mutex);
/* prevent device read, write and ioctl */
-
- mutex_unlock(&dev->mutex);
+ dev->present = 0;
if (dev->opened) {
/* There is a process that holds a file descriptor to the device,
@@ -887,10 +866,13 @@ static void iowarrior_disconnect(struct usb_interface *interface)
Deleting the device is postponed until close() was called.
*/
usb_kill_urb(dev->int_in_urb);
+ usb_kill_anchored_urbs(&dev->submitted);
wake_up_interruptible(&dev->read_wait);
wake_up_interruptible(&dev->write_wait);
+ mutex_unlock(&dev->mutex);
} else {
/* no process is using the device, cleanup now */
+ mutex_unlock(&dev->mutex);
iowarrior_delete(dev);
}
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
index 6581774bdfa4..15b5f06fb0b3 100644
--- a/drivers/usb/misc/ldusb.c
+++ b/drivers/usb/misc/ldusb.c
@@ -153,6 +153,7 @@ MODULE_PARM_DESC(min_interrupt_out_interval, "Minimum interrupt out interval in
struct ld_usb {
struct mutex mutex; /* locks this structure */
struct usb_interface *intf; /* save off the usb interface pointer */
+ unsigned long disconnected:1;
int open_count; /* number of times this port has been opened */
@@ -192,12 +193,10 @@ static void ld_usb_abort_transfers(struct ld_usb *dev)
/* shutdown transfer */
if (dev->interrupt_in_running) {
dev->interrupt_in_running = 0;
- if (dev->intf)
- usb_kill_urb(dev->interrupt_in_urb);
+ usb_kill_urb(dev->interrupt_in_urb);
}
if (dev->interrupt_out_busy)
- if (dev->intf)
- usb_kill_urb(dev->interrupt_out_urb);
+ usb_kill_urb(dev->interrupt_out_urb);
}
/**
@@ -205,8 +204,6 @@ static void ld_usb_abort_transfers(struct ld_usb *dev)
*/
static void ld_usb_delete(struct ld_usb *dev)
{
- ld_usb_abort_transfers(dev);
-
/* free data structures */
usb_free_urb(dev->interrupt_in_urb);
usb_free_urb(dev->interrupt_out_urb);
@@ -263,7 +260,7 @@ static void ld_usb_interrupt_in_callback(struct urb *urb)
resubmit:
/* resubmit if we're still running */
- if (dev->interrupt_in_running && !dev->buffer_overflow && dev->intf) {
+ if (dev->interrupt_in_running && !dev->buffer_overflow) {
retval = usb_submit_urb(dev->interrupt_in_urb, GFP_ATOMIC);
if (retval) {
dev_err(&dev->intf->dev,
@@ -383,16 +380,13 @@ static int ld_usb_release(struct inode *inode, struct file *file)
goto exit;
}
- if (mutex_lock_interruptible(&dev->mutex)) {
- retval = -ERESTARTSYS;
- goto exit;
- }
+ mutex_lock(&dev->mutex);
if (dev->open_count != 1) {
retval = -ENODEV;
goto unlock_exit;
}
- if (dev->intf == NULL) {
+ if (dev->disconnected) {
/* the device was unplugged before the file was released */
mutex_unlock(&dev->mutex);
/* unlock here as ld_usb_delete frees dev */
@@ -423,7 +417,7 @@ static __poll_t ld_usb_poll(struct file *file, poll_table *wait)
dev = file->private_data;
- if (!dev->intf)
+ if (dev->disconnected)
return EPOLLERR | EPOLLHUP;
poll_wait(file, &dev->read_wait, wait);
@@ -462,7 +456,7 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count,
}
/* verify that the device wasn't unplugged */
- if (dev->intf == NULL) {
+ if (dev->disconnected) {
retval = -ENODEV;
printk(KERN_ERR "ldusb: No device or device unplugged %d\n", retval);
goto unlock_exit;
@@ -470,7 +464,7 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count,
/* wait for data */
spin_lock_irq(&dev->rbsl);
- if (dev->ring_head == dev->ring_tail) {
+ while (dev->ring_head == dev->ring_tail) {
dev->interrupt_in_done = 0;
spin_unlock_irq(&dev->rbsl);
if (file->f_flags & O_NONBLOCK) {
@@ -480,12 +474,17 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count,
retval = wait_event_interruptible(dev->read_wait, dev->interrupt_in_done);
if (retval < 0)
goto unlock_exit;
- } else {
- spin_unlock_irq(&dev->rbsl);
+
+ spin_lock_irq(&dev->rbsl);
}
+ spin_unlock_irq(&dev->rbsl);
/* actual_buffer contains actual_length + interrupt_in_buffer */
actual_buffer = (size_t *)(dev->ring_buffer + dev->ring_tail * (sizeof(size_t)+dev->interrupt_in_endpoint_size));
+ if (*actual_buffer > dev->interrupt_in_endpoint_size) {
+ retval = -EIO;
+ goto unlock_exit;
+ }
bytes_to_read = min(count, *actual_buffer);
if (bytes_to_read < *actual_buffer)
dev_warn(&dev->intf->dev, "Read buffer overflow, %zd bytes dropped\n",
@@ -542,7 +541,7 @@ static ssize_t ld_usb_write(struct file *file, const char __user *buffer,
}
/* verify that the device wasn't unplugged */
- if (dev->intf == NULL) {
+ if (dev->disconnected) {
retval = -ENODEV;
printk(KERN_ERR "ldusb: No device or device unplugged %d\n", retval);
goto unlock_exit;
@@ -696,10 +695,9 @@ static int ld_usb_probe(struct usb_interface *intf, const struct usb_device_id *
dev_warn(&intf->dev, "Interrupt out endpoint not found (using control endpoint instead)\n");
dev->interrupt_in_endpoint_size = usb_endpoint_maxp(dev->interrupt_in_endpoint);
- dev->ring_buffer =
- kmalloc_array(ring_buffer_size,
- sizeof(size_t) + dev->interrupt_in_endpoint_size,
- GFP_KERNEL);
+ dev->ring_buffer = kcalloc(ring_buffer_size,
+ sizeof(size_t) + dev->interrupt_in_endpoint_size,
+ GFP_KERNEL);
if (!dev->ring_buffer)
goto error;
dev->interrupt_in_buffer = kmalloc(dev->interrupt_in_endpoint_size, GFP_KERNEL);
@@ -764,6 +762,9 @@ static void ld_usb_disconnect(struct usb_interface *intf)
/* give back our minor */
usb_deregister_dev(intf, &ld_usb_class);
+ usb_poison_urb(dev->interrupt_in_urb);
+ usb_poison_urb(dev->interrupt_out_urb);
+
mutex_lock(&dev->mutex);
/* if the device is not opened, then we clean up right now */
@@ -771,7 +772,7 @@ static void ld_usb_disconnect(struct usb_interface *intf)
mutex_unlock(&dev->mutex);
ld_usb_delete(dev);
} else {
- dev->intf = NULL;
+ dev->disconnected = 1;
/* wake up pollers */
wake_up_interruptible_all(&dev->read_wait);
wake_up_interruptible_all(&dev->write_wait);
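
The ld_usb_read() changes above do two things: retest the ring after every wakeup (a while loop instead of a single if) and validate the stored length before trusting it. Condensed into a sketch — field names follow the driver, but the function itself is hypothetical and O_NONBLOCK handling is omitted:

static ssize_t sample_ring_peek(struct ld_usb *dev, size_t count)
{
	size_t *actual;

	spin_lock_irq(&dev->rbsl);
	while (dev->ring_head == dev->ring_tail) {	/* ring empty */
		dev->interrupt_in_done = 0;
		spin_unlock_irq(&dev->rbsl);
		if (wait_event_interruptible(dev->read_wait,
					     dev->interrupt_in_done))
			return -ERESTARTSYS;
		spin_lock_irq(&dev->rbsl);	/* recheck under the lock */
	}
	spin_unlock_irq(&dev->rbsl);

	/* Each slot is a size_t length followed by the packet payload. */
	actual = (size_t *)(dev->ring_buffer + dev->ring_tail *
			(sizeof(size_t) + dev->interrupt_in_endpoint_size));
	if (*actual > dev->interrupt_in_endpoint_size)
		return -EIO;	/* corrupt length, do not copy from it */

	return min(count, *actual);	/* bytes safe to hand to the caller */
}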
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
index 006cf13b2199..23061f1526b4 100644
--- a/drivers/usb/misc/legousbtower.c
+++ b/drivers/usb/misc/legousbtower.c
@@ -179,7 +179,6 @@ static const struct usb_device_id tower_table[] = {
};
MODULE_DEVICE_TABLE (usb, tower_table);
-static DEFINE_MUTEX(open_disc_mutex);
#define LEGO_USB_TOWER_MINOR_BASE 160
@@ -191,6 +190,7 @@ struct lego_usb_tower {
unsigned char minor; /* the starting minor number for this device */
int open_count; /* number of times this port has been opened */
+ unsigned long disconnected:1;
char* read_buffer;
size_t read_buffer_length; /* this much came in */
@@ -290,14 +290,13 @@ static inline void lego_usb_tower_debug_data(struct device *dev,
*/
static inline void tower_delete (struct lego_usb_tower *dev)
{
- tower_abort_transfers (dev);
-
/* free data structures */
usb_free_urb(dev->interrupt_in_urb);
usb_free_urb(dev->interrupt_out_urb);
kfree (dev->read_buffer);
kfree (dev->interrupt_in_buffer);
kfree (dev->interrupt_out_buffer);
+ usb_put_dev(dev->udev);
kfree (dev);
}
@@ -332,18 +331,14 @@ static int tower_open (struct inode *inode, struct file *file)
goto exit;
}
- mutex_lock(&open_disc_mutex);
dev = usb_get_intfdata(interface);
-
if (!dev) {
- mutex_unlock(&open_disc_mutex);
retval = -ENODEV;
goto exit;
}
/* lock this device */
if (mutex_lock_interruptible(&dev->lock)) {
- mutex_unlock(&open_disc_mutex);
retval = -ERESTARTSYS;
goto exit;
}
@@ -351,12 +346,9 @@ static int tower_open (struct inode *inode, struct file *file)
/* allow opening only once */
if (dev->open_count) {
- mutex_unlock(&open_disc_mutex);
retval = -EBUSY;
goto unlock_exit;
}
- dev->open_count = 1;
- mutex_unlock(&open_disc_mutex);
/* reset the tower */
result = usb_control_msg (dev->udev,
@@ -396,13 +388,14 @@ static int tower_open (struct inode *inode, struct file *file)
dev_err(&dev->udev->dev,
"Couldn't submit interrupt_in_urb %d\n", retval);
dev->interrupt_in_running = 0;
- dev->open_count = 0;
goto unlock_exit;
}
/* save device in the file's private structure */
file->private_data = dev;
+ dev->open_count = 1;
+
unlock_exit:
mutex_unlock(&dev->lock);
@@ -423,22 +416,19 @@ static int tower_release (struct inode *inode, struct file *file)
if (dev == NULL) {
retval = -ENODEV;
- goto exit_nolock;
- }
-
- mutex_lock(&open_disc_mutex);
- if (mutex_lock_interruptible(&dev->lock)) {
- retval = -ERESTARTSYS;
goto exit;
}
+ mutex_lock(&dev->lock);
+
if (dev->open_count != 1) {
dev_dbg(&dev->udev->dev, "%s: device not opened exactly once\n",
__func__);
retval = -ENODEV;
goto unlock_exit;
}
- if (dev->udev == NULL) {
+
+ if (dev->disconnected) {
/* the device was unplugged before the file was released */
/* unlock here as tower_delete frees dev */
@@ -456,10 +446,7 @@ static int tower_release (struct inode *inode, struct file *file)
unlock_exit:
mutex_unlock(&dev->lock);
-
exit:
- mutex_unlock(&open_disc_mutex);
-exit_nolock:
return retval;
}
@@ -477,10 +464,9 @@ static void tower_abort_transfers (struct lego_usb_tower *dev)
if (dev->interrupt_in_running) {
dev->interrupt_in_running = 0;
mb();
- if (dev->udev)
- usb_kill_urb (dev->interrupt_in_urb);
+ usb_kill_urb(dev->interrupt_in_urb);
}
- if (dev->interrupt_out_busy && dev->udev)
+ if (dev->interrupt_out_busy)
usb_kill_urb(dev->interrupt_out_urb);
}
@@ -516,7 +502,7 @@ static __poll_t tower_poll (struct file *file, poll_table *wait)
dev = file->private_data;
- if (!dev->udev)
+ if (dev->disconnected)
return EPOLLERR | EPOLLHUP;
poll_wait(file, &dev->read_wait, wait);
@@ -563,7 +549,7 @@ static ssize_t tower_read (struct file *file, char __user *buffer, size_t count,
}
/* verify that the device wasn't unplugged */
- if (dev->udev == NULL) {
+ if (dev->disconnected) {
retval = -ENODEV;
pr_err("No device or device unplugged %d\n", retval);
goto unlock_exit;
@@ -649,7 +635,7 @@ static ssize_t tower_write (struct file *file, const char __user *buffer, size_t
}
/* verify that the device wasn't unplugged */
- if (dev->udev == NULL) {
+ if (dev->disconnected) {
retval = -ENODEV;
pr_err("No device or device unplugged %d\n", retval);
goto unlock_exit;
@@ -759,7 +745,7 @@ static void tower_interrupt_in_callback (struct urb *urb)
resubmit:
/* resubmit if we're still running */
- if (dev->interrupt_in_running && dev->udev) {
+ if (dev->interrupt_in_running) {
retval = usb_submit_urb (dev->interrupt_in_urb, GFP_ATOMIC);
if (retval)
dev_err(&dev->udev->dev,
@@ -822,8 +808,9 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
mutex_init(&dev->lock);
- dev->udev = udev;
+ dev->udev = usb_get_dev(udev);
dev->open_count = 0;
+ dev->disconnected = 0;
dev->read_buffer = NULL;
dev->read_buffer_length = 0;
@@ -891,8 +878,10 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
get_version_reply,
sizeof(*get_version_reply),
1000);
- if (result < 0) {
- dev_err(idev, "LEGO USB Tower get version control request failed\n");
+ if (result != sizeof(*get_version_reply)) {
+ if (result >= 0)
+ result = -EIO;
+ dev_err(idev, "get version request failed: %d\n", result);
retval = result;
goto error;
}
@@ -910,7 +899,6 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
if (retval) {
/* something prevented us from registering this driver */
dev_err(idev, "Not able to get a minor for this device.\n");
- usb_set_intfdata (interface, NULL);
goto error;
}
dev->minor = interface->minor;
@@ -942,23 +930,24 @@ static void tower_disconnect (struct usb_interface *interface)
int minor;
dev = usb_get_intfdata (interface);
- mutex_lock(&open_disc_mutex);
- usb_set_intfdata (interface, NULL);
minor = dev->minor;
- /* give back our minor */
+ /* give back our minor and prevent further open() */
usb_deregister_dev (interface, &tower_class);
+ /* stop I/O */
+ usb_poison_urb(dev->interrupt_in_urb);
+ usb_poison_urb(dev->interrupt_out_urb);
+
mutex_lock(&dev->lock);
- mutex_unlock(&open_disc_mutex);
/* if the device is not opened, then we clean up right now */
if (!dev->open_count) {
mutex_unlock(&dev->lock);
tower_delete (dev);
} else {
- dev->udev = NULL;
+ dev->disconnected = 1;
/* wake up pollers */
wake_up_interruptible_all(&dev->read_wait);
wake_up_interruptible_all(&dev->write_wait);
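
adutux, ldusb and legousbtower converge on the same disconnect ordering above. A sketch of that ordering, reusing the illustrative sample_dev naming from the earlier sketch; the class, lock, URB, wait-queue and open_count fields, and sample_delete_dev(), are assumed for illustration:

static void sample_disconnect(struct usb_interface *interface)
{
	struct sample_dev *dev = usb_get_intfdata(interface);

	/* Give back the minor first so no new open() can race in. */
	usb_deregister_dev(interface, &sample_class);

	/* Poison, don't just kill: resubmission attempts now fail. */
	usb_poison_urb(dev->interrupt_in_urb);
	usb_poison_urb(dev->interrupt_out_urb);

	mutex_lock(&dev->lock);
	if (!dev->open_count) {
		mutex_unlock(&dev->lock);
		sample_delete_dev(dev);		/* no users left, free now */
	} else {
		/*
		 * Set a dedicated flag instead of clearing ->udev, so the
		 * I/O paths see the unplug while ->udev remains valid for
		 * the final usb_put_dev().
		 */
		dev->disconnected = 1;
		wake_up_interruptible_all(&dev->read_wait);
		wake_up_interruptible_all(&dev->write_wait);
		mutex_unlock(&dev->lock);
	}
}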
diff --git a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c
deleted file mode 100644
index 30cae5e1954d..000000000000
--- a/drivers/usb/misc/rio500.c
+++ /dev/null
@@ -1,554 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/* -*- linux-c -*- */
-
-/*
- * Driver for USB Rio 500
- *
- * Cesar Miquel (miquel@df.uba.ar)
- *
- * based on hp_scanner.c by David E. Nelson (dnelson@jump.net)
- *
- * Based upon mouse.c (Brad Keryan) and printer.c (Michael Gee).
- *
- * Changelog:
- * 30/05/2003 replaced lock/unlock kernel with up/down
- * Daniele Bellucci bellucda@tiscali.it
- * */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/signal.h>
-#include <linux/sched/signal.h>
-#include <linux/mutex.h>
-#include <linux/errno.h>
-#include <linux/random.h>
-#include <linux/poll.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/usb.h>
-#include <linux/wait.h>
-
-#include "rio500_usb.h"
-
-#define DRIVER_AUTHOR "Cesar Miquel <miquel@df.uba.ar>"
-#define DRIVER_DESC "USB Rio 500 driver"
-
-#define RIO_MINOR 64
-
-/* stall/wait timeout for rio */
-#define NAK_TIMEOUT (HZ)
-
-#define IBUF_SIZE 0x1000
-
-/* Size of the rio buffer */
-#define OBUF_SIZE 0x10000
-
-struct rio_usb_data {
- struct usb_device *rio_dev; /* init: probe_rio */
- unsigned int ifnum; /* Interface number of the USB device */
- int isopen; /* nz if open */
- int present; /* Device is present on the bus */
- char *obuf, *ibuf; /* transfer buffers */
- char bulk_in_ep, bulk_out_ep; /* Endpoint assignments */
- wait_queue_head_t wait_q; /* for timeouts */
-};
-
-static DEFINE_MUTEX(rio500_mutex);
-static struct rio_usb_data rio_instance;
-
-static int open_rio(struct inode *inode, struct file *file)
-{
- struct rio_usb_data *rio = &rio_instance;
-
- /* against disconnect() */
- mutex_lock(&rio500_mutex);
-
- if (rio->isopen || !rio->present) {
- mutex_unlock(&rio500_mutex);
- return -EBUSY;
- }
- rio->isopen = 1;
-
- init_waitqueue_head(&rio->wait_q);
-
-
- dev_info(&rio->rio_dev->dev, "Rio opened.\n");
- mutex_unlock(&rio500_mutex);
-
- return 0;
-}
-
-static int close_rio(struct inode *inode, struct file *file)
-{
- struct rio_usb_data *rio = &rio_instance;
-
- /* against disconnect() */
- mutex_lock(&rio500_mutex);
-
- rio->isopen = 0;
- if (!rio->present) {
- /* cleanup has been delayed */
- kfree(rio->ibuf);
- kfree(rio->obuf);
- rio->ibuf = NULL;
- rio->obuf = NULL;
- } else {
- dev_info(&rio->rio_dev->dev, "Rio closed.\n");
- }
- mutex_unlock(&rio500_mutex);
- return 0;
-}
-
-static long ioctl_rio(struct file *file, unsigned int cmd, unsigned long arg)
-{
- struct RioCommand rio_cmd;
- struct rio_usb_data *rio = &rio_instance;
- void __user *data;
- unsigned char *buffer;
- int result, requesttype;
- int retries;
- int retval=0;
-
- mutex_lock(&rio500_mutex);
- /* Sanity check to make sure rio is connected, powered, etc */
- if (rio->present == 0 || rio->rio_dev == NULL) {
- retval = -ENODEV;
- goto err_out;
- }
-
- switch (cmd) {
- case RIO_RECV_COMMAND:
- data = (void __user *) arg;
- if (data == NULL)
- break;
- if (copy_from_user(&rio_cmd, data, sizeof(struct RioCommand))) {
- retval = -EFAULT;
- goto err_out;
- }
- if (rio_cmd.length < 0 || rio_cmd.length > PAGE_SIZE) {
- retval = -EINVAL;
- goto err_out;
- }
- buffer = (unsigned char *) __get_free_page(GFP_KERNEL);
- if (buffer == NULL) {
- retval = -ENOMEM;
- goto err_out;
- }
- if (copy_from_user(buffer, rio_cmd.buffer, rio_cmd.length)) {
- retval = -EFAULT;
- free_page((unsigned long) buffer);
- goto err_out;
- }
-
- requesttype = rio_cmd.requesttype | USB_DIR_IN |
- USB_TYPE_VENDOR | USB_RECIP_DEVICE;
- dev_dbg(&rio->rio_dev->dev,
- "sending command:reqtype=%0x req=%0x value=%0x index=%0x len=%0x\n",
- requesttype, rio_cmd.request, rio_cmd.value,
- rio_cmd.index, rio_cmd.length);
- /* Send rio control message */
- retries = 3;
- while (retries) {
- result = usb_control_msg(rio->rio_dev,
- usb_rcvctrlpipe(rio-> rio_dev, 0),
- rio_cmd.request,
- requesttype,
- rio_cmd.value,
- rio_cmd.index, buffer,
- rio_cmd.length,
- jiffies_to_msecs(rio_cmd.timeout));
- if (result == -ETIMEDOUT)
- retries--;
- else if (result < 0) {
- dev_err(&rio->rio_dev->dev,
- "Error executing ioctrl. code = %d\n",
- result);
- retries = 0;
- } else {
- dev_dbg(&rio->rio_dev->dev,
- "Executed ioctl. Result = %d (data=%02x)\n",
- result, buffer[0]);
- if (copy_to_user(rio_cmd.buffer, buffer,
- rio_cmd.length)) {
- free_page((unsigned long) buffer);
- retval = -EFAULT;
- goto err_out;
- }
- retries = 0;
- }
-
- /* rio_cmd.buffer contains a raw stream of single byte
- data which has been returned from rio. Data is
- interpreted at application level. For data that
- will be cast to data types longer than 1 byte, data
- will be little_endian and will potentially need to
- be swapped at the app level */
-
- }
- free_page((unsigned long) buffer);
- break;
-
- case RIO_SEND_COMMAND:
- data = (void __user *) arg;
- if (data == NULL)
- break;
- if (copy_from_user(&rio_cmd, data, sizeof(struct RioCommand))) {
- retval = -EFAULT;
- goto err_out;
- }
- if (rio_cmd.length < 0 || rio_cmd.length > PAGE_SIZE) {
- retval = -EINVAL;
- goto err_out;
- }
- buffer = (unsigned char *) __get_free_page(GFP_KERNEL);
- if (buffer == NULL) {
- retval = -ENOMEM;
- goto err_out;
- }
- if (copy_from_user(buffer, rio_cmd.buffer, rio_cmd.length)) {
- free_page((unsigned long)buffer);
- retval = -EFAULT;
- goto err_out;
- }
-
- requesttype = rio_cmd.requesttype | USB_DIR_OUT |
- USB_TYPE_VENDOR | USB_RECIP_DEVICE;
- dev_dbg(&rio->rio_dev->dev,
- "sending command: reqtype=%0x req=%0x value=%0x index=%0x len=%0x\n",
- requesttype, rio_cmd.request, rio_cmd.value,
- rio_cmd.index, rio_cmd.length);
- /* Send rio control message */
- retries = 3;
- while (retries) {
- result = usb_control_msg(rio->rio_dev,
- usb_sndctrlpipe(rio-> rio_dev, 0),
- rio_cmd.request,
- requesttype,
- rio_cmd.value,
- rio_cmd.index, buffer,
- rio_cmd.length,
- jiffies_to_msecs(rio_cmd.timeout));
- if (result == -ETIMEDOUT)
- retries--;
- else if (result < 0) {
- dev_err(&rio->rio_dev->dev,
- "Error executing ioctrl. code = %d\n",
- result);
- retries = 0;
- } else {
- dev_dbg(&rio->rio_dev->dev,
- "Executed ioctl. Result = %d\n", result);
- retries = 0;
-
- }
-
- }
- free_page((unsigned long) buffer);
- break;
-
- default:
- retval = -ENOTTY;
- break;
- }
-
-
-err_out:
- mutex_unlock(&rio500_mutex);
- return retval;
-}
-
-static ssize_t
-write_rio(struct file *file, const char __user *buffer,
- size_t count, loff_t * ppos)
-{
- DEFINE_WAIT(wait);
- struct rio_usb_data *rio = &rio_instance;
-
- unsigned long copy_size;
- unsigned long bytes_written = 0;
- unsigned int partial;
-
- int result = 0;
- int maxretry;
- int errn = 0;
- int intr;
-
- intr = mutex_lock_interruptible(&rio500_mutex);
- if (intr)
- return -EINTR;
- /* Sanity check to make sure rio is connected, powered, etc */
- if (rio->present == 0 || rio->rio_dev == NULL) {
- mutex_unlock(&rio500_mutex);
- return -ENODEV;
- }
-
-
-
- do {
- unsigned long thistime;
- char *obuf = rio->obuf;
-
- thistime = copy_size =
- (count >= OBUF_SIZE) ? OBUF_SIZE : count;
- if (copy_from_user(rio->obuf, buffer, copy_size)) {
- errn = -EFAULT;
- goto error;
- }
- maxretry = 5;
- while (thistime) {
- if (!rio->rio_dev) {
- errn = -ENODEV;
- goto error;
- }
- if (signal_pending(current)) {
- mutex_unlock(&rio500_mutex);
- return bytes_written ? bytes_written : -EINTR;
- }
-
- result = usb_bulk_msg(rio->rio_dev,
- usb_sndbulkpipe(rio->rio_dev, 2),
- obuf, thistime, &partial, 5000);
-
- dev_dbg(&rio->rio_dev->dev,
- "write stats: result:%d thistime:%lu partial:%u\n",
- result, thistime, partial);
-
- if (result == -ETIMEDOUT) { /* NAK - so hold for a while */
- if (!maxretry--) {
- errn = -ETIME;
- goto error;
- }
- prepare_to_wait(&rio->wait_q, &wait, TASK_INTERRUPTIBLE);
- schedule_timeout(NAK_TIMEOUT);
- finish_wait(&rio->wait_q, &wait);
- continue;
- } else if (!result && partial) {
- obuf += partial;
- thistime -= partial;
- } else
- break;
- }
- if (result) {
- dev_err(&rio->rio_dev->dev, "Write Whoops - %x\n",
- result);
- errn = -EIO;
- goto error;
- }
- bytes_written += copy_size;
- count -= copy_size;
- buffer += copy_size;
- } while (count > 0);
-
- mutex_unlock(&rio500_mutex);
-
- return bytes_written ? bytes_written : -EIO;
-
-error:
- mutex_unlock(&rio500_mutex);
- return errn;
-}
-
-static ssize_t
-read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos)
-{
- DEFINE_WAIT(wait);
- struct rio_usb_data *rio = &rio_instance;
- ssize_t read_count;
- unsigned int partial;
- int this_read;
- int result;
- int maxretry = 10;
- char *ibuf;
- int intr;
-
- intr = mutex_lock_interruptible(&rio500_mutex);
- if (intr)
- return -EINTR;
- /* Sanity check to make sure rio is connected, powered, etc */
- if (rio->present == 0 || rio->rio_dev == NULL) {
- mutex_unlock(&rio500_mutex);
- return -ENODEV;
- }
-
- ibuf = rio->ibuf;
-
- read_count = 0;
-
-
- while (count > 0) {
- if (signal_pending(current)) {
- mutex_unlock(&rio500_mutex);
- return read_count ? read_count : -EINTR;
- }
- if (!rio->rio_dev) {
- mutex_unlock(&rio500_mutex);
- return -ENODEV;
- }
- this_read = (count >= IBUF_SIZE) ? IBUF_SIZE : count;
-
- result = usb_bulk_msg(rio->rio_dev,
- usb_rcvbulkpipe(rio->rio_dev, 1),
- ibuf, this_read, &partial,
- 8000);
-
- dev_dbg(&rio->rio_dev->dev,
- "read stats: result:%d this_read:%u partial:%u\n",
- result, this_read, partial);
-
- if (partial) {
- count = this_read = partial;
- } else if (result == -ETIMEDOUT || result == 15) { /* FIXME: 15 ??? */
- if (!maxretry--) {
- mutex_unlock(&rio500_mutex);
- dev_err(&rio->rio_dev->dev,
- "read_rio: maxretry timeout\n");
- return -ETIME;
- }
- prepare_to_wait(&rio->wait_q, &wait, TASK_INTERRUPTIBLE);
- schedule_timeout(NAK_TIMEOUT);
- finish_wait(&rio->wait_q, &wait);
- continue;
- } else if (result != -EREMOTEIO) {
- mutex_unlock(&rio500_mutex);
- dev_err(&rio->rio_dev->dev,
- "Read Whoops - result:%d partial:%u this_read:%u\n",
- result, partial, this_read);
- return -EIO;
- } else {
- mutex_unlock(&rio500_mutex);
- return (0);
- }
-
- if (this_read) {
- if (copy_to_user(buffer, ibuf, this_read)) {
- mutex_unlock(&rio500_mutex);
- return -EFAULT;
- }
- count -= this_read;
- read_count += this_read;
- buffer += this_read;
- }
- }
- mutex_unlock(&rio500_mutex);
- return read_count;
-}
-
-static const struct file_operations usb_rio_fops = {
- .owner = THIS_MODULE,
- .read = read_rio,
- .write = write_rio,
- .unlocked_ioctl = ioctl_rio,
- .open = open_rio,
- .release = close_rio,
- .llseek = noop_llseek,
-};
-
-static struct usb_class_driver usb_rio_class = {
- .name = "rio500%d",
- .fops = &usb_rio_fops,
- .minor_base = RIO_MINOR,
-};
-
-static int probe_rio(struct usb_interface *intf,
- const struct usb_device_id *id)
-{
- struct usb_device *dev = interface_to_usbdev(intf);
- struct rio_usb_data *rio = &rio_instance;
- int retval = -ENOMEM;
- char *ibuf, *obuf;
-
- if (rio->present) {
- dev_info(&intf->dev, "Second USB Rio at address %d refused\n", dev->devnum);
- return -EBUSY;
- }
- dev_info(&intf->dev, "USB Rio found at address %d\n", dev->devnum);
-
- obuf = kmalloc(OBUF_SIZE, GFP_KERNEL);
- if (!obuf) {
- dev_err(&dev->dev,
- "probe_rio: Not enough memory for the output buffer\n");
- goto err_obuf;
- }
- dev_dbg(&intf->dev, "obuf address: %p\n", obuf);
-
- ibuf = kmalloc(IBUF_SIZE, GFP_KERNEL);
- if (!ibuf) {
- dev_err(&dev->dev,
- "probe_rio: Not enough memory for the input buffer\n");
- goto err_ibuf;
- }
- dev_dbg(&intf->dev, "ibuf address: %p\n", ibuf);
-
- mutex_lock(&rio500_mutex);
- rio->rio_dev = dev;
- rio->ibuf = ibuf;
- rio->obuf = obuf;
- rio->present = 1;
- mutex_unlock(&rio500_mutex);
-
- retval = usb_register_dev(intf, &usb_rio_class);
- if (retval) {
- dev_err(&dev->dev,
- "Not able to get a minor for this device.\n");
- goto err_register;
- }
-
- usb_set_intfdata(intf, rio);
- return retval;
-
- err_register:
- mutex_lock(&rio500_mutex);
- rio->present = 0;
- mutex_unlock(&rio500_mutex);
- err_ibuf:
- kfree(obuf);
- err_obuf:
- return retval;
-}
-
-static void disconnect_rio(struct usb_interface *intf)
-{
- struct rio_usb_data *rio = usb_get_intfdata (intf);
-
- usb_set_intfdata (intf, NULL);
- if (rio) {
- usb_deregister_dev(intf, &usb_rio_class);
-
- mutex_lock(&rio500_mutex);
- if (rio->isopen) {
- rio->isopen = 0;
- /* better let it finish - the release will do whats needed */
- rio->rio_dev = NULL;
- mutex_unlock(&rio500_mutex);
- return;
- }
- kfree(rio->ibuf);
- kfree(rio->obuf);
-
- dev_info(&intf->dev, "USB Rio disconnected.\n");
-
- rio->present = 0;
- mutex_unlock(&rio500_mutex);
- }
-}
-
-static const struct usb_device_id rio_table[] = {
- { USB_DEVICE(0x0841, 1) }, /* Rio 500 */
- { } /* Terminating entry */
-};
-
-MODULE_DEVICE_TABLE (usb, rio_table);
-
-static struct usb_driver rio_driver = {
- .name = "rio500",
- .probe = probe_rio,
- .disconnect = disconnect_rio,
- .id_table = rio_table,
-};
-
-module_usb_driver(rio_driver);
-
-MODULE_AUTHOR( DRIVER_AUTHOR );
-MODULE_DESCRIPTION( DRIVER_DESC );
-MODULE_LICENSE("GPL");
-
diff --git a/drivers/usb/misc/rio500_usb.h b/drivers/usb/misc/rio500_usb.h
deleted file mode 100644
index 6db7a5863496..000000000000
--- a/drivers/usb/misc/rio500_usb.h
+++ /dev/null
@@ -1,20 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/* ----------------------------------------------------------------------
- Copyright (C) 2000 Cesar Miquel (miquel@df.uba.ar)
- ---------------------------------------------------------------------- */
-
-#define RIO_SEND_COMMAND 0x1
-#define RIO_RECV_COMMAND 0x2
-
-#define RIO_DIR_OUT 0x0
-#define RIO_DIR_IN 0x1
-
-struct RioCommand {
- short length;
- int request;
- int requesttype;
- int value;
- int index;
- void __user *buffer;
- int timeout;
-};
diff --git a/drivers/usb/misc/usblcd.c b/drivers/usb/misc/usblcd.c
index 9ba4a4e68d91..61e9e987fe4a 100644
--- a/drivers/usb/misc/usblcd.c
+++ b/drivers/usb/misc/usblcd.c
@@ -18,6 +18,7 @@
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/mutex.h>
+#include <linux/rwsem.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
@@ -29,16 +30,12 @@
#define IOCTL_GET_DRV_VERSION 2
-static DEFINE_MUTEX(lcd_mutex);
static const struct usb_device_id id_table[] = {
{ .idVendor = 0x10D2, .match_flags = USB_DEVICE_ID_MATCH_VENDOR, },
{ },
};
MODULE_DEVICE_TABLE(usb, id_table);
-static DEFINE_MUTEX(open_disc_mutex);
-
-
struct usb_lcd {
struct usb_device *udev; /* init: probe_lcd */
struct usb_interface *interface; /* the interface for
@@ -57,6 +54,8 @@ struct usb_lcd {
using up all RAM */
struct usb_anchor submitted; /* URBs to wait for
before suspend */
+ struct rw_semaphore io_rwsem;
+ unsigned long disconnected:1;
};
#define to_lcd_dev(d) container_of(d, struct usb_lcd, kref)
@@ -81,40 +80,29 @@ static int lcd_open(struct inode *inode, struct file *file)
struct usb_interface *interface;
int subminor, r;
- mutex_lock(&lcd_mutex);
subminor = iminor(inode);
interface = usb_find_interface(&lcd_driver, subminor);
if (!interface) {
- mutex_unlock(&lcd_mutex);
- printk(KERN_ERR "USBLCD: %s - error, can't find device for minor %d\n",
+ pr_err("USBLCD: %s - error, can't find device for minor %d\n",
__func__, subminor);
return -ENODEV;
}
- mutex_lock(&open_disc_mutex);
dev = usb_get_intfdata(interface);
- if (!dev) {
- mutex_unlock(&open_disc_mutex);
- mutex_unlock(&lcd_mutex);
- return -ENODEV;
- }
/* increment our usage count for the device */
kref_get(&dev->kref);
- mutex_unlock(&open_disc_mutex);
/* grab a power reference */
r = usb_autopm_get_interface(interface);
if (r < 0) {
kref_put(&dev->kref, lcd_delete);
- mutex_unlock(&lcd_mutex);
return r;
}
/* save our object in the file's private structure */
file->private_data = dev;
- mutex_unlock(&lcd_mutex);
return 0;
}
@@ -142,6 +130,13 @@ static ssize_t lcd_read(struct file *file, char __user * buffer,
dev = file->private_data;
+ down_read(&dev->io_rwsem);
+
+ if (dev->disconnected) {
+ retval = -ENODEV;
+ goto out_up_io;
+ }
+
/* do a blocking bulk read to get data from the device */
retval = usb_bulk_msg(dev->udev,
usb_rcvbulkpipe(dev->udev,
@@ -158,6 +153,9 @@ static ssize_t lcd_read(struct file *file, char __user * buffer,
retval = bytes_read;
}
+out_up_io:
+ up_read(&dev->io_rwsem);
+
return retval;
}
@@ -173,14 +171,12 @@ static long lcd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
switch (cmd) {
case IOCTL_GET_HARD_VERSION:
- mutex_lock(&lcd_mutex);
bcdDevice = le16_to_cpu((dev->udev)->descriptor.bcdDevice);
sprintf(buf, "%1d%1d.%1d%1d",
(bcdDevice & 0xF000)>>12,
(bcdDevice & 0xF00)>>8,
(bcdDevice & 0xF0)>>4,
(bcdDevice & 0xF));
- mutex_unlock(&lcd_mutex);
if (copy_to_user((void __user *)arg, buf, strlen(buf)) != 0)
return -EFAULT;
break;
@@ -237,11 +233,18 @@ static ssize_t lcd_write(struct file *file, const char __user * user_buffer,
if (r < 0)
return -EINTR;
+ down_read(&dev->io_rwsem);
+
+ if (dev->disconnected) {
+ retval = -ENODEV;
+ goto err_up_io;
+ }
+
/* create a urb, and a buffer for it, and copy the data to the urb */
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb) {
retval = -ENOMEM;
- goto err_no_buf;
+ goto err_up_io;
}
buf = usb_alloc_coherent(dev->udev, count, GFP_KERNEL,
@@ -278,6 +281,7 @@ static ssize_t lcd_write(struct file *file, const char __user * user_buffer,
the USB core will eventually free it entirely */
usb_free_urb(urb);
+ up_read(&dev->io_rwsem);
exit:
return count;
error_unanchor:
@@ -285,7 +289,8 @@ error_unanchor:
error:
usb_free_coherent(dev->udev, count, buf, urb->transfer_dma);
usb_free_urb(urb);
-err_no_buf:
+err_up_io:
+ up_read(&dev->io_rwsem);
up(&dev->limit_sem);
return retval;
}
@@ -325,6 +330,7 @@ static int lcd_probe(struct usb_interface *interface,
kref_init(&dev->kref);
sema_init(&dev->limit_sem, USB_LCD_CONCURRENT_WRITES);
+ init_rwsem(&dev->io_rwsem);
init_usb_anchor(&dev->submitted);
dev->udev = usb_get_dev(interface_to_usbdev(interface));
@@ -365,7 +371,6 @@ static int lcd_probe(struct usb_interface *interface,
/* something prevented us from registering this driver */
dev_err(&interface->dev,
"Not able to get a minor for this device.\n");
- usb_set_intfdata(interface, NULL);
goto error;
}
@@ -411,17 +416,18 @@ static int lcd_resume(struct usb_interface *intf)
static void lcd_disconnect(struct usb_interface *interface)
{
- struct usb_lcd *dev;
+ struct usb_lcd *dev = usb_get_intfdata(interface);
int minor = interface->minor;
- mutex_lock(&open_disc_mutex);
- dev = usb_get_intfdata(interface);
- usb_set_intfdata(interface, NULL);
- mutex_unlock(&open_disc_mutex);
-
/* give back our minor */
usb_deregister_dev(interface, &lcd_class);
+ down_write(&dev->io_rwsem);
+ dev->disconnected = 1;
+ up_write(&dev->io_rwsem);
+
+ usb_kill_anchored_urbs(&dev->submitted);
+
/* decrement our usage count */
kref_put(&dev->kref, lcd_delete);
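
The usblcd rework above trades two global mutexes for a per-device rw-semaphore. How the reader side pairs with disconnect, as a sketch — do_bulk_read() is a hypothetical helper standing in for the driver's usb_bulk_msg() call:

static ssize_t sample_read(struct usb_lcd *dev, char __user *buf,
			   size_t count)
{
	ssize_t ret;

	down_read(&dev->io_rwsem);	/* I/O paths may run concurrently */
	if (dev->disconnected) {
		ret = -ENODEV;
		goto out;
	}
	ret = do_bulk_read(dev, buf, count);	/* hypothetical helper */
out:
	up_read(&dev->io_rwsem);
	return ret;
}

static void sample_flag_disconnected(struct usb_lcd *dev)
{
	down_write(&dev->io_rwsem);	/* waits out all in-flight I/O */
	dev->disconnected = 1;
	up_write(&dev->io_rwsem);
}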
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index 6715a128e6c8..be0505b8b5d4 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -60,6 +60,7 @@ struct usb_yurex {
struct kref kref;
struct mutex io_mutex;
+ unsigned long disconnected:1;
struct fasync_struct *async_queue;
wait_queue_head_t waitq;
@@ -107,6 +108,7 @@ static void yurex_delete(struct kref *kref)
dev->int_buffer, dev->urb->transfer_dma);
usb_free_urb(dev->urb);
}
+ usb_put_intf(dev->interface);
usb_put_dev(dev->udev);
kfree(dev);
}
@@ -132,6 +134,7 @@ static void yurex_interrupt(struct urb *urb)
switch (status) {
case 0: /*success*/
break;
+ /* The device is terminated or messed up, give up */
case -EOVERFLOW:
dev_err(&dev->interface->dev,
"%s - overflow with length %d, actual length is %d\n",
@@ -140,12 +143,13 @@ static void yurex_interrupt(struct urb *urb)
case -ENOENT:
case -ESHUTDOWN:
case -EILSEQ:
- /* The device is terminated, clean up */
+ case -EPROTO:
+ case -ETIME:
return;
default:
dev_err(&dev->interface->dev,
"%s - unknown status received: %d\n", __func__, status);
- goto exit;
+ return;
}
/* handle received message */
@@ -177,7 +181,6 @@ static void yurex_interrupt(struct urb *urb)
break;
}
-exit:
retval = usb_submit_urb(dev->urb, GFP_ATOMIC);
if (retval) {
dev_err(&dev->interface->dev, "%s - usb_submit_urb failed: %d\n",
@@ -204,7 +207,7 @@ static int yurex_probe(struct usb_interface *interface, const struct usb_device_
init_waitqueue_head(&dev->waitq);
dev->udev = usb_get_dev(interface_to_usbdev(interface));
- dev->interface = interface;
+ dev->interface = usb_get_intf(interface);
/* set up the endpoint information */
iface_desc = interface->cur_altsetting;
@@ -315,8 +318,9 @@ static void yurex_disconnect(struct usb_interface *interface)
/* prevent more I/O from starting */
usb_poison_urb(dev->urb);
+ usb_poison_urb(dev->cntl_urb);
mutex_lock(&dev->io_mutex);
- dev->interface = NULL;
+ dev->disconnected = 1;
mutex_unlock(&dev->io_mutex);
/* wakeup waiters */
@@ -404,7 +408,7 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
dev = file->private_data;
mutex_lock(&dev->io_mutex);
- if (!dev->interface) { /* already disconnected */
+ if (dev->disconnected) { /* already disconnected */
mutex_unlock(&dev->io_mutex);
return -ENODEV;
}
@@ -439,7 +443,7 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
goto error;
mutex_lock(&dev->io_mutex);
- if (!dev->interface) { /* already disconnected */
+ if (dev->disconnected) { /* already disconnected */
mutex_unlock(&dev->io_mutex);
retval = -ENODEV;
goto error;
diff --git a/drivers/usb/renesas_usbhs/common.h b/drivers/usb/renesas_usbhs/common.h
index d1a0a35ecfff..0824099b905e 100644
--- a/drivers/usb/renesas_usbhs/common.h
+++ b/drivers/usb/renesas_usbhs/common.h
@@ -211,6 +211,7 @@ struct usbhs_priv;
/* DCPCTR */
#define BSTS (1 << 15) /* Buffer Status */
#define SUREQ (1 << 14) /* Sending SETUP Token */
+#define INBUFM (1 << 14) /* (PIPEnCTR) Transfer Buffer Monitor */
#define CSSTS (1 << 12) /* CSSTS Status */
#define ACLRM (1 << 9) /* Buffer Auto-Clear Mode */
#define SQCLR (1 << 8) /* Toggle Bit Clear */
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index 2a01ceb71641..86637cd066cf 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -89,7 +89,7 @@ static void __usbhsf_pkt_del(struct usbhs_pkt *pkt)
list_del_init(&pkt->node);
}
-static struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe)
+struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe)
{
return list_first_entry_or_null(&pipe->list, struct usbhs_pkt, node);
}
diff --git a/drivers/usb/renesas_usbhs/fifo.h b/drivers/usb/renesas_usbhs/fifo.h
index 88d1816bcda2..c3d3cc35cee0 100644
--- a/drivers/usb/renesas_usbhs/fifo.h
+++ b/drivers/usb/renesas_usbhs/fifo.h
@@ -97,5 +97,6 @@ void usbhs_pkt_push(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt,
void *buf, int len, int zero, int sequence);
struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt);
void usbhs_pkt_start(struct usbhs_pipe *pipe);
+struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe);
#endif /* RENESAS_USB_FIFO_H */
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index 4d571a5205e2..e5ef56991dba 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -722,8 +722,7 @@ static int __usbhsg_ep_set_halt_wedge(struct usb_ep *ep, int halt, int wedge)
struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
struct device *dev = usbhsg_gpriv_to_dev(gpriv);
unsigned long flags;
-
- usbhsg_pipe_disable(uep);
+ int ret = 0;
dev_dbg(dev, "set halt %d (pipe %d)\n",
halt, usbhs_pipe_number(pipe));
@@ -731,6 +730,18 @@ static int __usbhsg_ep_set_halt_wedge(struct usb_ep *ep, int halt, int wedge)
/******************** spin lock ********************/
usbhs_lock(priv, flags);
+ /*
+ * According to usb_ep_set_halt()'s description, this function should
+ * return -EAGAIN if the IN endpoint has any queued packets or data.
+ * Note that usbhs_pipe_is_dir_in() returns false if the pipe is an
+ * IN endpoint in gadget mode.
+ */
+ if (!usbhs_pipe_is_dir_in(pipe) && (__usbhsf_pkt_get(pipe) ||
+ usbhs_pipe_contains_transmittable_data(pipe))) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
if (halt)
usbhs_pipe_stall(pipe);
else
@@ -741,10 +752,11 @@ static int __usbhsg_ep_set_halt_wedge(struct usb_ep *ep, int halt, int wedge)
else
usbhsg_status_clr(gpriv, USBHSG_STATUS_WEDGE);
+out:
usbhs_unlock(priv, flags);
/******************** spin unlock ******************/
- return 0;
+ return ret;
}
static int usbhsg_ep_set_halt(struct usb_ep *ep, int value)
diff --git a/drivers/usb/renesas_usbhs/pipe.c b/drivers/usb/renesas_usbhs/pipe.c
index c4922b96c93b..9e5afdde1adb 100644
--- a/drivers/usb/renesas_usbhs/pipe.c
+++ b/drivers/usb/renesas_usbhs/pipe.c
@@ -277,6 +277,21 @@ int usbhs_pipe_is_accessible(struct usbhs_pipe *pipe)
return -EBUSY;
}
+bool usbhs_pipe_contains_transmittable_data(struct usbhs_pipe *pipe)
+{
+ u16 val;
+
+ /* Not supported for DCP pipes */
+ if (usbhs_pipe_is_dcp(pipe))
+ return false;
+
+ val = usbhsp_pipectrl_get(pipe);
+ if (val & INBUFM)
+ return true;
+
+ return false;
+}
+
/*
* PID ctrl
*/
diff --git a/drivers/usb/renesas_usbhs/pipe.h b/drivers/usb/renesas_usbhs/pipe.h
index 3080423e600c..3b130529408b 100644
--- a/drivers/usb/renesas_usbhs/pipe.h
+++ b/drivers/usb/renesas_usbhs/pipe.h
@@ -83,6 +83,7 @@ void usbhs_pipe_clear(struct usbhs_pipe *pipe);
void usbhs_pipe_clear_without_sequence(struct usbhs_pipe *pipe,
int needs_bfre, int bfre_enable);
int usbhs_pipe_is_accessible(struct usbhs_pipe *pipe);
+bool usbhs_pipe_contains_transmittable_data(struct usbhs_pipe *pipe);
void usbhs_pipe_enable(struct usbhs_pipe *pipe);
void usbhs_pipe_disable(struct usbhs_pipe *pipe);
void usbhs_pipe_stall(struct usbhs_pipe *pipe);
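
The mod_gadget.c hunk above makes set_halt honor the -EAGAIN rule documented for usb_ep_set_halt(). Stripped of locking and wedge handling, the added logic reduces to this sketch; the usbhs_* helpers are the driver's own, used exactly as in the diff:

static int sample_set_halt(struct usbhs_pipe *pipe, int halt)
{
	/*
	 * usbhs_pipe_is_dir_in() returns false for a gadget-mode IN
	 * endpoint, hence the negation: refuse to change the halt
	 * state while packets are queued or the FIFO holds data.
	 */
	if (!usbhs_pipe_is_dir_in(pipe) &&
	    (__usbhsf_pkt_get(pipe) ||
	     usbhs_pipe_contains_transmittable_data(pipe)))
		return -EAGAIN;

	if (halt)
		usbhs_pipe_stall(pipe);
	else
		usbhs_pipe_disable(pipe);

	return 0;
}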
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index f0688c44b04c..25e81faf4c24 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1030,6 +1030,9 @@ static const struct usb_device_id id_table_combined[] = {
/* EZPrototypes devices */
{ USB_DEVICE(EZPROTOTYPES_VID, HJELMSLUND_USB485_ISO_PID) },
{ USB_DEVICE_INTERFACE_NUMBER(UNJO_VID, UNJO_ISODEBUG_V1_PID, 1) },
+ /* Sienna devices */
+ { USB_DEVICE(FTDI_VID, FTDI_SIENNA_PID) },
+ { USB_DEVICE(ECHELON_VID, ECHELON_U20_PID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index f12d806220b4..22d66217cb41 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -39,6 +39,9 @@
#define FTDI_LUMEL_PD12_PID 0x6002
+/* Sienna Serial Interface by Secyourit GmbH */
+#define FTDI_SIENNA_PID 0x8348
+
/* Cyber Cortex AV by Fabulous Silicon (http://fabuloussilicon.com) */
#define CYBER_CORTEX_AV_PID 0x8698
@@ -689,6 +692,12 @@
#define BANDB_ZZ_PROG1_USB_PID 0xBA02
/*
+ * Echelon USB Serial Interface
+ */
+#define ECHELON_VID 0x0920
+#define ECHELON_U20_PID 0x7500
+
+/*
* Intrepid Control Systems (http://www.intrepidcs.com/) ValueCAN and NeoVI
*/
#define INTREPID_VID 0x093C
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index d34779fe4a8d..e66a59ef43a1 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -1741,8 +1741,8 @@ static struct urb *keyspan_setup_urb(struct usb_serial *serial, int endpoint,
ep_desc = find_ep(serial, endpoint);
if (!ep_desc) {
- /* leak the urb, something's wrong and the callers don't care */
- return urb;
+ usb_free_urb(urb);
+ return NULL;
}
if (usb_endpoint_xfer_int(ep_desc)) {
ep_type_name = "INT";
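
The keyspan fix above replaces "return urb" on the missing-endpoint path, which leaked the freshly allocated URB, with usb_free_urb() and a NULL return. A hedged sketch of the allocate-validate-free-on-error pattern (the helper name and arguments are illustrative):

	#include <linux/usb.h>

	/* Illustrative only: allocate, validate, and free on every early
	 * error path rather than handing back a half-initialized URB. */
	static struct urb *alloc_urb_for_ep(struct usb_endpoint_descriptor *ep_desc)
	{
		struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);

		if (!urb)
			return NULL;

		if (!ep_desc) {
			usb_free_urb(urb);	/* previously leaked */
			return NULL;
		}

		return urb;
	}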
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 38e920ac7f82..06ab016be0b6 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -419,6 +419,7 @@ static void option_instat_callback(struct urb *urb);
#define CINTERION_PRODUCT_PH8_AUDIO 0x0083
#define CINTERION_PRODUCT_AHXX_2RMNET 0x0084
#define CINTERION_PRODUCT_AHXX_AUDIO 0x0085
+#define CINTERION_PRODUCT_CLS8 0x00b0
/* Olivetti products */
#define OLIVETTI_VENDOR_ID 0x0b3c
@@ -1154,6 +1155,14 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff),
.driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1050, 0xff), /* Telit FN980 (rmnet) */
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1051, 0xff), /* Telit FN980 (MBIM) */
+ .driver_info = NCTRL(0) | RSVD(1) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1052, 0xff), /* Telit FN980 (RNDIS) */
+ .driver_info = NCTRL(2) | RSVD(3) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1053, 0xff), /* Telit FN980 (ECM) */
+ .driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
@@ -1847,6 +1856,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_2RMNET, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_AUDIO, 0xff) },
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_CLS8, 0xff),
+ .driver_info = RSVD(0) | RSVD(4) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) },
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index dd0ad67aa71e..ef23acc9b9ce 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -776,7 +776,6 @@ static void ti_close(struct usb_serial_port *port)
struct ti_port *tport;
int port_number;
int status;
- int do_unlock;
unsigned long flags;
tdev = usb_get_serial_data(port->serial);
@@ -800,16 +799,13 @@ static void ti_close(struct usb_serial_port *port)
"%s - cannot send close port command, %d\n"
, __func__, status);
- /* if mutex_lock is interrupted, continue anyway */
- do_unlock = !mutex_lock_interruptible(&tdev->td_open_close_lock);
- --tport->tp_tdev->td_open_port_count;
- if (tport->tp_tdev->td_open_port_count <= 0) {
+ mutex_lock(&tdev->td_open_close_lock);
+ --tdev->td_open_port_count;
+ if (tdev->td_open_port_count == 0) {
/* last port is closed, shut down interrupt urb */
usb_kill_urb(port->serial->port[0]->interrupt_in_urb);
- tport->tp_tdev->td_open_port_count = 0;
}
- if (do_unlock)
- mutex_unlock(&tdev->td_open_close_lock);
+ mutex_unlock(&tdev->td_open_close_lock);
}
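
In ti_close() above, mutex_lock_interruptible() made little sense: close() cannot be retried, so an interrupted lock simply skipped the unlock and let the open-port count drift. The patch takes the lock unconditionally and lets the count reach exactly zero. A reduced sketch, with the td_* fields assumed from the driver's private struct:

	/* Reduced sketch of the last-close accounting after the change. */
	static void last_close_shutdown(struct ti_device *tdev,
					struct urb *interrupt_in_urb)
	{
		mutex_lock(&tdev->td_open_close_lock);	/* close() must not bail out */
		if (--tdev->td_open_port_count == 0)
			usb_kill_urb(interrupt_in_urb);	/* last open port is gone */
		mutex_unlock(&tdev->td_open_close_lock);
	}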
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index a3179fea38c8..8f066bb55d7d 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -314,10 +314,7 @@ static void serial_cleanup(struct tty_struct *tty)
serial = port->serial;
owner = serial->type->driver.owner;
- mutex_lock(&serial->disc_mutex);
- if (!serial->disconnected)
- usb_autopm_put_interface(serial->interface);
- mutex_unlock(&serial->disc_mutex);
+ usb_autopm_put_interface(serial->interface);
usb_serial_put(serial);
module_put(owner);
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 96562744101c..5f61d9977a15 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -4409,18 +4409,20 @@ static int tcpm_fw_get_caps(struct tcpm_port *port,
/* USB data support is optional */
ret = fwnode_property_read_string(fwnode, "data-role", &cap_str);
if (ret == 0) {
- port->typec_caps.data = typec_find_port_data_role(cap_str);
- if (port->typec_caps.data < 0)
- return -EINVAL;
+ ret = typec_find_port_data_role(cap_str);
+ if (ret < 0)
+ return ret;
+ port->typec_caps.data = ret;
}
ret = fwnode_property_read_string(fwnode, "power-role", &cap_str);
if (ret < 0)
return ret;
- port->typec_caps.type = typec_find_port_power_role(cap_str);
- if (port->typec_caps.type < 0)
- return -EINVAL;
+ ret = typec_find_port_power_role(cap_str);
+ if (ret < 0)
+ return ret;
+ port->typec_caps.type = ret;
port->port_type = port->typec_caps.type;
if (port->port_type == TYPEC_PORT_SNK)
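
The tcpm hunk above fixes a sign bug: typec_find_port_data_role() and typec_find_port_power_role() return a negative errno on failure, but the typec_caps .data/.type members are enum fields, so assigning first and testing afterwards could never observe the error. The fix tests the signed return value before the store; a sketch assuming the in-tree struct layout:

	/* Sketch: keep the lookup result in a signed int, test it, and only
	 * then store it in the enum-typed capability field. */
	static int get_data_role(struct tcpm_port *port, const char *cap_str)
	{
		int ret = typec_find_port_data_role(cap_str);

		if (ret < 0)
			return ret;	/* visible here, lost after the store */

		port->typec_caps.data = ret;
		return 0;
	}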
diff --git a/drivers/usb/typec/ucsi/displayport.c b/drivers/usb/typec/ucsi/displayport.c
index 6c103697c582..d99700cb4dca 100644
--- a/drivers/usb/typec/ucsi/displayport.c
+++ b/drivers/usb/typec/ucsi/displayport.c
@@ -75,6 +75,8 @@ static int ucsi_displayport_enter(struct typec_altmode *alt)
if (cur != 0xff) {
mutex_unlock(&dp->con->lock);
+ if (dp->con->port_altmode[cur] == alt)
+ return 0;
return -EBUSY;
}
diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
index 907e20e1a71e..d772fce51905 100644
--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
+++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
@@ -195,7 +195,6 @@ struct ucsi_ccg {
/* fw build with vendor information */
u16 fw_build;
- bool run_isr; /* flag to call ISR routine during resume */
struct work_struct pm_work;
};
@@ -224,18 +223,6 @@ static int ccg_read(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len)
if (quirks && quirks->max_read_len)
max_read_len = quirks->max_read_len;
- if (uc->fw_build == CCG_FW_BUILD_NVIDIA &&
- uc->fw_version <= CCG_OLD_FW_VERSION) {
- mutex_lock(&uc->lock);
- /*
- * Do not schedule pm_work to run ISR in
- * ucsi_ccg_runtime_resume() after pm_runtime_get_sync()
- * since we are already in ISR path.
- */
- uc->run_isr = false;
- mutex_unlock(&uc->lock);
- }
-
pm_runtime_get_sync(uc->dev);
while (rem_len > 0) {
msgs[1].buf = &data[len - rem_len];
@@ -278,18 +265,6 @@ static int ccg_write(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len)
msgs[0].len = len + sizeof(rab);
msgs[0].buf = buf;
- if (uc->fw_build == CCG_FW_BUILD_NVIDIA &&
- uc->fw_version <= CCG_OLD_FW_VERSION) {
- mutex_lock(&uc->lock);
- /*
- * Do not schedule pm_work to run ISR in
- * ucsi_ccg_runtime_resume() after pm_runtime_get_sync()
- * since we are already in ISR path.
- */
- uc->run_isr = false;
- mutex_unlock(&uc->lock);
- }
-
pm_runtime_get_sync(uc->dev);
status = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
if (status < 0) {
@@ -1130,7 +1105,6 @@ static int ucsi_ccg_probe(struct i2c_client *client,
uc->ppm.sync = ucsi_ccg_sync;
uc->dev = dev;
uc->client = client;
- uc->run_isr = true;
mutex_init(&uc->lock);
INIT_WORK(&uc->work, ccg_update_firmware);
INIT_WORK(&uc->pm_work, ccg_pm_workaround_work);
@@ -1188,6 +1162,8 @@ static int ucsi_ccg_probe(struct i2c_client *client,
pm_runtime_set_active(uc->dev);
pm_runtime_enable(uc->dev);
+ pm_runtime_use_autosuspend(uc->dev);
+ pm_runtime_set_autosuspend_delay(uc->dev, 5000);
pm_runtime_idle(uc->dev);
return 0;
@@ -1229,7 +1205,6 @@ static int ucsi_ccg_runtime_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct ucsi_ccg *uc = i2c_get_clientdata(client);
- bool schedule = true;
/*
* Firmware version 3.1.10 or earlier, built for NVIDIA has known issue
@@ -1237,17 +1212,8 @@ static int ucsi_ccg_runtime_resume(struct device *dev)
* Schedule a work to call ISR as a workaround.
*/
if (uc->fw_build == CCG_FW_BUILD_NVIDIA &&
- uc->fw_version <= CCG_OLD_FW_VERSION) {
- mutex_lock(&uc->lock);
- if (!uc->run_isr) {
- uc->run_isr = true;
- schedule = false;
- }
- mutex_unlock(&uc->lock);
-
- if (schedule)
- schedule_work(&uc->pm_work);
- }
+ uc->fw_version <= CCG_OLD_FW_VERSION)
+ schedule_work(&uc->pm_work);
return 0;
}
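
With the run_isr bookkeeping removed, ccg_read()/ccg_write() simply take a runtime-PM reference around each transfer, and probe opts in to autosuspend with a five-second delay. The usual bring-up order is sketched below, assuming the device is already powered and bound:

	#include <linux/pm_runtime.h>

	/* Typical runtime-PM bring-up with autosuspend, as used above. */
	static void runtime_pm_setup(struct device *dev)
	{
		pm_runtime_set_active(dev);			/* device is powered */
		pm_runtime_enable(dev);
		pm_runtime_use_autosuspend(dev);
		pm_runtime_set_autosuspend_delay(dev, 5000);	/* 5 s idle window */
		pm_runtime_idle(dev);				/* allow first suspend */
	}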
diff --git a/drivers/usb/usb-skeleton.c b/drivers/usb/usb-skeleton.c
index c31d17d05810..2dc58766273a 100644
--- a/drivers/usb/usb-skeleton.c
+++ b/drivers/usb/usb-skeleton.c
@@ -61,6 +61,7 @@ struct usb_skel {
spinlock_t err_lock; /* lock for errors */
struct kref kref;
struct mutex io_mutex; /* synchronize I/O with disconnect */
+ unsigned long disconnected:1;
wait_queue_head_t bulk_in_wait; /* to wait for an ongoing read */
};
#define to_skel_dev(d) container_of(d, struct usb_skel, kref)
@@ -73,6 +74,7 @@ static void skel_delete(struct kref *kref)
struct usb_skel *dev = to_skel_dev(kref);
usb_free_urb(dev->bulk_in_urb);
+ usb_put_intf(dev->interface);
usb_put_dev(dev->udev);
kfree(dev->bulk_in_buffer);
kfree(dev);
@@ -124,10 +126,7 @@ static int skel_release(struct inode *inode, struct file *file)
return -ENODEV;
/* allow the device to be autosuspended */
- mutex_lock(&dev->io_mutex);
- if (dev->interface)
- usb_autopm_put_interface(dev->interface);
- mutex_unlock(&dev->io_mutex);
+ usb_autopm_put_interface(dev->interface);
/* decrement the count on our device */
kref_put(&dev->kref, skel_delete);
@@ -231,8 +230,7 @@ static ssize_t skel_read(struct file *file, char *buffer, size_t count,
dev = file->private_data;
- /* if we cannot read at all, return EOF */
- if (!dev->bulk_in_urb || !count)
+ if (!count)
return 0;
/* no concurrent readers */
@@ -240,7 +238,7 @@ static ssize_t skel_read(struct file *file, char *buffer, size_t count,
if (rv < 0)
return rv;
- if (!dev->interface) { /* disconnect() was called */
+ if (dev->disconnected) { /* disconnect() was called */
rv = -ENODEV;
goto exit;
}
@@ -422,7 +420,7 @@ static ssize_t skel_write(struct file *file, const char *user_buffer,
/* this lock makes sure we don't submit URBs to gone devices */
mutex_lock(&dev->io_mutex);
- if (!dev->interface) { /* disconnect() was called */
+ if (dev->disconnected) { /* disconnect() was called */
mutex_unlock(&dev->io_mutex);
retval = -ENODEV;
goto error;
@@ -507,7 +505,7 @@ static int skel_probe(struct usb_interface *interface,
init_waitqueue_head(&dev->bulk_in_wait);
dev->udev = usb_get_dev(interface_to_usbdev(interface));
- dev->interface = interface;
+ dev->interface = usb_get_intf(interface);
/* set up the endpoint information */
/* use only the first bulk-in and bulk-out endpoints */
@@ -573,9 +571,10 @@ static void skel_disconnect(struct usb_interface *interface)
/* prevent more I/O from starting */
mutex_lock(&dev->io_mutex);
- dev->interface = NULL;
+ dev->disconnected = 1;
mutex_unlock(&dev->io_mutex);
+ usb_kill_urb(dev->bulk_in_urb);
usb_kill_anchored_urbs(&dev->submitted);
/* decrement our usage count */
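
The usb-skeleton changes above switch disconnect signalling from NULLing dev->interface to a dedicated disconnected bit, and pin the interface with usb_get_intf()/usb_put_intf() so the pointer stays valid for usb_autopm_put_interface() even after disconnect. A minimal sketch of that pairing, with the struct trimmed to the fields involved:

	/* Minimal sketch of the interface-lifetime pairing. */
	struct skel {
		struct usb_interface	*interface;
		struct kref		kref;
		unsigned long		disconnected:1;
	};

	static void bind(struct skel *dev, struct usb_interface *intf)
	{
		kref_init(&dev->kref);
		dev->interface = usb_get_intf(intf);	/* take a reference ... */
	}

	static void release(struct kref *kref)
	{
		struct skel *dev = container_of(kref, struct skel, kref);

		usb_put_intf(dev->interface);		/* ... drop it on last put */
		kfree(dev);
	}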
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index 585a84d319bd..65850e9c7190 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -1195,12 +1195,12 @@ static int vhci_start(struct usb_hcd *hcd)
if (id == 0 && usb_hcd_is_primary_hcd(hcd)) {
err = vhci_init_attr_group();
if (err) {
- pr_err("init attr group\n");
+ dev_err(hcd_dev(hcd), "init attr group failed, err = %d\n", err);
return err;
}
err = sysfs_create_group(&hcd_dev(hcd)->kobj, &vhci_attr_group);
if (err) {
- pr_err("create sysfs files\n");
+ dev_err(hcd_dev(hcd), "create sysfs files failed, err = %d\n", err);
vhci_finish_attr_group();
return err;
}
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 96fddc1dafc3..d864277ea16f 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -1658,7 +1658,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
struct bus_type *bus = NULL;
int ret;
bool resv_msi, msi_remap;
- phys_addr_t resv_msi_base;
+ phys_addr_t resv_msi_base = 0;
struct iommu_domain_geometry geo;
LIST_HEAD(iova_copy);
LIST_HEAD(group_resv_regions);
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index 7804869c6a31..056308008288 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -161,6 +161,7 @@ static int vhost_test_release(struct inode *inode, struct file *f)
vhost_test_stop(n, &private);
vhost_test_flush(n);
+ vhost_dev_stop(&n->dev);
vhost_dev_cleanup(&n->dev);
/* We do an extra flush before freeing memory,
* since jobs can re-queue themselves. */
@@ -237,6 +238,7 @@ static long vhost_test_reset_owner(struct vhost_test *n)
}
vhost_test_stop(n, &priv);
vhost_test_flush(n);
+ vhost_dev_stop(&n->dev);
vhost_dev_reset_owner(&n->dev, umem);
done:
mutex_unlock(&n->dev.mutex);
diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
index 08ad0d1f0476..a0a2d74967ef 100644
--- a/drivers/vhost/vringh.c
+++ b/drivers/vhost/vringh.c
@@ -852,6 +852,12 @@ static inline int xfer_kern(void *src, void *dst, size_t len)
return 0;
}
+static inline int kern_xfer(void *dst, void *src, size_t len)
+{
+ memcpy(dst, src, len);
+ return 0;
+}
+
/**
* vringh_init_kern - initialize a vringh for a kernelspace vring.
* @vrh: the vringh to initialize.
@@ -958,7 +964,7 @@ EXPORT_SYMBOL(vringh_iov_pull_kern);
ssize_t vringh_iov_push_kern(struct vringh_kiov *wiov,
const void *src, size_t len)
{
- return vringh_iov_xfer(wiov, (void *)src, len, xfer_kern);
+ return vringh_iov_xfer(wiov, (void *)src, len, kern_xfer);
}
EXPORT_SYMBOL(vringh_iov_push_kern);
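
The vringh fix above is a pure argument-order bug: vringh_iov_xfer() passes the ring-side address as the callback's first argument, so the pull path wants (src, dst) while the push path wants (dst, src). Reusing xfer_kern() on the push side therefore copied in the wrong direction; kern_xfer() mirrors it:

	/* The shared callback type takes the ring-side address first, so the
	 * two kernel-space helpers differ only in which argument is memcpy's
	 * destination. */
	static inline int xfer_kern(void *src, void *dst, size_t len)
	{
		memcpy(dst, src, len);	/* pull: ring buffer -> caller */
		return 0;
	}

	static inline int kern_xfer(void *dst, void *src, size_t len)
	{
		memcpy(dst, src, len);	/* push: caller -> ring buffer */
		return 0;
	}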
diff --git a/drivers/video/logo/Makefile b/drivers/video/logo/Makefile
index 228a89b9bdd1..16f60c1e1766 100644
--- a/drivers/video/logo/Makefile
+++ b/drivers/video/logo/Makefile
@@ -18,23 +18,6 @@ obj-$(CONFIG_SPU_BASE) += logo_spe_clut224.o
# How to generate logo's
-# Use logo-cfiles to retrieve list of .c files to be built
-logo-cfiles = $(notdir $(patsubst %.$(2), %.c, \
- $(wildcard $(srctree)/$(src)/*$(1).$(2))))
-
-
-# Mono logos
-extra-y += $(call logo-cfiles,_mono,pbm)
-
-# VGA16 logos
-extra-y += $(call logo-cfiles,_vga16,ppm)
-
-# 224 Logos
-extra-y += $(call logo-cfiles,_clut224,ppm)
-
-# Gray 256
-extra-y += $(call logo-cfiles,_gray256,pgm)
-
pnmtologo := scripts/pnmtologo
# Create commands like "pnmtologo -t mono -n logo_mac_mono -o ..."
@@ -55,5 +38,5 @@ $(obj)/%_clut224.c: $(src)/%_clut224.ppm $(pnmtologo) FORCE
$(obj)/%_gray256.c: $(src)/%_gray256.pgm $(pnmtologo) FORCE
$(call if_changed,logo)
-# Files generated that shall be removed upon make clean
-clean-files := *.o *_mono.c *_vga16.c *_clut224.c *_gray256.c
+# generated C files
+targets += *_mono.c *_vga16.c *_clut224.c *_gray256.c
diff --git a/drivers/virt/vboxguest/vboxguest_utils.c b/drivers/virt/vboxguest/vboxguest_utils.c
index 75fd140b02ff..43c391626a00 100644
--- a/drivers/virt/vboxguest/vboxguest_utils.c
+++ b/drivers/virt/vboxguest/vboxguest_utils.c
@@ -220,6 +220,8 @@ static int hgcm_call_preprocess_linaddr(
if (!bounce_buf)
return -ENOMEM;
+ *bounce_buf_ret = bounce_buf;
+
if (copy_in) {
ret = copy_from_user(bounce_buf, (void __user *)buf, len);
if (ret)
@@ -228,7 +230,6 @@ static int hgcm_call_preprocess_linaddr(
memset(bounce_buf, 0, len);
}
- *bounce_buf_ret = bounce_buf;
hgcm_call_add_pagelist_size(bounce_buf, len, extra);
return 0;
}
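
The vboxguest change publishes the bounce buffer through *bounce_buf_ret before the fallible copy_from_user(), so the caller's common error path can free it when the copy fails instead of leaking it. The general shape, with illustrative names:

	/* Illustrative: hand ownership to the caller before any step that can
	 * fail, so one cleanup path covers every outcome. */
	static int prep_bounce_buf(void **bounce_ret, const void __user *ubuf,
				   size_t len)
	{
		void *bounce = kvmalloc(len, GFP_KERNEL);

		if (!bounce)
			return -ENOMEM;
		*bounce_ret = bounce;		/* caller owns it from here on */

		if (copy_from_user(bounce, ubuf, len))
			return -EFAULT;		/* caller frees *bounce_ret */
		return 0;
	}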
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index bdc08244a648..a8041e451e9e 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -1499,9 +1499,6 @@ static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
* counter first before updating event flags.
*/
virtio_wmb(vq->weak_barriers);
- } else {
- used_idx = vq->last_used_idx;
- wrap_counter = vq->packed.used_wrap_counter;
}
if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
@@ -1518,7 +1515,9 @@ static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
*/
virtio_mb(vq->weak_barriers);
- if (is_used_desc_packed(vq, used_idx, wrap_counter)) {
+ if (is_used_desc_packed(vq,
+ vq->last_used_idx,
+ vq->packed.used_wrap_counter)) {
END_USE(vq);
return false;
}
diff --git a/drivers/w1/slaves/Kconfig b/drivers/w1/slaves/Kconfig
index ebed495b9e69..b7847636501d 100644
--- a/drivers/w1/slaves/Kconfig
+++ b/drivers/w1/slaves/Kconfig
@@ -103,6 +103,7 @@ config W1_SLAVE_DS2438
config W1_SLAVE_DS250X
tristate "512b/1kb/16kb EPROM family support"
+ select CRC16
help
Say Y here if you want to use a 1-wire
512b/1kb/16kb EPROM family device (DS250x).
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 4e11de6cde81..5bae515c8e25 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -156,8 +156,10 @@ static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
(GFP_HIGHUSER | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC)
/* balloon_append: add the given page to the balloon. */
-static void __balloon_append(struct page *page)
+static void balloon_append(struct page *page)
{
+ __SetPageOffline(page);
+
/* Lowmem is re-populated first, so highmem pages go at list tail. */
if (PageHighMem(page)) {
list_add_tail(&page->lru, &ballooned_pages);
@@ -169,11 +171,6 @@ static void __balloon_append(struct page *page)
wake_up(&balloon_wq);
}
-static void balloon_append(struct page *page)
-{
- __balloon_append(page);
-}
-
/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
static struct page *balloon_retrieve(bool require_lowmem)
{
@@ -192,6 +189,7 @@ static struct page *balloon_retrieve(bool require_lowmem)
else
balloon_stats.balloon_low--;
+ __ClearPageOffline(page);
return page;
}
@@ -377,8 +375,7 @@ static void xen_online_page(struct page *page, unsigned int order)
for (i = 0; i < size; i++) {
p = pfn_to_page(start_pfn + i);
__online_page_set_limits(p);
- __SetPageOffline(p);
- __balloon_append(p);
+ balloon_append(p);
}
mutex_unlock(&balloon_mutex);
}
@@ -444,7 +441,6 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
xenmem_reservation_va_mapping_update(1, &page, &frame_list[i]);
/* Relinquish the page back to the allocator. */
- __ClearPageOffline(page);
free_reserved_page(page);
}
@@ -471,7 +467,6 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
state = BP_EAGAIN;
break;
}
- __SetPageOffline(page);
adjust_managed_page_count(page, -1);
xenmem_reservation_scrub_page(page);
list_add(&page->lru, &pages);
@@ -611,7 +606,6 @@ int alloc_xenballooned_pages(int nr_pages, struct page **pages)
while (pgno < nr_pages) {
page = balloon_retrieve(true);
if (page) {
- __ClearPageOffline(page);
pages[pgno++] = page;
#ifdef CONFIG_XEN_HAVE_PVMMU
/*
@@ -653,10 +647,8 @@ void free_xenballooned_pages(int nr_pages, struct page **pages)
mutex_lock(&balloon_mutex);
for (i = 0; i < nr_pages; i++) {
- if (pages[i]) {
- __SetPageOffline(pages[i]);
+ if (pages[i])
balloon_append(pages[i]);
- }
}
balloon_stats.target_unpopulated -= nr_pages;
@@ -674,7 +666,6 @@ static void __init balloon_add_region(unsigned long start_pfn,
unsigned long pages)
{
unsigned long pfn, extra_pfn_end;
- struct page *page;
/*
* If the amount of usable memory has been limited (e.g., with
@@ -684,11 +675,10 @@ static void __init balloon_add_region(unsigned long start_pfn,
extra_pfn_end = min(max_pfn, start_pfn + pages);
for (pfn = start_pfn; pfn < extra_pfn_end; pfn++) {
- page = pfn_to_page(pfn);
/* totalram_pages and totalhigh_pages do not
include the boot-time balloon extension, so
don't subtract from it. */
- __balloon_append(page);
+ balloon_append(pfn_to_page(pfn));
}
balloon_stats.total_pages += extra_pfn_end - start_pfn;
diff --git a/drivers/xen/efi.c b/drivers/xen/efi.c
index 89d60f8e3c18..d1ff2186ebb4 100644
--- a/drivers/xen/efi.c
+++ b/drivers/xen/efi.c
@@ -40,7 +40,7 @@
#define efi_data(op) (op.u.efi_runtime_call)
-efi_status_t xen_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
+static efi_status_t xen_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
{
struct xen_platform_op op = INIT_EFI_OP(get_time);
@@ -61,9 +61,8 @@ efi_status_t xen_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
return efi_data(op).status;
}
-EXPORT_SYMBOL_GPL(xen_efi_get_time);
-efi_status_t xen_efi_set_time(efi_time_t *tm)
+static efi_status_t xen_efi_set_time(efi_time_t *tm)
{
struct xen_platform_op op = INIT_EFI_OP(set_time);
@@ -75,10 +74,10 @@ efi_status_t xen_efi_set_time(efi_time_t *tm)
return efi_data(op).status;
}
-EXPORT_SYMBOL_GPL(xen_efi_set_time);
-efi_status_t xen_efi_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
- efi_time_t *tm)
+static efi_status_t xen_efi_get_wakeup_time(efi_bool_t *enabled,
+ efi_bool_t *pending,
+ efi_time_t *tm)
{
struct xen_platform_op op = INIT_EFI_OP(get_wakeup_time);
@@ -98,9 +97,8 @@ efi_status_t xen_efi_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
return efi_data(op).status;
}
-EXPORT_SYMBOL_GPL(xen_efi_get_wakeup_time);
-efi_status_t xen_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
+static efi_status_t xen_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
{
struct xen_platform_op op = INIT_EFI_OP(set_wakeup_time);
@@ -117,11 +115,10 @@ efi_status_t xen_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
return efi_data(op).status;
}
-EXPORT_SYMBOL_GPL(xen_efi_set_wakeup_time);
-efi_status_t xen_efi_get_variable(efi_char16_t *name, efi_guid_t *vendor,
- u32 *attr, unsigned long *data_size,
- void *data)
+static efi_status_t xen_efi_get_variable(efi_char16_t *name, efi_guid_t *vendor,
+ u32 *attr, unsigned long *data_size,
+ void *data)
{
struct xen_platform_op op = INIT_EFI_OP(get_variable);
@@ -141,11 +138,10 @@ efi_status_t xen_efi_get_variable(efi_char16_t *name, efi_guid_t *vendor,
return efi_data(op).status;
}
-EXPORT_SYMBOL_GPL(xen_efi_get_variable);
-efi_status_t xen_efi_get_next_variable(unsigned long *name_size,
- efi_char16_t *name,
- efi_guid_t *vendor)
+static efi_status_t xen_efi_get_next_variable(unsigned long *name_size,
+ efi_char16_t *name,
+ efi_guid_t *vendor)
{
struct xen_platform_op op = INIT_EFI_OP(get_next_variable_name);
@@ -165,11 +161,10 @@ efi_status_t xen_efi_get_next_variable(unsigned long *name_size,
return efi_data(op).status;
}
-EXPORT_SYMBOL_GPL(xen_efi_get_next_variable);
-efi_status_t xen_efi_set_variable(efi_char16_t *name, efi_guid_t *vendor,
- u32 attr, unsigned long data_size,
- void *data)
+static efi_status_t xen_efi_set_variable(efi_char16_t *name, efi_guid_t *vendor,
+ u32 attr, unsigned long data_size,
+ void *data)
{
struct xen_platform_op op = INIT_EFI_OP(set_variable);
@@ -186,11 +181,10 @@ efi_status_t xen_efi_set_variable(efi_char16_t *name, efi_guid_t *vendor,
return efi_data(op).status;
}
-EXPORT_SYMBOL_GPL(xen_efi_set_variable);
-efi_status_t xen_efi_query_variable_info(u32 attr, u64 *storage_space,
- u64 *remaining_space,
- u64 *max_variable_size)
+static efi_status_t xen_efi_query_variable_info(u32 attr, u64 *storage_space,
+ u64 *remaining_space,
+ u64 *max_variable_size)
{
struct xen_platform_op op = INIT_EFI_OP(query_variable_info);
@@ -208,9 +202,8 @@ efi_status_t xen_efi_query_variable_info(u32 attr, u64 *storage_space,
return efi_data(op).status;
}
-EXPORT_SYMBOL_GPL(xen_efi_query_variable_info);
-efi_status_t xen_efi_get_next_high_mono_count(u32 *count)
+static efi_status_t xen_efi_get_next_high_mono_count(u32 *count)
{
struct xen_platform_op op = INIT_EFI_OP(get_next_high_monotonic_count);
@@ -221,10 +214,9 @@ efi_status_t xen_efi_get_next_high_mono_count(u32 *count)
return efi_data(op).status;
}
-EXPORT_SYMBOL_GPL(xen_efi_get_next_high_mono_count);
-efi_status_t xen_efi_update_capsule(efi_capsule_header_t **capsules,
- unsigned long count, unsigned long sg_list)
+static efi_status_t xen_efi_update_capsule(efi_capsule_header_t **capsules,
+ unsigned long count, unsigned long sg_list)
{
struct xen_platform_op op = INIT_EFI_OP(update_capsule);
@@ -241,11 +233,9 @@ efi_status_t xen_efi_update_capsule(efi_capsule_header_t **capsules,
return efi_data(op).status;
}
-EXPORT_SYMBOL_GPL(xen_efi_update_capsule);
-efi_status_t xen_efi_query_capsule_caps(efi_capsule_header_t **capsules,
- unsigned long count, u64 *max_size,
- int *reset_type)
+static efi_status_t xen_efi_query_capsule_caps(efi_capsule_header_t **capsules,
+ unsigned long count, u64 *max_size, int *reset_type)
{
struct xen_platform_op op = INIT_EFI_OP(query_capsule_capabilities);
@@ -264,10 +254,9 @@ efi_status_t xen_efi_query_capsule_caps(efi_capsule_header_t **capsules,
return efi_data(op).status;
}
-EXPORT_SYMBOL_GPL(xen_efi_query_capsule_caps);
-void xen_efi_reset_system(int reset_type, efi_status_t status,
- unsigned long data_size, efi_char16_t *data)
+static void xen_efi_reset_system(int reset_type, efi_status_t status,
+ unsigned long data_size, efi_char16_t *data)
{
switch (reset_type) {
case EFI_RESET_COLD:
@@ -281,4 +270,25 @@ void xen_efi_reset_system(int reset_type, efi_status_t status,
BUG();
}
}
-EXPORT_SYMBOL_GPL(xen_efi_reset_system);
+
+/*
+ * Set the Xen EFI runtime services function pointers. Other fields of
+ * struct efi, e.g. efi.systab, are set up as in the normal EFI path.
+ */
+void __init xen_efi_runtime_setup(void)
+{
+ efi.get_time = xen_efi_get_time;
+ efi.set_time = xen_efi_set_time;
+ efi.get_wakeup_time = xen_efi_get_wakeup_time;
+ efi.set_wakeup_time = xen_efi_set_wakeup_time;
+ efi.get_variable = xen_efi_get_variable;
+ efi.get_next_variable = xen_efi_get_next_variable;
+ efi.set_variable = xen_efi_set_variable;
+ efi.set_variable_nonblocking = xen_efi_set_variable;
+ efi.query_variable_info = xen_efi_query_variable_info;
+ efi.query_variable_info_nonblocking = xen_efi_query_variable_info;
+ efi.update_capsule = xen_efi_update_capsule;
+ efi.query_capsule_caps = xen_efi_query_capsule_caps;
+ efi.get_next_high_mono_count = xen_efi_get_next_high_mono_count;
+ efi.reset_system = xen_efi_reset_system;
+}
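
All of the xen_efi_* implementations above lose their EXPORT_SYMBOL_GPL() and become static; the only exported surface is xen_efi_runtime_setup(), which fills the global efi ops table. A reduced sketch of that pattern (two callbacks shown for brevity, names illustrative):

	/* Reduced sketch: keep the implementations static and expose a single
	 * __init hook that installs them into the global efi table. */
	static efi_status_t my_get_time(efi_time_t *tm, efi_time_cap_t *tc);
	static efi_status_t my_set_time(efi_time_t *tm);

	void __init my_runtime_setup(void)
	{
		efi.get_time = my_get_time;
		efi.set_time = my_set_time;
		/* ... remaining callbacks as in xen_efi_runtime_setup() ... */
	}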
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index a446a7221e13..81401f386c9c 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -22,6 +22,7 @@
#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
+#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -34,9 +35,6 @@
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/refcount.h>
-#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
-#include <linux/of_device.h>
-#endif
#include <xen/xen.h>
#include <xen/grant_table.h>
@@ -625,14 +623,7 @@ static int gntdev_open(struct inode *inode, struct file *flip)
flip->private_data = priv;
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
priv->dma_dev = gntdev_miscdev.this_device;
-
- /*
- * The device is not spawn from a device tree, so arch_setup_dma_ops
- * is not called, thus leaving the device with dummy DMA ops.
- * Fix this by calling of_dma_configure() with a NULL node to set
- * default DMA ops.
- */
- of_dma_configure(priv->dma_dev, NULL, true);
+ dma_coerce_mask_and_coherent(priv->dma_dev, DMA_BIT_MASK(64));
#endif
pr_debug("priv %p\n", priv);
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 7ea6fb6a2e5d..49b381e104ef 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -1363,8 +1363,7 @@ static int gnttab_setup(void)
if (xen_feature(XENFEAT_auto_translated_physmap) && gnttab_shared.addr == NULL) {
gnttab_shared.addr = xen_auto_xlat_grant_frames.vaddr;
if (gnttab_shared.addr == NULL) {
- pr_warn("gnttab share frames (addr=0x%08lx) is not mapped!\n",
- (unsigned long)xen_auto_xlat_grant_frames.vaddr);
+ pr_warn("gnttab share frames is not mapped!\n");
return -ENOMEM;
}
}
diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
index 69a626b0e594..c57c71b7d53d 100644
--- a/drivers/xen/pvcalls-back.c
+++ b/drivers/xen/pvcalls-back.c
@@ -775,7 +775,7 @@ static int pvcalls_back_poll(struct xenbus_device *dev,
mappass->reqcopy = *req;
icsk = inet_csk(mappass->sock->sk);
queue = &icsk->icsk_accept_queue;
- data = queue->rskq_accept_head != NULL;
+ data = READ_ONCE(queue->rskq_accept_head) != NULL;
if (data) {
mappass->reqcopy.cmd = 0;
ret = 0;
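
pvcalls_back_poll() reads rskq_accept_head without holding the listener's queue lock, so the patch wraps the load in READ_ONCE() to keep the compiler from tearing or refetching it. The lockless-reader shape, assuming the in-tree request_sock_queue layout:

	/* Lockless reader: one annotated load; the writer updates the head
	 * under its own lock (or with WRITE_ONCE()). */
	static bool accept_queue_nonempty(struct request_sock_queue *queue)
	{
		return READ_ONCE(queue->rskq_accept_head) != NULL;
	}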
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 08adc590f631..597af455a522 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -55,6 +55,7 @@
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/miscdevice.h>
+#include <linux/workqueue.h>
#include <xen/xenbus.h>
#include <xen/xen.h>
@@ -116,6 +117,8 @@ struct xenbus_file_priv {
wait_queue_head_t read_waitq;
struct kref kref;
+
+ struct work_struct wq;
};
/* Read out any raw xenbus messages queued up. */
@@ -300,14 +303,14 @@ static void watch_fired(struct xenbus_watch *watch,
mutex_unlock(&adap->dev_data->reply_mutex);
}
-static void xenbus_file_free(struct kref *kref)
+static void xenbus_worker(struct work_struct *wq)
{
struct xenbus_file_priv *u;
struct xenbus_transaction_holder *trans, *tmp;
struct watch_adapter *watch, *tmp_watch;
struct read_buffer *rb, *tmp_rb;
- u = container_of(kref, struct xenbus_file_priv, kref);
+ u = container_of(wq, struct xenbus_file_priv, wq);
/*
* No need for locking here because there are no other users,
@@ -333,6 +336,18 @@ static void xenbus_file_free(struct kref *kref)
kfree(u);
}
+static void xenbus_file_free(struct kref *kref)
+{
+ struct xenbus_file_priv *u;
+
+ /*
+	 * We might be called from xenbus_thread(), where freeing inline
+	 * could deadlock; defer the teardown to a workqueue instead.
+ */
+ u = container_of(kref, struct xenbus_file_priv, kref);
+ schedule_work(&u->wq);
+}
+
static struct xenbus_transaction_holder *xenbus_get_transaction(
struct xenbus_file_priv *u, uint32_t tx_id)
{
@@ -650,6 +665,7 @@ static int xenbus_file_open(struct inode *inode, struct file *filp)
INIT_LIST_HEAD(&u->watches);
INIT_LIST_HEAD(&u->read_buffers);
init_waitqueue_head(&u->read_waitq);
+ INIT_WORK(&u->wq, xenbus_worker);
mutex_init(&u->reply_mutex);
mutex_init(&u->msgbuffer_mutex);